author    Benoit Germain <bnt.germain@gmail.com>  2024-04-14 18:27:10 +0200
committer Benoit Germain <bnt.germain@gmail.com>  2024-04-14 18:27:10 +0200
commit    69d40c81d8343a1af7e0fe61fbf20a4cf5880c25 (patch)
tree      cbf7aa525868040820ce6743f1a30fbb59926407 /src
parent    0d9c9bae120f92274e1c68f7abdebfcf2c24405d (diff)
parent    00970610dc8fbd00a11d3b69e4702933a592ce9f (diff)
Merge branch 'master' of https://github.com/LuaLanes/lanes
Diffstat (limited to 'src')
-rw-r--r--  src/cancel.cpp            213
-rw-r--r--  src/cancel.h                8
-rw-r--r--  src/compat.cpp             42
-rw-r--r--  src/compat.h               27
-rw-r--r--  src/deep.cpp               53
-rw-r--r--  src/deep.h                 12
-rw-r--r--  src/keeper.cpp            153
-rw-r--r--  src/keeper.h               39
-rw-r--r--  src/lanes.cpp             650
-rw-r--r--  src/lanes.h                 2
-rw-r--r--  src/lanes.lua             456
-rw-r--r--  src/lanes_private.h        74
-rw-r--r--  src/linda.cpp             153
-rw-r--r--  src/macros_and_utils.h     50
-rw-r--r--  src/state.cpp             103
-rw-r--r--  src/state.h                 7
-rw-r--r--  src/threading.cpp         887
-rw-r--r--  src/threading.h           262
-rw-r--r--  src/threading_osx.h        10
-rw-r--r--  src/tools.cpp             691
-rw-r--r--  src/tools.h                33
-rw-r--r--  src/uniquekey.h             2
-rw-r--r--  src/universe.cpp           37
-rw-r--r--  src/universe.h             34
24 files changed, 1638 insertions(+), 2360 deletions(-)
diff --git a/src/cancel.cpp b/src/cancel.cpp
index 4667f07..b3e52b6 100644
--- a/src/cancel.cpp
+++ b/src/cancel.cpp
@@ -1,6 +1,6 @@
1/* 1/*
2-- 2--
3-- CANCEL.C 3-- CANCEL.CPP
4-- 4--
5-- Lane cancellation support 5-- Lane cancellation support
6-- 6--
@@ -9,7 +9,7 @@
9--[[ 9--[[
10=============================================================================== 10===============================================================================
11 11
12Copyright (C) 2011-2019 Benoit Germain <bnt.germain@gmail.com> 12Copyright (C) 2011-2024 Benoit Germain <bnt.germain@gmail.com>
13 13
14Permission is hereby granted, free of charge, to any person obtaining a copy 14Permission is hereby granted, free of charge, to any person obtaining a copy
15of this software and associated documentation files (the "Software"), to deal 15of this software and associated documentation files (the "Software"), to deal
@@ -33,13 +33,11 @@ THE SOFTWARE.
33]]-- 33]]--
34*/ 34*/
35 35
36#include <assert.h> 36#include "cancel.h"
37#include <string.h>
38 37
38#include "lanes_private.h"
39#include "threading.h" 39#include "threading.h"
40#include "cancel.h"
41#include "tools.h" 40#include "tools.h"
42#include "lanes_private.h"
43 41
44// ################################################################################################ 42// ################################################################################################
45// ################################################################################################ 43// ################################################################################################
@@ -53,7 +51,7 @@ THE SOFTWARE.
53* Returns CANCEL_SOFT/HARD if any locks are to be exited, and 'raise_cancel_error()' called, 51* Returns CANCEL_SOFT/HARD if any locks are to be exited, and 'raise_cancel_error()' called,
54* to make execution of the lane end. 52* to make execution of the lane end.
55*/ 53*/
56static inline CancelRequest cancel_test(lua_State* L) 54[[nodiscard]] static inline CancelRequest cancel_test(lua_State* L)
57{ 55{
58 Lane* const lane{ LANE_POINTER_REGKEY.readLightUserDataValue<Lane>(L) }; 56 Lane* const lane{ LANE_POINTER_REGKEY.readLightUserDataValue<Lane>(L) };
59 // 'lane' is nullptr for the original main state (and no-one can cancel that) 57 // 'lane' is nullptr for the original main state (and no-one can cancel that)
@@ -78,7 +76,7 @@ LUAG_FUNC( cancel_test)
78// ################################################################################################ 76// ################################################################################################
79// ################################################################################################ 77// ################################################################################################
80 78
81static void cancel_hook(lua_State* L, [[maybe_unused]] lua_Debug* ar) 79[[nodiscard]] static void cancel_hook(lua_State* L, [[maybe_unused]] lua_Debug* ar)
82{ 80{
83 DEBUGSPEW_CODE(fprintf(stderr, "cancel_hook\n")); 81 DEBUGSPEW_CODE(fprintf(stderr, "cancel_hook\n"));
84 if (cancel_test(L) != CancelRequest::None) 82 if (cancel_test(L) != CancelRequest::None)
@@ -92,7 +90,7 @@ static void cancel_hook(lua_State* L, [[maybe_unused]] lua_Debug* ar)
92// ################################################################################################ 90// ################################################################################################
93 91
94//--- 92//---
95// = thread_cancel( lane_ud [,timeout_secs=0.0] [,force_kill_bool=false] ) 93// = thread_cancel( lane_ud [,timeout_secs=0.0] [,wake_lindas_bool=false] )
96// 94//
97// The originator thread asking us specifically to cancel the other thread. 95// The originator thread asking us specifically to cancel the other thread.
98// 96//
@@ -100,88 +98,58 @@ static void cancel_hook(lua_State* L, [[maybe_unused]] lua_Debug* ar)
100// 0.0: just signal it to cancel, no time waited 98// 0.0: just signal it to cancel, no time waited
101// >0: time to wait for the lane to detect cancellation 99// >0: time to wait for the lane to detect cancellation
102// 100//
103// 'force_kill': if true, and lane does not detect cancellation within timeout, 101// 'wake_lindas_bool': if true, signal any linda the thread is waiting on
104// it is forcefully killed. Using this with 0.0 timeout means just kill 102// instead of waiting for its timeout (if any)
105// (unless the lane is already finished).
106// 103//
107// Returns: true if the lane was already finished (DONE/ERROR_ST/CANCELLED) or if we 104// Returns: true if the lane was already finished (Done/Error/Cancelled) or if we
108// managed to cancel it. 105// managed to cancel it.
109// false if the cancellation timed out, or a kill was needed. 106// false if the cancellation timed out, or a kill was needed.
110// 107//
111 108
112// ################################################################################################ 109// ################################################################################################
113 110
114static CancelResult thread_cancel_soft(Lane* lane_, double secs_, bool wake_lindas_) 111[[nodiscard]] static CancelResult thread_cancel_soft(Lane* lane_, lua_Duration duration_, bool wake_lane_)
115{ 112{
116 lane_->cancel_request = CancelRequest::Soft; // it's now signaled to stop 113 lane_->cancel_request = CancelRequest::Soft; // it's now signaled to stop
117 // negative timeout: we don't want to truly abort the lane, we just want it to react to cancel_test() on its own 114 // negative timeout: we don't want to truly abort the lane, we just want it to react to cancel_test() on its own
118 if (wake_lindas_) // wake the thread so that execution returns from any pending linda operation if desired 115 if (wake_lane_) // wake the thread so that execution returns from any pending linda operation if desired
119 { 116 {
120 SIGNAL_T* const waiting_on{ lane_->waiting_on }; 117 std::condition_variable* const waiting_on{ lane_->m_waiting_on };
121 if (lane_->status == WAITING && waiting_on != nullptr) 118 if (lane_->m_status == Lane::Waiting && waiting_on != nullptr)
122 { 119 {
123 SIGNAL_ALL( waiting_on); 120 waiting_on->notify_all();
124 } 121 }
125 } 122 }
126 123
127 return THREAD_WAIT(&lane_->thread, secs_, &lane_->done_signal, &lane_->done_lock, &lane_->status) ? CancelResult::Cancelled : CancelResult::Timeout; 124 return lane_->waitForCompletion(duration_) ? CancelResult::Cancelled : CancelResult::Timeout;
128} 125}
129 126
130// ################################################################################################ 127// ################################################################################################
131 128
132static CancelResult thread_cancel_hard(lua_State* L, Lane* lane_, double secs_, bool force_, double waitkill_timeout_) 129[[nodiscard]] static CancelResult thread_cancel_hard(Lane* lane_, lua_Duration duration_, bool wake_lane_)
133{ 130{
134 lane_->cancel_request = CancelRequest::Hard; // it's now signaled to stop 131 lane_->cancel_request = CancelRequest::Hard; // it's now signaled to stop
132 //lane_->m_thread.get_stop_source().request_stop();
133 if (wake_lane_) // wake the thread so that execution returns from any pending linda operation if desired
135 { 134 {
136 SIGNAL_T* waiting_on = lane_->waiting_on; 135 std::condition_variable* waiting_on = lane_->m_waiting_on;
137 if (lane_->status == WAITING && waiting_on != nullptr) 136 if (lane_->m_status == Lane::Waiting && waiting_on != nullptr)
138 { 137 {
139 SIGNAL_ALL( waiting_on); 138 waiting_on->notify_all();
140 } 139 }
141 } 140 }
142 141
143 CancelResult result{ THREAD_WAIT(&lane_->thread, secs_, &lane_->done_signal, &lane_->done_lock, &lane_->status) ? CancelResult::Cancelled : CancelResult::Timeout }; 142 CancelResult result{ lane_->waitForCompletion(duration_) ? CancelResult::Cancelled : CancelResult::Timeout };
144
145 if ((result == CancelResult::Timeout) && force_)
146 {
147 // Killing is asynchronous; we _will_ wait for it to be done at
148 // GC, to make sure the data structure can be released (alternative
149 // would be use of "cancellation cleanup handlers" that at least
150 // PThread seems to have).
151 //
152 THREAD_KILL(&lane_->thread);
153#if THREADAPI == THREADAPI_PTHREAD
154 // pthread: make sure the thread is really stopped!
155 // note that this may block forever if the lane doesn't call a cancellation point and pthread doesn't honor PTHREAD_CANCEL_ASYNCHRONOUS
156 result = THREAD_WAIT(&lane_->thread, waitkill_timeout_, &lane_->done_signal, &lane_->done_lock, &lane_->status) ? CancelResult::Killed : CancelResult::Timeout;
157 if (result == CancelResult::Timeout)
158 {
159 std::ignore = luaL_error( L, "force-killed lane failed to terminate within %f second%s", waitkill_timeout_, waitkill_timeout_ > 1 ? "s" : "");
160 }
161#else
162 (void) waitkill_timeout_; // unused
163 (void) L; // unused
164#endif // THREADAPI == THREADAPI_PTHREAD
165 lane_->mstatus = Lane::Killed; // mark 'gc' to wait for it
166 // note that lane_->status value must remain to whatever it was at the time of the kill
167 // because we need to know if we can lua_close() the Lua State or not.
168 result = CancelResult::Killed;
169 }
170 return result; 143 return result;
171} 144}
172 145
173// ################################################################################################ 146// ################################################################################################
174 147
175CancelResult thread_cancel(lua_State* L, Lane* lane_, CancelOp op_, double secs_, bool force_, double waitkill_timeout_) 148CancelResult thread_cancel(Lane* lane_, CancelOp op_, int hook_count_, lua_Duration duration_, bool wake_lane_)
176{ 149{
177 // remember that lanes are not transferable: only one thread can cancel a lane, so no multithreading issue here 150 // remember that lanes are not transferable: only one thread can cancel a lane, so no multithreading issue here
178 // We can read 'lane_->status' without locks, but not wait for it (if Posix no PTHREAD_TIMEDJOIN) 151 // We can read 'lane_->status' without locks, but not wait for it (if Posix no PTHREAD_TIMEDJOIN)
179 if (lane_->mstatus == Lane::Killed) 152 if (lane_->m_status >= Lane::Done)
180 {
181 return CancelResult::Killed;
182 }
183
184 if (lane_->status >= DONE)
185 { 153 {
186 // say "ok" by default, including when lane is already done 154 // say "ok" by default, including when lane is already done
187 return CancelResult::Cancelled; 155 return CancelResult::Cancelled;
@@ -191,52 +159,61 @@ CancelResult thread_cancel(lua_State* L, Lane* lane_, CancelOp op_, double secs_
191 // let us hope we never land here with a pointer on a linda that has been destroyed... 159 // let us hope we never land here with a pointer on a linda that has been destroyed...
192 if (op_ == CancelOp::Soft) 160 if (op_ == CancelOp::Soft)
193 { 161 {
194 return thread_cancel_soft(lane_, secs_, force_); 162 return thread_cancel_soft(lane_, duration_, wake_lane_);
163 }
164 else if (static_cast<int>(op_) > static_cast<int>(CancelOp::Soft))
165 {
166 lua_sethook(lane_->L, cancel_hook, static_cast<int>(op_), hook_count_);
195 } 167 }
196 168
197 return thread_cancel_hard(L, lane_, secs_, force_, waitkill_timeout_); 169 return thread_cancel_hard(lane_, duration_, wake_lane_);
198} 170}
199 171
200// ################################################################################################ 172// ################################################################################################
201// ################################################################################################ 173// ################################################################################################
202 174
203// > 0: the mask 175CancelOp which_cancel_op(char const* op_string_)
204// = 0: soft 176{
205// < 0: hard 177 CancelOp op{ CancelOp::Invalid };
206static CancelOp which_op(lua_State* L, int idx_) 178 if (strcmp(op_string_, "hard") == 0)
179 {
180 op = CancelOp::Hard;
181 }
182 else if (strcmp(op_string_, "soft") == 0)
183 {
184 op = CancelOp::Soft;
185 }
186 else if (strcmp(op_string_, "call") == 0)
187 {
188 op = CancelOp::MaskCall;
189 }
190 else if (strcmp(op_string_, "ret") == 0)
191 {
192 op = CancelOp::MaskRet;
193 }
194 else if (strcmp(op_string_, "line") == 0)
195 {
196 op = CancelOp::MaskLine;
197 }
198 else if (strcmp(op_string_, "count") == 0)
199 {
200 op = CancelOp::MaskCount;
201 }
202 return op;
203}
204
205// ################################################################################################
206
207[[nodiscard]] static CancelOp which_cancel_op(lua_State* L, int idx_)
207{ 208{
208 if (lua_type(L, idx_) == LUA_TSTRING) 209 if (lua_type(L, idx_) == LUA_TSTRING)
209 { 210 {
210 CancelOp op{ CancelOp::Invalid }; 211 char const* const str{ lua_tostring(L, idx_) };
211 char const* str = lua_tostring(L, idx_); 212 CancelOp op{ which_cancel_op(str) };
212 if (strcmp(str, "hard") == 0)
213 {
214 op = CancelOp::Hard;
215 }
216 else if (strcmp(str, "soft") == 0)
217 {
218 op = CancelOp::Soft;
219 }
220 else if (strcmp(str, "call") == 0)
221 {
222 op = CancelOp::MaskCall;
223 }
224 else if (strcmp(str, "ret") == 0)
225 {
226 op = CancelOp::MaskRet;
227 }
228 else if (strcmp(str, "line") == 0)
229 {
230 op = CancelOp::MaskLine;
231 }
232 else if (strcmp(str, "count") == 0)
233 {
234 op = CancelOp::MaskCount;
235 }
236 lua_remove(L, idx_); // argument is processed, remove it 213 lua_remove(L, idx_); // argument is processed, remove it
237 if (op == CancelOp::Invalid) 214 if (op == CancelOp::Invalid)
238 { 215 {
239 std::ignore = luaL_error(L, "invalid hook option %s", str); 216 luaL_error(L, "invalid hook option %s", str); // doesn't return
240 } 217 }
241 return op; 218 return op;
242 } 219 }
@@ -245,53 +222,61 @@ static CancelOp which_op(lua_State* L, int idx_)
245 222
246// ################################################################################################ 223// ################################################################################################
247 224
248// bool[,reason] = lane_h:cancel( [mode, hookcount] [, timeout] [, force [, forcekill_timeout]]) 225// bool[,reason] = lane_h:cancel( [mode, hookcount] [, timeout] [, wake_lindas])
249LUAG_FUNC(thread_cancel) 226LUAG_FUNC(thread_cancel)
250{ 227{
251 Lane* const lane{ lua_toLane(L, 1) }; 228 Lane* const lane{ lua_toLane(L, 1) };
252 CancelOp const op{ which_op(L, 2) }; // this removes the op string from the stack 229 CancelOp const op{ which_cancel_op(L, 2) }; // this removes the op string from the stack
253 230
231 int hook_count{ 0 };
254 if (static_cast<int>(op) > static_cast<int>(CancelOp::Soft)) // hook is requested 232 if (static_cast<int>(op) > static_cast<int>(CancelOp::Soft)) // hook is requested
255 { 233 {
256 int const hook_count{ static_cast<int>(lua_tointeger(L, 2)) }; 234 hook_count = static_cast<int>(luaL_checkinteger(L, 2));
257 lua_remove(L, 2); // argument is processed, remove it 235 lua_remove(L, 2); // argument is processed, remove it
258 if (hook_count < 1) 236 if (hook_count < 1)
259 { 237 {
260 return luaL_error(L, "hook count cannot be < 1"); 238 return luaL_error(L, "hook count cannot be < 1");
261 } 239 }
262 lua_sethook(lane->L, cancel_hook, static_cast<int>(op), hook_count);
263 } 240 }
264 241
265 double secs{ 0.0 }; 242 lua_Duration wait_timeout{ 0.0 };
266 if (lua_type(L, 2) == LUA_TNUMBER) 243 if (lua_type(L, 2) == LUA_TNUMBER)
267 { 244 {
268 secs = lua_tonumber(L, 2); 245 wait_timeout = lua_Duration{ lua_tonumber(L, 2) };
269 lua_remove(L, 2); // argument is processed, remove it 246 lua_remove(L, 2); // argument is processed, remove it
270 if (secs < 0.0) 247 if (wait_timeout.count() < 0.0)
271 { 248 {
272 return luaL_error(L, "cancel timeout cannot be < 0"); 249 return luaL_error(L, "cancel timeout cannot be < 0");
273 } 250 }
274 } 251 }
275 252 // we wake by default in "hard" mode (remember that hook is hard too), but this can be turned off if desired
276 bool const force{ lua_toboolean(L, 2) ? true : false }; // false if nothing there 253 bool wake_lane{ op != CancelOp::Soft };
277 double const forcekill_timeout{ luaL_optnumber(L, 3, 0.0) }; 254 if (lua_gettop(L) >= 2)
278 switch (thread_cancel(L, lane, op, secs, force, forcekill_timeout))
279 { 255 {
256 if (!lua_isboolean(L, 2))
257 {
258 return luaL_error(L, "wake_lindas parameter is not a boolean");
259 }
260 wake_lane = lua_toboolean(L, 2);
261 lua_remove(L, 2); // argument is processed, remove it
262 }
263 STACK_CHECK_START_REL(L, 0);
264 switch (thread_cancel(lane, op, hook_count, wait_timeout, wake_lane))
265 {
266 default: // should never happen unless we added a case and forgot to handle it
267 ASSERT_L(false);
268 break;
269
280 case CancelResult::Timeout: 270 case CancelResult::Timeout:
281 lua_pushboolean(L, 0); 271 lua_pushboolean(L, 0); // false
282 lua_pushstring(L, "timeout"); 272 lua_pushstring(L, "timeout"); // false "timeout"
283 return 2; 273 break;
284 274
285 case CancelResult::Cancelled: 275 case CancelResult::Cancelled:
286 lua_pushboolean(L, 1); 276 lua_pushboolean(L, 1); // true
287 push_thread_status(L, lane); 277 push_thread_status(L, lane); // true status
288 return 2; 278 break;
289
290 case CancelResult::Killed:
291 lua_pushboolean(L, 1);
292 push_thread_status(L, lane);
293 return 2;
294 } 279 }
295 // should never happen, only here to prevent the compiler from complaining of "not all control paths returning a value" 280 STACK_CHECK(L, 2);
296 return 0; 281 return 2;
297} 282}
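
Note on the cancel.cpp changes above: the force-kill path is gone, and lane_h:cancel() now takes an optional wake flag instead of force/forcekill_timeout, per the updated comment (bool[,reason] = lane_h:cancel([mode, hookcount] [, timeout] [, wake_lindas])). A minimal Lua sketch of the new call forms, assuming a lane handle 'h' obtained elsewhere from lanes.gen(); the handle and the concrete values are illustrative only, not part of this commit:

    local ok, reason = h:cancel("soft", 1.0, true)  -- soft cancel, wait up to 1 second, wake any pending linda wait
    local done       = h:cancel("hard", 0.5)        -- hard cancel; waking the lane is on by default in hard mode
    local hooked     = h:cancel("count", 100, 0.0)  -- hard cancel via a cancel_test hook run every 100 VM instructions, no wait
    if not ok then
        print("cancel timed out:", reason)          -- false, "timeout" when the lane did not stop within the timeout
    end
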
diff --git a/src/cancel.h b/src/cancel.h
index 884e193..060edb3 100644
--- a/src/cancel.h
+++ b/src/cancel.h
@@ -13,6 +13,8 @@ extern "C" {
13#include "uniquekey.h" 13#include "uniquekey.h"
14#include "macros_and_utils.h" 14#include "macros_and_utils.h"
15 15
16#include <chrono>
17
16// ################################################################################################ 18// ################################################################################################
17 19
18class Lane; // forward 20class Lane; // forward
@@ -30,8 +32,7 @@ enum class CancelRequest
30enum class CancelResult 32enum class CancelResult
31{ 33{
32 Timeout, 34 Timeout,
33 Cancelled, 35 Cancelled
34 Killed
35}; 36};
36 37
37enum class CancelOp 38enum class CancelOp
@@ -48,7 +49,8 @@ enum class CancelOp
48// crc64/we of string "CANCEL_ERROR" generated at http://www.nitrxgen.net/hashgen/ 49// crc64/we of string "CANCEL_ERROR" generated at http://www.nitrxgen.net/hashgen/
49static constexpr UniqueKey CANCEL_ERROR{ 0xe97d41626cc97577ull }; // 'raise_cancel_error' sentinel 50static constexpr UniqueKey CANCEL_ERROR{ 0xe97d41626cc97577ull }; // 'raise_cancel_error' sentinel
50 51
51CancelResult thread_cancel(lua_State* L, Lane* lane_, CancelOp op_, double secs_, bool force_, double waitkill_timeout_); 52[[nodiscard]] CancelOp which_cancel_op(char const* op_string_);
53[[nodiscard]] CancelResult thread_cancel(Lane* lane_, CancelOp op_, int hook_count_, lua_Duration secs_, bool wake_lindas_);
52 54
53[[noreturn]] static inline void raise_cancel_error(lua_State* L) 55[[noreturn]] static inline void raise_cancel_error(lua_State* L)
54{ 56{
diff --git a/src/compat.cpp b/src/compat.cpp
index 47fe37e..73d0f6b 100644
--- a/src/compat.cpp
+++ b/src/compat.cpp
@@ -1,6 +1,6 @@
1/* 1/*
2 * ############################################################################################### 2 * ###############################################################################################
3 * ######################################### Lua 5.1/5.2 ######################################### 3 * ####################################### Lua 5.1/5.2/5.3 #######################################
4 * ############################################################################################### 4 * ###############################################################################################
5 */ 5 */
6#include "compat.h" 6#include "compat.h"
@@ -9,8 +9,13 @@
9/* 9/*
10** Copied from Lua 5.2 loadlib.c 10** Copied from Lua 5.2 loadlib.c
11*/ 11*/
12// ################################################################################################
13// ################################################################################################
12#if LUA_VERSION_NUM == 501 14#if LUA_VERSION_NUM == 501
13static int luaL_getsubtable (lua_State *L, int idx, const char *fname) 15// ################################################################################################
16// ################################################################################################
17
18static int luaL_getsubtable(lua_State* L, int idx, const char* fname)
14{ 19{
15 lua_getfield(L, idx, fname); 20 lua_getfield(L, idx, fname);
16 if (lua_istable(L, -1)) 21 if (lua_istable(L, -1))
@@ -26,7 +31,9 @@ static int luaL_getsubtable (lua_State *L, int idx, const char *fname)
26 } 31 }
27} 32}
28 33
29void luaL_requiref (lua_State *L, const char *modname, lua_CFunction openf, int glb) 34// ################################################################################################
35
36void luaL_requiref(lua_State *L, const char *modname, lua_CFunction openf, int glb)
30{ 37{
31 lua_pushcfunction(L, openf); 38 lua_pushcfunction(L, openf);
32 lua_pushstring(L, modname); /* argument to open function */ 39 lua_pushstring(L, modname); /* argument to open function */
@@ -43,24 +50,30 @@ void luaL_requiref (lua_State *L, const char *modname, lua_CFunction openf, int
43} 50}
44#endif // LUA_VERSION_NUM 51#endif // LUA_VERSION_NUM
45 52
53// ################################################################################################
54// ################################################################################################
46#if LUA_VERSION_NUM < 504 55#if LUA_VERSION_NUM < 504
56// ################################################################################################
57// ################################################################################################
47 58
48void* lua_newuserdatauv( lua_State* L, size_t sz, int nuvalue) 59void* lua_newuserdatauv( lua_State* L, size_t sz, int nuvalue)
49{ 60{
50 ASSERT_L( nuvalue <= 1); 61 ASSERT_L( nuvalue <= 1);
51 return lua_newuserdata( L, sz); 62 return lua_newuserdata(L, sz);
52} 63}
53 64
65// ################################################################################################
66
54// push on stack uservalue #n of full userdata at idx 67// push on stack uservalue #n of full userdata at idx
55int lua_getiuservalue(lua_State* L, int idx, int n) 68int lua_getiuservalue(lua_State* L, int idx, int n)
56{ 69{
57 // full userdata can have only 1 uservalue before 5.4 70 // full userdata can have only 1 uservalue before 5.4
58 if( n > 1) 71 if( n > 1)
59 { 72 {
60 lua_pushnil( L); 73 lua_pushnil(L);
61 return LUA_TNONE; 74 return LUA_TNONE;
62 } 75 }
63 lua_getuservalue( L, idx); 76 lua_getuservalue(L, idx);
64 77
65#if LUA_VERSION_NUM == 501 78#if LUA_VERSION_NUM == 501
66 /* default environment is not a nil (see lua_getfenv) */ 79 /* default environment is not a nil (see lua_getfenv) */
@@ -68,30 +81,33 @@ int lua_getiuservalue(lua_State* L, int idx, int n)
68 if (lua_rawequal(L, -2, -1) || lua_rawequal(L, -2, LUA_GLOBALSINDEX)) 81 if (lua_rawequal(L, -2, -1) || lua_rawequal(L, -2, LUA_GLOBALSINDEX))
69 { 82 {
70 lua_pop(L, 2); 83 lua_pop(L, 2);
71 lua_pushnil( L); 84 lua_pushnil(L);
72 85
73 return LUA_TNONE; 86 return LUA_TNONE;
74 } 87 }
75 lua_pop(L, 1); /* remove package */ 88 lua_pop(L, 1); /* remove package */
76#endif 89#endif
77 90
78 return lua_type( L, -1); 91 return lua_type(L, -1);
79} 92}
80 93
81// pop stack top, sets it a uservalue #n of full userdata at idx 94// ################################################################################################
82int lua_setiuservalue( lua_State* L, int idx, int n) 95
96// Pops a value from the stack and sets it as the new n-th user value associated to the full userdata at the given index.
97// Returns 0 if the userdata does not have that value.
98int lua_setiuservalue(lua_State* L, int idx, int n)
83{ 99{
84 if( n > 1 100 if( n > 1
85#if LUA_VERSION_NUM == 501 101#if LUA_VERSION_NUM == 501
86 || lua_type( L, -1) != LUA_TTABLE 102 || lua_type(L, -1) != LUA_TTABLE
87#endif 103#endif
88 ) 104 )
89 { 105 {
90 lua_pop( L, 1); 106 lua_pop(L, 1);
91 return 0; 107 return 0;
92 } 108 }
93 109
94 (void) lua_setuservalue( L, idx); 110 lua_setuservalue(L, idx);
95 return 1; // I guess anything non-0 is ok 111 return 1; // I guess anything non-0 is ok
96} 112}
97 113
diff --git a/src/compat.h b/src/compat.h
index 8ef1b6c..8d10e78 100644
--- a/src/compat.h
+++ b/src/compat.h
@@ -98,3 +98,30 @@ int lua_setiuservalue( lua_State* L, int idx, int n);
98#define LUA_ERRGCMM 666 // doesn't exist in Lua 5.4, we don't care about the actual value 98#define LUA_ERRGCMM 666 // doesn't exist in Lua 5.4, we don't care about the actual value
99 99
100#endif // LUA_VERSION_NUM == 504 100#endif // LUA_VERSION_NUM == 504
101
102// #################################################################################################
103
104// a wrapper over lua types to see them easier in a debugger
105enum class LuaType
106{
107 NONE = LUA_TNONE,
108 NIL = LUA_TNIL,
109 BOOLEAN = LUA_TBOOLEAN,
110 LIGHTUSERDATA = LUA_TLIGHTUSERDATA,
111 NUMBER = LUA_TNUMBER,
112 STRING = LUA_TSTRING,
113 TABLE = LUA_TTABLE,
114 FUNCTION = LUA_TFUNCTION,
115 USERDATA = LUA_TUSERDATA,
116 THREAD = LUA_TTHREAD,
117 CDATA = 10 // LuaJIT CDATA
118};
119
120inline LuaType lua_type_as_enum(lua_State* L, int idx_)
121{
122 return static_cast<LuaType>(lua_type(L, idx_));
123}
124inline char const* lua_typename(lua_State* L, LuaType t_)
125{
126 return lua_typename(L, static_cast<int>(t_));
127}
diff --git a/src/deep.cpp b/src/deep.cpp
index ac2905e..d0b8123 100644
--- a/src/deep.cpp
+++ b/src/deep.cpp
@@ -1,5 +1,5 @@
1/* 1/*
2 * DEEP.C Copyright (c) 2017, Benoit Germain 2 * DEEP.CPP Copyright (c) 2024, Benoit Germain
3 * 3 *
4 * Deep userdata support, separate in its own source file to help integration 4 * Deep userdata support, separate in its own source file to help integration
5 * without enforcing a Lanes dependency 5 * without enforcing a Lanes dependency
@@ -9,7 +9,7 @@
9=============================================================================== 9===============================================================================
10 10
11Copyright (C) 2002-10 Asko Kauppi <akauppi@gmail.com> 11Copyright (C) 2002-10 Asko Kauppi <akauppi@gmail.com>
12 2011-17 Benoit Germain <bnt.germain@gmail.com> 12 2011-24 Benoit Germain <bnt.germain@gmail.com>
13 13
14Permission is hereby granted, free of charge, to any person obtaining a copy 14Permission is hereby granted, free of charge, to any person obtaining a copy
15of this software and associated documentation files (the "Software"), to deal 15of this software and associated documentation files (the "Software"), to deal
@@ -41,13 +41,6 @@ THE SOFTWARE.
41 41
42#include <bit> 42#include <bit>
43#include <cassert> 43#include <cassert>
44#include <ctype.h>
45#include <stdio.h>
46#include <string.h>
47#include <stdlib.h>
48#if !defined(__APPLE__)
49#include <malloc.h>
50#endif
51 44
52/*-- Metatable copying --*/ 45/*-- Metatable copying --*/
53 46
@@ -73,7 +66,7 @@ static constexpr UniqueKey DEEP_PROXY_CACHE_KEY{ 0x05773d6fc26be106ull };
73* Sets up [-1]<->[-2] two-way lookups, and ensures the lookup table exists. 66* Sets up [-1]<->[-2] two-way lookups, and ensures the lookup table exists.
74* Pops the both values off the stack. 67* Pops the both values off the stack.
75*/ 68*/
76static void set_deep_lookup( lua_State* L) 69static void set_deep_lookup(lua_State* L)
77{ 70{
78 STACK_GROW( L, 3); 71 STACK_GROW( L, 3);
79 STACK_CHECK_START_REL(L, 2); // a b 72 STACK_CHECK_START_REL(L, 2); // a b
@@ -88,11 +81,13 @@ static void set_deep_lookup( lua_State* L)
88 STACK_CHECK( L, 0); 81 STACK_CHECK( L, 0);
89} 82}
90 83
84// ################################################################################################
85
91/* 86/*
92* Pops the key (metatable or idfunc) off the stack, and replaces with the 87* Pops the key (metatable or idfunc) off the stack, and replaces with the
93* deep lookup value (idfunc/metatable/nil). 88* deep lookup value (idfunc/metatable/nil).
94*/ 89*/
95static void get_deep_lookup( lua_State* L) 90static void get_deep_lookup(lua_State* L)
96{ 91{
97 STACK_GROW( L, 1); 92 STACK_GROW( L, 1);
98 STACK_CHECK_START_REL(L, 1); // a 93 STACK_CHECK_START_REL(L, 1); // a
@@ -106,11 +101,13 @@ static void get_deep_lookup( lua_State* L)
106 STACK_CHECK( L, 1); 101 STACK_CHECK( L, 1);
107} 102}
108 103
104// ################################################################################################
105
109/* 106/*
110* Return the registered ID function for 'index' (deep userdata proxy), 107* Return the registered ID function for 'index' (deep userdata proxy),
111* or nullptr if 'index' is not a deep userdata proxy. 108* or nullptr if 'index' is not a deep userdata proxy.
112*/ 109*/
113static inline luaG_IdFunction get_idfunc( lua_State* L, int index, LookupMode mode_) 110[[nodiscard]] static inline luaG_IdFunction get_idfunc(lua_State* L, int index, LookupMode mode_)
114{ 111{
115 // when looking inside a keeper, we are 100% sure the object is a deep userdata 112 // when looking inside a keeper, we are 100% sure the object is a deep userdata
116 if (mode_ == LookupMode::FromKeeper) 113 if (mode_ == LookupMode::FromKeeper)
@@ -142,8 +139,9 @@ static inline luaG_IdFunction get_idfunc( lua_State* L, int index, LookupMode mo
142 } 139 }
143} 140}
144 141
142// ################################################################################################
145 143
146void free_deep_prelude( lua_State* L, DeepPrelude* prelude_) 144void free_deep_prelude(lua_State* L, DeepPrelude* prelude_)
147{ 145{
148 ASSERT_L(prelude_->idfunc); 146 ASSERT_L(prelude_->idfunc);
149 STACK_CHECK_START_REL(L, 0); 147 STACK_CHECK_START_REL(L, 0);
@@ -154,6 +152,7 @@ void free_deep_prelude( lua_State* L, DeepPrelude* prelude_)
154 STACK_CHECK(L, 0); 152 STACK_CHECK(L, 0);
155} 153}
156 154
155// ################################################################################################
157 156
158/* 157/*
159 * void= mt.__gc( proxy_ud ) 158 * void= mt.__gc( proxy_ud )
@@ -161,7 +160,7 @@ void free_deep_prelude( lua_State* L, DeepPrelude* prelude_)
161 * End of life for a proxy object; reduce the deep reference count and clean it up if reaches 0. 160 * End of life for a proxy object; reduce the deep reference count and clean it up if reaches 0.
162 * 161 *
163 */ 162 */
164static int deep_userdata_gc( lua_State* L) 163[[nodiscard]] static int deep_userdata_gc(lua_State* L)
165{ 164{
166 DeepPrelude** const proxy{ lua_tofulluserdata<DeepPrelude*>(L, 1) }; 165 DeepPrelude** const proxy{ lua_tofulluserdata<DeepPrelude*>(L, 1) };
167 DeepPrelude* p = *proxy; 166 DeepPrelude* p = *proxy;
@@ -193,6 +192,7 @@ static int deep_userdata_gc( lua_State* L)
193 return 0; 192 return 0;
194} 193}
195 194
195// ################################################################################################
196 196
197/* 197/*
198 * Push a proxy userdata on the stack. 198 * Push a proxy userdata on the stack.
@@ -203,7 +203,7 @@ static int deep_userdata_gc( lua_State* L)
203 * used in this Lua state (metatable, registring it). Otherwise, increments the 203 * used in this Lua state (metatable, registring it). Otherwise, increments the
204 * reference count. 204 * reference count.
205 */ 205 */
206char const* push_deep_proxy(lua_State* L, DeepPrelude* prelude, int nuv_, LookupMode mode_) 206char const* push_deep_proxy(Dest L, DeepPrelude* prelude, int nuv_, LookupMode mode_)
207{ 207{
208 // Check if a proxy already exists 208 // Check if a proxy already exists
209 push_registry_subtable_mode( L, DEEP_PROXY_CACHE_KEY, "v"); // DPC 209 push_registry_subtable_mode( L, DEEP_PROXY_CACHE_KEY, "v"); // DPC
@@ -278,7 +278,7 @@ char const* push_deep_proxy(lua_State* L, DeepPrelude* prelude, int nuv_, Lookup
278 // this is needed because we must make sure the shared library is still loaded as long as we hold a pointer on the idfunc 278 // this is needed because we must make sure the shared library is still loaded as long as we hold a pointer on the idfunc
279 { 279 {
280 int oldtop_module = lua_gettop( L); 280 int oldtop_module = lua_gettop( L);
281 modname = (char const*) prelude->idfunc( L, DeepOp::Module); // DPC proxy metatable 281 modname = (char const*) prelude->idfunc( L, DeepOp::Module); // DPC proxy metatable
282 // make sure the function pushed nothing on the stack! 282 // make sure the function pushed nothing on the stack!
283 if( lua_gettop( L) - oldtop_module != 0) 283 if( lua_gettop( L) - oldtop_module != 0)
284 { 284 {
@@ -348,6 +348,8 @@ char const* push_deep_proxy(lua_State* L, DeepPrelude* prelude, int nuv_, Lookup
348 return nullptr; 348 return nullptr;
349} 349}
350 350
351// ################################################################################################
352
351/* 353/*
352* Create a deep userdata 354* Create a deep userdata
353* 355*
@@ -370,7 +372,7 @@ char const* push_deep_proxy(lua_State* L, DeepPrelude* prelude, int nuv_, Lookup
370* 372*
371* Returns: 'proxy' userdata for accessing the deep data via 'luaG_todeep()' 373* Returns: 'proxy' userdata for accessing the deep data via 'luaG_todeep()'
372*/ 374*/
373int luaG_newdeepuserdata( lua_State* L, luaG_IdFunction idfunc, int nuv_) 375int luaG_newdeepuserdata(Dest L, luaG_IdFunction idfunc, int nuv_)
374{ 376{
375 STACK_GROW( L, 1); 377 STACK_GROW( L, 1);
376 STACK_CHECK_START_REL(L, 0); 378 STACK_CHECK_START_REL(L, 0);
@@ -409,6 +411,7 @@ int luaG_newdeepuserdata( lua_State* L, luaG_IdFunction idfunc, int nuv_)
409 return 1; 411 return 1;
410} 412}
411 413
414// ################################################################################################
412 415
413/* 416/*
414* Access deep userdata through a proxy. 417* Access deep userdata through a proxy.
@@ -430,6 +433,7 @@ DeepPrelude* luaG_todeep(lua_State* L, luaG_IdFunction idfunc, int index)
430 return *proxy; 433 return *proxy;
431} 434}
432 435
436// ################################################################################################
433 437
434/* 438/*
435 * Copy deep userdata between two separate Lua states (from L to L2) 439 * Copy deep userdata between two separate Lua states (from L to L2)
@@ -438,7 +442,7 @@ DeepPrelude* luaG_todeep(lua_State* L, luaG_IdFunction idfunc, int index)
438 * the id function of the copied value, or nullptr for non-deep userdata 442 * the id function of the copied value, or nullptr for non-deep userdata
439 * (not copied) 443 * (not copied)
440 */ 444 */
441bool copydeep(Universe* U, lua_State* L2, int L2_cache_i, lua_State* L, int i, LookupMode mode_, char const* upName_) 445bool copydeep(Universe* U, Dest L2, int L2_cache_i, Source L, int i, LookupMode mode_, char const* upName_)
442{ 446{
443 luaG_IdFunction const idfunc { get_idfunc(L, i, mode_) }; 447 luaG_IdFunction const idfunc { get_idfunc(L, i, mode_) };
444 if (idfunc == nullptr) 448 if (idfunc == nullptr)
@@ -466,22 +470,25 @@ bool copydeep(Universe* U, lua_State* L2, int L2_cache_i, lua_State* L, int i, L
466 int const clone_i = lua_gettop( L2); 470 int const clone_i = lua_gettop( L2);
467 while( nuv) 471 while( nuv)
468 { 472 {
469 inter_copy_one( U, L2, L2_cache_i, L, lua_absindex( L, -1), VT::NORMAL, mode_, upName_); // u uv 473 if (!inter_copy_one(U, L2, L2_cache_i, L, lua_absindex(L, -1), VT::NORMAL, mode_, upName_)) // u uv
474 {
475 return luaL_error(L, "Cannot copy upvalue type '%s'", luaL_typename(L, -1));
476 }
470 lua_pop( L, 1); // ... u [uv]* 477 lua_pop( L, 1); // ... u [uv]*
471 // this pops the value from the stack 478 // this pops the value from the stack
472 lua_setiuservalue( L2, clone_i, nuv); // u 479 lua_setiuservalue(L2, clone_i, nuv); // u
473 -- nuv; 480 -- nuv;
474 } 481 }
475 } 482 }
476 483
477 STACK_CHECK( L2, 1); 484 STACK_CHECK(L2, 1);
478 STACK_CHECK( L, 0); 485 STACK_CHECK(L, 0);
479 486
480 if (errmsg != nullptr) 487 if (errmsg != nullptr)
481 { 488 {
482 // raise the error in the proper state (not the keeper) 489 // raise the error in the proper state (not the keeper)
483 lua_State* const errL{ (mode_ == LookupMode::FromKeeper) ? L2 : L }; 490 lua_State* const errL{ (mode_ == LookupMode::FromKeeper) ? L2 : L };
484 std::ignore = luaL_error(errL, errmsg); 491 luaL_error(errL, errmsg); // doesn't return
485 } 492 }
486 return true; 493 return true;
487} \ No newline at end of file 494} \ No newline at end of file
diff --git a/src/deep.h b/src/deep.h
index 1799cf0..7be5c5d 100644
--- a/src/deep.h
+++ b/src/deep.h
@@ -19,7 +19,7 @@ extern "C" {
19#include <atomic> 19#include <atomic>
20 20
21// forwards 21// forwards
22struct Universe; 22class Universe;
23 23
24enum class LookupMode 24enum class LookupMode
25{ 25{
@@ -36,7 +36,7 @@ enum class DeepOp
36 Module, 36 Module,
37}; 37};
38 38
39using luaG_IdFunction = void*(*)( lua_State* L, DeepOp op_); 39using luaG_IdFunction = void*(*)(lua_State* L, DeepOp op_);
40 40
41// ################################################################################################ 41// ################################################################################################
42 42
@@ -54,8 +54,8 @@ struct DeepPrelude
54 std::atomic<int> m_refcount{ 0 }; 54 std::atomic<int> m_refcount{ 0 };
55}; 55};
56 56
57char const* push_deep_proxy(lua_State* L, DeepPrelude* prelude, int nuv_, LookupMode mode_); 57[[nodiscard]] char const* push_deep_proxy(Dest L, DeepPrelude* prelude, int nuv_, LookupMode mode_);
58void free_deep_prelude( lua_State* L, DeepPrelude* prelude_); 58void free_deep_prelude(lua_State* L, DeepPrelude* prelude_);
59 59
60LANES_API int luaG_newdeepuserdata( lua_State* L, luaG_IdFunction idfunc, int nuv_); 60LANES_API [[nodiscard]] int luaG_newdeepuserdata(Dest L, luaG_IdFunction idfunc, int nuv_);
61LANES_API DeepPrelude* luaG_todeep(lua_State* L, luaG_IdFunction idfunc, int index); 61LANES_API [[nodiscard]] DeepPrelude* luaG_todeep(lua_State* L, luaG_IdFunction idfunc, int index);
diff --git a/src/keeper.cpp b/src/keeper.cpp
index 244cb6a..f56c50c 100644
--- a/src/keeper.cpp
+++ b/src/keeper.cpp
@@ -14,7 +14,7 @@
14 --[[ 14 --[[
15 =============================================================================== 15 ===============================================================================
16 16
17 Copyright (C) 2011-2023 Benoit Germain <bnt.germain@gmail.com> 17 Copyright (C) 2011-2024 Benoit Germain <bnt.germain@gmail.com>
18 18
19 Permission is hereby granted, free of charge, to any person obtaining a copy 19 Permission is hereby granted, free of charge, to any person obtaining a copy
20 of this software and associated documentation files (the "Software"), to deal 20 of this software and associated documentation files (the "Software"), to deal
@@ -61,12 +61,12 @@ class keeper_fifo
61 int limit{ -1 }; 61 int limit{ -1 };
62 62
63 // a fifo full userdata has one uservalue, the table that holds the actual fifo contents 63 // a fifo full userdata has one uservalue, the table that holds the actual fifo contents
64 static void* operator new([[maybe_unused]] size_t size_, lua_State* L) noexcept { return lua_newuserdatauv<keeper_fifo>(L, 1); } 64 [[nodiscard]] static void* operator new([[maybe_unused]] size_t size_, lua_State* L) noexcept { return lua_newuserdatauv<keeper_fifo>(L, 1); }
65 // always embedded somewhere else or "in-place constructed" as a full userdata 65 // always embedded somewhere else or "in-place constructed" as a full userdata
66 // can't actually delete the operator because the compiler generates stack unwinding code that could call it in case of exception 66 // can't actually delete the operator because the compiler generates stack unwinding code that could call it in case of exception
67 static void operator delete([[maybe_unused]] void* p_, lua_State* L) { ASSERT_L(!"should never be called") }; 67 static void operator delete([[maybe_unused]] void* p_, lua_State* L) { ASSERT_L(!"should never be called") };
68 68
69 static keeper_fifo* getPtr(lua_State* L, int idx_) 69 [[nodiscard]] static keeper_fifo* getPtr(lua_State* L, int idx_)
70 { 70 {
71 return lua_tofulluserdata<keeper_fifo>(L, idx_); 71 return lua_tofulluserdata<keeper_fifo>(L, idx_);
72 } 72 }
@@ -77,7 +77,7 @@ static constexpr int CONTENTS_TABLE{ 1 };
77// ################################################################################################## 77// ##################################################################################################
78 78
79// replaces the fifo ud by its uservalue on the stack 79// replaces the fifo ud by its uservalue on the stack
80static keeper_fifo* prepare_fifo_access(lua_State* L, int idx_) 80[[nodiscard]] static keeper_fifo* prepare_fifo_access(lua_State* L, int idx_)
81{ 81{
82 keeper_fifo* const fifo{ keeper_fifo::getPtr(L, idx_) }; 82 keeper_fifo* const fifo{ keeper_fifo::getPtr(L, idx_) };
83 if (fifo != nullptr) 83 if (fifo != nullptr)
@@ -95,7 +95,7 @@ static keeper_fifo* prepare_fifo_access(lua_State* L, int idx_)
95 95
96// in: nothing 96// in: nothing
97// out: { first = 1, count = 0, limit = -1} 97// out: { first = 1, count = 0, limit = -1}
98static keeper_fifo* fifo_new(lua_State* L) 98[[nodiscard]] static keeper_fifo* fifo_new(lua_State* L)
99{ 99{
100 STACK_GROW(L, 2); 100 STACK_GROW(L, 2);
101 STACK_CHECK_START_REL(L, 0); 101 STACK_CHECK_START_REL(L, 0);
@@ -207,52 +207,52 @@ static void push_table(lua_State* L, int idx_)
207 207
208// ################################################################################################## 208// ##################################################################################################
209 209
210int keeper_push_linda_storage(Universe* U, lua_State* L, void* ptr_, uintptr_t magic_) 210int keeper_push_linda_storage(Universe* U, Dest L, void* ptr_, uintptr_t magic_)
211{ 211{
212 Keeper* const K{ which_keeper(U->keepers, magic_) }; 212 Keeper* const K{ which_keeper(U->keepers, magic_) };
213 lua_State* const KL{ K ? K->L : nullptr }; 213 Source const KL{ K ? K->L : nullptr };
214 if (KL == nullptr) 214 if (KL == nullptr)
215 return 0; 215 return 0;
216 STACK_GROW(KL, 4); 216 STACK_GROW(KL, 4); // KEEPER MAIN
217 STACK_CHECK_START_REL(KL, 0); 217 STACK_CHECK_START_REL(KL, 0);
218 FIFOS_KEY.pushValue(KL); // fifos 218 FIFOS_KEY.pushValue(KL); // fifos
219 lua_pushlightuserdata(KL, ptr_); // fifos ud 219 lua_pushlightuserdata(KL, ptr_); // fifos ud
220 lua_rawget(KL, -2); // fifos storage 220 lua_rawget(KL, -2); // fifos storage
221 lua_remove(KL, -2); // storage 221 lua_remove(KL, -2); // storage
222 if (!lua_istable(KL, -1)) 222 if (!lua_istable(KL, -1))
223 { 223 {
224 lua_pop(KL, 1); // 224 lua_pop(KL, 1); //
225 STACK_CHECK(KL, 0); 225 STACK_CHECK(KL, 0);
226 return 0; 226 return 0;
227 } 227 }
228 // move data from keeper to destination state KEEPER MAIN 228 // move data from keeper to destination state
229 lua_pushnil(KL); // storage nil 229 lua_pushnil(KL); // storage nil
230 STACK_GROW(L, 5); 230 STACK_GROW(L, 5);
231 STACK_CHECK_START_REL(L, 0); 231 STACK_CHECK_START_REL(L, 0);
232 lua_newtable(L); // out 232 lua_newtable(L); // out
233 while (lua_next(KL, -2)) // storage key fifo 233 while (lua_next(KL, -2)) // storage key fifo
234 { 234 {
235 keeper_fifo* fifo = prepare_fifo_access(KL, -1); // storage key fifotbl 235 keeper_fifo* fifo = prepare_fifo_access(KL, -1); // storage key fifotbl
236 lua_pushvalue(KL, -2); // storage key fifotbl key 236 lua_pushvalue(KL, -2); // storage key fifotbl key
237 luaG_inter_move(U, KL, L, 1, LookupMode::FromKeeper); // storage key fifotbl // out key 237 std::ignore = luaG_inter_move(U, KL, L, 1, LookupMode::FromKeeper); // storage key fifotbl // out key
238 STACK_CHECK(L, 2); 238 STACK_CHECK(L, 2);
239 lua_newtable(L); // out key keyout 239 lua_newtable(L); // out key keyout
240 luaG_inter_move(U, KL, L, 1, LookupMode::FromKeeper); // storage key // out key keyout fifotbl 240 std::ignore = luaG_inter_move(U, KL, L, 1, LookupMode::FromKeeper); // storage key // out key keyout fifotbl
241 lua_pushinteger(L, fifo->first); // out key keyout fifotbl first 241 lua_pushinteger(L, fifo->first); // out key keyout fifotbl first
242 STACK_CHECK(L, 5); 242 STACK_CHECK(L, 5);
243 lua_setfield(L, -3, "first"); // out key keyout fifotbl 243 lua_setfield(L, -3, "first"); // out key keyout fifotbl
244 lua_pushinteger(L, fifo->count); // out key keyout fifobtl count 244 lua_pushinteger(L, fifo->count); // out key keyout fifobtl count
245 STACK_CHECK(L, 5); 245 STACK_CHECK(L, 5);
246 lua_setfield(L, -3, "count"); // out key keyout fifotbl 246 lua_setfield(L, -3, "count"); // out key keyout fifotbl
247 lua_pushinteger(L, fifo->limit); // out key keyout fifotbl limit 247 lua_pushinteger(L, fifo->limit); // out key keyout fifotbl limit
248 STACK_CHECK(L, 5); 248 STACK_CHECK(L, 5);
249 lua_setfield(L, -3, "limit"); // out key keyout fifotbl 249 lua_setfield(L, -3, "limit"); // out key keyout fifotbl
250 lua_setfield(L, -2, "fifo"); // out key keyout 250 lua_setfield(L, -2, "fifo"); // out key keyout
251 lua_rawset(L, -3); // out 251 lua_rawset(L, -3); // out
252 STACK_CHECK(L, 1); 252 STACK_CHECK(L, 1);
253 } 253 }
254 STACK_CHECK(L, 1); 254 STACK_CHECK(L, 1);
255 lua_pop(KL, 1); // 255 lua_pop(KL, 1); //
256 STACK_CHECK(KL, 0); 256 STACK_CHECK(KL, 0);
257 return 1; 257 return 1;
258} 258}
@@ -287,7 +287,7 @@ int keepercall_send(lua_State* L)
287 if( lua_isnil(L, -1)) 287 if( lua_isnil(L, -1))
288 { 288 {
289 lua_pop(L, 1); // ud key ... fifos 289 lua_pop(L, 1); // ud key ... fifos
290 fifo_new(L); // ud key ... fifos fifo 290 std::ignore = fifo_new(L); // ud key ... fifos fifo
291 lua_pushvalue(L, 2); // ud key ... fifos fifo key 291 lua_pushvalue(L, 2); // ud key ... fifos fifo key
292 lua_pushvalue(L, -2); // ud key ... fifos fifo key fifo 292 lua_pushvalue(L, -2); // ud key ... fifos fifo key fifo
293 lua_rawset(L, -4); // ud key ... fifos fifo 293 lua_rawset(L, -4); // ud key ... fifos fifo
@@ -412,7 +412,7 @@ int keepercall_limit(lua_State* L)
412 // set the new limit 412 // set the new limit
413 fifo->limit = limit; 413 fifo->limit = limit;
414 // return 0 or 1 value 414 // return 0 or 1 value
415 return lua_gettop( L); 415 return lua_gettop(L);
416} 416}
417 417
418// ################################################################################################## 418// ##################################################################################################
@@ -465,7 +465,7 @@ int keepercall_set(lua_State* L)
465 { // fifos key [val [, ...]] nil 465 { // fifos key [val [, ...]] nil
466 // no need to wake writers in that case, because a writer can't wait on an inexistent key 466 // no need to wake writers in that case, because a writer can't wait on an inexistent key
467 lua_pop(L, 1); // fifos key [val [, ...]] 467 lua_pop(L, 1); // fifos key [val [, ...]]
468 fifo_new(L); // fifos key [val [, ...]] fifo 468 std::ignore = fifo_new(L); // fifos key [val [, ...]] fifo
469 lua_pushvalue(L, 2); // fifos key [val [, ...]] fifo key 469 lua_pushvalue(L, 2); // fifos key [val [, ...]] fifo key
470 lua_pushvalue(L, -2); // fifos key [val [, ...]] fifo key fifo 470 lua_pushvalue(L, -2); // fifos key [val [, ...]] fifo key fifo
471 lua_rawset(L, 1); // fifos key [val [, ...]] fifo 471 lua_rawset(L, 1); // fifos key [val [, ...]] fifo
@@ -485,7 +485,7 @@ int keepercall_set(lua_State* L)
485 lua_insert(L, 3); // fifos key fifotbl [val [, ...]] 485 lua_insert(L, 3); // fifos key fifotbl [val [, ...]]
486 fifo_push(L, fifo, count); // fifos key fifotbl 486 fifo_push(L, fifo, count); // fifos key fifotbl
487 } 487 }
488 return should_wake_writers ? (lua_pushboolean( L, 1), 1) : 0; 488 return should_wake_writers ? (lua_pushboolean(L, 1), 1) : 0;
489} 489}
490 490
491// ################################################################################################## 491// ##################################################################################################
@@ -627,7 +627,7 @@ void close_keepers(Universe* U)
627 } 627 }
628 for (int i = 0; i < nbKeepers; ++i) 628 for (int i = 0; i < nbKeepers; ++i)
629 { 629 {
630 MUTEX_FREE(&U->keepers->keeper_array[i].keeper_cs); 630 U->keepers->keeper_array[i].~Keeper();
631 } 631 }
632 // free the keeper bookkeeping structure 632 // free the keeper bookkeeping structure
633 U->internal_allocator.free(U->keepers, sizeof(Keepers) + (nbKeepers - 1) * sizeof(Keeper)); 633 U->internal_allocator.free(U->keepers, sizeof(Keepers) + (nbKeepers - 1) * sizeof(Keeper));
@@ -652,12 +652,18 @@ void init_keepers(Universe* U, lua_State* L)
652{ 652{
653 STACK_CHECK_START_REL(L, 0); // L K 653 STACK_CHECK_START_REL(L, 0); // L K
654 lua_getfield(L, 1, "nb_keepers"); // nb_keepers 654 lua_getfield(L, 1, "nb_keepers"); // nb_keepers
655 int nb_keepers{ static_cast<int>(lua_tointeger(L, -1)) }; 655 int const nb_keepers{ static_cast<int>(lua_tointeger(L, -1)) };
656 lua_pop(L, 1); // 656 lua_pop(L, 1); //
657 if (nb_keepers < 1) 657 if (nb_keepers < 1)
658 { 658 {
659 std::ignore = luaL_error(L, "Bad number of keepers (%d)", nb_keepers); 659 luaL_error(L, "Bad number of keepers (%d)", nb_keepers); // doesn't return
660 } 660 }
661 STACK_CHECK(L, 0);
662
663 lua_getfield(L, 1, "keepers_gc_threshold"); // keepers_gc_threshold
664 int const keepers_gc_threshold{ static_cast<int>(lua_tointeger(L, -1)) };
665 lua_pop(L, 1); //
666 STACK_CHECK(L, 0);
661 667
662 // Keepers contains an array of 1 Keeper, adjust for the actual number of keeper states 668 // Keepers contains an array of 1 Keeper, adjust for the actual number of keeper states
663 { 669 {
@@ -665,10 +671,16 @@ void init_keepers(Universe* U, lua_State* L)
665 U->keepers = static_cast<Keepers*>(U->internal_allocator.alloc(bytes)); 671 U->keepers = static_cast<Keepers*>(U->internal_allocator.alloc(bytes));
666 if (U->keepers == nullptr) 672 if (U->keepers == nullptr)
667 { 673 {
668 std::ignore = luaL_error(L, "init_keepers() failed while creating keeper array; out of memory"); 674 luaL_error(L, "init_keepers() failed while creating keeper array; out of memory"); // doesn't return
669 } 675 }
670 memset(U->keepers, 0, bytes); 676 U->keepers->Keepers::Keepers();
677 U->keepers->gc_threshold = keepers_gc_threshold;
671 U->keepers->nb_keepers = nb_keepers; 678 U->keepers->nb_keepers = nb_keepers;
679
680 for (int i = 0; i < nb_keepers; ++i)
681 {
682 U->keepers->keeper_array[i].Keeper::Keeper();
683 }
672 } 684 }
673 for (int i = 0; i < nb_keepers; ++i) // keepersUD 685 for (int i = 0; i < nb_keepers; ++i) // keepersUD
674 { 686 {
@@ -676,14 +688,15 @@ void init_keepers(Universe* U, lua_State* L)
676 lua_State* const K{ create_state(U, L) }; 688 lua_State* const K{ create_state(U, L) };
677 if (K == nullptr) 689 if (K == nullptr)
678 { 690 {
679 std::ignore = luaL_error(L, "init_keepers() failed while creating keeper states; out of memory"); 691 luaL_error(L, "init_keepers() failed while creating keeper states; out of memory"); // doesn't return
680 } 692 }
681 693
682 U->keepers->keeper_array[i].L = K; 694 U->keepers->keeper_array[i].L = K;
683 // we can trigger a GC from inside keeper_call(), where a keeper is acquired 695
684 // from there, GC can collect a linda, which would acquire the keeper again, and deadlock the thread. 696 if (U->keepers->gc_threshold >= 0)
685 // therefore, we need a recursive mutex. 697 {
686 MUTEX_RECURSIVE_INIT(&U->keepers->keeper_array[i].keeper_cs); 698 lua_gc(K, LUA_GCSTOP, 0);
699 }
687 700
688 STACK_CHECK_START_ABS(K, 0); 701 STACK_CHECK_START_ABS(K, 0);
689 702
@@ -704,7 +717,7 @@ void init_keepers(Universe* U, lua_State* L)
704 if (!lua_isnil(L, -1)) 717 if (!lua_isnil(L, -1))
705 { 718 {
706 // when copying with mode LookupMode::ToKeeper, error message is pushed at the top of the stack, not raised immediately 719 // when copying with mode LookupMode::ToKeeper, error message is pushed at the top of the stack, not raised immediately
707 if (luaG_inter_copy_package(U, L, K, -1, LookupMode::ToKeeper)) 720 if (luaG_inter_copy_package(U, Source{ L }, Dest{ K }, -1, LookupMode::ToKeeper) != InterCopyResult::Success)
708 { 721 {
709 // if something went wrong, the error message is at the top of the stack 722 // if something went wrong, the error message is at the top of the stack
710 lua_remove(L, -2); // error_msg 723 lua_remove(L, -2); // error_msg
@@ -735,8 +748,12 @@ void init_keepers(Universe* U, lua_State* L)
735Keeper* which_keeper(Keepers* keepers_, uintptr_t magic_) 748Keeper* which_keeper(Keepers* keepers_, uintptr_t magic_)
736{ 749{
737 int const nbKeepers{ keepers_->nb_keepers }; 750 int const nbKeepers{ keepers_->nb_keepers };
738 unsigned int i = (unsigned int)((magic_ >> KEEPER_MAGIC_SHIFT) % nbKeepers); 751 if (nbKeepers)
739 return &keepers_->keeper_array[i]; 752 {
753 unsigned int i = (unsigned int) ((magic_ >> KEEPER_MAGIC_SHIFT) % nbKeepers);
754 return &keepers_->keeper_array[i];
755 }
756 return nullptr;
740} 757}
741 758
742// ################################################################################################## 759// ##################################################################################################
@@ -745,11 +762,7 @@ Keeper* keeper_acquire(Keepers* keepers_, uintptr_t magic_)
745{ 762{
746 int const nbKeepers{ keepers_->nb_keepers }; 763 int const nbKeepers{ keepers_->nb_keepers };
747 // can be 0 if this happens during main state shutdown (lanes is being GC'ed -> no keepers) 764 // can be 0 if this happens during main state shutdown (lanes is being GC'ed -> no keepers)
748 if( nbKeepers == 0) 765 if (nbKeepers)
749 {
750 return nullptr;
751 }
752 else
753 { 766 {
754 /* 767 /*
755 * Any hashing will do that maps pointers to 0..GNbKeepers-1 768 * Any hashing will do that maps pointers to 0..GNbKeepers-1
@@ -760,11 +773,11 @@ Keeper* keeper_acquire(Keepers* keepers_, uintptr_t magic_)
760 */ 773 */
761 unsigned int i = (unsigned int)((magic_ >> KEEPER_MAGIC_SHIFT) % nbKeepers); 774 unsigned int i = (unsigned int)((magic_ >> KEEPER_MAGIC_SHIFT) % nbKeepers);
762 Keeper* K = &keepers_->keeper_array[i]; 775 Keeper* K = &keepers_->keeper_array[i];
763 776 K->m_mutex.lock();
764 MUTEX_LOCK( &K->keeper_cs);
765 //++ K->count; 777 //++ K->count;
766 return K; 778 return K;
767 } 779 }
780 return nullptr;
768} 781}
769 782
770// ################################################################################################## 783// ##################################################################################################
@@ -774,7 +787,7 @@ void keeper_release(Keeper* K)
774 //-- K->count; 787 //-- K->count;
775 if (K) 788 if (K)
776 { 789 {
777 MUTEX_UNLOCK(&K->keeper_cs); 790 K->m_mutex.unlock();
778 } 791 }
779} 792}
780 793
@@ -827,21 +840,45 @@ int keeper_call(Universe* U, lua_State* K, keeper_api_t func_, lua_State* L, voi
827 840
828 lua_pushlightuserdata(K, linda); 841 lua_pushlightuserdata(K, linda);
829 842
830 if ((args == 0) || luaG_inter_copy(U, L, K, args, LookupMode::ToKeeper) == 0) // L->K 843 if ((args == 0) || luaG_inter_copy(U, Source{ L }, Dest{ K }, args, LookupMode::ToKeeper) == InterCopyResult::Success) // L->K
831 { 844 {
832 lua_call(K, 1 + args, LUA_MULTRET); 845 lua_call(K, 1 + args, LUA_MULTRET);
833
834 retvals = lua_gettop(K) - Ktos; 846 retvals = lua_gettop(K) - Ktos;
835 // note that this can raise a luaL_error while the keeper state (and its mutex) is acquired 847 // note that this can raise a luaL_error while the keeper state (and its mutex) is acquired
836 // this may interrupt a lane, causing the destruction of the underlying OS thread 848 // this may interrupt a lane, causing the destruction of the underlying OS thread
837 // after this, another lane making use of this keeper can get an error code from the mutex-locking function 849 // after this, another lane making use of this keeper can get an error code from the mutex-locking function
838 // when attempting to grab the mutex again (WINVER <= 0x400 does this, but locks just fine, I don't know about pthread) 850 // when attempting to grab the mutex again (WINVER <= 0x400 does this, but locks just fine, I don't know about pthread)
839 if ((retvals > 0) && luaG_inter_move(U, K, L, retvals, LookupMode::FromKeeper) != 0) // K->L 851 if ((retvals > 0) && luaG_inter_move(U, Source{ K }, Dest{ L }, retvals, LookupMode::FromKeeper) != InterCopyResult::Success) // K->L
840 { 852 {
841 retvals = -1; 853 retvals = -1;
842 } 854 }
843 } 855 }
844 // whatever happens, restore the stack to where it was at the origin 856 // whatever happens, restore the stack to where it was at the origin
845 lua_settop(K, Ktos); 857 lua_settop(K, Ktos);
858
859 // don't do this for this particular function, as it is only called during Linda destruction, and we don't want to raise an error, ever
860 if (func_ != KEEPER_API(clear)) [[unlikely]]
861 {
862 // since keeper state GC is stopped, let's run a step once in a while if required
863 int const gc_threshold{ U->keepers->gc_threshold };
864 if (gc_threshold == 0) [[unlikely]]
865 {
866 lua_gc(K, LUA_GCSTEP, 0);
867 }
868 else if (gc_threshold > 0) [[likely]]
869 {
870 int const gc_usage{ lua_gc(K, LUA_GCCOUNT, 0) };
871 if (gc_usage >= gc_threshold)
872 {
873 lua_gc(K, LUA_GCCOLLECT, 0);
874 int const gc_usage_after{ lua_gc(K, LUA_GCCOUNT, 0) };
875 if (gc_usage_after > gc_threshold) [[unlikely]]
876 {
877 luaL_error(L, "Keeper GC threshold is too low, need at least %d", gc_usage_after);
878 }
879 }
880 }
881 }
882
846 return retvals; 883 return retvals;
847} 884}
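The block added to keeper_call() only runs for operations other than keepercall_clear and applies three gc_threshold policies to the keeper state. A self-contained sketch of just that decision logic, with printf and a plain usage figure standing in for the lua_gc() calls (the luaL_error raised when usage stays above the threshold after a full collect is omitted here):

#include <cstdio>

// mirrors the gc_threshold policies applied after a keeper operation:
// 0 -> always run an incremental step, > 0 -> full collect once usage reaches the threshold, < 0 -> leave the GC alone
void demo_gc_policy(int gc_threshold_, int gc_usage_kb_)
{
    if (gc_threshold_ == 0)
    {
        std::printf("usage %d KB: incremental GC step\n", gc_usage_kb_);
    }
    else if (gc_threshold_ > 0 && gc_usage_kb_ >= gc_threshold_)
    {
        std::printf("usage %d KB >= %d KB: full GC cycle\n", gc_usage_kb_, gc_threshold_);
    }
}

int main()
{
    demo_gc_policy(0, 150);   // step on every call
    demo_gc_policy(100, 150); // above threshold: collect
    demo_gc_policy(-1, 150);  // negative threshold: never touch the GC
    return 0;
}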
diff --git a/src/keeper.h b/src/keeper.h
index e081bea..627c7ea 100644
--- a/src/keeper.h
+++ b/src/keeper.h
@@ -11,20 +11,23 @@ extern "C" {
11#include "threading.h" 11#include "threading.h"
12#include "uniquekey.h" 12#include "uniquekey.h"
13 13
14#include <mutex>
15
14// forwards 16// forwards
15enum class LookupMode; 17enum class LookupMode;
16struct Universe; 18class Universe;
17 19
18struct Keeper 20struct Keeper
19{ 21{
20 MUTEX_T keeper_cs; 22 std::mutex m_mutex;
21 lua_State* L; 23 lua_State* L{ nullptr };
22 // int count; 24 // int count;
23}; 25};
24 26
25struct Keepers 27struct Keepers
26{ 28{
27 int nb_keepers; 29 int gc_threshold{ 0 };
30 int nb_keepers{ 0 };
28 Keeper keeper_array[1]; 31 Keeper keeper_array[1];
29}; 32};
30 33
@@ -35,23 +38,23 @@ static constexpr UniqueKey NIL_SENTINEL{ 0x7eaafa003a1d11a1ull };
35void init_keepers(Universe* U, lua_State* L); 38void init_keepers(Universe* U, lua_State* L);
36void close_keepers(Universe* U); 39void close_keepers(Universe* U);
37 40
38Keeper* which_keeper(Keepers* keepers_, uintptr_t magic_); 41[[nodiscard]] Keeper* which_keeper(Keepers* keepers_, uintptr_t magic_);
39Keeper* keeper_acquire(Keepers* keepers_, uintptr_t magic_); 42[[nodiscard]] Keeper* keeper_acquire(Keepers* keepers_, uintptr_t magic_);
40void keeper_release(Keeper* K); 43void keeper_release(Keeper* K_);
41void keeper_toggle_nil_sentinels(lua_State* L, int val_i_, LookupMode const mode_); 44void keeper_toggle_nil_sentinels(lua_State* L, int val_i_, LookupMode const mode_);
42int keeper_push_linda_storage(Universe* U, lua_State* L, void* ptr_, uintptr_t magic_); 45[[nodiscard]] int keeper_push_linda_storage(Universe* U, Dest L, void* ptr_, uintptr_t magic_);
43 46
44using keeper_api_t = lua_CFunction; 47using keeper_api_t = lua_CFunction;
45#define KEEPER_API(_op) keepercall_##_op 48#define KEEPER_API(_op) keepercall_##_op
46#define PUSH_KEEPER_FUNC lua_pushcfunction 49#define PUSH_KEEPER_FUNC lua_pushcfunction
47// lua_Cfunctions to run inside a keeper state 50// lua_Cfunctions to run inside a keeper state
48int keepercall_clear(lua_State* L); 51[[nodiscard]] int keepercall_clear(lua_State* L);
49int keepercall_send(lua_State* L); 52[[nodiscard]] int keepercall_send(lua_State* L);
50int keepercall_receive(lua_State* L); 53[[nodiscard]] int keepercall_receive(lua_State* L);
51int keepercall_receive_batched(lua_State* L); 54[[nodiscard]] int keepercall_receive_batched(lua_State* L);
52int keepercall_limit(lua_State* L); 55[[nodiscard]] int keepercall_limit(lua_State* L);
53int keepercall_get(lua_State* L); 56[[nodiscard]] int keepercall_get(lua_State* L);
54int keepercall_set(lua_State* L); 57[[nodiscard]] int keepercall_set(lua_State* L);
55int keepercall_count(lua_State* L); 58[[nodiscard]] int keepercall_count(lua_State* L);
56 59
57int keeper_call(Universe* U, lua_State* K, keeper_api_t _func, lua_State* L, void* linda, int starting_index); 60[[nodiscard]] int keeper_call(Universe* U, lua_State* K, keeper_api_t _func, lua_State* L, void* linda, int starting_index);
diff --git a/src/lanes.cpp b/src/lanes.cpp
index 47ca79a..1f795cc 100644
--- a/src/lanes.cpp
+++ b/src/lanes.cpp
@@ -99,6 +99,8 @@ THE SOFTWARE.
99# include <sys/types.h> 99# include <sys/types.h>
100#endif 100#endif
101 101
102#include <atomic>
103
102// forwarding (will do things better later) 104// forwarding (will do things better later)
103static void tracking_add(Lane* lane_); 105static void tracking_add(Lane* lane_);
104 106
@@ -106,11 +108,6 @@ Lane::Lane(Universe* U_, lua_State* L_)
106: U{ U_ } 108: U{ U_ }
107, L{ L_ } 109, L{ L_ }
108{ 110{
109#if THREADWAIT_METHOD == THREADWAIT_CONDVAR
110 MUTEX_INIT(&done_lock);
111 SIGNAL_INIT(&done_signal);
112#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
113
114#if HAVE_LANE_TRACKING() 111#if HAVE_LANE_TRACKING()
115 if (U->tracking_first) 112 if (U->tracking_first)
116 { 113 {
@@ -119,6 +116,29 @@ Lane::Lane(Universe* U_, lua_State* L_)
119#endif // HAVE_LANE_TRACKING() 116#endif // HAVE_LANE_TRACKING()
120} 117}
121 118
119bool Lane::waitForCompletion(lua_Duration duration_)
120{
121 std::chrono::time_point<std::chrono::steady_clock> until{ std::chrono::time_point<std::chrono::steady_clock>::max() };
122 if (duration_.count() >= 0.0)
123 {
124 until = std::chrono::steady_clock::now() + std::chrono::duration_cast<std::chrono::steady_clock::duration>(duration_);
125 }
126
127 std::unique_lock lock{ m_done_mutex };
128 //std::stop_token token{ m_thread.get_stop_token() };
129 //return m_done_signal.wait_until(lock, token, secs_, [this](){ return m_status >= Lane::Done; });
130 return m_done_signal.wait_until(lock, until, [this](){ return m_status >= Lane::Done; });
131}
132
133static void lane_main(Lane* lane);
134void Lane::startThread(int priority_)
135{
136 m_thread = std::jthread([this]() { lane_main(this); });
137 if (priority_ != THREAD_PRIO_DEFAULT)
138 {
139 JTHREAD_SET_PRIORITY(m_thread, priority_, U->m_sudo);
140 }
141}
122 142
123/* Do you want full call stacks, or just the line where the error happened? 143/* Do you want full call stacks, or just the line where the error happened?
124* 144*
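Lane::waitForCompletion() above is the classic condition-variable wait with a deadline and a predicate, and startThread() pairs it with a std::jthread. A reduced, lanes-free sketch of the same mechanism (all names below are illustrative):

#include <chrono>
#include <condition_variable>
#include <mutex>
#include <thread>

using namespace std::chrono_literals;

std::mutex done_mutex;
std::condition_variable done_signal;
int status{ 0 }; // 0 = running, 1 = done

// like Lane::waitForCompletion(): a negative duration means "wait forever"
[[nodiscard]] bool wait_for_completion(std::chrono::duration<double> duration_)
{
    auto until{ std::chrono::steady_clock::time_point::max() };
    if (duration_.count() >= 0.0)
    {
        until = std::chrono::steady_clock::now() + std::chrono::duration_cast<std::chrono::steady_clock::duration>(duration_);
    }
    std::unique_lock lock{ done_mutex };
    return done_signal.wait_until(lock, until, []() { return status >= 1; });
}

int main()
{
    std::jthread worker{ []()
    {
        std::this_thread::sleep_for(50ms); // stands in for the lane body
        std::lock_guard lock{ done_mutex };
        status = 1;
        done_signal.notify_one(); // wake up the waiter while the mutex is held
    } };
    return wait_for_completion(2s) ? 0 : 1;
}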
@@ -142,7 +162,7 @@ static void securize_debug_threadname(lua_State* L, Lane* lane_)
142} 162}
143 163
144#if ERROR_FULL_STACK 164#if ERROR_FULL_STACK
145static int lane_error( lua_State* L); 165[[nodiscard]] static int lane_error(lua_State* L);
146// crc64/we of string "STACKTRACE_REGKEY" generated at http://www.nitrxgen.net/hashgen/ 166// crc64/we of string "STACKTRACE_REGKEY" generated at http://www.nitrxgen.net/hashgen/
147static constexpr UniqueKey STACKTRACE_REGKEY{ 0x534af7d3226a429full }; 167static constexpr UniqueKey STACKTRACE_REGKEY{ 0x534af7d3226a429full };
148#endif // ERROR_FULL_STACK 168#endif // ERROR_FULL_STACK
@@ -168,7 +188,7 @@ static constexpr UniqueKey FINALIZER_REGKEY{ 0x188fccb8bf348e09ull };
168* Returns: true if a table was pushed 188* Returns: true if a table was pushed
169* false if no table found, not created, and nothing pushed 189* false if no table found, not created, and nothing pushed
170*/ 190*/
171static bool push_registry_table( lua_State* L, UniqueKey key, bool create) 191[[nodiscard]] static bool push_registry_table(lua_State* L, UniqueKey key, bool create)
172{ 192{
173 STACK_GROW(L, 3); 193 STACK_GROW(L, 3);
174 STACK_CHECK_START_REL(L, 0); 194 STACK_CHECK_START_REL(L, 0);
@@ -217,7 +237,7 @@ static void tracking_add(Lane* lane_)
217/* 237/*
218 * A free-running lane has ended; remove it from tracking chain 238 * A free-running lane has ended; remove it from tracking chain
219 */ 239 */
220static bool tracking_remove(Lane* lane_) 240[[nodiscard]] static bool tracking_remove(Lane* lane_)
221{ 241{
222 bool found{ false }; 242 bool found{ false };
223 std::lock_guard<std::mutex> guard{ lane_->U->tracking_cs }; 243 std::lock_guard<std::mutex> guard{ lane_->U->tracking_cs };
@@ -253,16 +273,11 @@ Lane::~Lane()
253{ 273{
254 // Clean up after a (finished) thread 274 // Clean up after a (finished) thread
255 // 275 //
256#if THREADWAIT_METHOD == THREADWAIT_CONDVAR
257 SIGNAL_FREE(&done_signal);
258 MUTEX_FREE(&done_lock);
259#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
260
261#if HAVE_LANE_TRACKING() 276#if HAVE_LANE_TRACKING()
262 if (U->tracking_first != nullptr) 277 if (U->tracking_first != nullptr)
263 { 278 {
264 // Lane was cleaned up, no need to handle at process termination 279 // Lane was cleaned up, no need to handle at process termination
265 tracking_remove(this); 280 std::ignore = tracking_remove(this);
266 } 281 }
267#endif // HAVE_LANE_TRACKING() 282#endif // HAVE_LANE_TRACKING()
268} 283}
@@ -285,10 +300,10 @@ LUAG_FUNC( set_finalizer)
285{ 300{
286 luaL_argcheck(L, lua_isfunction(L, 1), 1, "finalizer should be a function"); 301 luaL_argcheck(L, lua_isfunction(L, 1), 1, "finalizer should be a function");
287 luaL_argcheck(L, lua_gettop( L) == 1, 1, "too many arguments"); 302 luaL_argcheck(L, lua_gettop( L) == 1, 1, "too many arguments");
288 // Get the current finalizer table (if any) 303 // Get the current finalizer table (if any), create one if it doesn't exist
289 push_registry_table(L, FINALIZER_REGKEY, true /*do create if none*/); // finalizer {finalisers} 304 std::ignore = push_registry_table(L, FINALIZER_REGKEY, true); // finalizer {finalisers}
290 STACK_GROW(L, 2); 305 STACK_GROW(L, 2);
291 lua_pushinteger(L, lua_rawlen(L, -1) + 1); // finalizer {finalisers} idx 306 lua_pushinteger(L, lua_rawlen(L, -1) + 1); // finalizer {finalisers} idx
292 lua_pushvalue(L, 1); // finalizer {finalisers} idx finalizer 307 lua_pushvalue(L, 1); // finalizer {finalisers} idx finalizer
293 lua_rawset(L, -3); // finalizer {finalisers} 308 lua_rawset(L, -3); // finalizer {finalisers}
294 lua_pop(L, 2); // 309 lua_pop(L, 2); //
@@ -311,7 +326,7 @@ LUAG_FUNC( set_finalizer)
311// 326//
312static void push_stack_trace( lua_State* L, int rc_, int stk_base_); 327static void push_stack_trace( lua_State* L, int rc_, int stk_base_);
313 328
314static int run_finalizers( lua_State* L, int lua_rc) 329[[nodiscard]] static int run_finalizers(lua_State* L, int lua_rc)
315{ 330{
316 int finalizers_index; 331 int finalizers_index;
317 int n; 332 int n;
@@ -415,7 +430,7 @@ static void selfdestruct_add(Lane* lane_)
415/* 430/*
416 * A free-running lane has ended; remove it from selfdestruct chain 431 * A free-running lane has ended; remove it from selfdestruct chain
417 */ 432 */
418static bool selfdestruct_remove(Lane* lane_) 433[[nodiscard]] static bool selfdestruct_remove(Lane* lane_)
419{ 434{
420 bool found{ false }; 435 bool found{ false };
421 std::lock_guard<std::mutex> guard{ lane_->U->selfdestruct_cs }; 436 std::lock_guard<std::mutex> guard{ lane_->U->selfdestruct_cs };
@@ -434,7 +449,7 @@ static bool selfdestruct_remove(Lane* lane_)
434 *ref = lane_->selfdestruct_next; 449 *ref = lane_->selfdestruct_next;
435 lane_->selfdestruct_next = nullptr; 450 lane_->selfdestruct_next = nullptr;
436 // the terminal shutdown should wait until the lane is done with its lua_close() 451 // the terminal shutdown should wait until the lane is done with its lua_close()
437 ++lane_->U->selfdestructing_count; 452 lane_->U->selfdestructing_count.fetch_add(1, std::memory_order_release);
438 found = true; 453 found = true;
439 break; 454 break;
440 } 455 }
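The selfdestructing_count bump above uses release semantics so that the acquire load performed later in universe_gc() observes everything the lane did before decrementing. A minimal sketch of that release/acquire handshake, with a boolean flag standing in for the lua_close() work:

#include <atomic>
#include <thread>

std::atomic<int> selfdestructing_count{ 0 };
std::atomic<bool> state_closed{ false };

int main()
{
    selfdestructing_count.fetch_add(1, std::memory_order_release); // a lane enters its shutdown sequence
    std::jthread lane{ []()
    {
        state_closed.store(true, std::memory_order_relaxed);           // stands in for the lane's lua_close()
        selfdestructing_count.fetch_sub(1, std::memory_order_release); // done, terminal shutdown may proceed
    } };
    // terminal shutdown: the acquire load pairs with the release decrement above
    while (selfdestructing_count.load(std::memory_order_acquire) > 0)
    {
        std::this_thread::yield();
    }
    return state_closed.load(std::memory_order_relaxed) ? 0 : 1;
}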
@@ -450,29 +465,30 @@ static bool selfdestruct_remove(Lane* lane_)
450/* 465/*
451* Process end; cancel any still free-running threads 466* Process end; cancel any still free-running threads
452*/ 467*/
453static int universe_gc( lua_State* L) 468[[nodiscard]] static int universe_gc(lua_State* L)
454{ 469{
455 Universe* const U{ lua_tofulluserdata<Universe>(L, 1) }; 470 Universe* const U{ lua_tofulluserdata<Universe>(L, 1) };
471 lua_Duration const shutdown_timeout{ lua_tonumber(L, lua_upvalueindex(1)) };
472 [[maybe_unused]] char const* const op_string{ lua_tostring(L, lua_upvalueindex(2)) };
473 CancelOp const op{ which_cancel_op(op_string) };
456 474
457 while (U->selfdestruct_first != SELFDESTRUCT_END) // true at most once! 475 if (U->selfdestruct_first != SELFDESTRUCT_END)
458 { 476 {
477
459 // Signal _all_ still running threads to exit (including the timer thread) 478 // Signal _all_ still running threads to exit (including the timer thread)
460 // 479 //
461 { 480 {
462 std::lock_guard<std::mutex> guard{ U->selfdestruct_cs }; 481 std::lock_guard<std::mutex> guard{ U->selfdestruct_cs };
463 Lane* lane{ U->selfdestruct_first }; 482 Lane* lane{ U->selfdestruct_first };
483 lua_Duration timeout{ 1us };
464 while (lane != SELFDESTRUCT_END) 484 while (lane != SELFDESTRUCT_END)
465 { 485 {
466 // attempt a regular unforced hard cancel with a small timeout 486 // attempt the requested cancel with a small timeout.
467 bool const cancelled{ THREAD_ISNULL(lane->thread) || thread_cancel(L, lane, CancelOp::Hard, 0.0001, false, 0.0) != CancelResult::Timeout }; 487 // if waiting on a linda, they will raise a cancel_error.
468 // if we failed, and we know the thread is waiting on a linda 488 // if a cancellation hook is desired, it will be installed to try to raise an error
469 if (cancelled == false && lane->status == WAITING && lane->waiting_on != nullptr) 489 if (lane->m_thread.joinable())
470 { 490 {
471 // signal the linda to wake up the thread so that it can react to the cancel query 491 std::ignore = thread_cancel(lane, op, 1, timeout, true);
472 // let us hope we never land here with a pointer on a linda that has been destroyed...
473 SIGNAL_T* const waiting_on{ lane->waiting_on };
474 // lane->waiting_on = nullptr; // useful, or not?
475 SIGNAL_ALL(waiting_on);
476 } 492 }
477 lane = lane->selfdestruct_next; 493 lane = lane->selfdestruct_next;
478 } 494 }
@@ -480,98 +496,52 @@ static int universe_gc( lua_State* L)
480 496
481 // When noticing their cancel, the lanes will remove themselves from 497 // When noticing their cancel, the lanes will remove themselves from
482 // the selfdestruct chain. 498 // the selfdestruct chain.
483
484 // TBD: Not sure if Windows (multi core) will require the timed approach,
485 // or single Yield. I don't have machine to test that (so leaving
486 // for timed approach). -- AKa 25-Oct-2008
487
488 // OS X 10.5 (Intel) needs more to avoid segfaults.
489 //
490 // "make test" is okay. 100's of "make require" are okay.
491 //
492 // Tested on MacBook Core Duo 2GHz and 10.5.5:
493 // -- AKa 25-Oct-2008
494 //
495 { 499 {
496 lua_Number const shutdown_timeout = lua_tonumber(L, lua_upvalueindex(1)); 500 std::chrono::time_point<std::chrono::steady_clock> t_until{ std::chrono::steady_clock::now() + std::chrono::duration_cast<std::chrono::steady_clock::duration>(shutdown_timeout) };
497 double const t_until = now_secs() + shutdown_timeout;
498 501
499 while (U->selfdestruct_first != SELFDESTRUCT_END) 502 while (U->selfdestruct_first != SELFDESTRUCT_END)
500 { 503 {
501 YIELD(); // give threads time to act on their cancel 504 // give threads time to act on their cancel
505 std::this_thread::yield();
506 // count the number of cancelled thread that didn't have the time to act yet
507 int n{ 0 };
502 { 508 {
503 // count the number of cancelled thread that didn't have the time to act yet 509 std::lock_guard<std::mutex> guard{ U->selfdestruct_cs };
504 int n = 0; 510 Lane* lane{ U->selfdestruct_first };
505 double t_now = 0.0; 511 while (lane != SELFDESTRUCT_END)
506 {
507 std::lock_guard<std::mutex> guard{ U->selfdestruct_cs };
508 Lane* lane{ U->selfdestruct_first };
509 while (lane != SELFDESTRUCT_END)
510 {
511 if (lane->cancel_request == CancelRequest::Hard)
512 ++n;
513 lane = lane->selfdestruct_next;
514 }
515 }
516 // if timeout elapsed, or we know all threads have acted, stop waiting
517 t_now = now_secs();
518 if (n == 0 || (t_now >= t_until))
519 { 512 {
520 DEBUGSPEW_CODE(fprintf(stderr, "%d uncancelled lane(s) remain after waiting %fs at process end.\n", n, shutdown_timeout - (t_until - t_now))); 513 if (lane->cancel_request != CancelRequest::None)
521 break; 514 ++n;
515 lane = lane->selfdestruct_next;
522 } 516 }
523 } 517 }
518 // if timeout elapsed, or we know all threads have acted, stop waiting
519 std::chrono::time_point<std::chrono::steady_clock> t_now = std::chrono::steady_clock::now();
520 if (n == 0 || (t_now >= t_until))
521 {
522 DEBUGSPEW_CODE(fprintf(stderr, "%d uncancelled lane(s) remain after waiting %fs at process end.\n", n, shutdown_timeout.count()));
523 break;
524 }
524 } 525 }
525 } 526 }
526 527
527 // If some lanes are currently cleaning after themselves, wait until they are done. 528 // If some lanes are currently cleaning after themselves, wait until they are done.
528 // They are no longer listed in the selfdestruct chain, but they still have to lua_close(). 529 // They are no longer listed in the selfdestruct chain, but they still have to lua_close().
529 while (U->selfdestructing_count > 0) 530 while (U->selfdestructing_count.load(std::memory_order_acquire) > 0)
530 { 531 {
531 YIELD(); 532 std::this_thread::yield();
532 }
533
534 //---
535 // Kill the still free running threads
536 //
537 if (U->selfdestruct_first != SELFDESTRUCT_END)
538 {
539 unsigned int n = 0;
540 // first thing we did was to raise the linda signals the threads were waiting on (if any)
541 // therefore, any well-behaved thread should be in CANCELLED state
542 // these are not running, and the state can be closed
543 {
544 std::lock_guard<std::mutex> guard{ U->selfdestruct_cs };
545 Lane* lane{ U->selfdestruct_first };
546 while (lane != SELFDESTRUCT_END)
547 {
548 Lane* const next_s{ lane->selfdestruct_next };
549 lane->selfdestruct_next = nullptr; // detach from selfdestruct chain
550 if (!THREAD_ISNULL(lane->thread)) // can be nullptr if previous 'soft' termination succeeded
551 {
552 THREAD_KILL(&lane->thread);
553#if THREADAPI == THREADAPI_PTHREAD
554 // pthread: make sure the thread is really stopped!
555 THREAD_WAIT(&lane->thread, -1, &lane->done_signal, &lane->done_lock, &lane->status);
556#endif // THREADAPI == THREADAPI_PTHREAD
557 }
558 // NO lua_close() in this case because we don't know where execution of the state was interrupted
559 delete lane;
560 lane = next_s;
561 ++n;
562 }
563 U->selfdestruct_first = SELFDESTRUCT_END;
564 }
565
566 DEBUGSPEW_CODE(fprintf(stderr, "Killed %d lane(s) at process end.\n", n));
567 } 533 }
568 } 534 }
569 535
570 // If some lanes are currently cleaning after themselves, wait until they are done. 536 // If after all this, we still have some free-running lanes, it's an external user error, they should have stopped appropriately
571 // They are no longer listed in the selfdestruct chain, but they still have to lua_close().
572 while( U->selfdestructing_count > 0)
573 { 537 {
574 YIELD(); 538 std::lock_guard<std::mutex> guard{ U->selfdestruct_cs };
539 Lane* lane{ U->selfdestruct_first };
540 if (lane != SELFDESTRUCT_END)
541 {
542 // this causes a leak because we don't call U's destructor (which could be bad if the still running lanes are accessing it)
543 luaL_error(L, "Zombie thread %s refuses to die!", lane->debug_name); // doesn't return
544 }
575 } 545 }
576 546
577 // necessary so that calling free_deep_prelude doesn't crash because linda_id expects a linda lightuserdata at absolute slot 1 547 // necessary so that calling free_deep_prelude doesn't crash because linda_id expects a linda lightuserdata at absolute slot 1
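The shutdown wait in universe_gc() now combines a steady_clock deadline with std::this_thread::yield(). A standalone sketch of that waiting strategy, assuming lua_Duration boils down to std::chrono::duration<double> and using an atomic counter in place of the selfdestruct chain:

#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

using demo_Duration = std::chrono::duration<double>; // stand-in for the lua_Duration alias used in the sources

int main()
{
    std::atomic<int> pending{ 1 }; // stands in for the number of lanes still on the selfdestruct chain
    std::jthread lane{ [&pending]()
    {
        std::this_thread::sleep_for(std::chrono::milliseconds{ 20 }); // the lane reacting to its cancel request
        pending = 0;
    } };

    demo_Duration const shutdown_timeout{ 0.5 };
    auto const t_until{ std::chrono::steady_clock::now() + std::chrono::duration_cast<std::chrono::steady_clock::duration>(shutdown_timeout) };
    while (pending.load() > 0)
    {
        std::this_thread::yield(); // give lanes time to act on their cancel request
        if (std::chrono::steady_clock::now() >= t_until)
        {
            std::printf("%d uncancelled lane(s) remain after waiting %fs\n", pending.load(), shutdown_timeout.count());
            break;
        }
    }
    return 0;
}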
@@ -668,7 +638,7 @@ LUAG_FUNC( set_error_reporting)
668 return 0; 638 return 0;
669} 639}
670 640
671static int lane_error(lua_State* L) 641[[nodiscard]] static int lane_error(lua_State* L)
672{ 642{
673 // error message (any type) 643 // error message (any type)
674 STACK_CHECK_START_ABS(L, 1); // some_error 644 STACK_CHECK_START_ABS(L, 1); // some_error
@@ -814,7 +784,7 @@ LUAG_FUNC(get_debug_threadname)
814 784
815LUAG_FUNC(set_thread_priority) 785LUAG_FUNC(set_thread_priority)
816{ 786{
817 int const prio{ (int) luaL_checkinteger(L, 1) }; 787 lua_Integer const prio{ luaL_checkinteger(L, 1) };
818 // public Lanes API accepts a generic range -3/+3 788 // public Lanes API accepts a generic range -3/+3
819 // that will be remapped into the platform-specific scheduler priority scheme 789 // that will be remapped into the platform-specific scheduler priority scheme
820 // On some platforms, -3 is equivalent to -2 and +3 to +2 790 // On some platforms, -3 is equivalent to -2 and +3 to +2
@@ -822,7 +792,7 @@ LUAG_FUNC(set_thread_priority)
822 { 792 {
823 return luaL_error(L, "priority out of range: %d..+%d (%d)", THREAD_PRIO_MIN, THREAD_PRIO_MAX, prio); 793 return luaL_error(L, "priority out of range: %d..+%d (%d)", THREAD_PRIO_MIN, THREAD_PRIO_MAX, prio);
824 } 794 }
825 THREAD_SET_PRIORITY(prio); 795 THREAD_SET_PRIORITY(static_cast<int>(prio), universe_get(L)->m_sudo);
826 return 0; 796 return 0;
827} 797}
828 798
@@ -872,32 +842,18 @@ static char const* get_errcode_name( int _code)
872} 842}
873#endif // USE_DEBUG_SPEW() 843#endif // USE_DEBUG_SPEW()
874 844
875#if THREADWAIT_METHOD == THREADWAIT_CONDVAR // implies THREADAPI == THREADAPI_PTHREAD 845static void lane_main(Lane* lane)
876static void thread_cleanup_handler(void* opaque)
877{
878 Lane* lane{ (Lane*) opaque };
879 MUTEX_LOCK(&lane->done_lock);
880 lane->status = CANCELLED;
881 SIGNAL_ONE(&lane->done_signal); // wake up master (while 'lane->done_lock' is on)
882 MUTEX_UNLOCK(&lane->done_lock);
883}
884#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
885
886static THREAD_RETURN_T THREAD_CALLCONV lane_main(void* vs)
887{ 846{
888 Lane* lane{ (Lane*) vs };
889 lua_State* const L{ lane->L }; 847 lua_State* const L{ lane->L };
890 // wait until the launching thread has finished preparing L 848 // wait until the launching thread has finished preparing L
891 lane->m_ready.wait(); 849 lane->m_ready.wait();
892 int rc{ LUA_ERRRUN }; 850 int rc{ LUA_ERRRUN };
893 if (lane->status == PENDING) // nothing wrong happened during preparation, we can work 851 if (lane->m_status == Lane::Pending) // nothing wrong happened during preparation, we can work
894 { 852 {
895 // At this point, the lane function and arguments are on the stack 853 // At this point, the lane function and arguments are on the stack
896 int const nargs{ lua_gettop(L) - 1 }; 854 int const nargs{ lua_gettop(L) - 1 };
897 DEBUGSPEW_CODE(Universe* U = universe_get(L)); 855 DEBUGSPEW_CODE(Universe* U = universe_get(L));
898 THREAD_MAKE_ASYNCH_CANCELLABLE(); 856 lane->m_status = Lane::Running; // Pending -> Running
899 THREAD_CLEANUP_PUSH(thread_cleanup_handler, lane);
900 lane->status = RUNNING; // PENDING -> RUNNING
901 857
902 // Tie "set_finalizer()" to the state 858 // Tie "set_finalizer()" to the state
903 lua_pushcfunction(L, LG_set_finalizer); 859 lua_pushcfunction(L, LG_set_finalizer);
@@ -947,18 +903,19 @@ static THREAD_RETURN_T THREAD_CALLCONV lane_main(void* vs)
947 // the finalizer generated an error, and left its own error message [and stack trace] on the stack 903 // the finalizer generated an error, and left its own error message [and stack trace] on the stack
948 rc = rc2; // we're overruling the earlier script error or normal return 904 rc = rc2; // we're overruling the earlier script error or normal return
949 } 905 }
950 lane->waiting_on = nullptr; // just in case 906 lane->m_waiting_on = nullptr; // just in case
951 if (selfdestruct_remove(lane)) // check and remove (under lock!) 907 if (selfdestruct_remove(lane)) // check and remove (under lock!)
952 { 908 {
953 // We're a free-running thread and no-one's there to clean us up. 909 // We're a free-running thread and no-one's there to clean us up.
954 //
955 lua_close(lane->L); 910 lua_close(lane->L);
956 911 lane->L = nullptr; // just in case
957 lane->U->selfdestruct_cs.lock(); 912 lane->U->selfdestruct_cs.lock();
958 // done with lua_close(), terminal shutdown sequence may proceed 913 // done with lua_close(), terminal shutdown sequence may proceed
959 --lane->U->selfdestructing_count; 914 lane->U->selfdestructing_count.fetch_sub(1, std::memory_order_release);
960 lane->U->selfdestruct_cs.unlock(); 915 lane->U->selfdestruct_cs.unlock();
961 916
917 // we destroy our jthread member from inside the thread body, so we have to detach so that we don't try to join, as this doesn't seem a good idea
918 lane->m_thread.detach();
962 delete lane; 919 delete lane;
963 lane = nullptr; 920 lane = nullptr;
964 } 921 }
@@ -967,24 +924,15 @@ static THREAD_RETURN_T THREAD_CALLCONV lane_main(void* vs)
967 { 924 {
968 // leave results (1..top) or error message + stack trace (1..2) on the stack - master will copy them 925 // leave results (1..top) or error message + stack trace (1..2) on the stack - master will copy them
969 926
970 enum e_status st = (rc == 0) ? DONE : CANCEL_ERROR.equals(L, 1) ? CANCELLED : ERROR_ST; 927 Lane::Status st = (rc == LUA_OK) ? Lane::Done : CANCEL_ERROR.equals(L, 1) ? Lane::Cancelled : Lane::Error;
971 928
972 // Posix no PTHREAD_TIMEDJOIN:
973 // 'done_lock' protects the -> DONE|ERROR_ST|CANCELLED state change
974 //
975#if THREADWAIT_METHOD == THREADWAIT_CONDVAR
976 MUTEX_LOCK(&lane->done_lock);
977 { 929 {
978#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR 930 // 'm_done_mutex' protects the -> Done|Error|Cancelled state change
979 lane->status = st; 931 std::lock_guard lock{ lane->m_done_mutex };
980#if THREADWAIT_METHOD == THREADWAIT_CONDVAR 932 lane->m_status = st;
981 SIGNAL_ONE(&lane->done_signal); // wake up master (while 'lane->done_lock' is on) 933 lane->m_done_signal.notify_one();// wake up master (while 'lane->m_done_mutex' is on)
982 } 934 }
983 MUTEX_UNLOCK(&lane->done_lock);
984#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
985 } 935 }
986 THREAD_CLEANUP_POP(false);
987 return 0; // ignored
988} 936}
989 937
990// ################################################################################################# 938// #################################################################################################
@@ -1000,13 +948,13 @@ LUAG_FUNC(require)
1000 DEBUGSPEW_CODE(Universe* U = universe_get(L)); 948 DEBUGSPEW_CODE(Universe* U = universe_get(L));
1001 STACK_CHECK_START_REL(L, 0); 949 STACK_CHECK_START_REL(L, 0);
1002 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lanes.require %s BEGIN\n" INDENT_END, name)); 950 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lanes.require %s BEGIN\n" INDENT_END, name));
1003 DEBUGSPEW_CODE(++U->debugspew_indent_depth); 951 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed));
1004 lua_pushvalue(L, lua_upvalueindex(1)); // "name" require 952 lua_pushvalue(L, lua_upvalueindex(1)); // "name" require
1005 lua_insert(L, 1); // require "name" 953 lua_insert(L, 1); // require "name"
1006 lua_call(L, nargs, 1); // module 954 lua_call(L, nargs, 1); // module
1007 populate_func_lookup_table(L, -1, name); 955 populate_func_lookup_table(L, -1, name);
1008 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lanes.require %s END\n" INDENT_END, name)); 956 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lanes.require %s END\n" INDENT_END, name));
1009 DEBUGSPEW_CODE(--U->debugspew_indent_depth); 957 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed));
1010 STACK_CHECK(L, 0); 958 STACK_CHECK(L, 0);
1011 return 1; 959 return 1;
1012} 960}
@@ -1019,17 +967,17 @@ LUAG_FUNC(require)
1019LUAG_FUNC(register) 967LUAG_FUNC(register)
1020{ 968{
1021 char const* name = luaL_checkstring(L, 1); 969 char const* name = luaL_checkstring(L, 1);
1022 int const mod_type = lua_type(L, 2); 970 LuaType const mod_type{ lua_type_as_enum(L, 2) };
1023 // ignore extra parameters, just in case 971 // ignore extra parameters, just in case
1024 lua_settop(L, 2); 972 lua_settop(L, 2);
1025 luaL_argcheck(L, (mod_type == LUA_TTABLE) || (mod_type == LUA_TFUNCTION), 2, "unexpected module type"); 973 luaL_argcheck(L, (mod_type == LuaType::TABLE) || (mod_type == LuaType::FUNCTION), 2, "unexpected module type");
1026 DEBUGSPEW_CODE(Universe* U = universe_get(L)); 974 DEBUGSPEW_CODE(Universe* U = universe_get(L));
1027 STACK_CHECK_START_REL(L, 0); // "name" mod_table 975 STACK_CHECK_START_REL(L, 0); // "name" mod_table
1028 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lanes.register %s BEGIN\n" INDENT_END, name)); 976 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lanes.register %s BEGIN\n" INDENT_END, name));
1029 DEBUGSPEW_CODE(++U->debugspew_indent_depth); 977 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed));
1030 populate_func_lookup_table(L, -1, name); 978 populate_func_lookup_table(L, -1, name);
1031 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lanes.register %s END\n" INDENT_END, name)); 979 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lanes.register %s END\n" INDENT_END, name));
1032 DEBUGSPEW_CODE(--U->debugspew_indent_depth); 980 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed));
1033 STACK_CHECK(L, 0); 981 STACK_CHECK(L, 0);
1034 return 0; 982 return 0;
1035} 983}
@@ -1076,10 +1024,10 @@ LUAG_FUNC(lane_new)
1076 1024
1077 /* --- Create and prepare the sub state --- */ 1025 /* --- Create and prepare the sub state --- */
1078 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: setup\n" INDENT_END)); 1026 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: setup\n" INDENT_END));
1079 DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); 1027 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed));
1080 1028
1081 // populate with selected libraries at the same time 1029 // populate with selected libraries at the same time
1082 lua_State* const L2{ luaG_newstate(U, L, libs_str) }; // L // L2 1030 lua_State* const L2{ luaG_newstate(U, Source{ L }, libs_str) }; // L // L2
1083 1031
1084 // 'lane' is allocated from heap, not Lua, since its life span may surpass the handle's (if free running thread) 1032 // 'lane' is allocated from heap, not Lua, since its life span may surpass the handle's (if free running thread)
1085 Lane* const lane{ new (U) Lane{ U, L2 } }; 1033 Lane* const lane{ new (U) Lane{ U, L2 } };
@@ -1095,13 +1043,15 @@ LUAG_FUNC(lane_new)
1095 lua_State* const m_L; 1043 lua_State* const m_L;
1096 Lane* m_lane{ nullptr }; 1044 Lane* m_lane{ nullptr };
1097 int const m_gc_cb_idx; 1045 int const m_gc_cb_idx;
1046 DEBUGSPEW_CODE(Universe* const U); // for DEBUGSPEW only (hence the absence of m_ prefix)
1098 1047
1099 public: 1048 public:
1100 1049
1101 OnExit(lua_State* L_, Lane* lane_, int gc_cb_idx_) 1050 OnExit(lua_State* L_, Lane* lane_, int gc_cb_idx_ DEBUGSPEW_COMMA_PARAM(Universe* U_))
1102 : m_L{ L_ } 1051 : m_L{ L_ }
1103 , m_lane{ lane_ } 1052 , m_lane{ lane_ }
1104 , m_gc_cb_idx{ gc_cb_idx_ } 1053 , m_gc_cb_idx{ gc_cb_idx_ }
1054 DEBUGSPEW_COMMA_PARAM(U{ U_ })
1105 {} 1055 {}
1106 1056
1107 ~OnExit() 1057 ~OnExit()
@@ -1113,13 +1063,11 @@ LUAG_FUNC(lane_new)
1113 // leave a single cancel_error on the stack for the caller 1063 // leave a single cancel_error on the stack for the caller
1114 lua_settop(m_lane->L, 0); 1064 lua_settop(m_lane->L, 0);
1115 CANCEL_ERROR.pushKey(m_lane->L); 1065 CANCEL_ERROR.pushKey(m_lane->L);
1116#if THREADWAIT_METHOD == THREADWAIT_CONDVAR 1066 {
1117 MUTEX_LOCK(&m_lane->done_lock); 1067 std::lock_guard lock{ m_lane->m_done_mutex };
1118#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR 1068 m_lane->m_status = Lane::Cancelled;
1119 m_lane->status = CANCELLED; 1069 m_lane->m_done_signal.notify_one(); // wake up master (while 'lane->m_done_mutex' is on)
1120#if THREADWAIT_METHOD == THREADWAIT_CONDVAR 1070 }
1121 MUTEX_UNLOCK(&m_lane->done_lock);
1122#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
1123 // unblock the thread so that it can terminate gracefully 1071 // unblock the thread so that it can terminate gracefully
1124 m_lane->m_ready.count_down(); 1072 m_lane->m_ready.count_down();
1125 } 1073 }
@@ -1162,12 +1110,13 @@ LUAG_FUNC(lane_new)
1162 void success() 1110 void success()
1163 { 1111 {
1164 prepareUserData(); 1112 prepareUserData();
1113 m_lane->m_ready.count_down();
1165 m_lane = nullptr; 1114 m_lane = nullptr;
1166 } 1115 }
1167 } onExit{ L, lane, gc_cb_idx }; 1116 } onExit{ L, lane, gc_cb_idx DEBUGSPEW_COMMA_PARAM(U) };
1168 // launch the thread early, it will sync with a std::latch to parallelize OS thread warmup and L2 preparation 1117 // launch the thread early, it will sync with a std::latch to parallelize OS thread warmup and L2 preparation
1169 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lane_new: launching thread\n" INDENT_END)); 1118 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lane_new: launching thread\n" INDENT_END));
1170 THREAD_CREATE(&lane->thread, lane_main, lane, priority); 1119 lane->startThread(priority);
1171 1120
1172 STACK_GROW( L2, nargs + 3); // 1121 STACK_GROW( L2, nargs + 3); //
1173 STACK_CHECK_START_REL(L2, 0); 1122 STACK_CHECK_START_REL(L2, 0);
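lane_new() now launches the OS thread before L2 is fully prepared and synchronizes on the m_ready latch, released either by onExit.success() or by the error path in ~OnExit(). A reduced sketch of that latch handoff, with a string standing in for the prepared state:

#include <cstdio>
#include <latch>
#include <string>
#include <thread>

int main()
{
    std::latch ready{ 1 };
    std::string prepared_state; // stands in for the lane's lua_State being filled by the parent

    // launched early, like Lane::startThread(); the body blocks until preparation is complete
    std::jthread lane{ [&]()
    {
        ready.wait(); // lane_main() does the same on lane->m_ready
        std::printf("lane body sees: %s\n", prepared_state.c_str());
    } };

    prepared_state = "lane function + args"; // the parent keeps preparing while the OS thread warms up
    ready.count_down();                      // what onExit.success() (or the error path) does to release the lane
    return 0;
}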
@@ -1185,7 +1134,8 @@ LUAG_FUNC(lane_new)
1185 { 1134 {
1186 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lane_new: update 'package'\n" INDENT_END)); 1135 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lane_new: update 'package'\n" INDENT_END));
1187 // when copying with mode LookupMode::LaneBody, should raise an error in case of problem, not leave it on the stack 1136
1188 (void) luaG_inter_copy_package(U, L, L2, package_idx, LookupMode::LaneBody); 1137 [[maybe_unused]] InterCopyResult const ret{ luaG_inter_copy_package(U, Source{ L }, Dest{ L2 }, package_idx, LookupMode::LaneBody) };
1138 ASSERT_L(ret == InterCopyResult::Success); // either all went well, or we should not even get here
1189 } 1139 }
1190 1140
1191 // modules to require in the target lane *before* the function is transferred! 1141
@@ -1193,19 +1143,19 @@ LUAG_FUNC(lane_new)
1193 { 1143 {
1194 int nbRequired = 1; 1144 int nbRequired = 1;
1195 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: require 'required' list\n" INDENT_END)); 1145 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: require 'required' list\n" INDENT_END));
1196 DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); 1146 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed));
1197 // should not happen, was checked in lanes.lua before calling lane_new() 1147 // should not happen, was checked in lanes.lua before calling lane_new()
1198 if (lua_type(L, required_idx) != LUA_TTABLE) 1148 if (lua_type(L, required_idx) != LUA_TTABLE)
1199 { 1149 {
1200 return luaL_error(L, "expected required module list as a table, got %s", luaL_typename(L, required_idx)); 1150 luaL_error(L, "expected required module list as a table, got %s", luaL_typename(L, required_idx)); // doesn't return
1201 } 1151 }
1202 1152
1203 lua_pushnil(L); // func libs priority globals package required gc_cb [... args ...] nil 1153 lua_pushnil(L); // func libs priority globals package required gc_cb [... args ...] nil
1204 while( lua_next(L, required_idx) != 0) // func libs priority globals package required gc_cb [... args ...] n "modname" 1154 while (lua_next(L, required_idx) != 0) // func libs priority globals package required gc_cb [... args ...] n "modname"
1205 { 1155 {
1206 if (lua_type(L, -1) != LUA_TSTRING || lua_type(L, -2) != LUA_TNUMBER || lua_tonumber(L, -2) != nbRequired) 1156 if (lua_type(L, -1) != LUA_TSTRING || lua_type(L, -2) != LUA_TNUMBER || lua_tonumber(L, -2) != nbRequired)
1207 { 1157 {
1208 return luaL_error(L, "required module list should be a list of strings"); 1158 luaL_error(L, "required module list should be a list of strings"); // doesn't return
1209 } 1159 }
1210 else 1160 else
1211 { 1161 {
@@ -1219,7 +1169,7 @@ LUAG_FUNC(lane_new)
1219 if (lua_isnil( L2, -1)) 1169 if (lua_isnil( L2, -1))
1220 { 1170 {
1221 lua_pop( L2, 1); // 1171 lua_pop( L2, 1); //
1222 return luaL_error(L, "cannot pre-require modules without loading 'package' library first"); 1172 luaL_error(L, "cannot pre-require modules without loading 'package' library first"); // doesn't return
1223 } 1173 }
1224 else 1174 else
1225 { 1175 {
@@ -1227,7 +1177,10 @@ LUAG_FUNC(lane_new)
1227 if (lua_pcall( L2, 1, 1, 0) != LUA_OK) // ret/errcode 1177 if (lua_pcall( L2, 1, 1, 0) != LUA_OK) // ret/errcode
1228 { 1178 {
1229 // propagate error to main state if any 1179 // propagate error to main state if any
1230 luaG_inter_move(U, L2, L, 1, LookupMode::LaneBody); // func libs priority globals package required gc_cb [... args ...] n "modname" error 1180 std::ignore = luaG_inter_move(U
1181 , Source{ L2 }, Dest{ L }
1182 , 1, LookupMode::LaneBody
1183 ); // func libs priority globals package required gc_cb [... args ...] n "modname" error
1231 raise_lua_error(L); 1184 raise_lua_error(L);
1232 } 1185 }
1233 // after requiring the module, register the functions it exported in our name<->function database 1186 // after requiring the module, register the functions it exported in our name<->function database
@@ -1238,7 +1191,7 @@ LUAG_FUNC(lane_new)
1238 lua_pop(L, 1); // func libs priority globals package required gc_cb [... args ...] n 1191 lua_pop(L, 1); // func libs priority globals package required gc_cb [... args ...] n
1239 ++ nbRequired; 1192 ++ nbRequired;
1240 } // func libs priority globals package required gc_cb [... args ...] 1193 } // func libs priority globals package required gc_cb [... args ...]
1241 DEBUGSPEW_CODE( -- U->debugspew_indent_depth); 1194 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed));
1242 } 1195 }
1243 STACK_CHECK(L, 0); 1196 STACK_CHECK(L, 0);
1244 STACK_CHECK(L2, 0); // 1197 STACK_CHECK(L2, 0); //
@@ -1251,49 +1204,54 @@ LUAG_FUNC(lane_new)
1251 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: transfer globals\n" INDENT_END)); 1204 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: transfer globals\n" INDENT_END));
1252 if (!lua_istable(L, globals_idx)) 1205 if (!lua_istable(L, globals_idx))
1253 { 1206 {
1254 return luaL_error(L, "Expected table, got %s", luaL_typename(L, globals_idx)); 1207 luaL_error(L, "Expected table, got %s", luaL_typename(L, globals_idx)); // doesn't return
1255 } 1208 }
1256 1209
1257 DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); 1210 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed));
1258 lua_pushnil(L); // func libs priority globals package required gc_cb [... args ...] nil 1211 lua_pushnil(L); // func libs priority globals package required gc_cb [... args ...] nil
1259 // Lua 5.2 wants us to push the globals table on the stack 1212 // Lua 5.2 wants us to push the globals table on the stack
1260 lua_pushglobaltable(L2); // _G 1213 lua_pushglobaltable(L2); // _G
1261 while( lua_next(L, globals_idx)) // func libs priority globals package required gc_cb [... args ...] k v 1214 while( lua_next(L, globals_idx)) // func libs priority globals package required gc_cb [... args ...] k v
1262 { 1215 {
1263 luaG_inter_copy(U, L, L2, 2, LookupMode::LaneBody); // _G k v 1216 std::ignore = luaG_inter_copy(U, Source{ L }, Dest{ L2 }, 2, LookupMode::LaneBody); // _G k v
1264 // assign it in L2's globals table 1217 // assign it in L2's globals table
1265 lua_rawset(L2, -3); // _G 1218 lua_rawset(L2, -3); // _G
1266 lua_pop(L, 1); // func libs priority globals package required gc_cb [... args ...] k 1219 lua_pop(L, 1); // func libs priority globals package required gc_cb [... args ...] k
1267 } // func libs priority globals package required gc_cb [... args ...] 1220 } // func libs priority globals package required gc_cb [... args ...]
1268 lua_pop( L2, 1); // 1221 lua_pop( L2, 1); //
1269 1222
1270 DEBUGSPEW_CODE( -- U->debugspew_indent_depth); 1223 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed));
1271 } 1224 }
1272 STACK_CHECK(L, 0); 1225 STACK_CHECK(L, 0);
1273 STACK_CHECK(L2, 0); 1226 STACK_CHECK(L2, 0);
1274 1227
1275 // Lane main function 1228 // Lane main function
1276 if (lua_type(L, 1) == LUA_TFUNCTION) 1229 LuaType const func_type{ lua_type_as_enum(L, 1) };
1230 if (func_type == LuaType::FUNCTION)
1277 { 1231 {
1278 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: transfer lane body\n" INDENT_END)); 1232 DEBUGSPEW_CODE(fprintf( stderr, INDENT_BEGIN "lane_new: transfer lane body\n" INDENT_END));
1279 DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); 1233 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed));
1280 lua_pushvalue(L, 1); // func libs priority globals package required gc_cb [... args ...] func 1234 lua_pushvalue(L, 1); // func libs priority globals package required gc_cb [... args ...] func
1281 int const res{ luaG_inter_move(U, L, L2, 1, LookupMode::LaneBody) };// func libs priority globals package required gc_cb [... args ...] // func 1235 InterCopyResult const res{ luaG_inter_move(U, Source{ L }, Dest{ L2 }, 1, LookupMode::LaneBody) }; // func libs priority globals package required gc_cb [... args ...] // func
1282 DEBUGSPEW_CODE( -- U->debugspew_indent_depth); 1236 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed));
1283 if (res != 0) 1237 if (res != InterCopyResult::Success)
1284 { 1238 {
1285 return luaL_error(L, "tried to copy unsupported types"); 1239 luaL_error(L, "tried to copy unsupported types"); // doesn't return
1286 } 1240 }
1287 } 1241 }
1288 else if (lua_type(L, 1) == LUA_TSTRING) 1242 else if (func_type == LuaType::STRING)
1289 { 1243 {
1290 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lane_new: compile lane body\n" INDENT_END)); 1244 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lane_new: compile lane body\n" INDENT_END));
1291 // compile the string 1245 // compile the string
1292 if (luaL_loadstring(L2, lua_tostring(L, 1)) != 0) // func 1246 if (luaL_loadstring(L2, lua_tostring(L, 1)) != 0) // func
1293 { 1247 {
1294 return luaL_error(L, "error when parsing lane function code"); 1248 luaL_error(L, "error when parsing lane function code"); // doesn't return
1295 } 1249 }
1296 } 1250 }
1251 else
1252 {
1253 luaL_error(L, "Expected function, got %s", lua_typename(L, func_type)); // doesn't return
1254 }
1297 STACK_CHECK(L, 0); 1255 STACK_CHECK(L, 0);
1298 STACK_CHECK(L2, 1); 1256 STACK_CHECK(L2, 1);
1299 ASSERT_L(lua_isfunction(L2, 1)); 1257 ASSERT_L(lua_isfunction(L2, 1));
@@ -1301,14 +1259,13 @@ LUAG_FUNC(lane_new)
1301 // revive arguments 1259 // revive arguments
1302 if (nargs > 0) 1260 if (nargs > 0)
1303 { 1261 {
1304 int res; 1262 DEBUGSPEW_CODE(fprintf( stderr, INDENT_BEGIN "lane_new: transfer lane arguments\n" INDENT_END));
1305 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: transfer lane arguments\n" INDENT_END)); 1263 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed));
1306 DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); 1264 InterCopyResult const res{ luaG_inter_move(U, Source{ L }, Dest{ L2 }, nargs, LookupMode::LaneBody) }; // func libs priority globals package required gc_cb // func [... args ...]
1307 res = luaG_inter_move(U, L, L2, nargs, LookupMode::LaneBody); // func libs priority globals package required gc_cb // func [... args ...] 1265 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed));
1308 DEBUGSPEW_CODE( -- U->debugspew_indent_depth); 1266 if (res != InterCopyResult::Success)
1309 if (res != 0)
1310 { 1267 {
1311 return luaL_error(L, "tried to copy unsupported types"); 1268 luaL_error(L, "tried to copy unsupported types"); // doesn't return
1312 } 1269 }
1313 } 1270 }
1314 STACK_CHECK(L, -nargs); 1271 STACK_CHECK(L, -nargs);
@@ -1323,8 +1280,7 @@ LUAG_FUNC(lane_new)
1323 onExit.success(); 1280 onExit.success();
1324 // we should have the lane userdata on top of the stack 1281 // we should have the lane userdata on top of the stack
1325 STACK_CHECK(L, 1); 1282 STACK_CHECK(L, 1);
1326 lane->m_ready.count_down(); 1283 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed));
1327 DEBUGSPEW_CODE(--U->debugspew_indent_depth);
1328 return 1; 1284 return 1;
1329} 1285}
1330 1286
@@ -1342,10 +1298,10 @@ LUAG_FUNC(lane_new)
1342// and the issue of canceling/killing threads at gc is not very nice, either 1298// and the issue of canceling/killing threads at gc is not very nice, either
1343// (would easily cause waits at gc cycle, which we don't want). 1299// (would easily cause waits at gc cycle, which we don't want).
1344// 1300//
1345static int lane_gc(lua_State* L) 1301[[nodiscard]] static int lane_gc(lua_State* L)
1346{ 1302{
1347 bool have_gc_cb{ false }; 1303 bool have_gc_cb{ false };
1348 Lane* lane{ lua_toLane(L, 1) }; // ud 1304 Lane* const lane{ lua_toLane(L, 1) }; // ud
1349 1305
1350 // if there a gc callback? 1306 // if there a gc callback?
1351 lua_getiuservalue(L, 1, 1); // ud uservalue 1307 lua_getiuservalue(L, 1, 1); // ud uservalue
@@ -1363,30 +1319,7 @@ static int lane_gc(lua_State* L)
1363 } 1319 }
1364 1320
1365 // We can read 'lane->status' without locks, but not wait for it 1321 // We can read 'lane->status' without locks, but not wait for it
1366 // test Killed state first, as it doesn't need to enter the selfdestruct chain 1322 if (lane->m_status < Lane::Done)
1367 if (lane->mstatus == Lane::Killed)
1368 {
1369 // Make sure a kill has proceeded, before cleaning up the data structure.
1370 //
1371 // NO lua_close() in this case because we don't know where execution of the state was interrupted
1372 DEBUGSPEW_CODE(fprintf(stderr, "** Joining with a killed thread (needs testing) **"));
1373 // make sure the thread is no longer running, just like thread_join()
1374 if (!THREAD_ISNULL(lane->thread))
1375 {
1376 THREAD_WAIT(&lane->thread, -1, &lane->done_signal, &lane->done_lock, &lane->status);
1377 }
1378 if (lane->status >= DONE && lane->L)
1379 {
1380 // we know the thread was killed while the Lua VM was not doing anything: we should be able to close it without crashing
1381 // now, thread_cancel() will not forcefully kill a lane with lane->status >= DONE, so I am not sure it can ever happen
1382 lua_close(lane->L);
1383 lane->L = nullptr;
1384 // just in case, but s will be freed soon so...
1385 lane->debug_name = "<gc>";
1386 }
1387 DEBUGSPEW_CODE(fprintf(stderr, "** Joined ok **"));
1388 }
1389 else if (lane->status < DONE)
1390 { 1323 {
1391 // still running: will have to be cleaned up later 1324 // still running: will have to be cleaned up later
1392 selfdestruct_add(lane); 1325 selfdestruct_add(lane);
@@ -1431,29 +1364,27 @@ static int lane_gc(lua_State* L)
1431// / "error" finished at an error, error value is there 1364// / "error" finished at an error, error value is there
1432// / "cancelled" execution cancelled by M (state gone) 1365// / "cancelled" execution cancelled by M (state gone)
1433// 1366//
1434static char const * thread_status_string(Lane* lane_) 1367[[nodiscard]] static char const* thread_status_string(Lane* lane_)
1435{ 1368{
1436 enum e_status const st{ lane_->status }; // read just once (volatile) 1369 Lane::Status const st{ lane_->m_status }; // read just once (volatile)
1437 char const* str = 1370 char const* str =
1438 (lane_->mstatus == Lane::Killed) ? "killed" : // new to v3.3.0! 1371 (st == Lane::Pending) ? "pending" :
1439 (st == PENDING) ? "pending" : 1372 (st == Lane::Running) ? "running" : // like in 'co.status()'
1440 (st == RUNNING) ? "running" : // like in 'co.status()' 1373 (st == Lane::Waiting) ? "waiting" :
1441 (st == WAITING) ? "waiting" : 1374 (st == Lane::Done) ? "done" :
1442 (st == DONE) ? "done" : 1375 (st == Lane::Error) ? "error" :
1443 (st == ERROR_ST) ? "error" : 1376 (st == Lane::Cancelled) ? "cancelled" : nullptr;
1444 (st == CANCELLED) ? "cancelled" : nullptr;
1445 return str; 1377 return str;
1446} 1378}
1447 1379
1448// ################################################################################################# 1380// #################################################################################################
1449 1381
1450int push_thread_status(lua_State* L, Lane* lane_) 1382void push_thread_status(lua_State* L, Lane* lane_)
1451{ 1383{
1452 char const* const str{ thread_status_string(lane_) }; 1384 char const* const str{ thread_status_string(lane_) };
1453 ASSERT_L(str); 1385 ASSERT_L(str);
1454 1386
1455 lua_pushstring(L, str); 1387 std::ignore = lua_pushstring(L, str);
1456 return 1;
1457} 1388}
1458 1389
1459// ################################################################################################# 1390// #################################################################################################
@@ -1469,9 +1400,10 @@ int push_thread_status(lua_State* L, Lane* lane_)
1469LUAG_FUNC(thread_join) 1400LUAG_FUNC(thread_join)
1470{ 1401{
1471 Lane* const lane{ lua_toLane(L, 1) }; 1402 Lane* const lane{ lua_toLane(L, 1) };
1472 lua_Number const wait_secs{ luaL_optnumber(L, 2, -1.0) }; 1403 lua_Duration const duration{ luaL_optnumber(L, 2, -1.0) };
1473 lua_State* const L2{ lane->L }; 1404 lua_State* const L2{ lane->L };
1474 bool const done{ THREAD_ISNULL(lane->thread) || THREAD_WAIT(&lane->thread, wait_secs, &lane->done_signal, &lane->done_lock, &lane->status) }; 1405
1406 bool const done{ !lane->m_thread.joinable() || lane->waitForCompletion(duration) };
1475 if (!done || !L2) 1407 if (!done || !L2)
1476 { 1408 {
1477 STACK_GROW(L, 2); 1409 STACK_GROW(L, 2);
@@ -1481,61 +1413,50 @@ LUAG_FUNC(thread_join)
1481 } 1413 }
1482 1414
1483 STACK_CHECK_START_REL(L, 0); 1415 STACK_CHECK_START_REL(L, 0);
1484 // Thread is DONE/ERROR_ST/CANCELLED; all ours now 1416 // Thread is Done/Error/Cancelled; all ours now
1485 1417
1486 int ret{ 0 }; 1418 int ret{ 0 };
1487 if (lane->mstatus == Lane::Killed) // OS thread was killed if thread_cancel was forced 1419 Universe* const U{ lane->U };
1488 { 1420 // debug_name is a pointer to string possibly interned in the lane's state, that no longer exists when the state is closed
1489 // in that case, even if the thread was killed while DONE/ERROR_ST/CANCELLED, ignore regular return values 1421 // so store it in the userdata uservalue at a key that can't possibly collide
1490 STACK_GROW(L, 2); 1422 securize_debug_threadname(L, lane);
1491 lua_pushnil(L); 1423 switch (lane->m_status)
1492 lua_pushliteral(L, "killed");
1493 ret = 2;
1494 }
1495 else
1496 { 1424 {
1497 Universe* const U{ lane->U }; 1425 case Lane::Done:
1498 // debug_name is a pointer to string possibly interned in the lane's state, that no longer exists when the state is closed
1499 // so store it in the userdata uservalue at a key that can't possibly collide
1500 securize_debug_threadname(L, lane);
1501 switch (lane->status)
1502 { 1426 {
1503 case DONE: 1427 int const n{ lua_gettop(L2) }; // whole L2 stack
1428 if ((n > 0) && (luaG_inter_move(U, Source{ L2 }, Dest{ L }, n, LookupMode::LaneBody) != InterCopyResult::Success))
1504 { 1429 {
1505 int const n{ lua_gettop(L2) }; // whole L2 stack 1430 luaL_error(L, "tried to copy unsupported types"); // doesn't return
1506 if ((n > 0) && (luaG_inter_move(U, L2, L, n, LookupMode::LaneBody) != 0))
1507 {
1508 return luaL_error(L, "tried to copy unsupported types");
1509 }
1510 ret = n;
1511 } 1431 }
1512 break; 1432 ret = n;
1433 }
1434 break;
1513 1435
1514 case ERROR_ST: 1436 case Lane::Error:
1437 {
1438 int const n{ lua_gettop(L2) };
1439 STACK_GROW(L, 3);
1440 lua_pushnil(L);
1441 // even when ERROR_FULL_STACK, if the error is not LUA_ERRRUN, the handler wasn't called, and we only have 1 error message on the stack ...
1442 if (luaG_inter_move(U, Source{ L2 }, Dest{ L }, n, LookupMode::LaneBody) != InterCopyResult::Success) // nil "err" [trace]
1515 { 1443 {
1516 int const n{ lua_gettop(L2) }; 1444 luaL_error(L, "tried to copy unsupported types: %s", lua_tostring(L, -n)); // doesn't return
1517 STACK_GROW(L, 3);
1518 lua_pushnil(L);
1519 // even when ERROR_FULL_STACK, if the error is not LUA_ERRRUN, the handler wasn't called, and we only have 1 error message on the stack ...
1520 if (luaG_inter_move(U, L2, L, n, LookupMode::LaneBody) != 0) // nil "err" [trace]
1521 {
1522 return luaL_error(L, "tried to copy unsupported types: %s", lua_tostring(L, -n));
1523 }
1524 ret = 1 + n;
1525 } 1445 }
1526 break; 1446 ret = 1 + n;
1447 }
1448 break;
1527 1449
1528 case CANCELLED: 1450 case Lane::Cancelled:
1529 ret = 0; 1451 ret = 0;
1530 break; 1452 break;
1531 1453
1532 default: 1454 default:
1533 DEBUGSPEW_CODE(fprintf(stderr, "Status: %d\n", lane->status)); 1455 DEBUGSPEW_CODE(fprintf(stderr, "Status: %d\n", lane->m_status));
1534 ASSERT_L(false); 1456 ASSERT_L(false);
1535 ret = 0; 1457 ret = 0;
1536 }
1537 lua_close(L2);
1538 } 1458 }
1459 lua_close(L2);
1539 lane->L = nullptr; 1460 lane->L = nullptr;
1540 STACK_CHECK(L, ret); 1461 STACK_CHECK(L, ret);
1541 return ret; 1462 return ret;
@@ -1591,21 +1512,18 @@ LUAG_FUNC(thread_index)
1591 lua_pushcfunction(L, LG_thread_join); 1512 lua_pushcfunction(L, LG_thread_join);
1592 lua_pushvalue(L, UD); 1513 lua_pushvalue(L, UD);
1593 lua_call(L, 1, LUA_MULTRET); // all return values are on the stack, at slots 4+ 1514 lua_call(L, 1, LUA_MULTRET); // all return values are on the stack, at slots 4+
1594 switch (lane->status) 1515 switch (lane->m_status)
1595 { 1516 {
1596 default: 1517 default:
1597 if (lane->mstatus != Lane::Killed) 1518 // this is an internal error, we probably never get here
1598 { 1519 lua_settop(L, 0);
1599 // this is an internal error, we probably never get here 1520 lua_pushliteral(L, "Unexpected status: ");
1600 lua_settop(L, 0); 1521 lua_pushstring(L, thread_status_string(lane));
1601 lua_pushliteral(L, "Unexpected status: "); 1522 lua_concat(L, 2);
1602 lua_pushstring(L, thread_status_string(lane)); 1523 raise_lua_error(L);
1603 lua_concat(L, 2);
1604 raise_lua_error(L);
1605 }
1606 [[fallthrough]]; // fall through if we are killed, as we got nil, "killed" on the stack 1524 [[fallthrough]]; // fall through if we are killed, as we got nil, "killed" on the stack
1607 1525
1608 case DONE: // got regular return values 1526 case Lane::Done: // got regular return values
1609 { 1527 {
1610 int const nvalues{ lua_gettop(L) - 3 }; 1528 int const nvalues{ lua_gettop(L) - 3 };
1611 for (int i = nvalues; i > 0; --i) 1529 for (int i = nvalues; i > 0; --i)
@@ -1616,7 +1534,7 @@ LUAG_FUNC(thread_index)
1616 } 1534 }
1617 break; 1535 break;
1618 1536
1619 case ERROR_ST: // got 3 values: nil, errstring, callstack table 1537 case Lane::Error: // got 3 values: nil, errstring, callstack table
1620 // me[-2] could carry the stack table, but even 1538 // me[-2] could carry the stack table, but even
1621 // me[-1] is rather unnecessary (and undocumented); 1539 // me[-1] is rather unnecessary (and undocumented);
1622 // use ':join()' instead. --AKa 22-Jan-2009 1540 // use ':join()' instead. --AKa 22-Jan-2009
@@ -1627,7 +1545,7 @@ LUAG_FUNC(thread_index)
1627 lua_rawset(L, USR); 1545 lua_rawset(L, USR);
1628 break; 1546 break;
1629 1547
1630 case CANCELLED: 1548 case Lane::Cancelled:
1631 // do nothing 1549 // do nothing
1632 break; 1550 break;
1633 } 1551 }
@@ -1669,11 +1587,12 @@ LUAG_FUNC(thread_index)
1669 } 1587 }
1670 if (lua_type(L, KEY) == LUA_TSTRING) 1588 if (lua_type(L, KEY) == LUA_TSTRING)
1671 { 1589 {
1672 char const * const keystr = lua_tostring(L, KEY); 1590 char const* const keystr{ lua_tostring(L, KEY) };
1673 lua_settop(L, 2); // keep only our original arguments on the stack 1591 lua_settop(L, 2); // keep only our original arguments on the stack
1674 if (strcmp( keystr, "status") == 0) 1592 if (strcmp( keystr, "status") == 0)
1675 { 1593 {
1676 return push_thread_status(L, lane); // push the string representing the status 1594 push_thread_status(L, lane); // push the string representing the status
1595 return 1;
1677 } 1596 }
1678 // return UD.metatable[key] 1597 // return UD.metatable[key]
1679 lua_getmetatable(L, UD); // UD KEY mt 1598 lua_getmetatable(L, UD); // UD KEY mt
@@ -1713,20 +1632,20 @@ LUAG_FUNC(threads)
1713 { 1632 {
1714 Lane* lane{ U->tracking_first }; 1633 Lane* lane{ U->tracking_first };
1715 int index = 0; 1634 int index = 0;
1716 lua_newtable(L); // {} 1635 lua_newtable(L); // {}
1717 while (lane != TRACKING_END) 1636 while (lane != TRACKING_END)
1718 { 1637 {
1719 // insert a { name, status } tuple, so that several lanes with the same name can't clobber each other 1638 // insert a { name, status } tuple, so that several lanes with the same name can't clobber each other
1720 lua_newtable(L); // {} {} 1639 lua_newtable(L); // {} {}
1721 lua_pushstring(L, lane->debug_name); // {} {} "name" 1640 lua_pushstring(L, lane->debug_name); // {} {} "name"
1722 lua_setfield(L, -2, "name"); // {} {} 1641 lua_setfield(L, -2, "name"); // {} {}
1723 push_thread_status(L, lane); // {} {} "status" 1642 push_thread_status(L, lane); // {} {} "status"
1724 lua_setfield(L, -2, "status"); // {} {} 1643 lua_setfield(L, -2, "status"); // {} {}
1725 lua_rawseti(L, -2, ++index); // {} 1644 lua_rawseti(L, -2, ++index); // {}
1726 lane = lane->tracking_next; 1645 lane = lane->tracking_next;
1727 } 1646 }
1728 } 1647 }
1729 return lua_gettop(L) - top; // 0 or 1 1648 return lua_gettop(L) - top; // 0 or 1
1730} 1649}
1731#endif // HAVE_LANE_TRACKING() 1650#endif // HAVE_LANE_TRACKING()
1732 1651
@@ -1737,13 +1656,17 @@ LUAG_FUNC(threads)
1737 */ 1656 */
1738 1657
1739/* 1658/*
1740* secs= now_secs() 1659* secs = now_secs()
1741* 1660*
1742* Returns the current time, as seconds (millisecond resolution). 1661* Returns the current time, as seconds. Resolution depends on std::system_clock implementation
1662* Can't use std::chrono::steady_clock because we need the same baseline as std::mktime
1743*/ 1663*/
1744LUAG_FUNC(now_secs) 1664LUAG_FUNC(now_secs)
1745{ 1665{
1746 lua_pushnumber(L, now_secs()); 1666 auto const now{ std::chrono::system_clock::now() };
1667 lua_Duration duration { now.time_since_epoch() };
1668
1669 lua_pushnumber(L, duration.count());
1747 return 1; 1670 return 1;
1748} 1671}
1749 1672
@@ -1788,8 +1711,7 @@ LUAG_FUNC(wakeup_conv)
1788 lua_pop(L,1); 1711 lua_pop(L,1);
1789 STACK_CHECK(L, 0); 1712 STACK_CHECK(L, 0);
1790 1713
1791 struct tm t; 1714 std::tm t{};
1792 memset(&t, 0, sizeof(t));
1793 t.tm_year = year - 1900; 1715 t.tm_year = year - 1900;
1794 t.tm_mon= month-1; // 0..11 1716 t.tm_mon= month-1; // 0..11
1795 t.tm_mday= day; // 1..31 1717 t.tm_mday= day; // 1..31
@@ -1798,7 +1720,7 @@ LUAG_FUNC(wakeup_conv)
1798 t.tm_sec= sec; // 0..60 1720 t.tm_sec= sec; // 0..60
1799 t.tm_isdst= isdst; // 0/1/negative 1721 t.tm_isdst= isdst; // 0/1/negative
1800 1722
1801 lua_pushnumber(L, static_cast<lua_Number>(mktime(&t))); // ms=0 1723 lua_pushnumber(L, static_cast<lua_Number>(std::mktime(&t))); // resolution: 1 second
1802 return 1; 1724 return 1;
1803} 1725}
1804 1726
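The two conversions above hinge on the same detail: now_secs() must share std::mktime's epoch so that wakeup_conv() results stay comparable with it, which is why system_clock is used instead of steady_clock. A minimal standalone sketch of that pairing (lua_Duration assumed to be std::chrono::duration<double>, as the surrounding code suggests; names are illustrative, not the Lanes symbols):

    #include <chrono>
    #include <ctime>

    using lua_Duration = std::chrono::duration<double>; // assumption: seconds as a double

    // current time in seconds since the epoch; the fractional part carries sub-second resolution
    static double now_secs_sketch()
    {
        lua_Duration const since_epoch{ std::chrono::system_clock::now().time_since_epoch() };
        return since_epoch.count();
    }

    // a civil date converted with std::mktime lands on the same timeline, so the two can be compared
    static double wakeup_secs_sketch(int year, int month, int day, int hour, int min, int sec)
    {
        std::tm t{};
        t.tm_year = year - 1900;
        t.tm_mon = month - 1; // 0..11
        t.tm_mday = day;      // 1..31
        t.tm_hour = hour;
        t.tm_min = min;
        t.tm_sec = sec;
        t.tm_isdst = -1;      // let the C library decide
        return static_cast<double>(std::mktime(&t)); // 1-second resolution
    }
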
@@ -1809,7 +1731,7 @@ LUAG_FUNC(wakeup_conv)
1809 */ 1731 */
1810 1732
1811extern int LG_linda(lua_State* L); 1733extern int LG_linda(lua_State* L);
1812static const struct luaL_Reg lanes_functions[] = 1734static struct luaL_Reg const lanes_functions[] =
1813{ 1735{
1814 { "linda", LG_linda }, 1736 { "linda", LG_linda },
1815 { "now_secs", LG_now_secs }, 1737 { "now_secs", LG_now_secs },
@@ -1822,116 +1744,46 @@ static const struct luaL_Reg lanes_functions[] =
1822 { nullptr, nullptr } 1744 { nullptr, nullptr }
1823}; 1745};
1824 1746
1825/*
1826 * One-time initializations
1827 * settings table it at position 1 on the stack
1828 * pushes an error string on the stack in case of problem
1829 */
1830static void init_once_LOCKED( void)
1831{
1832#if (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC)
1833 now_secs(); // initialize 'now_secs()' internal offset
1834#endif
1835
1836#if (defined PLATFORM_OSX) && (defined _UTILBINDTHREADTOCPU)
1837 chudInitialize();
1838#endif
1839
1840 //---
1841 // Linux needs SCHED_RR to change thread priorities, and that is only
1842 // allowed for sudo'ers. SCHED_OTHER (default) has no priorities.
1843 // SCHED_OTHER threads are always lower priority than SCHED_RR.
1844 //
1845 // ^-- those apply to 2.6 kernel. IF **wishful thinking** these
1846 // constraints will change in the future, non-sudo priorities can
1847 // be enabled also for Linux.
1848 //
1849#ifdef PLATFORM_LINUX
1850 sudo = (geteuid() == 0); // we are root?
1851
1852 // If lower priorities (-2..-1) are wanted, we need to lift the main
1853 // thread to SCHED_RR and 50 (medium) level. Otherwise, we're always below
1854 // the launched threads (even -2).
1855 //
1856#ifdef LINUX_SCHED_RR
1857 if (sudo)
1858 {
1859 struct sched_param sp;
1860 sp.sched_priority = _PRIO_0;
1861 PT_CALL(pthread_setschedparam(pthread_self(), SCHED_RR, &sp));
1862 }
1863#endif // LINUX_SCHED_RR
1864#endif // PLATFORM_LINUX
1865}
1866
1867// ################################################################################################# 1747// #################################################################################################
1868 1748
1869static volatile long s_initCount = 0;
1870
1871// upvalue 1: module name 1749// upvalue 1: module name
1872// upvalue 2: module table 1750// upvalue 2: module table
1873// param 1: settings table 1751// param 1: settings table
1874LUAG_FUNC(configure) 1752LUAG_FUNC(configure)
1875{ 1753{
1876 Universe* U = universe_get(L); 1754 // start with one-time initializations.
1877 bool const from_master_state{ U == nullptr };
1878 char const* name = luaL_checkstring(L, lua_upvalueindex(1));
1879 ASSERT_L(lua_type(L, 1) == LUA_TTABLE);
1880
1881 /*
1882 ** Making one-time initializations.
1883 **
1884 ** When the host application is single-threaded (and all threading happens via Lanes)
1885 ** there is no problem. But if the host is multithreaded, we need to lock around the
1886 ** initializations.
1887 */
1888#if THREADAPI == THREADAPI_WINDOWS
1889 { 1755 {
1890 static volatile int /*bool*/ go_ahead; // = 0 1756 // C++ guarantees that the static variable initialization is threadsafe.
1891 if (InterlockedCompareExchange(&s_initCount, 1, 0) == 0) 1757 static auto _ = std::invoke(
1892 { 1758 []()
1893 init_once_LOCKED();
1894 go_ahead = 1; // let others pass
1895 }
1896 else
1897 {
1898 while (!go_ahead)
1899 {
1900 Sleep(1);
1901 } // changes threads
1902 }
1903 }
1904#else // THREADAPI == THREADAPI_PTHREAD
1905 if (s_initCount == 0)
1906 {
1907 static pthread_mutex_t my_lock = PTHREAD_MUTEX_INITIALIZER;
1908 pthread_mutex_lock(&my_lock);
1909 {
1910 // Recheck now that we're within the lock
1911 //
1912 if (s_initCount == 0)
1913 { 1759 {
1914 init_once_LOCKED(); 1760#if (defined PLATFORM_OSX) && (defined _UTILBINDTHREADTOCPU)
1915 s_initCount = 1; 1761 chudInitialize();
1762#endif
1763 return false;
1916 } 1764 }
1917 } 1765 );
1918 pthread_mutex_unlock(&my_lock);
1919 } 1766 }
1920#endif // THREADAPI == THREADAPI_PTHREAD 1767
1768 Universe* U = universe_get(L);
1769 bool const from_master_state{ U == nullptr };
1770 char const* name = luaL_checkstring(L, lua_upvalueindex(1));
1771 ASSERT_L(lua_type(L, 1) == LUA_TTABLE);
1921 1772
1922 STACK_GROW(L, 4); 1773 STACK_GROW(L, 4);
1923 STACK_CHECK_START_ABS(L, 1); // settings 1774 STACK_CHECK_START_ABS(L, 1); // settings
1924 1775
1925 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: lanes.configure() BEGIN\n" INDENT_END, L)); 1776 DEBUGSPEW_CODE(fprintf( stderr, INDENT_BEGIN "%p: lanes.configure() BEGIN\n" INDENT_END, L));
1926 DEBUGSPEW_CODE( if (U) ++ U->debugspew_indent_depth); 1777 DEBUGSPEW_CODE(if (U) U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed));
1927 1778
1928 if(U == nullptr) 1779 if (U == nullptr)
1929 { 1780 {
1930 U = universe_create( L); // settings universe 1781 U = universe_create(L); // settings universe
1931 DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); 1782 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed));
1932 lua_newtable( L); // settings universe mt 1783 lua_newtable( L); // settings universe mt
1933 lua_getfield(L, 1, "shutdown_timeout"); // settings universe mt shutdown_timeout 1784 lua_getfield(L, 1, "shutdown_timeout"); // settings universe mt shutdown_timeout
1934 lua_pushcclosure(L, universe_gc, 1); // settings universe mt universe_gc 1785 lua_getfield(L, 1, "shutdown_mode"); // settings universe mt shutdown_timeout shutdown_mode
1786 lua_pushcclosure(L, universe_gc, 2); // settings universe mt universe_gc
1935 lua_setfield(L, -2, "__gc"); // settings universe mt 1787 lua_setfield(L, -2, "__gc"); // settings universe mt
1936 lua_setmetatable(L, -2); // settings universe 1788 lua_setmetatable(L, -2); // settings universe
1937 lua_pop(L, 1); // settings 1789 lua_pop(L, 1); // settings
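The platform-specific locking that used to guard init_once_LOCKED() is replaced by the guarantee that a function-local static is initialized exactly once, even with concurrent callers. A reduced sketch of that idiom (names are illustrative, not the Lanes symbols):

    #include <functional>

    void configure_once_sketch()
    {
        // the lambda body runs exactly once; concurrent callers block until it completes
        static bool const s_init_done{ std::invoke(
            []()
            {
                // one-time platform initialization would go here
                return true;
            }
        ) };
        (void) s_init_done; // past this point, initialization is known to have happened
    }
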
@@ -1988,7 +1840,7 @@ LUAG_FUNC(configure)
1988 STACK_CHECK(L, 2); 1840 STACK_CHECK(L, 2);
1989 1841
1990 { 1842 {
1991 char const* errmsg{ push_deep_proxy(L, U->timer_deep, 0, LookupMode::LaneBody) }; // settings M timer_deep 1843 char const* errmsg{ push_deep_proxy(Dest{ L }, U->timer_deep, 0, LookupMode::LaneBody) }; // settings M timer_deep
1992 if (errmsg != nullptr) 1844 if (errmsg != nullptr)
1993 { 1845 {
1994 return luaL_error(L, errmsg); 1846 return luaL_error(L, errmsg);
@@ -2070,7 +1922,7 @@ LUAG_FUNC(configure)
2070 CONFIG_REGKEY.setValue(L, [](lua_State* L) { lua_pushvalue(L, -2); }); 1922 CONFIG_REGKEY.setValue(L, [](lua_State* L) { lua_pushvalue(L, -2); });
2071 STACK_CHECK(L, 1); 1923 STACK_CHECK(L, 1);
2072 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "%p: lanes.configure() END\n" INDENT_END, L)); 1924 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "%p: lanes.configure() END\n" INDENT_END, L));
2073 DEBUGSPEW_CODE(--U->debugspew_indent_depth); 1925 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed));
2074 // Return the settings table 1926 // Return the settings table
2075 return 1; 1927 return 1;
2076} 1928}
@@ -2178,9 +2030,9 @@ LANES_API int luaopen_lanes_core( lua_State* L)
2178 return 1; 2030 return 1;
2179} 2031}
2180 2032
2181static int default_luaopen_lanes( lua_State* L) 2033[[nodiscard]] static int default_luaopen_lanes(lua_State* L)
2182{ 2034{
2183 int rc = luaL_loadfile(L, "lanes.lua") || lua_pcall(L, 0, 1, 0); 2035 int const rc{ luaL_loadfile(L, "lanes.lua") || lua_pcall(L, 0, 1, 0) };
2184 if (rc != LUA_OK) 2036 if (rc != LUA_OK)
2185 { 2037 {
2186 return luaL_error(L, "failed to initialize embedded Lanes"); 2038 return luaL_error(L, "failed to initialize embedded Lanes");
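default_luaopen_lanes() above falls back to loading lanes.lua from the filesystem; a host that embeds the script in its binary could hand luaopen_lanes_embedded() a loader of its own along these lines. This is only a sketch of the same pattern: bundled_lanes_lua_source is a hypothetical symbol provided by the build, not something Lanes defines.

    extern "C" {
    #include "lua.h"
    #include "lauxlib.h"
    }

    extern char const* bundled_lanes_lua_source; // hypothetical in-memory copy of lanes.lua

    [[nodiscard]] static int embedded_luaopen_lanes(lua_State* L)
    {
        // same shape as default_luaopen_lanes(), but loading from memory instead of disk
        int const rc{ luaL_loadstring(L, bundled_lanes_lua_source) || lua_pcall(L, 0, 1, 0) };
        if (rc != LUA_OK)
        {
            return luaL_error(L, "failed to initialize embedded Lanes");
        }
        return 1;
    }
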
diff --git a/src/lanes.h b/src/lanes.h
index 05a0a5c..bc8de55 100644
--- a/src/lanes.h
+++ b/src/lanes.h
@@ -20,7 +20,7 @@ extern "C" {
20#define LANES_VERSION_GREATER_THAN(MAJOR, MINOR, PATCH) ((LANES_VERSION_MAJOR>MAJOR) || (LANES_VERSION_MAJOR==MAJOR && (LANES_VERSION_MINOR>MINOR || (LANES_VERSION_MINOR==MINOR && LANES_VERSION_PATCH>PATCH)))) 20#define LANES_VERSION_GREATER_THAN(MAJOR, MINOR, PATCH) ((LANES_VERSION_MAJOR>MAJOR) || (LANES_VERSION_MAJOR==MAJOR && (LANES_VERSION_MINOR>MINOR || (LANES_VERSION_MINOR==MINOR && LANES_VERSION_PATCH>PATCH))))
21#define LANES_VERSION_GREATER_OR_EQUAL(MAJOR, MINOR, PATCH) ((LANES_VERSION_MAJOR>MAJOR) || (LANES_VERSION_MAJOR==MAJOR && (LANES_VERSION_MINOR>MINOR || (LANES_VERSION_MINOR==MINOR && LANES_VERSION_PATCH>=PATCH)))) 21#define LANES_VERSION_GREATER_OR_EQUAL(MAJOR, MINOR, PATCH) ((LANES_VERSION_MAJOR>MAJOR) || (LANES_VERSION_MAJOR==MAJOR && (LANES_VERSION_MINOR>MINOR || (LANES_VERSION_MINOR==MINOR && LANES_VERSION_PATCH>=PATCH))))
22 22
23LANES_API int luaopen_lanes_core(lua_State* L); 23LANES_API [[nodiscard]] int luaopen_lanes_core(lua_State* L);
24 24
25// Call this to work with embedded Lanes instead of calling luaopen_lanes_core() 25// Call this to work with embedded Lanes instead of calling luaopen_lanes_core()
26LANES_API void luaopen_lanes_embedded(lua_State* L, lua_CFunction _luaopen_lanes); 26LANES_API void luaopen_lanes_embedded(lua_State* L, lua_CFunction _luaopen_lanes);
diff --git a/src/lanes.lua b/src/lanes.lua
index b4c0070..fd3d22b 100644
--- a/src/lanes.lua
+++ b/src/lanes.lua
@@ -70,8 +70,10 @@ lanes.configure = function( settings_)
70 local default_params = 70 local default_params =
71 { 71 {
72 nb_keepers = 1, 72 nb_keepers = 1,
73 keepers_gc_threshold = -1,
73 on_state_create = nil, 74 on_state_create = nil,
74 shutdown_timeout = 0.25, 75 shutdown_timeout = 0.25,
76 shutdown_mode = "hard",
75 with_timers = true, 77 with_timers = true,
76 track_lanes = false, 78 track_lanes = false,
77 demote_full_userdata = nil, 79 demote_full_userdata = nil,
@@ -91,6 +93,10 @@ lanes.configure = function( settings_)
91 -- nb_keepers should be a number > 0 93 -- nb_keepers should be a number > 0
92 return type( val_) == "number" and val_ > 0 94 return type( val_) == "number" and val_ > 0
93 end, 95 end,
96 keepers_gc_threshold = function( val_)
97 -- keepers_gc_threshold should be a number
98 return type( val_) == "number"
99 end,
94 with_timers = boolean_param_checker, 100 with_timers = boolean_param_checker,
95 allocator = function( val_) 101 allocator = function( val_)
96 -- can be nil, "protected", or a function 102 -- can be nil, "protected", or a function
@@ -108,6 +114,11 @@ lanes.configure = function( settings_)
108 -- shutdown_timeout should be a number >= 0 114 -- shutdown_timeout should be a number >= 0
109 return type( val_) == "number" and val_ >= 0 115 return type( val_) == "number" and val_ >= 0
110 end, 116 end,
117 shutdown_mode = function( val_)
118 local valid_hooks = { soft = true, hard = true, call = true, ret = true, line = true, count = true }
119 -- shutdown_mode should be a known hook mask
120 return valid_hooks[val_]
121 end,
111 track_lanes = boolean_param_checker, 122 track_lanes = boolean_param_checker,
112 demote_full_userdata = boolean_param_checker, 123 demote_full_userdata = boolean_param_checker,
113 verbose_errors = boolean_param_checker 124 verbose_errors = boolean_param_checker
@@ -362,262 +373,263 @@ lanes.configure = function( settings_)
362 373
363 374
364 if settings.with_timers ~= false then 375 if settings.with_timers ~= false then
376 --
377 -- On first 'require "lanes"', a timer lane is spawned that will maintain
378 -- timer tables and sleep in between the timer events. All interaction with
379 -- the timer lane happens via a 'timer_gateway' Linda, which is common to
380 -- all that 'require "lanes"'.
381 --
382 -- Linda protocol to timer lane:
383 --
384 -- TGW_KEY: linda_h, key, [wakeup_at_secs], [repeat_secs]
385 --
386 local TGW_KEY= "(timer control)" -- the key does not matter, a 'weird' key may help debugging
387 local TGW_QUERY, TGW_REPLY = "(timer query)", "(timer reply)"
388 local first_time_key= "first time"
365 389
366 -- 390 local first_time = timer_gateway:get( first_time_key) == nil
367 -- On first 'require "lanes"', a timer lane is spawned that will maintain 391 timer_gateway:set( first_time_key, true)
368 -- timer tables and sleep in between the timer events. All interaction with
369 -- the timer lane happens via a 'timer_gateway' Linda, which is common to
370 -- all that 'require "lanes"'.
371 --
372 -- Linda protocol to timer lane:
373 --
374 -- TGW_KEY: linda_h, key, [wakeup_at_secs], [repeat_secs]
375 --
376 local TGW_KEY= "(timer control)" -- the key does not matter, a 'weird' key may help debugging
377 local TGW_QUERY, TGW_REPLY = "(timer query)", "(timer reply)"
378 local first_time_key= "first time"
379
380 local first_time = timer_gateway:get( first_time_key) == nil
381 timer_gateway:set( first_time_key, true)
382
383 --
384 -- Timer lane; initialize only on the first 'require "lanes"' instance (which naturally
385 -- has 'table' always declared)
386 --
387 if first_time then
388 392
389 local now_secs = core.now_secs 393 local now_secs = core.now_secs
390 assert( type( now_secs) == "function") 394 local wakeup_conv = core.wakeup_conv
391 ----- 395
392 -- Snore loop (run as a lane on the background)
393 --
394 -- High priority, to get trustworthy timings.
395 -- 396 --
396 -- We let the timer lane be a "free running" thread; no handle to it 397 -- Timer lane; initialize only on the first 'require "lanes"' instance (which naturally
397 -- remains. 398 -- has 'table' always declared)
398 -- 399 --
399 local timer_body = function() 400 if first_time then
400 set_debug_threadname( "LanesTimer") 401
401 -- 402 assert( type( now_secs) == "function")
402 -- { [deep_linda_lightuserdata]= { [deep_linda_lightuserdata]=linda_h, 403 -----
403 -- [key]= { wakeup_secs [,period_secs] } [, ...] }, 404 -- Snore loop (run as a lane on the background)
404 -- }
405 --
406 -- Collection of all running timers, indexed with linda's & key.
407 -- 405 --
408 -- Note that we need to use the deep lightuserdata identifiers, instead 406 -- High priority, to get trustworthy timings.
409 -- of 'linda_h' themselves as table indices. Otherwise, we'd get multiple
410 -- entries for the same timer.
411 -- 407 --
412 -- The 'hidden' reference to Linda proxy is used in 'check_timers()' but 408 -- We let the timer lane be a "free running" thread; no handle to it
413 -- also important to keep the Linda alive, even if all outside world threw 409 -- remains.
414 -- away pointers to it (which would ruin uniqueness of the deep pointer).
415 -- Now we're safe.
416 -- 410 --
417 local collection = {} 411 local timer_body = function()
418 local table_insert = assert( table.insert) 412 set_debug_threadname( "LanesTimer")
419 413 --
420 local get_timers = function() 414 -- { [deep_linda_lightuserdata]= { [deep_linda_lightuserdata]=linda_h,
421 local r = {} 415 -- [key]= { wakeup_secs [,period_secs] } [, ...] },
422 for deep, t in pairs( collection) do 416 -- }
423 -- WR( tostring( deep)) 417 --
424 local l = t[deep] 418 -- Collection of all running timers, indexed with linda's & key.
425 for key, timer_data in pairs( t) do 419 --
426 if key ~= deep then 420 -- Note that we need to use the deep lightuserdata identifiers, instead
427 table_insert( r, {l, key, timer_data}) 421 -- of 'linda_h' themselves as table indices. Otherwise, we'd get multiple
422 -- entries for the same timer.
423 --
424 -- The 'hidden' reference to Linda proxy is used in 'check_timers()' but
425 -- also important to keep the Linda alive, even if all outside world threw
426 -- away pointers to it (which would ruin uniqueness of the deep pointer).
427 -- Now we're safe.
428 --
429 local collection = {}
430 local table_insert = assert( table.insert)
431
432 local get_timers = function()
433 local r = {}
434 for deep, t in pairs( collection) do
435 -- WR( tostring( deep))
436 local l = t[deep]
437 for key, timer_data in pairs( t) do
438 if key ~= deep then
439 table_insert( r, {l, key, timer_data})
440 end
428 end 441 end
429 end 442 end
430 end 443 return r
431 return r 444 end -- get_timers()
432 end -- get_timers()
433
434 --
435 -- set_timer( linda_h, key [,wakeup_at_secs [,period_secs]] )
436 --
437 local set_timer = function( linda, key, wakeup_at, period)
438 assert( wakeup_at == nil or wakeup_at > 0.0)
439 assert( period == nil or period > 0.0)
440 445
441 local linda_deep = linda:deep()
442 assert( linda_deep)
443
444 -- Find or make a lookup for this timer
445 -- 446 --
446 local t1 = collection[linda_deep] 447 -- set_timer( linda_h, key [,wakeup_at_secs [,period_secs]] )
447 if not t1 then 448 --
448 t1 = { [linda_deep] = linda} -- proxy to use the Linda 449 local set_timer = function( linda, key, wakeup_at, period)
449 collection[linda_deep] = t1 450 assert( wakeup_at == nil or wakeup_at > 0.0)
450 end 451 assert( period == nil or period > 0.0)
451 452
452 if wakeup_at == nil then 453 local linda_deep = linda:deep()
453 -- Clear the timer 454 assert( linda_deep)
454 --
455 t1[key]= nil
456 455
457 -- Remove empty tables from collection; speeds timer checks and 456 -- Find or make a lookup for this timer
458 -- lets our 'safety reference' proxy be gc:ed as well.
459 -- 457 --
460 local empty = true 458 local t1 = collection[linda_deep]
461 for k, _ in pairs( t1) do 459 if not t1 then
462 if k ~= linda_deep then 460 t1 = { [linda_deep] = linda} -- proxy to use the Linda
463 empty = false 461 collection[linda_deep] = t1
464 break
465 end
466 end
467 if empty then
468 collection[linda_deep] = nil
469 end 462 end
470 463
 471 -- Note: any unread timer value is left at 'linda[key]' intentionally; 464 if wakeup_at == nil then
472 -- clearing a timer just stops it. 465 -- Clear the timer
473 else 466 --
474 -- New timer or changing the timings 467 t1[key]= nil
475 --
476 local t2 = t1[key]
477 if not t2 then
478 t2= {}
479 t1[key]= t2
480 end
481 468
482 t2[1] = wakeup_at 469 -- Remove empty tables from collection; speeds timer checks and
483 t2[2] = period -- can be 'nil' 470 -- lets our 'safety reference' proxy be gc:ed as well.
484 end 471 --
485 end -- set_timer() 472 local empty = true
473 for k, _ in pairs( t1) do
474 if k ~= linda_deep then
475 empty = false
476 break
477 end
478 end
479 if empty then
480 collection[linda_deep] = nil
481 end
486 482
 487 ----- 483 -- Note: any unread timer value is left at 'linda[key]' intentionally;
488 -- [next_wakeup_at]= check_timers() 484 -- clearing a timer just stops it.
489 -- Check timers, and wake up the ones expired (if any) 485 else
490 -- Returns the closest upcoming (remaining) wakeup time (or 'nil' if none). 486 -- New timer or changing the timings
491 local check_timers = function()
492 local now = now_secs()
493 local next_wakeup
494
495 for linda_deep,t1 in pairs(collection) do
496 for key,t2 in pairs(t1) do
497 -- 487 --
498 if key==linda_deep then 488 local t2 = t1[key]
499 -- no 'continue' in Lua :/ 489 if not t2 then
500 else 490 t2= {}
501 -- 't2': { wakeup_at_secs [,period_secs] } 491 t1[key]= t2
492 end
493
494 t2[1] = wakeup_at
495 t2[2] = period -- can be 'nil'
496 end
497 end -- set_timer()
498
499 -----
500 -- [next_wakeup_at]= check_timers()
501 -- Check timers, and wake up the ones expired (if any)
502 -- Returns the closest upcoming (remaining) wakeup time (or 'nil' if none).
503 local check_timers = function()
504 local now = now_secs()
505 local next_wakeup
506
507 for linda_deep,t1 in pairs(collection) do
508 for key,t2 in pairs(t1) do
502 -- 509 --
503 local wakeup_at= t2[1] 510 if key==linda_deep then
504 local period= t2[2] -- may be 'nil' 511 -- no 'continue' in Lua :/
505 512 else
506 if wakeup_at <= now then 513 -- 't2': { wakeup_at_secs [,period_secs] }
507 local linda= t1[linda_deep] 514 --
508 assert(linda) 515 local wakeup_at= t2[1]
509 516 local period= t2[2] -- may be 'nil'
510 linda:set( key, now ) 517
511 518 if wakeup_at <= now then
512 -- 'pairs()' allows the values to be modified (and even 519 local linda= t1[linda_deep]
513 -- removed) as far as keys are not touched 520 assert(linda)
514 521
515 if not period then 522 linda:set( key, now )
516 -- one-time timer; gone 523
517 -- 524 -- 'pairs()' allows the values to be modified (and even
518 t1[key]= nil 525 -- removed) as far as keys are not touched
519 wakeup_at= nil -- no 'continue' in Lua :/ 526
520 else 527 if not period then
521 -- repeating timer; find next wakeup (may jump multiple repeats) 528 -- one-time timer; gone
522 -- 529 --
523 repeat 530 t1[key]= nil
524 wakeup_at= wakeup_at+period 531 wakeup_at= nil -- no 'continue' in Lua :/
525 until wakeup_at > now 532 else
526 533 -- repeating timer; find next wakeup (may jump multiple repeats)
527 t2[1]= wakeup_at 534 --
535 repeat
536 wakeup_at= wakeup_at+period
537 until wakeup_at > now
538
539 t2[1]= wakeup_at
540 end
528 end 541 end
529 end
530 542
531 if wakeup_at and ((not next_wakeup) or (wakeup_at < next_wakeup)) then 543 if wakeup_at and ((not next_wakeup) or (wakeup_at < next_wakeup)) then
532 next_wakeup= wakeup_at 544 next_wakeup= wakeup_at
545 end
533 end 546 end
547 end -- t2 loop
548 end -- t1 loop
549
550 return next_wakeup -- may be 'nil'
551 end -- check_timers()
552
553 local timer_gateway_batched = timer_gateway.batched
554 set_finalizer( function( err, stk)
555 if err and type( err) ~= "userdata" then
556 WR( "LanesTimer error: "..tostring(err))
557 --elseif type( err) == "userdata" then
558 -- WR( "LanesTimer after cancel" )
559 --else
560 -- WR("LanesTimer finalized")
561 end
562 end)
563 while true do
564 local next_wakeup = check_timers()
565
566 -- Sleep until next timer to wake up, or a set/clear command
567 --
568 local secs
569 if next_wakeup then
570 secs = next_wakeup - now_secs()
571 if secs < 0 then secs = 0 end
572 end
573 local key, what = timer_gateway:receive( secs, TGW_KEY, TGW_QUERY)
574
575 if key == TGW_KEY then
576 assert( getmetatable( what) == "Linda") -- 'what' should be a linda on which the client sets a timer
577 local _, key, wakeup_at, period = timer_gateway:receive( 0, timer_gateway_batched, TGW_KEY, 3)
578 assert( key)
579 set_timer( what, key, wakeup_at, period and period > 0 and period or nil)
580 elseif key == TGW_QUERY then
581 if what == "get_timers" then
582 timer_gateway:send( TGW_REPLY, get_timers())
583 else
584 timer_gateway:send( TGW_REPLY, "unknown query " .. what)
534 end 585 end
535 end -- t2 loop 586 --elseif secs == nil then -- got no value while block-waiting?
536 end -- t1 loop 587 -- WR( "timer lane: no linda, aborted?")
537 588 end
538 return next_wakeup -- may be 'nil'
539 end -- check_timers()
540
541 local timer_gateway_batched = timer_gateway.batched
542 set_finalizer( function( err, stk)
543 if err and type( err) ~= "userdata" then
544 WR( "LanesTimer error: "..tostring(err))
545 --elseif type( err) == "userdata" then
546 -- WR( "LanesTimer after cancel" )
547 --else
548 -- WR("LanesTimer finalized")
549 end 589 end
550 end) 590 end -- timer_body()
551 while true do 591 timer_lane = gen( "*", { package= {}, priority = max_prio}, timer_body)() -- "*" instead of "io,package" for LuaJIT compatibility...
552 local next_wakeup = check_timers() 592 end -- first_time
553 593
554 -- Sleep until next timer to wake up, or a set/clear command 594 -----
595 -- = timer( linda_h, key_val, date_tbl|first_secs [,period_secs] )
596 --
597 -- PUBLIC LANES API
598 timer = function( linda, key, a, period )
599 if getmetatable( linda) ~= "Linda" then
600 error "expecting a Linda"
601 end
602 if a == 0.0 then
603 -- Caller expects to get current time stamp in Linda, on return
604 -- (like the timer had expired instantly); it would be good to set this
605 -- as late as possible (to give most current time) but also we want it
606 -- to precede any possible timers that might start striking.
555 -- 607 --
556 local secs 608 linda:set( key, now_secs())
557 if next_wakeup then 609
558 secs = next_wakeup - now_secs() 610 if not period or period==0.0 then
559 if secs < 0 then secs = 0 end 611 timer_gateway:send( TGW_KEY, linda, key, nil, nil ) -- clear the timer
560 end 612 return -- nothing more to do
561 local key, what = timer_gateway:receive( secs, TGW_KEY, TGW_QUERY)
562
563 if key == TGW_KEY then
564 assert( getmetatable( what) == "Linda") -- 'what' should be a linda on which the client sets a timer
565 local _, key, wakeup_at, period = timer_gateway:receive( 0, timer_gateway_batched, TGW_KEY, 3)
566 assert( key)
567 set_timer( what, key, wakeup_at, period and period > 0 and period or nil)
568 elseif key == TGW_QUERY then
569 if what == "get_timers" then
570 timer_gateway:send( TGW_REPLY, get_timers())
571 else
572 timer_gateway:send( TGW_REPLY, "unknown query " .. what)
573 end
574 --elseif secs == nil then -- got no value while block-waiting?
575 -- WR( "timer lane: no linda, aborted?")
576 end 613 end
614 a= period
577 end 615 end
578 end -- timer_body()
579 timer_lane = gen( "*", { package= {}, priority = max_prio}, timer_body)() -- "*" instead of "io,package" for LuaJIT compatibility...
580 end -- first_time
581 616
582 ----- 617 local wakeup_at= type(a)=="table" and wakeup_conv(a) -- given point of time
583 -- = timer( linda_h, key_val, date_tbl|first_secs [,period_secs] ) 618 or (a and now_secs()+a or nil)
584 -- 619 -- queue to timer
585 -- PUBLIC LANES API
586 timer = function( linda, key, a, period )
587 if getmetatable( linda) ~= "Linda" then
588 error "expecting a Linda"
589 end
590 if a == 0.0 then
591 -- Caller expects to get current time stamp in Linda, on return
592 -- (like the timer had expired instantly); it would be good to set this
593 -- as late as possible (to give most current time) but also we want it
594 -- to precede any possible timers that might start striking.
595 -- 620 --
596 linda:set( key, core.now_secs()) 621 timer_gateway:send( TGW_KEY, linda, key, wakeup_at, period )
622 end -- timer()
597 623
598 if not period or period==0.0 then 624 -----
599 timer_gateway:send( TGW_KEY, linda, key, nil, nil ) -- clear the timer 625 -- {[{linda, slot, when, period}[,...]]} = timers()
600 return -- nothing more to do
601 end
602 a= period
603 end
604
605 local wakeup_at= type(a)=="table" and core.wakeup_conv(a) -- given point of time
606 or (a and core.now_secs()+a or nil)
607 -- queue to timer
608 -- 626 --
609 timer_gateway:send( TGW_KEY, linda, key, wakeup_at, period ) 627 -- PUBLIC LANES API
610 end 628 timers = function()
611 629 timer_gateway:send( TGW_QUERY, "get_timers")
612 ----- 630 local _, r = timer_gateway:receive( TGW_REPLY)
613 -- {[{linda, slot, when, period}[,...]]} = timers() 631 return r
614 -- 632 end -- timers()
615 -- PUBLIC LANES API
616 timers = function()
617 timer_gateway:send( TGW_QUERY, "get_timers")
618 local _, r = timer_gateway:receive( TGW_REPLY)
619 return r
620 end
621 633
622 end -- settings.with_timers 634 end -- settings.with_timers
623 635
diff --git a/src/lanes_private.h b/src/lanes_private.h
index bcc3014..18e55fd 100644
--- a/src/lanes_private.h
+++ b/src/lanes_private.h
@@ -4,27 +4,42 @@
4#include "uniquekey.h" 4#include "uniquekey.h"
5#include "universe.h" 5#include "universe.h"
6 6
7#include <chrono>
8#include <condition_variable>
7#include <latch> 9#include <latch>
10#include <stop_token>
11#include <thread>
8 12
9// NOTE: values to be changed by either thread, during execution, without 13// NOTE: values to be changed by either thread, during execution, without
10// locking, are marked "volatile" 14// locking, are marked "volatile"
11// 15//
12class Lane 16class Lane
13{ 17{
14 private: 18 public:
15 19
16 enum class ThreadStatus 20 /*
21 Pending: The Lua VM hasn't done anything yet.
22 Running, Waiting: Thread is inside the Lua VM. If the thread is forcefully stopped, we can't lua_close() the Lua State.
23 Done, Error, Cancelled: Thread execution is outside the Lua VM. It can be lua_close()d.
24 */
25 enum class Status
17 { 26 {
18 Normal, // normal master side state 27 Pending,
19 Killed // issued an OS kill 28 Running,
29 Waiting,
30 Done,
31 Error,
32 Cancelled
20 }; 33 };
34 using enum Status;
21 35
22 public: 36 // the thread
23 37 std::jthread m_thread;
24 using enum ThreadStatus; 38 // a latch to wait for the lua_State to be ready
25
26 THREAD_T thread;
27 std::latch m_ready{ 1 }; 39 std::latch m_ready{ 1 };
40 // to wait for stop requests through m_thread's stop_source
41 std::mutex m_done_mutex;
42 std::condition_variable m_done_signal; // use condition_variable_any if waiting for a stop_token
28 // 43 //
29 // M: sub-thread OS thread 44 // M: sub-thread OS thread
30 // S: not used 45 // S: not used
@@ -37,37 +52,20 @@ class Lane
37 // M: prepares the state, and reads results 52 // M: prepares the state, and reads results
38 // S: while S is running, M must keep out of modifying the state 53 // S: while S is running, M must keep out of modifying the state
39 54
40 volatile enum e_status status{ PENDING }; 55 Status volatile m_status{ Pending };
41 // 56 //
42 // M: sets to PENDING (before launching) 57 // M: sets to Pending (before launching)
43 // S: updates -> RUNNING/WAITING -> DONE/ERROR_ST/CANCELLED 58 // S: updates -> Running/Waiting -> Done/Error/Cancelled
44 59
45 SIGNAL_T* volatile waiting_on{ nullptr }; 60 std::condition_variable* volatile m_waiting_on{ nullptr };
46 // 61 //
47 // When status is WAITING, points on the linda's signal the thread waits on, else nullptr 62 // When status is Waiting, points on the linda's signal the thread waits on, else nullptr
48 63
49 volatile CancelRequest cancel_request{ CancelRequest::None }; 64 CancelRequest volatile cancel_request{ CancelRequest::None };
50 // 65 //
51 // M: sets to false, flags true for cancel request 66 // M: sets to false, flags true for cancel request
52 // S: reads to see if cancel is requested 67 // S: reads to see if cancel is requested
53 68
54#if THREADWAIT_METHOD == THREADWAIT_CONDVAR
55 SIGNAL_T done_signal;
56 //
57 // M: Waited upon at lane ending (if Posix with no PTHREAD_TIMEDJOIN)
58 // S: sets the signal once cancellation is noticed (avoids a kill)
59
60 MUTEX_T done_lock;
61 //
62 // Lock required by 'done_signal' condition variable, protecting
63 // lane status changes to DONE/ERROR_ST/CANCELLED.
64#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
65
66 volatile ThreadStatus mstatus{ Normal };
67 //
68 // M: sets to Normal, if issued a kill changes to Killed
69 // S: not used
70
71 Lane* volatile selfdestruct_next{ nullptr }; 69 Lane* volatile selfdestruct_next{ nullptr };
72 // 70 //
73 // M: sets to non-nullptr if facing lane handle '__gc' cycle but the lane 71 // M: sets to non-nullptr if facing lane handle '__gc' cycle but the lane
@@ -80,7 +78,7 @@ class Lane
80 // 78 //
81 // For tracking only 79 // For tracking only
82 80
83 static void* operator new(size_t size_, Universe* U_) noexcept { return U_->internal_allocator.alloc(size_); } 81 [[nodiscard]] static void* operator new(size_t size_, Universe* U_) noexcept { return U_->internal_allocator.alloc(size_); }
84 // can't actually delete the operator because the compiler generates stack unwinding code that could call it in case of exception 82 // can't actually delete the operator because the compiler generates stack unwinding code that could call it in case of exception
85 static void operator delete(void* p_, Universe* U_) { U_->internal_allocator.free(p_, sizeof(Lane)); } 83 static void operator delete(void* p_, Universe* U_) { U_->internal_allocator.free(p_, sizeof(Lane)); }
86 // this one is for us, to make sure memory is freed by the correct allocator 84 // this one is for us, to make sure memory is freed by the correct allocator
@@ -88,6 +86,9 @@ class Lane
88 86
89 Lane(Universe* U_, lua_State* L_); 87 Lane(Universe* U_, lua_State* L_);
90 ~Lane(); 88 ~Lane();
89
90 [[nodiscard]] bool waitForCompletion(lua_Duration duration_);
91 void startThread(int priority_);
91}; 92};
92 93
93// xxh64 of string "LANE_POINTER_REGKEY" generated at https://www.pelock.com/products/hash-calculator 94// xxh64 of string "LANE_POINTER_REGKEY" generated at https://www.pelock.com/products/hash-calculator
@@ -97,6 +98,9 @@ static constexpr UniqueKey LANE_POINTER_REGKEY{ 0xB3022205633743BCull }; // used
97// 'Lane' are malloc/free'd and the handle only carries a pointer. 98// 'Lane' are malloc/free'd and the handle only carries a pointer.
98// This is not deep userdata since the handle's not portable among lanes. 99// This is not deep userdata since the handle's not portable among lanes.
99// 100//
100#define lua_toLane( L, i) (*((Lane**) luaL_checkudata( L, i, "Lane"))) 101[[nodiscard]] inline Lane* lua_toLane(lua_State* L, int i_)
102{
103 return *(static_cast<Lane**>(luaL_checkudata(L, i_, "Lane")));
104}
101 105
102int push_thread_status( lua_State* L, Lane* s); 106void push_thread_status(lua_State* L, Lane* lane_);
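The new members (std::jthread, std::latch, std::mutex plus std::condition_variable) replace the old THREAD_T/SIGNAL_T pair and work together as a small handshake: the master prepares the lua_State, releases the latch, and the worker signals completion through the condition variable. A stripped-down sketch of that shape, with illustrative names and none of the actual lane logic:

    #include <chrono>
    #include <condition_variable>
    #include <latch>
    #include <mutex>
    #include <thread>

    struct LaneSketch
    {
        std::jthread m_thread;
        std::latch m_ready{ 1 };            // counted down by the master once the state is prepared
        std::mutex m_done_mutex;
        std::condition_variable m_done_signal;
        bool m_done{ false };

        void startThread()
        {
            m_thread = std::jthread([this]()
            {
                m_ready.wait();             // don't touch the lua_State before the master is done with it
                // ... the lane body would run here ...
                std::lock_guard<std::mutex> lock{ m_done_mutex };
                m_done = true;
                m_done_signal.notify_one(); // wake a join()-style waiter
            });
            // ... master-side preparation of the lua_State ...
            m_ready.count_down();
        }

        [[nodiscard]] bool waitForCompletion(std::chrono::duration<double> timeout_)
        {
            std::unique_lock<std::mutex> lock{ m_done_mutex };
            return m_done_signal.wait_for(lock, timeout_, [this]() { return m_done; });
        }
    };
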
diff --git a/src/linda.cpp b/src/linda.cpp
index 37a74b0..e749f52 100644
--- a/src/linda.cpp
+++ b/src/linda.cpp
@@ -61,8 +61,8 @@ class Linda : public DeepPrelude // Deep userdata MUST start with this header
61 61
62 public: 62 public:
63 63
64 SIGNAL_T read_happened; 64 std::condition_variable m_read_happened;
65 SIGNAL_T write_happened; 65 std::condition_variable m_write_happened;
66 Universe* const U; // the universe this linda belongs to 66 Universe* const U; // the universe this linda belongs to
67 uintptr_t const group; // a group to control keeper allocation between lindas 67 uintptr_t const group; // a group to control keeper allocation between lindas
68 CancelRequest simulate_cancel{ CancelRequest::None }; 68 CancelRequest simulate_cancel{ CancelRequest::None };
@@ -70,7 +70,7 @@ class Linda : public DeepPrelude // Deep userdata MUST start with this header
70 public: 70 public:
71 71
72 // a fifo full userdata has one uservalue, the table that holds the actual fifo contents 72 // a fifo full userdata has one uservalue, the table that holds the actual fifo contents
73 static void* operator new(size_t size_, Universe* U_) noexcept { return U_->internal_allocator.alloc(size_); } 73 [[nodiscard]] static void* operator new(size_t size_, Universe* U_) noexcept { return U_->internal_allocator.alloc(size_); }
74 // always embedded somewhere else or "in-place constructed" as a full userdata 74 // always embedded somewhere else or "in-place constructed" as a full userdata
75 // can't actually delete the operator because the compiler generates stack unwinding code that could call it in case of exception 75 // can't actually delete the operator because the compiler generates stack unwinding code that could call it in case of exception
76 static void operator delete(void* p_, Universe* U_) { U_->internal_allocator.free(p_, sizeof(Linda)); } 76 static void operator delete(void* p_, Universe* U_) { U_->internal_allocator.free(p_, sizeof(Linda)); }
@@ -81,17 +81,11 @@ class Linda : public DeepPrelude // Deep userdata MUST start with this header
81 : U{ U_ } 81 : U{ U_ }
82 , group{ group_ << KEEPER_MAGIC_SHIFT } 82 , group{ group_ << KEEPER_MAGIC_SHIFT }
83 { 83 {
84 SIGNAL_INIT(&read_happened);
85 SIGNAL_INIT(&write_happened);
86
87 setName(name_, len_); 84 setName(name_, len_);
88 } 85 }
89 86
90 ~Linda() 87 ~Linda()
91 { 88 {
92 // There aren't any lanes waiting on these lindas, since all proxies have been gc'ed. Right?
93 SIGNAL_FREE(&read_happened);
94 SIGNAL_FREE(&write_happened);
95 if (std::holds_alternative<AllocatedName>(m_name)) 89 if (std::holds_alternative<AllocatedName>(m_name))
96 { 90 {
97 AllocatedName& name = std::get<AllocatedName>(m_name); 91 AllocatedName& name = std::get<AllocatedName>(m_name);
@@ -143,10 +137,10 @@ class Linda : public DeepPrelude // Deep userdata MUST start with this header
143 return nullptr; 137 return nullptr;
144 } 138 }
145}; 139};
146static void* linda_id( lua_State*, DeepOp); 140[[nodiscard]] static void* linda_id(lua_State*, DeepOp);
147 141
148template<bool OPT> 142template<bool OPT>
149static inline Linda* lua_toLinda(lua_State* L, int idx_) 143[[nodiscard]] static inline Linda* lua_toLinda(lua_State* L, int idx_)
150{ 144{
151 Linda* const linda{ static_cast<Linda*>(luaG_todeep(L, linda_id, idx_)) }; 145 Linda* const linda{ static_cast<Linda*>(luaG_todeep(L, linda_id, idx_)) };
152 if (!OPT) 146 if (!OPT)
@@ -168,7 +162,7 @@ static void check_key_types(lua_State* L, int start_, int end_)
168 { 162 {
169 continue; 163 continue;
170 } 164 }
171 std::ignore = luaL_error(L, "argument #%d: invalid key type (not a boolean, string, number or light userdata)", i); 165 luaL_error(L, "argument #%d: invalid key type (not a boolean, string, number or light userdata)", i); // doesn't return
172 } 166 }
173} 167}
174 168
@@ -216,15 +210,19 @@ LUAG_FUNC(linda_protected_call)
216LUAG_FUNC(linda_send) 210LUAG_FUNC(linda_send)
217{ 211{
218 Linda* const linda{ lua_toLinda<false>(L, 1) }; 212 Linda* const linda{ lua_toLinda<false>(L, 1) };
219 time_d timeout{ -1.0 }; 213 std::chrono::time_point<std::chrono::steady_clock> until{ std::chrono::time_point<std::chrono::steady_clock>::max() };
220 int key_i{ 2 }; // index of first key, if timeout not there 214 int key_i{ 2 }; // index of first key, if timeout not there
221 215
222 if (lua_type(L, 2) == LUA_TNUMBER) // we don't want to use lua_isnumber() because of autocoercion 216 if (lua_type(L, 2) == LUA_TNUMBER) // we don't want to use lua_isnumber() because of autocoercion
223 { 217 {
224 timeout = SIGNAL_TIMEOUT_PREPARE(lua_tonumber(L, 2)); 218 lua_Duration const duration{ lua_tonumber(L, 2) };
219 if (duration.count() >= 0.0)
220 {
221 until = std::chrono::steady_clock::now() + std::chrono::duration_cast<std::chrono::steady_clock::duration>(duration);
222 }
225 ++key_i; 223 ++key_i;
226 } 224 }
227 else if (lua_isnil(L, 2)) // alternate explicit "no timeout" by passing nil before the key 225 else if (lua_isnil(L, 2)) // alternate explicit "infinite timeout" by passing nil before the key
228 { 226 {
229 ++key_i; 227 ++key_i;
230 } 228 }
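The timeout handling above replaces a raw seconds value with an absolute std::chrono::steady_clock deadline; a negative duration keeps the default time_point::max(), i.e. wait forever. Reduced to a helper (assuming, as the code suggests, that lua_Duration is a double-based seconds duration):

    #include <chrono>

    static std::chrono::time_point<std::chrono::steady_clock> make_deadline(double seconds_)
    {
        auto until{ std::chrono::time_point<std::chrono::steady_clock>::max() }; // "no timeout"
        std::chrono::duration<double> const duration{ seconds_ };
        if (duration.count() >= 0.0)
        {
            until = std::chrono::steady_clock::now()
                  + std::chrono::duration_cast<std::chrono::steady_clock::duration>(duration);
        }
        return until;
    }
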
@@ -266,6 +264,7 @@ LUAG_FUNC(linda_send)
266 lua_State* const KL{ K ? K->L : nullptr }; 264 lua_State* const KL{ K ? K->L : nullptr };
267 if (KL == nullptr) 265 if (KL == nullptr)
268 return 0; 266 return 0;
267
269 STACK_CHECK_START_REL(KL, 0); 268 STACK_CHECK_START_REL(KL, 0);
270 for (bool try_again{ true };;) 269 for (bool try_again{ true };;)
271 { 270 {
@@ -295,34 +294,37 @@ LUAG_FUNC(linda_send)
295 if (ret) 294 if (ret)
296 { 295 {
297 // Wake up ALL waiting threads 296 // Wake up ALL waiting threads
298 SIGNAL_ALL(&linda->write_happened); 297 linda->m_write_happened.notify_all();
299 break; 298 break;
300 } 299 }
301 300
302 // instant timout to bypass the wait syscall 301 // instant timout to bypass the wait syscall
303 if (timeout == 0.0) 302 if (std::chrono::steady_clock::now() >= until)
304 { 303 {
305 break; /* no wait; instant timeout */ 304 break; /* no wait; instant timeout */
306 } 305 }
307 306
308 // storage limit hit, wait until timeout or signalled that we should try again 307 // storage limit hit, wait until timeout or signalled that we should try again
309 { 308 {
310 enum e_status prev_status = ERROR_ST; // prevent 'might be used uninitialized' warnings 309 Lane::Status prev_status{ Lane::Error }; // prevent 'might be used uninitialized' warnings
311 if (lane != nullptr) 310 if (lane != nullptr)
312 { 311 {
313 // change status of lane to "waiting" 312 // change status of lane to "waiting"
314 prev_status = lane->status; // RUNNING, most likely 313 prev_status = lane->m_status; // Running, most likely
315 ASSERT_L(prev_status == RUNNING); // but check, just in case 314 ASSERT_L(prev_status == Lane::Running); // but check, just in case
316 lane->status = WAITING; 315 lane->m_status = Lane::Waiting;
317 ASSERT_L(lane->waiting_on == nullptr); 316 ASSERT_L(lane->m_waiting_on == nullptr);
318 lane->waiting_on = &linda->read_happened; 317 lane->m_waiting_on = &linda->m_read_happened;
319 } 318 }
320 // could not send because no room: wait until some data was read before trying again, or until timeout is reached 319 // could not send because no room: wait until some data was read before trying again, or until timeout is reached
321 try_again = SIGNAL_WAIT(&linda->read_happened, &K->keeper_cs, timeout); 320 std::unique_lock<std::mutex> keeper_lock{ K->m_mutex, std::adopt_lock };
321 std::cv_status const status{ linda->m_read_happened.wait_until(keeper_lock, until) };
322 keeper_lock.release(); // we don't want to release the lock!
323 try_again = (status == std::cv_status::no_timeout); // detect spurious wakeups
322 if (lane != nullptr) 324 if (lane != nullptr)
323 { 325 {
324 lane->waiting_on = nullptr; 326 lane->m_waiting_on = nullptr;
325 lane->status = prev_status; 327 lane->m_status = prev_status;
326 } 328 }
327 } 329 }
328 } 330 }
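The wait itself relies on a small but important trick: the keeper mutex is already held at this point, so it is adopted into a std::unique_lock just for the duration of wait_until(), and release()d afterwards so the unique_lock destructor does not unlock a mutex the caller still owns. In isolation (illustrative names):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    // precondition: keeper_mutex_ is already locked by the calling thread
    static bool wait_for_signal(std::mutex& keeper_mutex_, std::condition_variable& signal_,
                                std::chrono::time_point<std::chrono::steady_clock> until_)
    {
        std::unique_lock<std::mutex> keeper_lock{ keeper_mutex_, std::adopt_lock };
        std::cv_status const status{ signal_.wait_until(keeper_lock, until_) };
        keeper_lock.release(); // don't let the destructor unlock a mutex the caller still needs
        // postcondition: keeper_mutex_ is still locked; false means the deadline passed
        return status == std::cv_status::no_timeout;
    }
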
@@ -369,21 +371,24 @@ static constexpr UniqueKey BATCH_SENTINEL{ 0x2DDFEE0968C62AA7ull };
369LUAG_FUNC(linda_receive) 371LUAG_FUNC(linda_receive)
370{ 372{
371 Linda* const linda{ lua_toLinda<false>(L, 1) }; 373 Linda* const linda{ lua_toLinda<false>(L, 1) };
372 374 std::chrono::time_point<std::chrono::steady_clock> until{ std::chrono::time_point<std::chrono::steady_clock>::max() };
373 time_d timeout{ -1.0 }; 375 int key_i{ 2 }; // index of first key, if timeout not there
374 int key_i{ 2 };
375 376
376 if (lua_type(L, 2) == LUA_TNUMBER) // we don't want to use lua_isnumber() because of autocoercion 377 if (lua_type(L, 2) == LUA_TNUMBER) // we don't want to use lua_isnumber() because of autocoercion
377 { 378 {
378 timeout = SIGNAL_TIMEOUT_PREPARE(lua_tonumber(L, 2)); 379 lua_Duration const duration{ lua_tonumber(L, 2) };
380 if (duration.count() >= 0.0)
381 {
382 until = std::chrono::steady_clock::now() + std::chrono::duration_cast<std::chrono::steady_clock::duration>(duration);
383 }
379 ++key_i; 384 ++key_i;
380 } 385 }
381 else if (lua_isnil(L, 2)) // alternate explicit "no timeout" by passing nil before the key 386 else if (lua_isnil(L, 2)) // alternate explicit "infinite timeout" by passing nil before the key
382 { 387 {
383 ++key_i; 388 ++key_i;
384 } 389 }
385 390
386 keeper_api_t keeper_receive; 391 keeper_api_t selected_keeper_receive{ nullptr };
387 int expected_pushed_min{ 0 }, expected_pushed_max{ 0 }; 392 int expected_pushed_min{ 0 }, expected_pushed_max{ 0 };
388 // are we in batched mode? 393 // are we in batched mode?
389 BATCH_SENTINEL.pushKey(L); 394 BATCH_SENTINEL.pushKey(L);
@@ -396,7 +401,7 @@ LUAG_FUNC(linda_receive)
396 // make sure the keys are of a valid type 401 // make sure the keys are of a valid type
397 check_key_types(L, key_i, key_i); 402 check_key_types(L, key_i, key_i);
398 // receive multiple values from a single slot 403 // receive multiple values from a single slot
399 keeper_receive = KEEPER_API(receive_batched); 404 selected_keeper_receive = KEEPER_API(receive_batched);
400 // we expect a user-defined amount of return value 405 // we expect a user-defined amount of return value
401 expected_pushed_min = (int) luaL_checkinteger(L, key_i + 1); 406 expected_pushed_min = (int) luaL_checkinteger(L, key_i + 1);
402 expected_pushed_max = (int) luaL_optinteger(L, key_i + 2, expected_pushed_min); 407 expected_pushed_max = (int) luaL_optinteger(L, key_i + 2, expected_pushed_min);
@@ -413,17 +418,20 @@ LUAG_FUNC(linda_receive)
413 // make sure the keys are of a valid type 418 // make sure the keys are of a valid type
414 check_key_types(L, key_i, lua_gettop(L)); 419 check_key_types(L, key_i, lua_gettop(L));
415 // receive a single value, checking multiple slots 420 // receive a single value, checking multiple slots
416 keeper_receive = KEEPER_API(receive); 421 selected_keeper_receive = KEEPER_API(receive);
417 // we expect a single (value, key) pair of returned values 422 // we expect a single (value, key) pair of returned values
418 expected_pushed_min = expected_pushed_max = 2; 423 expected_pushed_min = expected_pushed_max = 2;
419 } 424 }
420 425
421 Lane* const lane{ LANE_POINTER_REGKEY.readLightUserDataValue<Lane>(L) }; 426 Lane* const lane{ LANE_POINTER_REGKEY.readLightUserDataValue<Lane>(L) };
422 Keeper* const K{ which_keeper(linda->U->keepers, linda->hashSeed()) }; 427 Keeper* const K{ which_keeper(linda->U->keepers, linda->hashSeed()) };
423 if (K == nullptr) 428 lua_State* const KL{ K ? K->L : nullptr };
429 if (KL == nullptr)
424 return 0; 430 return 0;
431
425 CancelRequest cancel{ CancelRequest::None }; 432 CancelRequest cancel{ CancelRequest::None };
426 int pushed{ 0 }; 433 int pushed{ 0 };
434 STACK_CHECK_START_REL(KL, 0);
427 for (bool try_again{ true };;) 435 for (bool try_again{ true };;)
428 { 436 {
429 if (lane != nullptr) 437 if (lane != nullptr)
@@ -439,7 +447,7 @@ LUAG_FUNC(linda_receive)
439 } 447 }
440 448
441 // all arguments of receive() but the first are passed to the keeper's receive function 449 // all arguments of receive() but the first are passed to the keeper's receive function
442 pushed = keeper_call(linda->U, K->L, keeper_receive, L, linda, key_i); 450 pushed = keeper_call(linda->U, KL, selected_keeper_receive, L, linda, key_i);
443 if (pushed < 0) 451 if (pushed < 0)
444 { 452 {
445 break; 453 break;
@@ -451,36 +459,40 @@ LUAG_FUNC(linda_receive)
451 keeper_toggle_nil_sentinels(L, lua_gettop(L) - pushed, LookupMode::FromKeeper); 459 keeper_toggle_nil_sentinels(L, lua_gettop(L) - pushed, LookupMode::FromKeeper);
452 // To be done from within the 'K' locking area 460 // To be done from within the 'K' locking area
453 // 461 //
454 SIGNAL_ALL(&linda->read_happened); 462 linda->m_read_happened.notify_all();
455 break; 463 break;
456 } 464 }
457 465
458 if (timeout == 0.0) 466 if (std::chrono::steady_clock::now() >= until)
459 { 467 {
460 break; /* instant timeout */ 468 break; /* instant timeout */
461 } 469 }
462 470
463 // nothing received, wait until timeout or signalled that we should try again 471 // nothing received, wait until timeout or signalled that we should try again
464 { 472 {
465 enum e_status prev_status = ERROR_ST; // prevent 'might be used uninitialized' warnings 473 Lane::Status prev_status{ Lane::Error }; // prevent 'might be used uninitialized' warnings
466 if (lane != nullptr) 474 if (lane != nullptr)
467 { 475 {
468 // change status of lane to "waiting" 476 // change status of lane to "waiting"
469 prev_status = lane->status; // RUNNING, most likely 477 prev_status = lane->m_status; // Running, most likely
470 ASSERT_L(prev_status == RUNNING); // but check, just in case 478 ASSERT_L(prev_status == Lane::Running); // but check, just in case
471 lane->status = WAITING; 479 lane->m_status = Lane::Waiting;
472 ASSERT_L(lane->waiting_on == nullptr); 480 ASSERT_L(lane->m_waiting_on == nullptr);
473 lane->waiting_on = &linda->write_happened; 481 lane->m_waiting_on = &linda->m_write_happened;
474 } 482 }
475 // not enough data to read: wakeup when data was sent, or when timeout is reached 483 // not enough data to read: wakeup when data was sent, or when timeout is reached
476 try_again = SIGNAL_WAIT(&linda->write_happened, &K->keeper_cs, timeout); 484 std::unique_lock<std::mutex> keeper_lock{ K->m_mutex, std::adopt_lock };
485 std::cv_status const status{ linda->m_write_happened.wait_until(keeper_lock, until) };
486 keeper_lock.release(); // we don't want to release the lock!
487 try_again = (status == std::cv_status::no_timeout); // detect spurious wakeups
477 if (lane != nullptr) 488 if (lane != nullptr)
478 { 489 {
479 lane->waiting_on = nullptr; 490 lane->m_waiting_on = nullptr;
480 lane->status = prev_status; 491 lane->m_status = prev_status;
481 } 492 }
482 } 493 }
483 } 494 }
495 STACK_CHECK(KL, 0);
484 496
485 if (pushed < 0) 497 if (pushed < 0)
486 { 498 {
@@ -537,13 +549,13 @@ LUAG_FUNC(linda_set)
537 if (has_value) 549 if (has_value)
538 { 550 {
539 // we put some data in the slot, tell readers that they should wake 551 // we put some data in the slot, tell readers that they should wake
540 SIGNAL_ALL(&linda->write_happened); // To be done from within the 'K' locking area 552 linda->m_write_happened.notify_all(); // To be done from within the 'K' locking area
541 } 553 }
542 if (pushed == 1) 554 if (pushed == 1)
543 { 555 {
544 // the key was full, but it is no longer the case, tell writers they should wake 556 // the key was full, but it is no longer the case, tell writers they should wake
545 ASSERT_L(lua_type(L, -1) == LUA_TBOOLEAN && lua_toboolean(L, -1) == 1); 557 ASSERT_L(lua_type(L, -1) == LUA_TBOOLEAN && lua_toboolean(L, -1) == 1);
546 SIGNAL_ALL(&linda->read_happened); // To be done from within the 'K' locking area 558 linda->m_read_happened.notify_all(); // To be done from within the 'K' locking area
547 } 559 }
548 } 560 }
549 } 561 }
@@ -648,7 +660,7 @@ LUAG_FUNC( linda_limit)
648 if( pushed == 1) 660 if( pushed == 1)
649 { 661 {
650 ASSERT_L( lua_type( L, -1) == LUA_TBOOLEAN && lua_toboolean( L, -1) == 1); 662 ASSERT_L( lua_type( L, -1) == LUA_TBOOLEAN && lua_toboolean( L, -1) == 1);
651 SIGNAL_ALL( &linda->read_happened); // To be done from within the 'K' locking area 663 linda->m_read_happened.notify_all(); // To be done from within the 'K' locking area
652 } 664 }
653 } 665 }
654 else // linda is cancelled 666 else // linda is cancelled
@@ -678,8 +690,8 @@ LUAG_FUNC(linda_cancel)
678 linda->simulate_cancel = CancelRequest::Soft; 690 linda->simulate_cancel = CancelRequest::Soft;
679 if (strcmp(who, "both") == 0) // tell everyone writers to wake up 691 if (strcmp(who, "both") == 0) // tell everyone writers to wake up
680 { 692 {
681 SIGNAL_ALL(&linda->write_happened); 693 linda->m_write_happened.notify_all();
682 SIGNAL_ALL(&linda->read_happened); 694 linda->m_read_happened.notify_all();
683 } 695 }
684 else if (strcmp(who, "none") == 0) // reset flag 696 else if (strcmp(who, "none") == 0) // reset flag
685 { 697 {
@@ -687,11 +699,11 @@ LUAG_FUNC(linda_cancel)
687 } 699 }
688 else if (strcmp(who, "read") == 0) // tell blocked readers to wake up 700 else if (strcmp(who, "read") == 0) // tell blocked readers to wake up
689 { 701 {
690 SIGNAL_ALL(&linda->write_happened); 702 linda->m_write_happened.notify_all();
691 } 703 }
692 else if (strcmp(who, "write") == 0) // tell blocked writers to wake up 704 else if (strcmp(who, "write") == 0) // tell blocked writers to wake up
693 { 705 {
694 SIGNAL_ALL(&linda->read_happened); 706 linda->m_read_happened.notify_all();
695 } 707 }
696 else 708 else
697 { 709 {
@@ -730,7 +742,7 @@ LUAG_FUNC(linda_deep)
730*/ 742*/
731 743
732template <bool OPT> 744template <bool OPT>
733static int linda_tostring(lua_State* L, int idx_) 745[[nodiscard]] static int linda_tostring(lua_State* L, int idx_)
734{ 746{
735 Linda* const linda{ lua_toLinda<OPT>(L, idx_) }; 747 Linda* const linda{ lua_toLinda<OPT>(L, idx_) };
736 if (linda != nullptr) 748 if (linda != nullptr)
@@ -792,7 +804,7 @@ LUAG_FUNC(linda_concat)
792LUAG_FUNC(linda_dump) 804LUAG_FUNC(linda_dump)
793{ 805{
794 Linda* const linda{ lua_toLinda<false>(L, 1) }; 806 Linda* const linda{ lua_toLinda<false>(L, 1) };
795 return keeper_push_linda_storage(linda->U, L, linda, linda->hashSeed()); 807 return keeper_push_linda_storage(linda->U, Dest{ L }, linda, linda->hashSeed());
796} 808}
797 809
798// ################################################################################################# 810// #################################################################################################
@@ -804,7 +816,7 @@ LUAG_FUNC(linda_dump)
804LUAG_FUNC(linda_towatch) 816LUAG_FUNC(linda_towatch)
805{ 817{
806 Linda* const linda{ lua_toLinda<false>(L, 1) }; 818 Linda* const linda{ lua_toLinda<false>(L, 1) };
807 int pushed{ keeper_push_linda_storage(linda->U, L, linda, linda->hashSeed()) }; 819 int pushed{ keeper_push_linda_storage(linda->U, Dest{ L }, linda, linda->hashSeed()) };
808 if (pushed == 0) 820 if (pushed == 0)
809 { 821 {
810 // if the linda is empty, don't return nil 822 // if the linda is empty, don't return nil
@@ -839,7 +851,7 @@ LUAG_FUNC(linda_towatch)
839* For any other strings, the ID function must not react at all. This allows 851* For any other strings, the ID function must not react at all. This allows
840* future extensions of the system. 852* future extensions of the system.
841*/ 853*/
842static void* linda_id( lua_State* L, DeepOp op_) 854[[nodiscard]] static void* linda_id(lua_State* L, DeepOp op_)
843{ 855{
844 switch( op_) 856 switch( op_)
845 { 857 {
@@ -885,15 +897,22 @@ static void* linda_id( lua_State* L, DeepOp op_)
885 { 897 {
886 Linda* const linda{ lua_tolightuserdata<Linda>(L, 1) }; 898 Linda* const linda{ lua_tolightuserdata<Linda>(L, 1) };
887 ASSERT_L(linda); 899 ASSERT_L(linda);
888 900 Keeper* const myK{ which_keeper(linda->U->keepers, linda->hashSeed()) };
889 // Clean associated structures in the keeper state. 901 // if collected after the universe, keepers are already destroyed, and there is nothing to clear
890 Keeper* const K{ keeper_acquire(linda->U->keepers, linda->hashSeed()) }; 902 if (myK)
891 if (K && K->L) // can be nullptr if this happens during main state shutdown (lanes is GC'ed -> no keepers -> no need to cleanup)
892 { 903 {
904 // if collected from my own keeper, we can't acquire/release it
905 // because we are already inside a protected area, and trying to do so would deadlock!
906 bool const need_acquire_release{ myK->L != L };
907 // Clean associated structures in the keeper state.
908 Keeper* const K{ need_acquire_release ? keeper_acquire(linda->U->keepers, linda->hashSeed()) : myK };
893 // hopefully this won't ever raise an error as we would jump to the closest pcall site while forgetting to release the keeper mutex... 909 // hopefully this won't ever raise an error as we would jump to the closest pcall site while forgetting to release the keeper mutex...
894 keeper_call(linda->U, K->L, KEEPER_API(clear), L, linda, 0); 910 std::ignore = keeper_call(linda->U, K->L, KEEPER_API(clear), L, linda, 0);
911 if (need_acquire_release)
912 {
913 keeper_release(K);
914 }
895 } 915 }
896 keeper_release(K);
897 916
898 delete linda; // operator delete overload ensures things go as expected 917 delete linda; // operator delete overload ensures things go as expected
899 return nullptr; 918 return nullptr;
@@ -990,11 +1009,11 @@ static void* linda_id( lua_State* L, DeepOp op_)
990 */ 1009 */
991LUAG_FUNC(linda) 1010LUAG_FUNC(linda)
992{ 1011{
993 int const top = lua_gettop(L); 1012 int const top{ lua_gettop(L) };
994 luaL_argcheck(L, top <= 2, top, "too many arguments"); 1013 luaL_argcheck(L, top <= 2, top, "too many arguments");
995 if (top == 1) 1014 if (top == 1)
996 { 1015 {
997 int const t = lua_type(L, 1); 1016 int const t{ lua_type(L, 1) };
998 luaL_argcheck(L, t == LUA_TSTRING || t == LUA_TNUMBER, 1, "wrong parameter (should be a string or a number)"); 1017 luaL_argcheck(L, t == LUA_TSTRING || t == LUA_TNUMBER, 1, "wrong parameter (should be a string or a number)");
999 } 1018 }
1000 else if (top == 2) 1019 else if (top == 2)
@@ -1002,5 +1021,5 @@ LUAG_FUNC(linda)
1002 luaL_checktype(L, 1, LUA_TSTRING); 1021 luaL_checktype(L, 1, LUA_TSTRING);
1003 luaL_checktype(L, 2, LUA_TNUMBER); 1022 luaL_checktype(L, 2, LUA_TNUMBER);
1004 } 1023 }
1005 return luaG_newdeepuserdata(L, linda_id, 0); 1024 return luaG_newdeepuserdata(Dest{ L }, linda_id, 0);
1006} 1025}
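The GC branch above only grabs the keeper lock when the linda is not being collected from inside its own keeper, because that keeper is already held at that point and re-acquiring it would deadlock. Below is a minimal stand-alone sketch of that conditional acquire/release pattern; FakeKeeper, FakeState and clear_storage() are illustrative stand-ins, not the Lanes API:

    #include <mutex>

    struct FakeState {};                // stand-in for a lua_State

    struct FakeKeeper
    {
        std::mutex lock;                // models keeper_acquire()/keeper_release()
        FakeState* state{ nullptr };    // the state owned by this keeper
    };

    // clean up data held by keeper_ while code is currently running in current_
    void clear_storage(FakeKeeper& keeper_, FakeState* current_)
    {
        // when collection is triggered from inside the keeper itself, its lock is
        // already held: taking it again would deadlock, so only lock from outside
        bool const need_acquire_release{ keeper_.state != current_ };
        if (need_acquire_release)
        {
            keeper_.lock.lock();
        }
        // ... clear the keeper-side storage here ...
        if (need_acquire_release)
        {
            keeper_.lock.unlock();
        }
    }

    int main()
    {
        FakeState keeper_state, other_state;
        FakeKeeper k;
        k.state = &keeper_state;
        clear_storage(k, &other_state);    // running elsewhere: the lock is taken and released
        clear_storage(k, &keeper_state);   // running inside the keeper: no locking, no deadlock
        return 0;
    }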
diff --git a/src/macros_and_utils.h b/src/macros_and_utils.h
index 31027d6..e8d5ab5 100644
--- a/src/macros_and_utils.h
+++ b/src/macros_and_utils.h
@@ -11,20 +11,25 @@ extern "C" {
11#endif // __cplusplus 11#endif // __cplusplus
12 12
13#include <cassert> 13#include <cassert>
14#include <chrono>
14#include <tuple> 15#include <tuple>
15#include <type_traits> 16#include <type_traits>
16 17
18using namespace std::chrono_literals;
19
17#define USE_DEBUG_SPEW() 0 20#define USE_DEBUG_SPEW() 0
18#if USE_DEBUG_SPEW() 21#if USE_DEBUG_SPEW()
19extern char const* debugspew_indent; 22extern char const* debugspew_indent;
20#define INDENT_BEGIN "%.*s " 23#define INDENT_BEGIN "%.*s "
21#define INDENT_END , (U ? U->debugspew_indent_depth : 0), debugspew_indent 24#define INDENT_END , (U ? U->debugspew_indent_depth.load(std::memory_order_relaxed) : 0), debugspew_indent
22#define DEBUGSPEW_CODE(_code) _code 25#define DEBUGSPEW_CODE(_code) _code
23#define DEBUGSPEW_PARAM_COMMA( param_) param_, 26#define DEBUGSPEW_OR_NOT(a_, b_) a_
27#define DEBUGSPEW_PARAM_COMMA(param_) param_,
24#define DEBUGSPEW_COMMA_PARAM( param_) , param_ 28#define DEBUGSPEW_COMMA_PARAM( param_) , param_
25#else // USE_DEBUG_SPEW() 29#else // USE_DEBUG_SPEW()
26#define DEBUGSPEW_CODE(_code) 30#define DEBUGSPEW_CODE(_code)
27#define DEBUGSPEW_PARAM_COMMA( param_) 31#define DEBUGSPEW_OR_NOT(a_, b_) b_
32#define DEBUGSPEW_PARAM_COMMA(param_)
28#define DEBUGSPEW_COMMA_PARAM( param_) 33#define DEBUGSPEW_COMMA_PARAM( param_)
29#endif // USE_DEBUG_SPEW() 34#endif // USE_DEBUG_SPEW()
30 35
@@ -41,8 +46,8 @@ extern char const* debugspew_indent;
41 46
42#else // NDEBUG 47#else // NDEBUG
43 48
44#define _ASSERT_L( L, cond_) if( (cond_) == 0) { (void) luaL_error( L, "ASSERT failed: %s:%d '%s'", __FILE__, __LINE__, #cond_);} 49#define _ASSERT_L(L, cond_) if( (cond_) == 0) { (void) luaL_error(L, "ASSERT failed: %s:%d '%s'", __FILE__, __LINE__, #cond_);}
45#define STACK_DUMP( L) luaG_dump( L) 50#define STACK_DUMP(L) luaG_dump(L)
46 51
47class StackChecker 52class StackChecker
48{ 53{
@@ -72,7 +77,7 @@ class StackChecker
72 if ((offset_ < 0) || (m_oldtop < 0)) 77 if ((offset_ < 0) || (m_oldtop < 0))
73 { 78 {
74 assert(false); 79 assert(false);
75 std::ignore = luaL_error(m_L, "STACK INIT ASSERT failed (%d not %d): %s:%d", lua_gettop(m_L), offset_, file_, line_); 80 luaL_error(m_L, "STACK INIT ASSERT failed (%d not %d): %s:%d", lua_gettop(m_L), offset_, file_, line_); // doesn't return
76 } 81 }
77 } 82 }
78 83
@@ -83,7 +88,7 @@ class StackChecker
83 if (lua_gettop(m_L) != pos_) 88 if (lua_gettop(m_L) != pos_)
84 { 89 {
85 assert(false); 90 assert(false);
86 std::ignore = luaL_error(m_L, "STACK INIT ASSERT failed (%d not %d): %s:%d", lua_gettop(m_L), pos_, file_, line_); 91 luaL_error(m_L, "STACK INIT ASSERT failed (%d not %d): %s:%d", lua_gettop(m_L), pos_, file_, line_); // doesn't return
87 } 92 }
88 } 93 }
89 94
@@ -103,7 +108,7 @@ class StackChecker
103 if (actual != expected_) 108 if (actual != expected_)
104 { 109 {
105 assert(false); 110 assert(false);
106 std::ignore = luaL_error(m_L, "STACK ASSERT failed (%d not %d): %s:%d", actual, expected_, file_, line_); 111 luaL_error(m_L, "STACK ASSERT failed (%d not %d): %s:%d", actual, expected_, file_, line_); // doesn't return
107 } 112 }
108 } 113 }
109 } 114 }
@@ -123,24 +128,24 @@ inline void STACK_GROW(lua_State* L, int n_)
123{ 128{
124 if (!lua_checkstack(L, n_)) 129 if (!lua_checkstack(L, n_))
125 { 130 {
126 std::ignore = luaL_error(L, "Cannot grow stack!"); 131 luaL_error(L, "Cannot grow stack!"); // doesn't return
127 } 132 }
128} 133}
129 134
130#define LUAG_FUNC( func_name) int LG_##func_name( lua_State* L) 135#define LUAG_FUNC(func_name) [[nodiscard]] int LG_##func_name(lua_State* L)
131 136
132// ################################################################################################# 137// #################################################################################################
133 138
134// a small helper to extract a full userdata pointer from the stack in a safe way 139// a small helper to extract a full userdata pointer from the stack in a safe way
135template<typename T> 140template<typename T>
136T* lua_tofulluserdata(lua_State* L, int index_) 141[[nodiscard]] T* lua_tofulluserdata(lua_State* L, int index_)
137{ 142{
138 ASSERT_L(lua_isnil(L, index_) || lua_type(L, index_) == LUA_TUSERDATA); 143 ASSERT_L(lua_isnil(L, index_) || lua_type(L, index_) == LUA_TUSERDATA);
139 return static_cast<T*>(lua_touserdata(L, index_)); 144 return static_cast<T*>(lua_touserdata(L, index_));
140} 145}
141 146
142template<typename T> 147template<typename T>
143auto lua_tolightuserdata(lua_State* L, int index_) 148[[nodiscard]] auto lua_tolightuserdata(lua_State* L, int index_)
144{ 149{
145 ASSERT_L(lua_isnil(L, index_) || lua_islightuserdata(L, index_)); 150 ASSERT_L(lua_isnil(L, index_) || lua_islightuserdata(L, index_));
146 if constexpr (std::is_pointer_v<T>) 151 if constexpr (std::is_pointer_v<T>)
@@ -154,7 +159,7 @@ auto lua_tolightuserdata(lua_State* L, int index_)
154} 159}
155 160
156template <typename T> 161template <typename T>
157T* lua_newuserdatauv(lua_State* L, int nuvalue_) 162[[nodiscard]] T* lua_newuserdatauv(lua_State* L, int nuvalue_)
158{ 163{
159 return static_cast<T*>(lua_newuserdatauv(L, sizeof(T), nuvalue_)); 164 return static_cast<T*>(lua_newuserdatauv(L, sizeof(T), nuvalue_));
160} 165}
@@ -167,3 +172,22 @@ T* lua_newuserdatauv(lua_State* L, int nuvalue_)
167 std::ignore = lua_error(L); // doesn't return 172 std::ignore = lua_error(L); // doesn't return
168 assert(false); // we should never get here, but i'm paranoid 173 assert(false); // we should never get here, but i'm paranoid
169} 174}
175
176using lua_Duration = std::chrono::template duration<lua_Number>;
177
178// #################################################################################################
179
180// A unique type generator
181template <typename T, auto = []{}>
182struct Unique
183{
184 T m_val;
185 constexpr Unique() = default;
186 constexpr operator T() const { return m_val; }
187 constexpr explicit Unique(T b_) : m_val{ b_ } {}
188};
189
190// #################################################################################################
191
192using Source = Unique<lua_State*>;
193using Dest = Unique<lua_State*>;
\ No newline at end of file
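The Unique<> helper at the end of this header relies on the defaulted lambda template parameter: each alias declaration instantiates its own closure type, so Source and Dest become distinct types even though both wrap a lua_State*. A stand-alone sketch of the idiom; State, Tagged, Src, Dst and copy() are made-up names used only for illustration:

    struct State {};                               // stand-in for lua_State

    template <typename T, auto = []{}>             // the defaulted lambda makes every alias unique
    struct Tagged
    {
        T m_val;
        constexpr Tagged() = default;
        constexpr operator T() const { return m_val; }
        constexpr explicit Tagged(T b_) : m_val{ b_ } {}
    };

    using Src = Tagged<State*>;
    using Dst = Tagged<State*>;                    // same payload as Src, but a different type

    void copy(Src, Dst) {}                         // direction is now encoded in the signature

    int main()
    {
        State a, b;
        copy(Src{ &a }, Dst{ &b });                // ok
        // copy(Dst{ &b }, Src{ &a });             // does not compile: arguments cannot be swapped
        return 0;
    }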
diff --git a/src/state.cpp b/src/state.cpp
index 55540c8..4a5f995 100644
--- a/src/state.cpp
+++ b/src/state.cpp
@@ -1,5 +1,5 @@
1/* 1/*
2* STATE.C 2* STATE.CPP
3* 3*
4* Lua tools to support Lanes. 4* Lua tools to support Lanes.
5*/ 5*/
@@ -8,7 +8,7 @@
8=============================================================================== 8===============================================================================
9 9
10Copyright (C) 2002-10 Asko Kauppi <akauppi@gmail.com> 10Copyright (C) 2002-10 Asko Kauppi <akauppi@gmail.com>
112011-21 benoit Germain <bnt.germain@gmail.com> 112011-24 benoit Germain <bnt.germain@gmail.com>
12 12
13Permission is hereby granted, free of charge, to any person obtaining a copy 13Permission is hereby granted, free of charge, to any person obtaining a copy
14of this software and associated documentation files (the "Software"), to deal 14of this software and associated documentation files (the "Software"), to deal
@@ -31,20 +31,11 @@ THE SOFTWARE.
31=============================================================================== 31===============================================================================
32*/ 32*/
33 33
34#include <stdio.h> 34#include "state.h"
35#include <assert.h> 35
36#include <string.h>
37#include <ctype.h>
38#include <stdlib.h>
39#if !defined(__APPLE__)
40#include <malloc.h>
41#endif // __APPLE__
42
43#include "compat.h"
44#include "macros_and_utils.h"
45#include "universe.h"
46#include "tools.h"
47#include "lanes.h" 36#include "lanes.h"
37#include "tools.h"
38#include "universe.h"
48 39
49// ################################################################################################ 40// ################################################################################################
50 41
@@ -58,7 +49,7 @@ THE SOFTWARE.
58// 49//
59// Upvalues: [1]: original 'require' function 50// Upvalues: [1]: original 'require' function
60// 51//
61static int luaG_new_require( lua_State* L) 52[[nodiscard]] static int luaG_new_require(lua_State* L)
62{ 53{
63 int rc; 54 int rc;
64 int const args = lua_gettop( L); // args 55 int const args = lua_gettop( L); // args
@@ -88,6 +79,8 @@ static int luaG_new_require( lua_State* L)
88 return lua_gettop(L); // result(s) 79 return lua_gettop(L); // result(s)
89} 80}
90 81
82// #################################################################################################
83
91/* 84/*
92* Serialize calls to 'require', if it exists 85* Serialize calls to 'require', if it exists
93*/ 86*/
@@ -119,15 +112,16 @@ void serialize_require(DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State* L)
119 112
120/*---=== luaG_newstate ===---*/ 113/*---=== luaG_newstate ===---*/
121 114
122static int require_lanes_core( lua_State* L) 115[[nodiscard]] static int require_lanes_core(lua_State* L)
123{ 116{
124 // leaves a copy of 'lanes.core' module table on the stack 117 // leaves a copy of 'lanes.core' module table on the stack
125 luaL_requiref( L, "lanes.core", luaopen_lanes_core, 0); 118 luaL_requiref( L, "lanes.core", luaopen_lanes_core, 0);
126 return 1; 119 return 1;
127} 120}
128 121
122// #################################################################################################
129 123
130static const luaL_Reg libs[] = 124static luaL_Reg const libs[] =
131{ 125{
132 { LUA_LOADLIBNAME, luaopen_package}, 126 { LUA_LOADLIBNAME, luaopen_package},
133 { LUA_TABLIBNAME, luaopen_table}, 127 { LUA_TABLIBNAME, luaopen_table},
@@ -163,7 +157,9 @@ static const luaL_Reg libs[] =
163 { nullptr, nullptr } 157 { nullptr, nullptr }
164}; 158};
165 159
166static void open1lib( DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State* L, char const* name_, size_t len_) 160// #################################################################################################
161
162static void open1lib(DEBUGSPEW_PARAM_COMMA(Universe* U) lua_State* L, char const* name_, size_t len_)
167{ 163{
168 int i; 164 int i;
169 for( i = 0; libs[i].name; ++ i) 165 for( i = 0; libs[i].name; ++ i)
@@ -192,30 +188,33 @@ static void open1lib( DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State* L, char con
192 } 188 }
193} 189}
194 190
191// #################################################################################################
195 192
196// just like lua_xmove, args are (from, to) 193// just like lua_xmove, args are (from, to)
197static void copy_one_time_settings( Universe* U, lua_State* L, lua_State* L2) 194static void copy_one_time_settings(Universe* U, Source L, Dest L2)
198{ 195{
199 STACK_GROW( L, 2); 196 STACK_GROW(L, 2);
200 STACK_CHECK_START_REL(L, 0); 197 STACK_CHECK_START_REL(L, 0);
201 STACK_CHECK_START_REL(L2, 0); 198 STACK_CHECK_START_REL(L2, 0);
202 199
203 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "copy_one_time_settings()\n" INDENT_END)); 200 DEBUGSPEW_CODE(fprintf( stderr, INDENT_BEGIN "copy_one_time_settings()\n" INDENT_END));
204 DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); 201 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed));
205 202
206 CONFIG_REGKEY.pushValue(L); // config 203 CONFIG_REGKEY.pushValue(L); // config
207 // copy settings from source to destination registry 204 // copy settings from source to destination registry
208 if( luaG_inter_move( U, L, L2, 1, LookupMode::LaneBody) < 0) // // config 205 if (luaG_inter_move(U, L, L2, 1, LookupMode::LaneBody) != InterCopyResult::Success) // // config
209 { 206 {
210 (void) luaL_error( L, "failed to copy settings when loading lanes.core"); 207 luaL_error( L, "failed to copy settings when loading lanes.core"); // doesn't return
211 } 208 }
212 // set L2:_R[CONFIG_REGKEY] = settings 209 // set L2:_R[CONFIG_REGKEY] = settings
213 CONFIG_REGKEY.setValue(L2, [](lua_State* L) { lua_insert(L, -2); }); // config 210 CONFIG_REGKEY.setValue(L2, [](lua_State* L) { lua_insert(L, -2); }); // config
214 STACK_CHECK( L2, 0); 211 STACK_CHECK(L2, 0);
215 STACK_CHECK( L, 0); 212 STACK_CHECK(L, 0);
216 DEBUGSPEW_CODE( -- U->debugspew_indent_depth); 213 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed));
217} 214}
218 215
216// #################################################################################################
217
219void initialize_on_state_create( Universe* U, lua_State* L) 218void initialize_on_state_create( Universe* U, lua_State* L)
220{ 219{
221 STACK_CHECK_START_REL(L, 1); // settings 220 STACK_CHECK_START_REL(L, 1); // settings
@@ -247,7 +246,9 @@ void initialize_on_state_create( Universe* U, lua_State* L)
247 STACK_CHECK(L, 1); 246 STACK_CHECK(L, 1);
248} 247}
249 248
250lua_State* create_state( Universe* U, lua_State* from_) 249// #################################################################################################
250
251lua_State* create_state(Universe* U, lua_State* from_)
251{ 252{
252 lua_State* L; 253 lua_State* L;
253#if LUAJIT_FLAVOR() == 64 254#if LUAJIT_FLAVOR() == 64
@@ -273,11 +274,13 @@ lua_State* create_state( Universe* U, lua_State* from_)
273 274
274 if (L == nullptr) 275 if (L == nullptr)
275 { 276 {
276 std::ignore = luaL_error( from_, "luaG_newstate() failed while creating state; out of memory"); 277 luaL_error(from_, "luaG_newstate() failed while creating state; out of memory"); // doesn't return
277 } 278 }
278 return L; 279 return L;
279} 280}
280 281
282// #################################################################################################
283
281void call_on_state_create(Universe* U, lua_State* L, lua_State* from_, LookupMode mode_) 284void call_on_state_create(Universe* U, lua_State* L, lua_State* from_, LookupMode mode_)
282{ 285{
283 if (U->on_state_create_func != nullptr) 286 if (U->on_state_create_func != nullptr)
@@ -313,6 +316,8 @@ void call_on_state_create(Universe* U, lua_State* L, lua_State* from_, LookupMod
313 } 316 }
314} 317}
315 318
319// #################################################################################################
320
316/* 321/*
317* Like 'luaL_openlibs()' but allows the set of libraries be selected 322* Like 'luaL_openlibs()' but allows the set of libraries be selected
318* 323*
@@ -326,11 +331,11 @@ void call_on_state_create(Universe* U, lua_State* L, lua_State* from_, LookupMod
326* *NOT* called for keeper states! 331* *NOT* called for keeper states!
327* 332*
328*/ 333*/
329lua_State* luaG_newstate( Universe* U, lua_State* from_, char const* libs_) 334lua_State* luaG_newstate(Universe* U, Source from_, char const* libs_)
330{ 335{
331 lua_State* L = create_state( U, from_); 336 Dest const L{ create_state(U, from_) };
332 337
333 STACK_GROW( L, 2); 338 STACK_GROW(L, 2);
334 STACK_CHECK_START_ABS(L, 0); 339 STACK_CHECK_START_ABS(L, 0);
335 340
336 // copy the universe as a light userdata (only the master state holds the full userdata) 341 // copy the universe as a light userdata (only the master state holds the full userdata)
@@ -349,8 +354,8 @@ lua_State* luaG_newstate( Universe* U, lua_State* from_, char const* libs_)
349 return L; 354 return L;
350 } 355 }
351 356
352 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "luaG_newstate()\n" INDENT_END)); 357 DEBUGSPEW_CODE(fprintf( stderr, INDENT_BEGIN "luaG_newstate()\n" INDENT_END));
353 DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); 358 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed));
354 359
355 // copy settings (for example because it may contain a Lua on_state_create function) 360 // copy settings (for example because it may contain a Lua on_state_create function)
356 copy_one_time_settings( U, from_, L); 361 copy_one_time_settings( U, from_, L);
@@ -423,22 +428,22 @@ lua_State* luaG_newstate( Universe* U, lua_State* from_, char const* libs_)
423 428
424#if 0 && USE_DEBUG_SPEW() 429#if 0 && USE_DEBUG_SPEW()
425 // dump the lookup database contents 430 // dump the lookup database contents
426 lua_getfield( L, LUA_REGISTRYINDEX, LOOKUP_REGKEY); // {} 431 lua_getfield(L, LUA_REGISTRYINDEX, LOOKUP_REGKEY); // {}
427 lua_pushnil( L); // {} nil 432 lua_pushnil(L); // {} nil
428 while( lua_next( L, -2)) // {} k v 433 while (lua_next(L, -2)) // {} k v
429 { 434 {
430 lua_getglobal( L, "print"); // {} k v print 435 lua_getglobal(L, "print"); // {} k v print
431 lua_pushlstring( L, debugspew_indent, U->debugspew_indent_depth); // {} k v print " " 436 lua_pushlstring(L, debugspew_indent, U->debugspew_indent_depth.load(std::memory_order_relaxed)); // {} k v print " "
432 lua_pushvalue( L, -4); // {} k v print " " k 437 lua_pushvalue(L, -4); // {} k v print " " k
433 lua_pushvalue( L, -4); // {} k v print " " k v 438 lua_pushvalue(L, -4); // {} k v print " " k v
434 lua_call( L, 3, 0); // {} k v 439 lua_call(L, 3, 0); // {} k v
435 lua_pop( L, 1); // {} k 440 lua_pop(L, 1); // {} k
436 } 441 }
437 lua_pop( L, 1); // {} 442 lua_pop(L, 1); // {}
438#endif // USE_DEBUG_SPEW() 443#endif // USE_DEBUG_SPEW()
439 444
440 lua_pop( L, 1); 445 lua_pop(L, 1);
441 STACK_CHECK(L, 0); 446 STACK_CHECK(L, 0);
442 DEBUGSPEW_CODE(--U->debugspew_indent_depth); 447 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed));
443 return L; 448 return L;
444} 449}
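open1lib() above scans the libs[] table for an entry whose name matches a (pointer, length) pair, because the requested name is a slice of a longer comma-separated libs string rather than a null-terminated token. A stand-alone sketch of that lookup, with hypothetical openers in place of the real luaopen_* functions:

    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    struct LibReg { char const* name; void (*open)(); };     // shaped like luaL_Reg

    static void open_table() { std::puts("opening table"); }
    static void open_string() { std::puts("opening string"); }

    static LibReg const libs[] = {
        { "table", open_table },
        { "string", open_string },
        { nullptr, nullptr }                                  // sentinel, as in the real table
    };

    // name_ points into a larger string, hence the explicit length instead of strcmp()
    static void open1lib(char const* name_, std::size_t len_)
    {
        for (int i = 0; libs[i].name != nullptr; ++i)
        {
            if (std::strlen(libs[i].name) == len_ && std::strncmp(libs[i].name, name_, len_) == 0)
            {
                libs[i].open();
                return;
            }
        }
    }

    int main()
    {
        char const* list = "table,string";
        open1lib(list, 5);            // matches "table" against the first 5 characters
        open1lib(list + 6, 6);        // matches "string"
        return 0;
    }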
diff --git a/src/state.h b/src/state.h
index 0e35e89..e1c311a 100644
--- a/src/state.h
+++ b/src/state.h
@@ -3,14 +3,15 @@
3#include "macros_and_utils.h" 3#include "macros_and_utils.h"
4 4
5// forwards 5// forwards
6struct Universe; 6enum class LookupMode;
7class Universe;
7 8
8void serialize_require(DEBUGSPEW_PARAM_COMMA(Universe* U) lua_State* L); 9void serialize_require(DEBUGSPEW_PARAM_COMMA(Universe* U) lua_State* L);
9 10
10// ################################################################################################ 11// ################################################################################################
11 12
12lua_State* create_state(Universe* U, lua_State* from_); 13[[nodiscard]] lua_State* create_state(Universe* U, lua_State* from_);
13lua_State* luaG_newstate(Universe* U, lua_State* _from, char const* libs); 14[[nodiscard]] lua_State* luaG_newstate(Universe* U, Source _from, char const* libs);
14 15
15// ################################################################################################ 16// ################################################################################################
16 17
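The header can get away with the opaque declaration `enum class LookupMode;` because a scoped enum defaults to an int underlying type, so the type is complete without its enumerators; `class Universe;` likewise suffices for pointer parameters. A small stand-alone illustration (the enum body shown here is a placeholder, only LaneBody appears in this diff):

    enum class LookupMode;    // opaque declaration: complete type, underlying type defaults to int
    class Universe;           // forward declaration: enough for pointers and references

    // declarations can use both types without seeing their definitions
    void call_on_state_create(Universe* U, LookupMode mode_);

    // the definitions can live in another translation unit
    enum class LookupMode { LaneBody };
    class Universe {};

    void call_on_state_create(Universe* /*U*/, LookupMode /*mode_*/) {}

    int main()
    {
        Universe u;
        call_on_state_create(&u, LookupMode::LaneBody);
        return 0;
    }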
diff --git a/src/threading.cpp b/src/threading.cpp
index afeb184..259693a 100644
--- a/src/threading.cpp
+++ b/src/threading.cpp
@@ -1,6 +1,6 @@
1/* 1/*
2 * THREADING.C Copyright (c) 2007-08, Asko Kauppi 2 * THREADING.CPP Copyright (c) 2007-08, Asko Kauppi
3 * Copyright (C) 2009-19, Benoit Germain 3 * Copyright (C) 2009-24, Benoit Germain
4 * 4 *
5 * Lua Lanes OS threading specific code. 5 * Lua Lanes OS threading specific code.
6 * 6 *
@@ -12,7 +12,7 @@
12=============================================================================== 12===============================================================================
13 13
14Copyright (C) 2007-10 Asko Kauppi <akauppi@gmail.com> 14Copyright (C) 2007-10 Asko Kauppi <akauppi@gmail.com>
15Copyright (C) 2009-14, Benoit Germain <bnt.germain@gmail.com> 15Copyright (C) 2009-24, Benoit Germain <bnt.germain@gmail.com>
16 16
17Permission is hereby granted, free of charge, to any person obtaining a copy 17Permission is hereby granted, free of charge, to any person obtaining a copy
18of this software and associated documentation files (the "Software"), to deal 18of this software and associated documentation files (the "Software"), to deal
@@ -47,12 +47,6 @@ THE SOFTWARE.
47 47
48#endif // __linux__ 48#endif // __linux__
49 49
50#include <stdio.h>
51#include <stdlib.h>
52#include <assert.h>
53#include <errno.h>
54#include <math.h>
55
56#include "threading.h" 50#include "threading.h"
57 51
58#if !defined( PLATFORM_XBOX) && !defined( PLATFORM_WIN32) && !defined( PLATFORM_POCKETPC) 52#if !defined( PLATFORM_XBOX) && !defined( PLATFORM_WIN32) && !defined( PLATFORM_POCKETPC)
@@ -65,12 +59,6 @@ THE SOFTWARE.
65# include <unistd.h> 59# include <unistd.h>
66#endif 60#endif
67 61
68/* Linux needs to check, whether it's been run as root
69*/
70#ifdef PLATFORM_LINUX
71 volatile bool sudo;
72#endif
73
74#ifdef PLATFORM_OSX 62#ifdef PLATFORM_OSX
75# include "threading_osx.h" 63# include "threading_osx.h"
76#endif 64#endif
@@ -93,205 +81,34 @@ THE SOFTWARE.
93# pragma warning( disable : 4054 ) 81# pragma warning( disable : 4054 )
94#endif 82#endif
95 83
96//#define THREAD_CREATE_RETRIES_MAX 20
97 // loops (maybe retry forever?)
98
99/* 84/*
100* FAIL is for unexpected API return values - essentially programming 85* FAIL is for unexpected API return values - essentially programming
101* error in _this_ code. 86* error in _this_ code.
102*/ 87*/
103#if defined( PLATFORM_XBOX) || defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC) 88#if defined(PLATFORM_XBOX) || defined(PLATFORM_WIN32) || defined(PLATFORM_POCKETPC)
104static void FAIL( char const* funcname, int rc) 89 static void FAIL(char const* funcname, int rc)
105{ 90 {
106#if defined( PLATFORM_XBOX) 91#if defined(PLATFORM_XBOX)
107 fprintf( stderr, "%s() failed! (%d)\n", funcname, rc ); 92 fprintf(stderr, "%s() failed! (%d)\n", funcname, rc);
108#else // PLATFORM_XBOX 93#else // PLATFORM_XBOX
109 char buf[256]; 94 char buf[256];
110 FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, nullptr, rc, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), buf, 256, nullptr); 95 FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, nullptr, rc, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), buf, 256, nullptr);
111 fprintf( stderr, "%s() failed! [GetLastError() -> %d] '%s'", funcname, rc, buf); 96 fprintf(stderr, "%s() failed! [GetLastError() -> %d] '%s'", funcname, rc, buf);
112#endif // PLATFORM_XBOX 97#endif // PLATFORM_XBOX
113#ifdef _MSC_VER 98#ifdef _MSC_VER
114 __debugbreak(); // give a chance to the debugger! 99 __debugbreak(); // give a chance to the debugger!
115#endif // _MSC_VER 100#endif // _MSC_VER
116 abort(); 101 abort();
117} 102 }
118#endif // win32 build 103#endif // win32 build
119 104
120 105
121/*
122* Returns millisecond timing (in seconds) for the current time.
123*
124* Note: This function should be called once in single-threaded mode in Win32,
125* to get it initialized.
126*/
127time_d now_secs(void) {
128
129#if defined( PLATFORM_XBOX) || defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC)
130 /*
131 * Windows FILETIME values are "100-nanosecond intervals since
132 * January 1, 1601 (UTC)" (MSDN). Well, we'd want Unix Epoch as
133 * the offset and it seems, so would they:
134 *
135 * <http://msdn.microsoft.com/en-us/library/ms724928(VS.85).aspx>
136 */
137 SYSTEMTIME st;
138 FILETIME ft;
139 ULARGE_INTEGER uli;
140 static ULARGE_INTEGER uli_epoch; // Jan 1st 1970 0:0:0
141
142 if (uli_epoch.HighPart==0) {
143 st.wYear= 1970;
144 st.wMonth= 1; // Jan
145 st.wDay= 1;
146 st.wHour= st.wMinute= st.wSecond= st.wMilliseconds= 0;
147
148 if (!SystemTimeToFileTime( &st, &ft ))
149 FAIL( "SystemTimeToFileTime", GetLastError() );
150
151 uli_epoch.LowPart= ft.dwLowDateTime;
152 uli_epoch.HighPart= ft.dwHighDateTime;
153 }
154
155 GetSystemTime( &st ); // current system date/time in UTC
156 if (!SystemTimeToFileTime( &st, &ft ))
157 FAIL( "SystemTimeToFileTime", GetLastError() );
158
159 uli.LowPart= ft.dwLowDateTime;
160 uli.HighPart= ft.dwHighDateTime;
161
162 /* 'double' has less accuracy than 64-bit int, but if it were to degrade,
163 * it would do so gracefully. In practice, the integer accuracy is not
164 * of the 100ns class but just 1ms (Windows XP).
165 */
166# if 1
167 // >= 2.0.3 code
168 return (double) ((uli.QuadPart - uli_epoch.QuadPart)/10000) / 1000.0;
169# elif 0
170 // fix from Kriss Daniels, see:
171 // <http://luaforge.net/forum/forum.php?thread_id=22704&forum_id=1781>
172 //
173 // "seem to be getting negative numbers from the old version, probably number
174 // conversion clipping, this fixes it and maintains ms resolution"
175 //
176 // This was a bad fix, and caused timer test 5 sec timers to disappear.
177 // --AKa 25-Jan-2009
178 //
179 return ((double)((signed)((uli.QuadPart/10000) - (uli_epoch.QuadPart/10000)))) / 1000.0;
180# else
181 // <= 2.0.2 code
182 return (double)(uli.QuadPart - uli_epoch.QuadPart) / 10000000.0;
183# endif
184#else // !(defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC))
185 struct timeval tv;
186 // {
187 // time_t tv_sec; /* seconds since Jan. 1, 1970 */
188 // suseconds_t tv_usec; /* and microseconds */
189 // };
190
191 int rc = gettimeofday(&tv, nullptr /*time zone not used any more (in Linux)*/);
192 assert( rc==0 );
193
194 return ((double)tv.tv_sec) + ((tv.tv_usec)/1000) / 1000.0;
195#endif // !(defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC))
196}
197
198
199/*
200*/
201time_d SIGNAL_TIMEOUT_PREPARE( double secs ) {
202 if (secs<=0.0) return secs;
203 else return now_secs() + secs;
204}
205
206
207#if THREADAPI == THREADAPI_PTHREAD
208/*
209* Prepare 'abs_secs' kind of timeout to 'timespec' format
210*/
211static void prepare_timeout( struct timespec *ts, time_d abs_secs ) {
212 assert(ts);
213 assert( abs_secs >= 0.0 );
214
215 if (abs_secs==0.0)
216 abs_secs= now_secs();
217
218 ts->tv_sec= (time_t) floor( abs_secs );
219 ts->tv_nsec= ((long)((abs_secs - ts->tv_sec) * 1000.0 +0.5)) * 1000000UL; // 1ms = 1000000ns
220 if (ts->tv_nsec == 1000000000UL)
221 {
222 ts->tv_nsec = 0;
223 ts->tv_sec = ts->tv_sec + 1;
224 }
225}
226#endif // THREADAPI == THREADAPI_PTHREAD
227
228
229/*---=== Threading ===---*/ 106/*---=== Threading ===---*/
230 107
231//--- 108// ##################################################################################################
232// It may be meaningful to explicitly limit the new threads' C stack size. 109// ##################################################################################################
233// We should know how much Lua needs in the C stack, all Lua side allocations
234// are done in heap so they don't count.
235//
236// Consequence of _not_ limiting the stack is running out of virtual memory
237// with 1000-5000 threads on 32-bit systems.
238//
239// Note: using external C modules may be affected by the stack size check.
240// if having problems, set back to '0' (default stack size of the system).
241//
242// Win32: 64K (?)
243// Win64: xxx
244//
245// Linux x86: 2MB Ubuntu 7.04 via 'pthread_getstacksize()'
246// Linux x64: xxx
247// Linux ARM: xxx
248//
249// OS X 10.4.9: 512K <http://developer.apple.com/qa/qa2005/qa1419.html>
250// valid values N * 4KB
251//
252#ifndef _THREAD_STACK_SIZE
253# if defined( PLATFORM_XBOX) || defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC) || defined( PLATFORM_CYGWIN)
254# define _THREAD_STACK_SIZE 0
255 // Win32: does it work with less?
256# elif (defined PLATFORM_OSX)
257# define _THREAD_STACK_SIZE (524288/2) // 262144
258 // OS X: "make test" works on 65536 and even below
259 // "make perftest" works on >= 4*65536 == 262144 (not 3*65536)
260# elif (defined PLATFORM_LINUX) && (defined __i386)
261# define _THREAD_STACK_SIZE (2097152/16) // 131072
262 // Linux x86 (Ubuntu 7.04): "make perftest" works on /16 (not on /32)
263# elif (defined PLATFORM_BSD) && (defined __i386)
264# define _THREAD_STACK_SIZE (1048576/8) // 131072
265 // FreeBSD 6.2 SMP i386: ("gmake perftest" works on /8 (not on /16)
266# endif
267#endif
268
269#if THREADAPI == THREADAPI_WINDOWS 110#if THREADAPI == THREADAPI_WINDOWS
270 111
271#if _WIN32_WINNT < 0x0600 // CONDITION_VARIABLE aren't available
272 //
273 void MUTEX_INIT( MUTEX_T *ref ) {
274 *ref= CreateMutex( nullptr /*security attr*/, false /*not locked*/, nullptr );
275 if (!ref) FAIL( "CreateMutex", GetLastError() );
276 }
277 void MUTEX_FREE( MUTEX_T *ref ) {
278 if (!CloseHandle(*ref)) FAIL( "CloseHandle (mutex)", GetLastError() );
279 *ref= nullptr;
280 }
281 void MUTEX_LOCK( MUTEX_T *ref )
282 {
283 DWORD rc = WaitForSingleObject( *ref, INFINITE);
284 // ERROR_WAIT_NO_CHILDREN means a thread was killed (lane terminated because of error raised during a linda transfer for example) while having grabbed this mutex
285 // this is not a big problem as we will grab it just the same, so ignore this particular error
286 if( rc != 0 && rc != ERROR_WAIT_NO_CHILDREN)
287 FAIL( "WaitForSingleObject", (rc == WAIT_FAILED) ? GetLastError() : rc);
288 }
289 void MUTEX_UNLOCK( MUTEX_T *ref ) {
290 if (!ReleaseMutex(*ref))
291 FAIL( "ReleaseMutex", GetLastError() );
292 }
293#endif // CONDITION_VARIABLE aren't available
294
295static int const gs_prio_remap[] = 112static int const gs_prio_remap[] =
296{ 113{
297 THREAD_PRIORITY_IDLE, 114 THREAD_PRIORITY_IDLE,
@@ -303,348 +120,120 @@ static int const gs_prio_remap[] =
303 THREAD_PRIORITY_TIME_CRITICAL 120 THREAD_PRIORITY_TIME_CRITICAL
304}; 121};
305 122
306/* MSDN: "If you would like to use the CRT in ThreadProc, use the 123// ###############################################################################################
307_beginthreadex function instead (of CreateThread)."
308MSDN: "you can create at most 2028 threads"
309*/
310// Note: Visual C++ requires '__stdcall' where it is
311void THREAD_CREATE( THREAD_T* ref, THREAD_RETURN_T (__stdcall *func)( void*), void* data, int prio /* -3..+3 */)
312{
313 HANDLE h = (HANDLE) _beginthreadex(nullptr, // security
314 _THREAD_STACK_SIZE,
315 func,
316 data,
317 0, // flags (0/CREATE_SUSPENDED)
318 nullptr // thread id (not used)
319 );
320 124
321 if (h == nullptr) // _beginthreadex returns 0L on failure instead of -1L (like _beginthread) 125void THREAD_SET_PRIORITY(int prio_, [[maybe_unused]] bool sudo_)
322 { 126{
323 FAIL( "CreateThread", GetLastError()); 127 // prio range [-3,+3] was checked by the caller
324 } 128 if (!SetThreadPriority(GetCurrentThread(), gs_prio_remap[prio_ + 3]))
325
326 if (prio != THREAD_PRIO_DEFAULT)
327 { 129 {
328 if (!SetThreadPriority( h, gs_prio_remap[prio + 3])) 130 FAIL("THREAD_SET_PRIORITY", GetLastError());
329 {
330 FAIL( "SetThreadPriority", GetLastError());
331 }
332 } 131 }
333
334 *ref = h;
335} 132}
336 133
134// ###############################################################################################
337 135
338void THREAD_SET_PRIORITY( int prio) 136void JTHREAD_SET_PRIORITY(std::jthread& thread_, int prio_, [[maybe_unused]] bool sudo_)
339{ 137{
340 // prio range [-3,+3] was checked by the caller 138 // prio range [-3,+3] was checked by the caller
341 if (!SetThreadPriority( GetCurrentThread(), gs_prio_remap[prio + 3])) 139 if (!SetThreadPriority(thread_.native_handle(), gs_prio_remap[prio_ + 3]))
342 { 140 {
343 FAIL( "THREAD_SET_PRIORITY", GetLastError()); 141 FAIL("JTHREAD_SET_PRIORITY", GetLastError());
344 } 142 }
345} 143}
346 144
347void THREAD_SET_AFFINITY( unsigned int aff) 145// ###############################################################################################
348{
349 if( !SetThreadAffinityMask( GetCurrentThread(), aff))
350 {
351 FAIL( "THREAD_SET_AFFINITY", GetLastError());
352 }
353}
354 146
355bool THREAD_WAIT_IMPL( THREAD_T *ref, double secs) 147void THREAD_SET_AFFINITY(unsigned int aff)
356{ 148{
357 DWORD ms = (secs<0.0) ? INFINITE : (DWORD)((secs*1000.0)+0.5); 149 if (!SetThreadAffinityMask(GetCurrentThread(), aff))
358
359 DWORD rc= WaitForSingleObject( *ref, ms /*timeout*/ );
360 //
361 // (WAIT_ABANDONED)
362 // WAIT_OBJECT_0 success (0)
363 // WAIT_TIMEOUT
364 // WAIT_FAILED more info via GetLastError()
365
366 if (rc == WAIT_TIMEOUT) return false;
367 if( rc !=0) FAIL( "WaitForSingleObject", rc==WAIT_FAILED ? GetLastError() : rc);
368 *ref = nullptr; // thread no longer usable
369 return true;
370 }
371 //
372 void THREAD_KILL( THREAD_T *ref )
373 { 150 {
374 // nonexistent on Xbox360, simply disable until a better solution is found 151 FAIL("THREAD_SET_AFFINITY", GetLastError());
375 #if !defined( PLATFORM_XBOX)
376 // in theory no-one should call this as it is very dangerous (memory and mutex leaks, no notification of DLLs, etc.)
377 if (!TerminateThread( *ref, 0 )) FAIL("TerminateThread", GetLastError());
378 #endif // PLATFORM_XBOX
379 *ref = nullptr;
380 } 152 }
153}
381 154
382 void THREAD_MAKE_ASYNCH_CANCELLABLE() {} // nothing to do for windows threads, we can cancel them anytime we want 155// ###############################################################################################
383 156
384#if !defined __GNUC__ 157#if !defined __GNUC__
385 //see http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx 158//see http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
386 #define MS_VC_EXCEPTION 0x406D1388 159#define MS_VC_EXCEPTION 0x406D1388
387 #pragma pack(push,8) 160#pragma pack(push,8)
388 typedef struct tagTHREADNAME_INFO 161typedef struct tagTHREADNAME_INFO
389 { 162{
390 DWORD dwType; // Must be 0x1000. 163 DWORD dwType; // Must be 0x1000.
391 LPCSTR szName; // Pointer to name (in user addr space). 164 LPCSTR szName; // Pointer to name (in user addr space).
392 DWORD dwThreadID; // Thread ID (-1=caller thread). 165 DWORD dwThreadID; // Thread ID (-1=caller thread).
393 DWORD dwFlags; // Reserved for future use, must be zero. 166 DWORD dwFlags; // Reserved for future use, must be zero.
394 } THREADNAME_INFO; 167} THREADNAME_INFO;
395 #pragma pack(pop) 168#pragma pack(pop)
396#endif // !__GNUC__ 169#endif // !__GNUC__
397 170
398 void THREAD_SETNAME( char const* _name) 171void THREAD_SETNAME(char const* _name)
399 { 172{
400#if !defined __GNUC__ 173#if !defined __GNUC__
401 THREADNAME_INFO info; 174 THREADNAME_INFO info;
402 info.dwType = 0x1000; 175 info.dwType = 0x1000;
403 info.szName = _name; 176 info.szName = _name;
404 info.dwThreadID = GetCurrentThreadId(); 177 info.dwThreadID = GetCurrentThreadId();
405 info.dwFlags = 0; 178 info.dwFlags = 0;
406
407 __try
408 {
409 RaiseException( MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(ULONG_PTR), (ULONG_PTR*)&info );
410 }
411 __except(EXCEPTION_EXECUTE_HANDLER)
412 {
413 }
414#endif // !__GNUC__
415 }
416
417#if _WIN32_WINNT < 0x0600 // CONDITION_VARIABLE aren't available
418 179
419 void SIGNAL_INIT( SIGNAL_T* ref) 180 __try
420 { 181 {
421 InitializeCriticalSection( &ref->signalCS); 182 RaiseException( MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(ULONG_PTR), (ULONG_PTR*)&info );
422 InitializeCriticalSection( &ref->countCS);
423 if( 0 == (ref->waitEvent = CreateEvent( 0, true, false, 0))) // manual-reset
424 FAIL( "CreateEvent", GetLastError());
425 if( 0 == (ref->waitDoneEvent = CreateEvent( 0, false, false, 0))) // auto-reset
426 FAIL( "CreateEvent", GetLastError());
427 ref->waitersCount = 0;
428 } 183 }
429 184 __except(EXCEPTION_EXECUTE_HANDLER)
430 void SIGNAL_FREE( SIGNAL_T* ref)
431 { 185 {
432 CloseHandle( ref->waitDoneEvent);
433 CloseHandle( ref->waitEvent);
434 DeleteCriticalSection( &ref->countCS);
435 DeleteCriticalSection( &ref->signalCS);
436 } 186 }
187#endif // !__GNUC__
188}
437 189
438 bool SIGNAL_WAIT( SIGNAL_T* ref, MUTEX_T* mu_ref, time_d abs_secs) 190// ##################################################################################################
439 { 191// ##################################################################################################
440 DWORD errc;
441 DWORD ms;
442
443 if( abs_secs < 0.0)
444 ms = INFINITE;
445 else if( abs_secs == 0.0)
446 ms = 0;
447 else
448 {
449 time_d msd = (abs_secs - now_secs()) * 1000.0 + 0.5;
450 // If the time already passed, still try once (ms==0). A short timeout
451 // may have turned negative or 0 because of the two time samples done.
452 ms = msd <= 0.0 ? 0 : (DWORD)msd;
453 }
454
455 EnterCriticalSection( &ref->signalCS);
456 EnterCriticalSection( &ref->countCS);
457 ++ ref->waitersCount;
458 LeaveCriticalSection( &ref->countCS);
459 LeaveCriticalSection( &ref->signalCS);
460
461 errc = SignalObjectAndWait( *mu_ref, ref->waitEvent, ms, false);
462
463 EnterCriticalSection( &ref->countCS);
464 if( 0 == -- ref->waitersCount)
465 {
466 // we're the last one leaving...
467 ResetEvent( ref->waitEvent);
468 SetEvent( ref->waitDoneEvent);
469 }
470 LeaveCriticalSection( &ref->countCS);
471 MUTEX_LOCK( mu_ref);
472
473 switch( errc)
474 {
475 case WAIT_TIMEOUT:
476 return false;
477 case WAIT_OBJECT_0:
478 return true;
479 }
480
481 FAIL( "SignalObjectAndWait", GetLastError());
482 return false;
483 }
484
485 void SIGNAL_ALL( SIGNAL_T* ref)
486 {
487 DWORD errc = WAIT_OBJECT_0;
488
489 EnterCriticalSection( &ref->signalCS);
490 EnterCriticalSection( &ref->countCS);
491
492 if( ref->waitersCount > 0)
493 {
494 ResetEvent( ref->waitDoneEvent);
495 SetEvent( ref->waitEvent);
496 LeaveCriticalSection( &ref->countCS);
497 errc = WaitForSingleObject( ref->waitDoneEvent, INFINITE);
498 }
499 else
500 {
501 LeaveCriticalSection( &ref->countCS);
502 }
503
504 LeaveCriticalSection( &ref->signalCS);
505
506 if( WAIT_OBJECT_0 != errc)
507 FAIL( "WaitForSingleObject", GetLastError());
508 }
509
510#else // CONDITION_VARIABLE are available, use them
511
512 //
513 void SIGNAL_INIT( SIGNAL_T *ref )
514 {
515 InitializeConditionVariable( ref);
516 }
517
518 void SIGNAL_FREE( SIGNAL_T *ref )
519 {
520 // nothing to do
521 (void)ref;
522 }
523
524 bool SIGNAL_WAIT( SIGNAL_T *ref, MUTEX_T *mu_ref, time_d abs_secs)
525 {
526 long ms;
527
528 if( abs_secs < 0.0)
529 ms = INFINITE;
530 else if( abs_secs == 0.0)
531 ms = 0;
532 else
533 {
534 ms = (long) ((abs_secs - now_secs())*1000.0 + 0.5);
535
536 // If the time already passed, still try once (ms==0). A short timeout
537 // may have turned negative or 0 because of the two time samples done.
538 //
539 if( ms < 0)
540 ms = 0;
541 }
542
543 if( !SleepConditionVariableCS( ref, mu_ref, ms))
544 {
545 if( GetLastError() == ERROR_TIMEOUT)
546 {
547 return false;
548 }
549 else
550 {
551 FAIL( "SleepConditionVariableCS", GetLastError());
552 }
553 }
554 return true;
555 }
556
557 void SIGNAL_ONE( SIGNAL_T *ref )
558 {
559 WakeConditionVariable( ref);
560 }
561
562 void SIGNAL_ALL( SIGNAL_T *ref )
563 {
564 WakeAllConditionVariable( ref);
565 }
566
567#endif // CONDITION_VARIABLE are available
568
569#else // THREADAPI == THREADAPI_PTHREAD 192#else // THREADAPI == THREADAPI_PTHREAD
570 // PThread (Linux, OS X, ...) 193// ##################################################################################################
571 // 194// ##################################################################################################
572 // On OS X, user processes seem to be able to change priorities.
573 // On Linux, SCHED_RR and su privileges are required.. !-(
574 //
575 #include <errno.h>
576 #include <sched.h>
577 195
578# if (defined(__MINGW32__) || defined(__MINGW64__)) && defined pthread_attr_setschedpolicy 196// PThread (Linux, OS X, ...)
579# if pthread_attr_setschedpolicy( A, S) == ENOTSUP 197//
580 // from the mingw-w64 team: 198// On OS X, user processes seem to be able to change priorities.
581 // Well, we support pthread_setschedparam by which you can specify 199// On Linux, SCHED_RR and su privileges are required.. !-(
582 // threading-policy. Nevertheless, yes we lack this function. In 200//
583 // general its implementation is pretty much trivial, as on Win32 target 201#include <errno.h>
584 // just SCHED_OTHER can be supported. 202#include <sched.h>
585 #undef pthread_attr_setschedpolicy 203
586 static int pthread_attr_setschedpolicy( pthread_attr_t* attr, int policy) 204#if (defined(__MINGW32__) || defined(__MINGW64__)) && defined pthread_attr_setschedpolicy
205#if pthread_attr_setschedpolicy(A, S) == ENOTSUP
206// from the mingw-w64 team:
207// Well, we support pthread_setschedparam by which you can specify
208// threading-policy. Nevertheless, yes we lack this function. In
209// general its implementation is pretty much trivial, as on Win32 target
210// just SCHED_OTHER can be supported.
211#undef pthread_attr_setschedpolicy
212[[nodiscard]] static int pthread_attr_setschedpolicy(pthread_attr_t* attr, int policy)
213{
214 if (policy != SCHED_OTHER)
587 { 215 {
588 if( policy != SCHED_OTHER) 216 return ENOTSUP;
589 {
590 return ENOTSUP;
591 }
592 return 0;
593 } 217 }
594# endif // pthread_attr_setschedpolicy() 218 return 0;
595# endif // defined(__MINGW32__) || defined(__MINGW64__) 219}
220#endif // pthread_attr_setschedpolicy()
221#endif // defined(__MINGW32__) || defined(__MINGW64__)
596 222
597 static void _PT_FAIL( int rc, const char *name, const char *file, int line ) { 223static void _PT_FAIL( int rc, const char *name, const char *file, int line )
224{
598 const char *why= (rc==EINVAL) ? "EINVAL" : 225 const char *why= (rc==EINVAL) ? "EINVAL" :
599 (rc==EBUSY) ? "EBUSY" : 226 (rc==EBUSY) ? "EBUSY" :
600 (rc==EPERM) ? "EPERM" : 227 (rc==EPERM) ? "EPERM" :
601 (rc==ENOMEM) ? "ENOMEM" : 228 (rc==ENOMEM) ? "ENOMEM" :
602 (rc==ESRCH) ? "ESRCH" : 229 (rc==ESRCH) ? "ESRCH" :
603 (rc==ENOTSUP) ? "ENOTSUP": 230 (rc==ENOTSUP) ? "ENOTSUP":
604 //... 231 //...
605 "<UNKNOWN>"; 232 "<UNKNOWN>";
606 fprintf( stderr, "%s %d: %s failed, %d %s\n", file, line, name, rc, why ); 233 fprintf( stderr, "%s %d: %s failed, %d %s\n", file, line, name, rc, why );
607 abort(); 234 abort();
608 } 235}
609 #define PT_CALL( call ) { int rc= call; if (rc!=0) _PT_FAIL( rc, #call, __FILE__, __LINE__ ); } 236#define PT_CALL( call ) { int rc= call; if (rc!=0) _PT_FAIL( rc, #call, __FILE__, __LINE__ ); }
610 //
611 void SIGNAL_INIT( SIGNAL_T *ref ) {
612 PT_CALL(pthread_cond_init(ref, nullptr /*attr*/));
613 }
614 void SIGNAL_FREE( SIGNAL_T *ref ) {
615 PT_CALL( pthread_cond_destroy(ref) );
616 }
617 //
618 /*
619 * Timeout is given as absolute since we may have fake wakeups during
620 * a timed out sleep. A Linda with some other key read, or just because
621 * PThread cond vars can wake up unwantedly.
622 */
623 bool SIGNAL_WAIT( SIGNAL_T *ref, pthread_mutex_t *mu, time_d abs_secs ) {
624 if (abs_secs<0.0) {
625 PT_CALL( pthread_cond_wait( ref, mu ) ); // infinite
626 } else {
627 int rc;
628 struct timespec ts;
629
630 assert( abs_secs != 0.0 );
631 prepare_timeout( &ts, abs_secs );
632
633 rc= pthread_cond_timedwait( ref, mu, &ts );
634
635 if (rc==ETIMEDOUT) return false;
636 if (rc) { _PT_FAIL( rc, "pthread_cond_timedwait()", __FILE__, __LINE__ ); }
637 }
638 return true;
639 }
640 //
641 void SIGNAL_ONE( SIGNAL_T *ref ) {
642 PT_CALL( pthread_cond_signal(ref) ); // wake up ONE (or no) waiting thread
643 }
644 //
645 void SIGNAL_ALL( SIGNAL_T *ref ) {
646 PT_CALL( pthread_cond_broadcast(ref) ); // wake up ALL waiting threads
647 }
648 237
649// array of 7 thread priority values, hand-tuned by platform so that we offer a uniform [-3,+3] public priority range 238// array of 7 thread priority values, hand-tuned by platform so that we offer a uniform [-3,+3] public priority range
650static int const gs_prio_remap[] = 239static int const gs_prio_remap[] =
@@ -747,14 +336,6 @@ static int const gs_prio_remap[] =
747 // 336 //
748 // TBD: Find right values for Cygwin 337 // TBD: Find right values for Cygwin
749 // 338 //
750# elif defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC)
751 // any other value not supported by win32-pthread as of version 2.9.1
752# define _PRIO_MODE SCHED_OTHER
753
754 // PTHREAD_SCOPE_PROCESS not supported by win32-pthread as of version 2.9.1
755 //#define _PRIO_SCOPE PTHREAD_SCOPE_SYSTEM // but do we need this at all to start with?
756 THREAD_PRIORITY_IDLE, THREAD_PRIORITY_LOWEST, THREAD_PRIORITY_BELOW_NORMAL, THREAD_PRIORITY_NORMAL, THREAD_PRIORITY_ABOVE_NORMAL, THREAD_PRIORITY_HIGHEST, THREAD_PRIORITY_TIME_CRITICAL
757
758# else 339# else
759# error "Unknown OS: not implemented!" 340# error "Unknown OS: not implemented!"
760# endif 341# endif
@@ -767,7 +348,7 @@ static int const gs_prio_remap[] =
767#endif // _PRIO_0 348#endif // _PRIO_0
768}; 349};
769 350
770static int select_prio(int prio /* -3..+3 */) 351[[nodiscard]] static int select_prio(int prio /* -3..+3 */)
771{ 352{
772 if (prio == THREAD_PRIO_DEFAULT) 353 if (prio == THREAD_PRIO_DEFAULT)
773 prio = 0; 354 prio = 0;
@@ -775,267 +356,93 @@ static int select_prio(int prio /* -3..+3 */)
775 return gs_prio_remap[prio + 3]; 356 return gs_prio_remap[prio + 3];
776} 357}
777 358
778void THREAD_CREATE( THREAD_T* ref, THREAD_RETURN_T (*func)( void*), void* data, int prio /* -3..+3 */) 359void THREAD_SET_PRIORITY(int prio_, [[maybe_unused]] bool sudo_)
779{ 360{
780 pthread_attr_t a;
781 bool const change_priority =
782#ifdef PLATFORM_LINUX 361#ifdef PLATFORM_LINUX
783 sudo && // only root-privileged process can change priorities 362 if (!sudo_) // only root-privileged process can change priorities
784#endif 363 return;
785 (prio != THREAD_PRIO_DEFAULT); 364#endif // PLATFORM_LINUX
786
787 PT_CALL( pthread_attr_init( &a));
788
789#ifndef PTHREAD_TIMEDJOIN
790 // We create a NON-JOINABLE thread. This is mainly due to the lack of
791 // 'pthread_timedjoin()', but does offer other benefits (s.a. earlier
792 // freeing of the thread's resources).
793 //
794 PT_CALL( pthread_attr_setdetachstate( &a, PTHREAD_CREATE_DETACHED));
795#endif // PTHREAD_TIMEDJOIN
796
797 // Use this to find a system's default stack size (DEBUG)
798#if 0
799 {
800 size_t n;
801 pthread_attr_getstacksize( &a, &n);
802 fprintf( stderr, "Getstack: %u\n", (unsigned int)n);
803 }
804 // 524288 on OS X
805 // 2097152 on Linux x86 (Ubuntu 7.04)
806 // 1048576 on FreeBSD 6.2 SMP i386
807#endif // 0
808
809#if defined _THREAD_STACK_SIZE && _THREAD_STACK_SIZE > 0
810 PT_CALL( pthread_attr_setstacksize( &a, _THREAD_STACK_SIZE));
811#endif
812
813 if (change_priority)
814 {
815 struct sched_param sp;
816 // "The specified scheduling parameters are only used if the scheduling
817 // parameter inheritance attribute is PTHREAD_EXPLICIT_SCHED."
818 //
819#if !defined __ANDROID__ || ( defined __ANDROID__ && __ANDROID_API__ >= 28 )
820 PT_CALL( pthread_attr_setinheritsched( &a, PTHREAD_EXPLICIT_SCHED));
821#endif
822
823#ifdef _PRIO_SCOPE
824 PT_CALL( pthread_attr_setscope( &a, _PRIO_SCOPE));
825#endif // _PRIO_SCOPE
826
827 PT_CALL( pthread_attr_setschedpolicy( &a, _PRIO_MODE));
828
829 sp.sched_priority = select_prio(prio);
830 PT_CALL( pthread_attr_setschedparam( &a, &sp));
831 }
832
833 //---
834 // Seems on OS X, _POSIX_THREAD_THREADS_MAX is some kind of system
835 // thread limit (not userland thread). Actual limit for us is way higher.
836 // PTHREAD_THREADS_MAX is not defined (even though man page refers to it!)
837 //
838# ifndef THREAD_CREATE_RETRIES_MAX
839 // Don't bother with retries; a failure is a failure
840 //
841 {
842 int rc = pthread_create( ref, &a, func, data);
843 if( rc) _PT_FAIL( rc, "pthread_create()", __FILE__, __LINE__ - 1);
844 }
845# else
846# error "This code deprecated"
847 /*
848 // Wait slightly if thread creation has exhausted the system
849 //
850 { int retries;
851 for( retries=0; retries<THREAD_CREATE_RETRIES_MAX; retries++ ) {
852
853 int rc= pthread_create( ref, &a, func, data );
854 //
855 // OS X / Linux:
856 // EAGAIN: ".. lacked the necessary resources to create
857 // another thread, or the system-imposed limit on the
858 // total number of threads in a process
859 // [PTHREAD_THREADS_MAX] would be exceeded."
860 // EINVAL: attr is invalid
861 // Linux:
862 // EPERM: no rights for given parameters or scheduling (no sudo)
863 // ENOMEM: (known to fail with this code, too - not listed in man)
864
865 if (rc==0) break; // ok!
866
867 // In practise, exhaustion seems to be coming from memory, not a
868 // maximum number of threads. Keep tuning... ;)
869 //
870 if (rc==EAGAIN) {
871 //fprintf( stderr, "Looping (retries=%d) ", retries ); // DEBUG
872
873 // Try again, later.
874
875 Yield();
876 } else {
877 _PT_FAIL( rc, "pthread_create()", __FILE__, __LINE__ );
878 }
879 }
880 }
881 */
882# endif
883 365
884 PT_CALL( pthread_attr_destroy( &a)); 366 struct sched_param sp;
367 // prio range [-3,+3] was checked by the caller
368 sp.sched_priority = gs_prio_remap[prio_ + 3];
369 PT_CALL(pthread_setschedparam(pthread_self(), _PRIO_MODE, &sp));
885} 370}
886 371
372// #################################################################################################
887 373
888void THREAD_SET_PRIORITY( int prio) 374void JTHREAD_SET_PRIORITY(std::jthread& thread_, int prio_, [[maybe_unused]] bool sudo_)
889{ 375{
890#ifdef PLATFORM_LINUX 376#ifdef PLATFORM_LINUX
891 if( sudo) // only root-privileged process can change priorities 377 if (!sudo_) // only root-privileged process can change priorities
378 return;
892#endif // PLATFORM_LINUX 379#endif // PLATFORM_LINUX
893 { 380
894 struct sched_param sp; 381 struct sched_param sp;
895 // prio range [-3,+3] was checked by the caller 382 // prio range [-3,+3] was checked by the caller
896 sp.sched_priority = gs_prio_remap[ prio + 3]; 383 sp.sched_priority = gs_prio_remap[prio_ + 3];
897 PT_CALL( pthread_setschedparam( pthread_self(), _PRIO_MODE, &sp)); 384 PT_CALL(pthread_setschedparam(static_cast<pthread_t>(thread_.native_handle()), _PRIO_MODE, &sp));
898 }
899} 385}
900 386
901void THREAD_SET_AFFINITY( unsigned int aff) 387// #################################################################################################
388
389void THREAD_SET_AFFINITY(unsigned int aff)
902{ 390{
903 int bit = 0; 391 int bit = 0;
904#ifdef __NetBSD__ 392#ifdef __NetBSD__
905 cpuset_t *cpuset = cpuset_create(); 393 cpuset_t* cpuset = cpuset_create();
906 if (cpuset == nullptr) 394 if (cpuset == nullptr)
907 _PT_FAIL( errno, "cpuset_create", __FILE__, __LINE__-2 ); 395 _PT_FAIL(errno, "cpuset_create", __FILE__, __LINE__ - 2);
908#define CPU_SET(b, s) cpuset_set(b, *(s)) 396#define CPU_SET(b, s) cpuset_set(b, *(s))
909#else 397#else
910 cpu_set_t cpuset; 398 cpu_set_t cpuset;
911 CPU_ZERO( &cpuset); 399 CPU_ZERO(&cpuset);
912#endif 400#endif
913 while( aff != 0) 401 while (aff != 0)
914 { 402 {
915 if( aff & 1) 403 if (aff & 1)
916 { 404 {
917 CPU_SET( bit, &cpuset); 405 CPU_SET(bit, &cpuset);
918 } 406 }
919 ++ bit; 407 ++bit;
920 aff >>= 1; 408 aff >>= 1;
921 } 409 }
922#ifdef __ANDROID__ 410#ifdef __ANDROID__
923 PT_CALL( sched_setaffinity( pthread_self(), sizeof(cpu_set_t), &cpuset)); 411 PT_CALL(sched_setaffinity(pthread_self(), sizeof(cpu_set_t), &cpuset));
924#elif defined(__NetBSD__) 412#elif defined(__NetBSD__)
925 PT_CALL( pthread_setaffinity_np( pthread_self(), cpuset_size(cpuset), cpuset)); 413 PT_CALL(pthread_setaffinity_np(pthread_self(), cpuset_size(cpuset), cpuset));
926 cpuset_destroy( cpuset); 414 cpuset_destroy(cpuset);
927#else 415#else
928 PT_CALL( pthread_setaffinity_np( pthread_self(), sizeof(cpu_set_t), &cpuset)); 416 PT_CALL(pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset));
929#endif 417#endif
930} 418}
931 419
932 /* 420// #################################################################################################
933 * Wait for a thread to finish.
934 *
935 * 'mu_ref' is a lock we should use for the waiting; initially unlocked.
936 * Same lock as passed to THREAD_EXIT.
937 *
938 * Returns true for successful wait, false for timed out
939 */
940bool THREAD_WAIT( THREAD_T *ref, double secs , SIGNAL_T *signal_ref, MUTEX_T *mu_ref, volatile enum e_status *st_ref)
941{
942 struct timespec ts_store;
943 const struct timespec* timeout = nullptr;
944 bool done;
945
946 // Do timeout counting before the locks
947 //
948#if THREADWAIT_METHOD == THREADWAIT_TIMEOUT
949 if (secs>=0.0)
950#else // THREADWAIT_METHOD == THREADWAIT_CONDVAR
951 if (secs>0.0)
952#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
953 {
954 prepare_timeout( &ts_store, now_secs()+secs );
955 timeout= &ts_store;
956 }
957 421
958#if THREADWAIT_METHOD == THREADWAIT_TIMEOUT 422void THREAD_SETNAME(char const* _name)
959 /* Thread is joinable 423{
960 */ 424 // exact API to set the thread name is platform-dependent
961 if (!timeout) { 425 // if you need to fix the build, or if you know how to fill a hole, tell me (bnt.germain@gmail.com) so that I can submit the fix in github.
962 PT_CALL(pthread_join(*ref, nullptr /*ignore exit value*/));
963 done = true;
964 } else {
965 int rc = PTHREAD_TIMEDJOIN(*ref, nullptr, timeout);
966 if ((rc!=0) && (rc!=ETIMEDOUT)) {
967 _PT_FAIL( rc, "PTHREAD_TIMEDJOIN", __FILE__, __LINE__-2 );
968 }
969 done= rc==0;
970 }
971#else // THREADWAIT_METHOD == THREADWAIT_CONDVAR
972 /* Since we've set the thread up as PTHREAD_CREATE_DETACHED, we cannot
973 * join with it. Use the cond.var.
974 */
975 (void) ref; // unused
976 MUTEX_LOCK( mu_ref );
977
978 // 'secs'==0.0 does not need to wait, just take the current status
979 // within the 'mu_ref' locks
980 //
981 if (secs != 0.0) {
982 while( *st_ref < DONE ) {
983 if (!timeout) {
984 PT_CALL( pthread_cond_wait( signal_ref, mu_ref ));
985 } else {
986 int rc= pthread_cond_timedwait( signal_ref, mu_ref, timeout );
987 if (rc==ETIMEDOUT) break;
988 if (rc!=0) _PT_FAIL( rc, "pthread_cond_timedwait", __FILE__, __LINE__-2 );
989 }
990 }
991 }
992 done= *st_ref >= DONE; // DONE|ERROR_ST|CANCELLED
993
994 MUTEX_UNLOCK( mu_ref );
995#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
996 return done;
997 }
998 //
999 void THREAD_KILL( THREAD_T *ref ) {
1000#ifdef __ANDROID__
1001 __android_log_print(ANDROID_LOG_WARN, LOG_TAG, "Cannot kill thread!");
1002#else
1003 pthread_cancel( *ref );
1004#endif
1005 }
1006
1007 void THREAD_MAKE_ASYNCH_CANCELLABLE()
1008 {
1009#ifdef __ANDROID__
1010 __android_log_print(ANDROID_LOG_WARN, LOG_TAG, "Cannot make thread async cancellable!");
1011#else
1012 // that's the default, but just in case...
1013 pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, nullptr);
1014 // we want cancellation to take effect immediately if possible, instead of waiting for a cancellation point (which is the default)
1015 pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, nullptr);
1016#endif
1017 }
1018
1019 void THREAD_SETNAME( char const* _name)
1020 {
1021 // exact API to set the thread name is platform-dependent
1022 // if you need to fix the build, or if you know how to fill a hole, tell me (bnt.germain@gmail.com) so that I can submit the fix in github.
1023#if defined PLATFORM_BSD && !defined __NetBSD__ 426#if defined PLATFORM_BSD && !defined __NetBSD__
1024 pthread_set_name_np( pthread_self(), _name); 427 pthread_set_name_np(pthread_self(), _name);
1025#elif defined PLATFORM_BSD && defined __NetBSD__ 428#elif defined PLATFORM_BSD && defined __NetBSD__
1026 pthread_setname_np( pthread_self(), "%s", (void *)_name); 429 pthread_setname_np(pthread_self(), "%s", (void*) _name);
1027#elif defined PLATFORM_LINUX 430#elif defined PLATFORM_LINUX
1028 #if LINUX_USE_PTHREAD_SETNAME_NP 431#if LINUX_USE_PTHREAD_SETNAME_NP
1029 pthread_setname_np( pthread_self(), _name); 432 pthread_setname_np(pthread_self(), _name);
1030 #else // LINUX_USE_PTHREAD_SETNAME_NP 433#else // LINUX_USE_PTHREAD_SETNAME_NP
1031 prctl(PR_SET_NAME, _name, 0, 0, 0); 434 prctl(PR_SET_NAME, _name, 0, 0, 0);
1032 #endif // LINUX_USE_PTHREAD_SETNAME_NP 435#endif // LINUX_USE_PTHREAD_SETNAME_NP
1033#elif defined PLATFORM_QNX || defined PLATFORM_CYGWIN 436#elif defined PLATFORM_QNX || defined PLATFORM_CYGWIN
1034 pthread_setname_np( pthread_self(), _name); 437 pthread_setname_np(pthread_self(), _name);
1035#elif defined PLATFORM_OSX 438#elif defined PLATFORM_OSX
1036 pthread_setname_np(_name); 439 pthread_setname_np(_name);
1037#elif defined PLATFORM_WIN32 || defined PLATFORM_POCKETPC 440#else
1038 PT_CALL( pthread_setname_np( pthread_self(), _name)); 441 fprintf(stderr, "THREAD_SETNAME: unsupported platform\n");
442 abort();
1039#endif 443#endif
1040 } 444}
445
1041#endif // THREADAPI == THREADAPI_PTHREAD 446#endif // THREADAPI == THREADAPI_PTHREAD
447// #################################################################################################
448// #################################################################################################
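Note: the condition-variable wait removed above can be expressed with standard C++ primitives. The following is a minimal sketch, assuming a lane publishes an ordered status (like the old e_status) and that secs < 0.0 still means "no timeout"; it is an illustration, not the code introduced by this commit.

#include <chrono>
#include <condition_variable>
#include <mutex>

enum class Status { Pending, Running, Waiting, Done, Error, Cancelled };

struct LaneWaitSketch
{
    std::mutex m;
    std::condition_variable cv;
    Status st{ Status::Pending };

    // counterpart of the removed THREAD_WAIT condvar branch:
    // block until the lane reports Done/Error/Cancelled, or until 'secs' elapse
    bool wait(double secs)
    {
        std::unique_lock<std::mutex> lock{ m };
        auto const reached_end = [this] { return st >= Status::Done; };
        if (secs < 0.0) { // <0.0 meant "no timeout" in the old time_d convention
            cv.wait(lock, reached_end);
            return true;
        }
        // secs == 0.0 degenerates into a single check of the predicate
        return cv.wait_for(lock, std::chrono::duration<double>(secs), reached_end);
    }
};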
diff --git a/src/threading.h b/src/threading.h
index 38a021f..fc35730 100644
--- a/src/threading.h
+++ b/src/threading.h
@@ -1,25 +1,13 @@
1#pragma once 1#pragma once
2 2
3/*
4 * win32-pthread:
5 * define HAVE_WIN32_PTHREAD and PTW32_INCLUDE_WINDOWS_H in your project configuration when building for win32-pthread.
6 * link against pthreadVC2.lib, and of course have pthreadVC2.dll somewhere in your path.
7 */
8#include "platform.h" 3#include "platform.h"
9 4
10#include <time.h> 5#include <thread>
11
12/* Note: ERROR is a defined entity on Win32
13 PENDING: The Lua VM hasn't done anything yet.
14 RUNNING, WAITING: Thread is inside the Lua VM. If the thread is forcefully stopped, we can't lua_close() the Lua State.
15 DONE, ERROR_ST, CANCELLED: Thread execution is outside the Lua VM. It can be lua_close()d.
16*/
17enum e_status { PENDING, RUNNING, WAITING, DONE, ERROR_ST, CANCELLED };
18 6
19#define THREADAPI_WINDOWS 1 7#define THREADAPI_WINDOWS 1
20#define THREADAPI_PTHREAD 2 8#define THREADAPI_PTHREAD 2
21 9
22#if( defined( PLATFORM_XBOX) || defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC)) && !defined( HAVE_WIN32_PTHREAD) 10#if( defined( PLATFORM_XBOX) || defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC))
23//#pragma message ( "THREADAPI_WINDOWS" ) 11//#pragma message ( "THREADAPI_WINDOWS" )
24#define THREADAPI THREADAPI_WINDOWS 12#define THREADAPI THREADAPI_WINDOWS
25#else // (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC) 13#else // (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC)
@@ -27,22 +15,24 @@ enum e_status { PENDING, RUNNING, WAITING, DONE, ERROR_ST, CANCELLED };
27#define THREADAPI THREADAPI_PTHREAD 15#define THREADAPI THREADAPI_PTHREAD
28#endif // (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC) 16#endif // (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC)
29 17
30/*---=== Locks & Signals ===--- 18static constexpr int THREAD_PRIO_DEFAULT{ -999 };
31*/
32 19
20// ##################################################################################################
21// ##################################################################################################
33#if THREADAPI == THREADAPI_WINDOWS 22#if THREADAPI == THREADAPI_WINDOWS
34 #if defined( PLATFORM_XBOX) 23
35 #include <xtl.h> 24#if defined(PLATFORM_XBOX)
36 #else // !PLATFORM_XBOX 25#include <xtl.h>
37 #define WIN32_LEAN_AND_MEAN 26#else // !PLATFORM_XBOX
38 // CONDITION_VARIABLE needs version 0x0600+ 27#define WIN32_LEAN_AND_MEAN
39 // _WIN32_WINNT value is already defined by MinGW, but not by MSVC 28// CONDITION_VARIABLE needs version 0x0600+
40 #ifndef _WIN32_WINNT 29// _WIN32_WINNT value is already defined by MinGW, but not by MSVC
41 #define _WIN32_WINNT 0x0600 30#ifndef _WIN32_WINNT
42 #endif // _WIN32_WINNT 31#define _WIN32_WINNT 0x0600
43 #include <windows.h> 32#endif // _WIN32_WINNT
44 #endif // !PLATFORM_XBOX 33#include <windows.h>
45 #include <process.h> 34#endif // !PLATFORM_XBOX
35#include <process.h>
46 36
47/* 37/*
48#define XSTR(x) STR(x) 38#define XSTR(x) STR(x)
@@ -50,202 +40,36 @@ enum e_status { PENDING, RUNNING, WAITING, DONE, ERROR_ST, CANCELLED };
50#pragma message( "The value of _WIN32_WINNT: " XSTR(_WIN32_WINNT)) 40#pragma message( "The value of _WIN32_WINNT: " XSTR(_WIN32_WINNT))
51*/ 41*/
52 42
53 // MSDN: http://msdn2.microsoft.com/en-us/library/ms684254.aspx 43static constexpr int THREAD_PRIO_MIN{ -3 };
54 // 44static constexpr int THREAD_PRIO_MAX{ +3 };
55 // CRITICAL_SECTION can be used for simple code protection. Mutexes are
56 // needed for use with the SIGNAL system.
57 //
58
59 #if _WIN32_WINNT < 0x0600 // CONDITION_VARIABLE aren't available, use a signal
60
61 struct SIGNAL_T
62 {
63 CRITICAL_SECTION signalCS;
64 CRITICAL_SECTION countCS;
65 HANDLE waitEvent;
66 HANDLE waitDoneEvent;
67 LONG waitersCount;
68 };
69
70
71 #define MUTEX_T HANDLE
72 void MUTEX_INIT( MUTEX_T* ref);
73 void MUTEX_FREE( MUTEX_T* ref);
74 void MUTEX_LOCK( MUTEX_T* ref);
75 void MUTEX_UNLOCK( MUTEX_T* ref);
76
77 #else // CONDITION_VARIABLE are available, use them
78
79 #define SIGNAL_T CONDITION_VARIABLE
80 #define MUTEX_T CRITICAL_SECTION
81 #define MUTEX_INIT( ref) InitializeCriticalSection( ref)
82 #define MUTEX_FREE( ref) DeleteCriticalSection( ref)
83 #define MUTEX_LOCK( ref) EnterCriticalSection( ref)
84 #define MUTEX_UNLOCK( ref) LeaveCriticalSection( ref)
85
86 #endif // CONDITION_VARIABLE are available
87
88 #define MUTEX_RECURSIVE_INIT(ref) MUTEX_INIT(ref) /* always recursive in Win32 */
89
90 using THREAD_RETURN_T = unsigned int;
91 45
92 #define YIELD() Sleep(0) 46// ##################################################################################################
93 #define THREAD_CALLCONV __stdcall 47// ##################################################################################################
94#else // THREADAPI == THREADAPI_PTHREAD 48#else // THREADAPI == THREADAPI_PTHREAD
95 // PThread (Linux, OS X, ...) 49// ##################################################################################################
96 50// ##################################################################################################
97 // looks like some MinGW installations don't support PTW32_INCLUDE_WINDOWS_H, so let's include it ourselves, just in case
98 #if defined(PLATFORM_WIN32)
99 #include <windows.h>
100 #endif // PLATFORM_WIN32
101 #include <pthread.h>
102
103 #ifdef PLATFORM_LINUX
104 #if defined(__GLIBC__)
105 # define _MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP
106 #else
107 # define _MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE
108 #endif
109 #else
110 /* OS X, ... */
111 # define _MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE
112 #endif
113
114 #define MUTEX_T pthread_mutex_t
115 #define MUTEX_INIT(ref) pthread_mutex_init(ref, nullptr)
116 #define MUTEX_RECURSIVE_INIT(ref) \
117 { pthread_mutexattr_t a; pthread_mutexattr_init( &a ); \
118 pthread_mutexattr_settype( &a, _MUTEX_RECURSIVE ); \
119 pthread_mutex_init(ref,&a); pthread_mutexattr_destroy( &a ); \
120 }
121 #define MUTEX_FREE(ref) pthread_mutex_destroy(ref)
122 #define MUTEX_LOCK(ref) pthread_mutex_lock(ref)
123 #define MUTEX_UNLOCK(ref) pthread_mutex_unlock(ref)
124
125 using THREAD_RETURN_T = void *;
126
127 using SIGNAL_T = pthread_cond_t;
128
129 void SIGNAL_ONE( SIGNAL_T *ref );
130
131 // Yield is non-portable:
132 //
133 // OS X 10.4.8/9 has pthread_yield_np()
134 // Linux 2.4 has pthread_yield() if _GNU_SOURCE is #defined
135 // FreeBSD 6.2 has pthread_yield()
136 // ...
137 //
138 #if defined( PLATFORM_OSX)
139 #define YIELD() pthread_yield_np()
140 #else
141 #define YIELD() sched_yield()
142 #endif
143 #define THREAD_CALLCONV
144#endif //THREADAPI == THREADAPI_PTHREAD
145
146void SIGNAL_INIT( SIGNAL_T *ref );
147void SIGNAL_FREE( SIGNAL_T *ref );
148void SIGNAL_ALL( SIGNAL_T *ref );
149 51
150/* 52// PThread (Linux, OS X, ...)
151* 'time_d': <0.0 for no timeout
152* 0.0 for instant check
153* >0.0 absolute timeout in secs + ms
154*/
155using time_d = double;
156time_d now_secs(void);
157
158time_d SIGNAL_TIMEOUT_PREPARE( double rel_secs );
159
160bool SIGNAL_WAIT( SIGNAL_T *ref, MUTEX_T *mu, time_d timeout );
161
162
163/*---=== Threading ===---
164*/
165 53
166#define THREAD_PRIO_DEFAULT (-999) 54// looks like some MinGW installations don't support PTW32_INCLUDE_WINDOWS_H, so let's include it ourselves, just in case
55#if defined(PLATFORM_WIN32)
56#include <windows.h>
57#endif // PLATFORM_WIN32
58#include <pthread.h>
167 59
168#if THREADAPI == THREADAPI_WINDOWS 60#if defined(PLATFORM_LINUX) && !defined(LINUX_SCHED_RR)
169 61static constexpr int THREAD_PRIO_MIN{ 0 };
170 using THREAD_T = HANDLE; 62#else
171# define THREAD_ISNULL( _h) (_h == 0) 63static constexpr int THREAD_PRIO_MIN{ -3 };
172 void THREAD_CREATE( THREAD_T* ref, THREAD_RETURN_T (__stdcall *func)( void*), void* data, int prio /* -3..+3 */); 64#endif
65static constexpr int THREAD_PRIO_MAX{ +3 };
173 66
174# define THREAD_PRIO_MIN (-3) 67#endif // THREADAPI == THREADAPI_PTHREAD
175# define THREAD_PRIO_MAX (+3) 68// ##################################################################################################
69// ##################################################################################################
176 70
177# define THREAD_CLEANUP_PUSH( cb_, val_) 71void THREAD_SETNAME(char const* _name);
178# define THREAD_CLEANUP_POP( execute_) 72void THREAD_SET_PRIORITY(int prio_, bool sudo_);
179 73void THREAD_SET_AFFINITY(unsigned int aff);
180#else // THREADAPI == THREADAPI_PTHREAD
181 74
182 /* Platforms that have a timed 'pthread_join()' can get away with a simpler 75void JTHREAD_SET_PRIORITY(std::jthread& thread_, int prio_, bool sudo_);
183 * implementation. Others will use a condition variable.
184 */
185# if defined __WINPTHREADS_VERSION
186//# define USE_PTHREAD_TIMEDJOIN
187# endif // __WINPTHREADS_VERSION
188
189# ifdef USE_PTHREAD_TIMEDJOIN
190# ifdef PLATFORM_OSX
191# error "No 'pthread_timedjoin()' on this system"
192# else
193 /* Linux, ... */
194# define PTHREAD_TIMEDJOIN pthread_timedjoin_np
195# endif
196# endif
197
198 using THREAD_T = pthread_t;
199# define THREAD_ISNULL( _h) 0 // pthread_t may be a structure: never 'null' by itself
200
201 void THREAD_CREATE( THREAD_T* ref, THREAD_RETURN_T (*func)( void*), void* data, int prio /* -3..+3 */);
202
203# if defined(PLATFORM_LINUX)
204 extern volatile bool sudo;
205# ifdef LINUX_SCHED_RR
206# define THREAD_PRIO_MIN (sudo ? -3 : 0)
207# else
208# define THREAD_PRIO_MIN (0)
209# endif
210# define THREAD_PRIO_MAX (sudo ? +3 : 0)
211# else
212# define THREAD_PRIO_MIN (-3)
213# define THREAD_PRIO_MAX (+3)
214# endif
215
216# if THREADWAIT_METHOD == THREADWAIT_CONDVAR
217# define THREAD_CLEANUP_PUSH( cb_, val_) pthread_cleanup_push( cb_, val_)
218# define THREAD_CLEANUP_POP( execute_) pthread_cleanup_pop( execute_)
219# else
220# define THREAD_CLEANUP_PUSH( cb_, val_) {
221# define THREAD_CLEANUP_POP( execute_) }
222# endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
223#endif // THREADAPI == THREADAPI_WINDOWS
224
225/*
226* Win32 and PTHREAD_TIMEDJOIN allow waiting for a thread with a timeout.
227* Posix without PTHREAD_TIMEDJOIN needs to use a condition variable approach.
228*/
229#define THREADWAIT_TIMEOUT 1
230#define THREADWAIT_CONDVAR 2
231
232#if THREADAPI == THREADAPI_WINDOWS || (defined PTHREAD_TIMEDJOIN)
233#define THREADWAIT_METHOD THREADWAIT_TIMEOUT
234#else // THREADAPI == THREADAPI_WINDOWS || (defined PTHREAD_TIMEDJOIN)
235#define THREADWAIT_METHOD THREADWAIT_CONDVAR
236#endif // THREADAPI == THREADAPI_WINDOWS || (defined PTHREAD_TIMEDJOIN)
237
238
239#if THREADWAIT_METHOD == THREADWAIT_TIMEOUT
240bool THREAD_WAIT_IMPL( THREAD_T *ref, double secs);
241#define THREAD_WAIT( a, b, c, d, e) THREAD_WAIT_IMPL( a, b)
242#else // THREADWAIT_METHOD == THREADWAIT_CONDVAR
243bool THREAD_WAIT_IMPL( THREAD_T *ref, double secs, SIGNAL_T *signal_ref, MUTEX_T *mu_ref, volatile enum e_status *st_ref);
244#define THREAD_WAIT THREAD_WAIT_IMPL
245#endif // // THREADWAIT_METHOD == THREADWAIT_CONDVAR
246
247void THREAD_KILL( THREAD_T* ref);
248void THREAD_SETNAME( char const* _name);
249void THREAD_MAKE_ASYNCH_CANCELLABLE();
250void THREAD_SET_PRIORITY( int prio);
251void THREAD_SET_AFFINITY( unsigned int aff);
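The rewritten header declares JTHREAD_SET_PRIORITY(std::jthread&, int, bool) without showing its body here. Below is a hedged sketch of how such a helper could map Lanes' -3..+3 range onto pthread scheduler priorities; the helper name, the linear mapping and the SCHED_RR/SCHED_OTHER choice are assumptions, not the shipped implementation.

#include <pthread.h>
#include <sched.h>
#include <thread>

// hypothetical helper: map Lanes' -3..+3 priority range onto the scheduler's
// own min..max for the chosen policy, then apply it to the jthread's native handle
inline void set_jthread_priority_sketch(std::jthread& thread_, int prio_ /* -3..+3 */, bool sudo_)
{
    int const policy{ sudo_ ? SCHED_RR : SCHED_OTHER };
    int const lo{ sched_get_priority_min(policy) };
    int const hi{ sched_get_priority_max(policy) };
    sched_param param{};
    param.sched_priority = lo + ((prio_ + 3) * (hi - lo)) / 6; // linear map of [-3,+3] onto [lo,hi]
    pthread_setschedparam(thread_.native_handle(), policy, &param);
}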
diff --git a/src/threading_osx.h b/src/threading_osx.h
index b47d2f6..f4d41e0 100644
--- a/src/threading_osx.h
+++ b/src/threading_osx.h
@@ -2,8 +2,7 @@
2 * THREADING_OSX.H 2 * THREADING_OSX.H
3 * http://yyshen.github.io/2015/01/18/binding_threads_to_cores_osx.html 3 * http://yyshen.github.io/2015/01/18/binding_threads_to_cores_osx.html
4 */ 4 */
5#ifndef __threading_osx_h__ 5#pragma once
6#define __threading_osx_h__ 1
7 6
8#include <mach/mach_types.h> 7#include <mach/mach_types.h>
9#include <mach/thread_act.h> 8#include <mach/thread_act.h>
@@ -18,9 +17,9 @@ struct cpu_set_t
18 17
19static inline void CPU_ZERO(cpu_set_t *cs) { cs->count = 0; } 18static inline void CPU_ZERO(cpu_set_t *cs) { cs->count = 0; }
20static inline void CPU_SET(int num, cpu_set_t *cs) { cs->count |= (1 << num); } 19static inline void CPU_SET(int num, cpu_set_t *cs) { cs->count |= (1 << num); }
21static inline int CPU_ISSET(int num, cpu_set_t *cs) { return (cs->count & (1 << num)); } 20[[nodiscard]] static inline int CPU_ISSET(int num, cpu_set_t *cs) { return (cs->count & (1 << num)); }
22 21
23int sched_getaffinity(pid_t pid, size_t cpu_size, cpu_set_t *cpu_set) 22[[nodiscard]] int sched_getaffinity(pid_t pid, size_t cpu_size, cpu_set_t *cpu_set)
24{ 23{
25 int32_t core_count = 0; 24 int32_t core_count = 0;
26 size_t len = sizeof(core_count); 25 size_t len = sizeof(core_count);
@@ -39,7 +38,7 @@ int sched_getaffinity(pid_t pid, size_t cpu_size, cpu_set_t *cpu_set)
39 return 0; 38 return 0;
40} 39}
41 40
42int pthread_setaffinity_np(pthread_t thread, size_t cpu_size, cpu_set_t *cpu_set) 41[[nodiscard]] int pthread_setaffinity_np(pthread_t thread, size_t cpu_size, cpu_set_t *cpu_set)
43{ 42{
44 thread_port_t mach_thread; 43 thread_port_t mach_thread;
45 int core = 0; 44 int core = 0;
@@ -57,4 +56,3 @@ int pthread_setaffinity_np(pthread_t thread, size_t cpu_size, cpu_set_t *cpu_set
57 return 0; 56 return 0;
58} 57}
59 58
60#endif
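Usage sketch for the macOS affinity shim above. Note that on macOS thread_policy_set only provides an affinity hint, not a hard binding; the helper name below is illustrative.

#include <pthread.h>
#include "threading_osx.h"

// pin the calling thread to core 2 through the shim above
static void pin_current_thread_to_core_2()
{
    cpu_set_t set;
    CPU_ZERO(&set);
    CPU_SET(2, &set);
    (void) pthread_setaffinity_np(pthread_self(), sizeof(set), &set);
}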
diff --git a/src/tools.cpp b/src/tools.cpp
index df7602e..a0a3018 100644
--- a/src/tools.cpp
+++ b/src/tools.cpp
@@ -31,24 +31,12 @@ THE SOFTWARE.
31=============================================================================== 31===============================================================================
32*/ 32*/
33 33
34#include <stdio.h>
35#include <assert.h>
36#include <string.h>
37#include <ctype.h>
38#include <stdlib.h>
39#if !defined(__APPLE__)
40#include <malloc.h>
41#endif // __APPLE__
42
43#include "tools.h" 34#include "tools.h"
44#include "compat.h" 35
45#include "universe.h" 36#include "universe.h"
46#include "keeper.h"
47#include "lanes.h"
48#include "uniquekey.h"
49 37
50// functions implemented in deep.c 38// functions implemented in deep.c
51extern bool copydeep(Universe* U, lua_State* L2, int L2_cache_i, lua_State* L, int i, LookupMode mode_, char const* upName_); 39extern bool copydeep(Universe* U, Dest L2, int L2_cache_i, Source L, int i, LookupMode mode_, char const* upName_);
52extern void push_registry_subtable( lua_State* L, UniqueKey key_); 40extern void push_registry_subtable( lua_State* L, UniqueKey key_);
53 41
54DEBUGSPEW_CODE( char const* debugspew_indent = "----+----!----+----!----+----!----+----!----+----!----+----!----+----!----+"); 42DEBUGSPEW_CODE( char const* debugspew_indent = "----+----!----+----!----+----!----+----!----+----!----+----!----+----!----+");
@@ -116,7 +104,7 @@ void luaG_dump( lua_State* L)
116 104
117 for( i = 1; i <= top; ++ i) 105 for( i = 1; i <= top; ++ i)
118 { 106 {
119 int type = lua_type( L, i); 107 LuaType type{ lua_type_as_enum(L, i) };
120 108
121 fprintf( stderr, "\t[%d]= (%s) ", i, lua_typename( L, type)); 109 fprintf( stderr, "\t[%d]= (%s) ", i, lua_typename( L, type));
122 110
@@ -156,7 +144,7 @@ void luaG_dump( lua_State* L)
156// ################################################################################################ 144// ################################################################################################
157 145
158// same as PUC-Lua l_alloc 146// same as PUC-Lua l_alloc
159extern "C" static void* libc_lua_Alloc([[maybe_unused]] void* ud, [[maybe_unused]] void* ptr_, [[maybe_unused]] size_t osize_, size_t nsize_) 147extern "C" [[nodiscard]] static void* libc_lua_Alloc([[maybe_unused]] void* ud, [[maybe_unused]] void* ptr_, [[maybe_unused]] size_t osize_, size_t nsize_)
160{ 148{
161 if (nsize_ == 0) 149 if (nsize_ == 0)
162 { 150 {
@@ -171,7 +159,7 @@ extern "C" static void* libc_lua_Alloc([[maybe_unused]] void* ud, [[maybe_unused
171 159
172// ################################################################################################# 160// #################################################################################################
173 161
174static int luaG_provide_protected_allocator(lua_State* L) 162[[nodiscard]] static int luaG_provide_protected_allocator(lua_State* L)
175{ 163{
176 Universe* const U{ universe_get(L) }; 164 Universe* const U{ universe_get(L) };
177 // push a new full userdata on the stack, giving access to the universe's protected allocator 165 // push a new full userdata on the stack, giving access to the universe's protected allocator
@@ -246,7 +234,7 @@ void initialize_allocator_function(Universe* U, lua_State* L)
246 234
247// ################################################################################################ 235// ################################################################################################
248 236
249static int dummy_writer( lua_State* L, void const* p, size_t sz, void* ud) 237[[nodiscard]] static int dummy_writer(lua_State* L, void const* p, size_t sz, void* ud)
250{ 238{
251 (void)L; (void)p; (void)sz; (void) ud; // unused 239 (void)L; (void)p; (void)sz; (void) ud; // unused
252 return 666; 240 return 666;
@@ -303,7 +291,7 @@ FuncSubType luaG_getfuncsubtype( lua_State *L, int _i)
303 291
304// ################################################################################################# 292// #################################################################################################
305 293
306static lua_CFunction luaG_tocfunction(lua_State* L, int _i, FuncSubType* _out) 294[[nodiscard]] static lua_CFunction luaG_tocfunction(lua_State* L, int _i, FuncSubType* _out)
307{ 295{
308 lua_CFunction p = lua_tocfunction( L, _i); 296 lua_CFunction p = lua_tocfunction( L, _i);
309 *_out = luaG_getfuncsubtype( L, _i); 297 *_out = luaG_getfuncsubtype( L, _i);
@@ -316,7 +304,7 @@ static constexpr UniqueKey LOOKUPCACHE_REGKEY{ 0x837a68dfc6fcb716ull };
316// ################################################################################################# 304// #################################################################################################
317 305
318// inspired from tconcat() in ltablib.c 306// inspired from tconcat() in ltablib.c
319static char const* luaG_pushFQN( lua_State* L, int t, int last, size_t* length) 307[[nodiscard]] static char const* luaG_pushFQN(lua_State* L, int t, int last, size_t* length)
320{ 308{
321 int i = 1; 309 int i = 1;
322 luaL_Buffer b; 310 luaL_Buffer b;
@@ -350,7 +338,7 @@ static char const* luaG_pushFQN( lua_State* L, int t, int last, size_t* length)
350 * if we already had an entry of type [o] = ..., replace the name if the new one is shorter 338 * if we already had an entry of type [o] = ..., replace the name if the new one is shorter
351 * pops the processed object from the stack 339 * pops the processed object from the stack
352 */ 340 */
353static void update_lookup_entry( DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State* L, int _ctx_base, int _depth) 341static void update_lookup_entry(DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State* L, int _ctx_base, int _depth)
354{ 342{
355 // slot 1 in the stack contains the table that receives everything we found 343 // slot 1 in the stack contains the table that receives everything we found
356 int const dest = _ctx_base; 344 int const dest = _ctx_base;
@@ -359,9 +347,9 @@ static void update_lookup_entry( DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State*
359 347
360 size_t prevNameLength, newNameLength; 348 size_t prevNameLength, newNameLength;
361 char const* prevName; 349 char const* prevName;
362 DEBUGSPEW_CODE( char const *newName); 350 DEBUGSPEW_CODE(char const *newName);
363 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "update_lookup_entry()\n" INDENT_END)); 351 DEBUGSPEW_CODE(fprintf( stderr, INDENT_BEGIN "update_lookup_entry()\n" INDENT_END));
364 DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); 352 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed));
365 353
366 STACK_CHECK_START_REL(L, 0); 354 STACK_CHECK_START_REL(L, 0);
367 // first, raise an error if the function is already known 355 // first, raise an error if the function is already known
@@ -374,7 +362,7 @@ static void update_lookup_entry( DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State*
374 ++ _depth; 362 ++ _depth;
375 lua_rawseti( L, fqn, _depth); // ... {bfc} k o name? 363 lua_rawseti( L, fqn, _depth); // ... {bfc} k o name?
376 // generate name 364 // generate name
377 DEBUGSPEW_CODE( newName =) luaG_pushFQN( L, fqn, _depth, &newNameLength); // ... {bfc} k o name? "f.q.n" 365 DEBUGSPEW_OR_NOT(newName, std::ignore) = luaG_pushFQN(L, fqn, _depth, &newNameLength);// ... {bfc} k o name? "f.q.n"
378 // Lua 5.2 introduced a hash randomizer seed which causes table iteration to yield a different key order 366 // Lua 5.2 introduced a hash randomizer seed which causes table iteration to yield a different key order
379 // on different VMs even when the tables are populated the exact same way. 367 // on different VMs even when the tables are populated the exact same way.
380 // When Lua is built with compatibility options (such as LUA_COMPAT_ALL), 368 // When Lua is built with compatibility options (such as LUA_COMPAT_ALL),
@@ -420,8 +408,8 @@ static void update_lookup_entry( DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State*
420 lua_rawseti( L, fqn, _depth); // ... {bfc} k 408 lua_rawseti( L, fqn, _depth); // ... {bfc} k
421 } 409 }
422 -- _depth; 410 -- _depth;
423 STACK_CHECK( L, -1); 411 STACK_CHECK(L, -1);
424 DEBUGSPEW_CODE( -- U->debugspew_indent_depth); 412 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed));
425} 413}
426 414
427// ################################################################################################# 415// #################################################################################################
@@ -435,8 +423,8 @@ static void populate_func_lookup_table_recur(DEBUGSPEW_PARAM_COMMA(Universe* U)
435 int const cache = _ctx_base + 2; 423 int const cache = _ctx_base + 2;
436 // we need to remember subtables to process them after functions encountered at the current depth (breadth-first search) 424 // we need to remember subtables to process them after functions encountered at the current depth (breadth-first search)
437 int const breadth_first_cache = lua_gettop( L) + 1; 425 int const breadth_first_cache = lua_gettop( L) + 1;
438 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "populate_func_lookup_table_recur()\n" INDENT_END)); 426 DEBUGSPEW_CODE(fprintf( stderr, INDENT_BEGIN "populate_func_lookup_table_recur()\n" INDENT_END));
439 DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); 427 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed));
440 428
441 STACK_GROW( L, 6); 429 STACK_GROW( L, 6);
442 // slot _i contains a table where we search for functions (or a full userdata with a metatable) 430 // slot _i contains a table where we search for functions (or a full userdata with a metatable)
@@ -457,8 +445,8 @@ static void populate_func_lookup_table_recur(DEBUGSPEW_PARAM_COMMA(Universe* U)
457 STACK_CHECK( L, 0); 445 STACK_CHECK( L, 0);
458 if( visit_count > 0) 446 if( visit_count > 0)
459 { 447 {
460 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "already visited\n" INDENT_END)); 448 DEBUGSPEW_CODE(fprintf( stderr, INDENT_BEGIN "already visited\n" INDENT_END));
461 DEBUGSPEW_CODE( -- U->debugspew_indent_depth); 449 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed));
462 return; 450 return;
463 } 451 }
464 452
@@ -513,7 +501,7 @@ static void populate_func_lookup_table_recur(DEBUGSPEW_PARAM_COMMA(Universe* U)
513 { 501 {
514 DEBUGSPEW_CODE( char const* key = (lua_type( L, -2) == LUA_TSTRING) ? lua_tostring( L, -2) : "not a string"); 502 DEBUGSPEW_CODE( char const* key = (lua_type( L, -2) == LUA_TSTRING) ? lua_tostring( L, -2) : "not a string");
515 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "table '%s'\n" INDENT_END, key)); 503 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "table '%s'\n" INDENT_END, key));
516 DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); 504 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed));
517 // un-visit this table in case we do need to process it 505 // un-visit this table in case we do need to process it
518 lua_pushvalue( L, -1); // ... {_i} {bfc} k {} {} 506 lua_pushvalue( L, -1); // ... {_i} {bfc} k {} {}
519 lua_rawget( L, cache); // ... {_i} {bfc} k {} n 507 lua_rawget( L, cache); // ... {_i} {bfc} k {} n
@@ -536,7 +524,7 @@ static void populate_func_lookup_table_recur(DEBUGSPEW_PARAM_COMMA(Universe* U)
536 populate_func_lookup_table_recur( DEBUGSPEW_PARAM_COMMA( U) L, _ctx_base, lua_gettop( L), _depth); 524 populate_func_lookup_table_recur( DEBUGSPEW_PARAM_COMMA( U) L, _ctx_base, lua_gettop( L), _depth);
537 lua_pop( L, 1); // ... {_i} {bfc} k 525 lua_pop( L, 1); // ... {_i} {bfc} k
538 STACK_CHECK( L, 2); 526 STACK_CHECK( L, 2);
539 DEBUGSPEW_CODE( -- U->debugspew_indent_depth); 527 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed));
540 } 528 }
541 // remove table name from fqn stack 529 // remove table name from fqn stack
542 lua_pushnil( L); // ... {_i} {bfc} nil 530 lua_pushnil( L); // ... {_i} {bfc} nil
@@ -546,7 +534,7 @@ static void populate_func_lookup_table_recur(DEBUGSPEW_PARAM_COMMA(Universe* U)
546 lua_pop( L, 1); // ... {_i} 534 lua_pop( L, 1); // ... {_i}
547 STACK_CHECK( L, 0); 535 STACK_CHECK( L, 0);
548 // we are done // ... {_i} {bfc} 536 // we are done // ... {_i} {bfc}
549 DEBUGSPEW_CODE( -- U->debugspew_indent_depth); 537 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed));
550} 538}
551 539
552// ################################################################################################# 540// #################################################################################################
@@ -554,14 +542,14 @@ static void populate_func_lookup_table_recur(DEBUGSPEW_PARAM_COMMA(Universe* U)
554/* 542/*
555 * create a "fully.qualified.name" <-> function equivalence database 543 * create a "fully.qualified.name" <-> function equivalence database
556 */ 544 */
557void populate_func_lookup_table( lua_State* L, int _i, char const* name_) 545void populate_func_lookup_table(lua_State* L, int i_, char const* name_)
558{ 546{
559 int const ctx_base = lua_gettop( L) + 1; 547 int const ctx_base = lua_gettop(L) + 1;
560 int const in_base = lua_absindex( L, _i); 548 int const in_base = lua_absindex(L, i_);
561 int start_depth = 0; 549 int start_depth = 0;
562 DEBUGSPEW_CODE( Universe* U = universe_get( L)); 550 DEBUGSPEW_CODE( Universe* U = universe_get( L));
563 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: populate_func_lookup_table('%s')\n" INDENT_END, L, name_ ? name_ : "nullptr")); 551 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: populate_func_lookup_table('%s')\n" INDENT_END, L, name_ ? name_ : "nullptr"));
564 DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); 552 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed));
565 STACK_GROW( L, 3); 553 STACK_GROW( L, 3);
566 STACK_CHECK_START_REL(L, 0); 554 STACK_CHECK_START_REL(L, 0);
567 LOOKUP_REGKEY.pushValue(L); // {} 555 LOOKUP_REGKEY.pushValue(L); // {}
@@ -612,7 +600,7 @@ void populate_func_lookup_table( lua_State* L, int _i, char const* name_)
612 (void) luaL_error( L, "unsupported module type %s", lua_typename( L, lua_type( L, in_base))); 600 (void) luaL_error( L, "unsupported module type %s", lua_typename( L, lua_type( L, in_base)));
613 } 601 }
614 STACK_CHECK( L, 0); 602 STACK_CHECK( L, 0);
615 DEBUGSPEW_CODE( -- U->debugspew_indent_depth); 603 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed));
616} 604}
617 605
618// ################################################################################################# 606// #################################################################################################
@@ -625,7 +613,7 @@ static constexpr UniqueKey REG_MTID{ 0x2e68f9b4751584dcull };
625/* 613/*
626* Get a unique ID for metatable at [i]. 614* Get a unique ID for metatable at [i].
627*/ 615*/
628static lua_Integer get_mt_id( Universe* U, lua_State* L, int i) 616[[nodiscard]] static lua_Integer get_mt_id(Universe* U, lua_State* L, int i)
629{ 617{
630 lua_Integer id; 618 lua_Integer id;
631 619
@@ -666,25 +654,25 @@ static lua_Integer get_mt_id( Universe* U, lua_State* L, int i)
666// ################################################################################################# 654// #################################################################################################
667 655
668// function sentinel used to transfer native functions from/to keeper states 656// function sentinel used to transfer native functions from/to keeper states
669static int func_lookup_sentinel( lua_State* L) 657[[nodiscard]] static int func_lookup_sentinel(lua_State* L)
670{ 658{
671 return luaL_error( L, "function lookup sentinel for %s, should never be called", lua_tostring( L, lua_upvalueindex( 1))); 659 return luaL_error(L, "function lookup sentinel for %s, should never be called", lua_tostring(L, lua_upvalueindex(1)));
672} 660}
673 661
674// ################################################################################################# 662// #################################################################################################
675 663
676// function sentinel used to transfer native table from/to keeper states 664// function sentinel used to transfer native table from/to keeper states
677static int table_lookup_sentinel( lua_State* L) 665[[nodiscard]] static int table_lookup_sentinel(lua_State* L)
678{ 666{
679 return luaL_error( L, "table lookup sentinel for %s, should never be called", lua_tostring( L, lua_upvalueindex( 1))); 667 return luaL_error(L, "table lookup sentinel for %s, should never be called", lua_tostring(L, lua_upvalueindex(1)));
680} 668}
681 669
682// ################################################################################################# 670// #################################################################################################
683 671
684// function sentinel used to transfer cloned full userdata from/to keeper states 672// function sentinel used to transfer cloned full userdata from/to keeper states
685static int userdata_clone_sentinel( lua_State* L) 673[[nodiscard]] static int userdata_clone_sentinel(lua_State* L)
686{ 674{
687 return luaL_error( L, "userdata clone sentinel for %s, should never be called", lua_tostring( L, lua_upvalueindex( 1))); 675 return luaL_error(L, "userdata clone sentinel for %s, should never be called", lua_tostring(L, lua_upvalueindex(1)));
688} 676}
689 677
690// ################################################################################################# 678// #################################################################################################
@@ -692,7 +680,7 @@ static int userdata_clone_sentinel( lua_State* L)
692/* 680/*
693 * retrieve the name of a function/table in the lookup database 681 * retrieve the name of a function/table in the lookup database
694 */ 682 */
695static char const* find_lookup_name(lua_State* L, int i, LookupMode mode_, char const* upName_, size_t* len_) 683[[nodiscard]] static char const* find_lookup_name(lua_State* L, int i, LookupMode mode_, char const* upName_, size_t* len_)
696{ 684{
697 DEBUGSPEW_CODE( Universe* const U = universe_get( L)); 685 DEBUGSPEW_CODE( Universe* const U = universe_get( L));
698 char const* fqn; 686 char const* fqn;
@@ -765,7 +753,7 @@ static char const* find_lookup_name(lua_State* L, int i, LookupMode mode_, char
765/* 753/*
766 * Push a looked-up table, or nothing if we found nothing 754 * Push a looked-up table, or nothing if we found nothing
767 */ 755 */
768static bool lookup_table(lua_State* L2, lua_State* L, int i, LookupMode mode_, char const* upName_) 756[[nodiscard]] static bool lookup_table(Dest L2, Source L, int i, LookupMode mode_, char const* upName_)
769{ 757{
770 // get the name of the table we want to send 758 // get the name of the table we want to send
771 size_t len; 759 size_t len;
@@ -775,7 +763,7 @@ static bool lookup_table(lua_State* L2, lua_State* L, int i, LookupMode mode_, c
775 return false; 763 return false;
776 } 764 }
777 // push the equivalent table in the destination's stack, retrieved from the lookup table 765 // push the equivalent table in the destination's stack, retrieved from the lookup table
778 STACK_CHECK_START_REL(L2, 0); // L // L2 766 STACK_CHECK_START_REL(L2, 0); // L // L2
779 STACK_GROW( L2, 3); // up to 3 slots are necessary on error 767 STACK_GROW( L2, 3); // up to 3 slots are necessary on error
780 switch( mode_) 768 switch( mode_)
781 { 769 {
@@ -785,34 +773,34 @@ static bool lookup_table(lua_State* L2, lua_State* L, int i, LookupMode mode_, c
785 773
786 case LookupMode::ToKeeper: 774 case LookupMode::ToKeeper:
787 // push a sentinel closure that holds the lookup name as upvalue 775 // push a sentinel closure that holds the lookup name as upvalue
788 lua_pushlstring( L2, fqn, len); // "f.q.n" 776 lua_pushlstring(L2, fqn, len); // "f.q.n"
789 lua_pushcclosure( L2, table_lookup_sentinel, 1); // f 777 lua_pushcclosure(L2, table_lookup_sentinel, 1); // f
790 break; 778 break;
791 779
792 case LookupMode::LaneBody: 780 case LookupMode::LaneBody:
793 case LookupMode::FromKeeper: 781 case LookupMode::FromKeeper:
794 LOOKUP_REGKEY.pushValue(L2); // {} 782 LOOKUP_REGKEY.pushValue(L2); // {}
795 STACK_CHECK( L2, 1); 783 STACK_CHECK(L2, 1);
796 ASSERT_L( lua_istable( L2, -1)); 784 ASSERT_L(lua_istable(L2, -1));
797 lua_pushlstring( L2, fqn, len); // {} "f.q.n" 785 lua_pushlstring(L2, fqn, len); // {} "f.q.n"
798 lua_rawget( L2, -2); // {} t 786 lua_rawget(L2, -2); // {} t
799 // we accept destination lookup failures in the case of transfering the Lanes body function (this will result in the source table being cloned instead) 787 // we accept destination lookup failures in the case of transfering the Lanes body function (this will result in the source table being cloned instead)
800 // but not when we extract something out of a keeper, as there is nothing to clone! 788 // but not when we extract something out of a keeper, as there is nothing to clone!
801 if (lua_isnil(L2, -1) && mode_ == LookupMode::LaneBody) 789 if (lua_isnil(L2, -1) && mode_ == LookupMode::LaneBody)
802 { 790 {
803 lua_pop( L2, 2); // 791 lua_pop(L2, 2); //
804 STACK_CHECK( L2, 0); 792 STACK_CHECK(L2, 0);
805 return false; 793 return false;
806 } 794 }
807 else if( !lua_istable( L2, -1)) 795 else if( !lua_istable(L2, -1))
808 { 796 {
809 char const* from, *to; 797 char const* from, *to;
810 lua_getglobal( L, "decoda_name"); // ... t ... decoda_name 798 lua_getglobal(L, "decoda_name"); // ... t ... decoda_name
811 from = lua_tostring( L, -1); 799 from = lua_tostring(L, -1);
812 lua_pop( L, 1); // ... t ... 800 lua_pop(L, 1); // ... t ...
813 lua_getglobal( L2, "decoda_name"); // {} t decoda_name 801 lua_getglobal(L2, "decoda_name"); // {} t decoda_name
814 to = lua_tostring( L2, -1); 802 to = lua_tostring( L2, -1);
815 lua_pop( L2, 1); // {} t 803 lua_pop(L2, 1); // {} t
816 // when mode_ == LookupMode::FromKeeper, L is a keeper state and L2 is not, therefore L2 is the state where we want to raise the error 804 // when mode_ == LookupMode::FromKeeper, L is a keeper state and L2 is not, therefore L2 is the state where we want to raise the error
817 (void) luaL_error( 805 (void) luaL_error(
818 (mode_ == LookupMode::FromKeeper) ? L2 : L 806 (mode_ == LookupMode::FromKeeper) ? L2 : L
@@ -823,7 +811,7 @@ static bool lookup_table(lua_State* L2, lua_State* L, int i, LookupMode mode_, c
823 ); 811 );
824 return false; 812 return false;
825 } 813 }
826 lua_remove( L2, -2); // t 814 lua_remove(L2, -2); // t
827 break; 815 break;
828 } 816 }
829 STACK_CHECK( L2, 1); 817 STACK_CHECK( L2, 1);
@@ -842,13 +830,12 @@ static bool lookup_table(lua_State* L2, lua_State* L, int i, LookupMode mode_, c
842 * Returns true if the table was cached (no need to fill it!); false if 830 * Returns true if the table was cached (no need to fill it!); false if
843 * it's a virgin. 831 * it's a virgin.
844 */ 832 */
845static bool push_cached_table(lua_State* L2, int L2_cache_i, lua_State* L, int i) 833[[nodiscard]] static bool push_cached_table(Dest L2, int L2_cache_i, Source L, int i)
846{ 834{
847 bool not_found_in_cache; // L2
848 void const* p{ lua_topointer(L, i) }; 835 void const* p{ lua_topointer(L, i) };
849 836
850 ASSERT_L( L2_cache_i != 0); 837 ASSERT_L( L2_cache_i != 0);
851 STACK_GROW( L2, 3); 838 STACK_GROW( L2, 3); // L2
852 STACK_CHECK_START_REL(L2, 0); 839 STACK_CHECK_START_REL(L2, 0);
853 840
854 // We don't need to use the from state ('L') in ID since the life span 841 // We don't need to use the from state ('L') in ID since the life span
@@ -859,7 +846,7 @@ static bool push_cached_table(lua_State* L2, int L2_cache_i, lua_State* L, int i
859 //fprintf( stderr, "<< ID: %s >>\n", lua_tostring( L2, -1)); 846 //fprintf( stderr, "<< ID: %s >>\n", lua_tostring( L2, -1));
860 847
861 lua_rawget( L2, L2_cache_i); // ... {cached|nil} 848 lua_rawget( L2, L2_cache_i); // ... {cached|nil}
862 not_found_in_cache = lua_isnil( L2, -1); 849 bool const not_found_in_cache{ lua_isnil(L2, -1) };
863 if( not_found_in_cache) 850 if( not_found_in_cache)
864 { 851 {
865 lua_pop( L2, 1); // ... 852 lua_pop( L2, 1); // ...
@@ -878,83 +865,83 @@ static bool push_cached_table(lua_State* L2, int L2_cache_i, lua_State* L, int i
878/* 865/*
879 * Return some name helping to identify an object 866 * Return some name helping to identify an object
880 */ 867 */
881static int discover_object_name_recur( lua_State* L, int shortest_, int depth_) 868[[nodiscard]] static int discover_object_name_recur(lua_State* L, int shortest_, int depth_)
882{ 869{
883 int const what = 1; // o "r" {c} {fqn} ... {?} 870 int const what = 1; // o "r" {c} {fqn} ... {?}
884 int const result = 2; 871 int const result = 2;
885 int const cache = 3; 872 int const cache = 3;
886 int const fqn = 4; 873 int const fqn = 4;
887 // no need to scan this table if the name we will discover is longer than one we already know 874 // no need to scan this table if the name we will discover is longer than one we already know
888 if( shortest_ <= depth_ + 1) 875 if (shortest_ <= depth_ + 1)
889 { 876 {
890 return shortest_; 877 return shortest_;
891 } 878 }
892 STACK_GROW( L, 3); 879 STACK_GROW(L, 3);
893 STACK_CHECK_START_REL(L, 0); 880 STACK_CHECK_START_REL(L, 0);
894 // stack top contains the table to search in 881 // stack top contains the table to search in
895 lua_pushvalue( L, -1); // o "r" {c} {fqn} ... {?} {?} 882 lua_pushvalue(L, -1); // o "r" {c} {fqn} ... {?} {?}
896 lua_rawget( L, cache); // o "r" {c} {fqn} ... {?} nil/1 883 lua_rawget(L, cache); // o "r" {c} {fqn} ... {?} nil/1
897 // if table is already visited, we are done 884 // if table is already visited, we are done
898 if( !lua_isnil( L, -1)) 885 if( !lua_isnil(L, -1))
899 { 886 {
900 lua_pop( L, 1); // o "r" {c} {fqn} ... {?} 887 lua_pop(L, 1); // o "r" {c} {fqn} ... {?}
901 return shortest_; 888 return shortest_;
902 } 889 }
903 // examined table is not in the cache, add it now 890 // examined table is not in the cache, add it now
904 lua_pop( L, 1); // o "r" {c} {fqn} ... {?} 891 lua_pop(L, 1); // o "r" {c} {fqn} ... {?}
905 lua_pushvalue( L, -1); // o "r" {c} {fqn} ... {?} {?} 892 lua_pushvalue(L, -1); // o "r" {c} {fqn} ... {?} {?}
906 lua_pushinteger( L, 1); // o "r" {c} {fqn} ... {?} {?} 1 893 lua_pushinteger(L, 1); // o "r" {c} {fqn} ... {?} {?} 1
907 lua_rawset( L, cache); // o "r" {c} {fqn} ... {?} 894 lua_rawset(L, cache); // o "r" {c} {fqn} ... {?}
908 // scan table contents 895 // scan table contents
909 lua_pushnil( L); // o "r" {c} {fqn} ... {?} nil 896 lua_pushnil(L); // o "r" {c} {fqn} ... {?} nil
910 while( lua_next( L, -2)) // o "r" {c} {fqn} ... {?} k v 897 while (lua_next(L, -2)) // o "r" {c} {fqn} ... {?} k v
911 { 898 {
912 //char const *const strKey = (lua_type( L, -2) == LUA_TSTRING) ? lua_tostring( L, -2) : nullptr; // only for debugging 899 //char const *const strKey = (lua_type(L, -2) == LUA_TSTRING) ? lua_tostring(L, -2) : nullptr; // only for debugging
913 //lua_Number const numKey = (lua_type( L, -2) == LUA_TNUMBER) ? lua_tonumber( L, -2) : -6666; // only for debugging 900 //lua_Number const numKey = (lua_type(L, -2) == LUA_TNUMBER) ? lua_tonumber(L, -2) : -6666; // only for debugging
914 STACK_CHECK( L, 2); 901 STACK_CHECK(L, 2);
915 // append key name to fqn stack 902 // append key name to fqn stack
916 ++ depth_; 903 ++ depth_;
917 lua_pushvalue( L, -2); // o "r" {c} {fqn} ... {?} k v k 904 lua_pushvalue(L, -2); // o "r" {c} {fqn} ... {?} k v k
918 lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k v 905 lua_rawseti(L, fqn, depth_); // o "r" {c} {fqn} ... {?} k v
919 if( lua_rawequal( L, -1, what)) // is it what we are looking for? 906 if (lua_rawequal(L, -1, what)) // is it what we are looking for?
920 { 907 {
921 STACK_CHECK( L, 2); 908 STACK_CHECK(L, 2);
922 // update shortest name 909 // update shortest name
923 if( depth_ < shortest_) 910 if( depth_ < shortest_)
924 { 911 {
925 shortest_ = depth_; 912 shortest_ = depth_;
926 luaG_pushFQN( L, fqn, depth_, nullptr); // o "r" {c} {fqn} ... {?} k v "fqn" 913 std::ignore = luaG_pushFQN(L, fqn, depth_, nullptr); // o "r" {c} {fqn} ... {?} k v "fqn"
927 lua_replace( L, result); // o "r" {c} {fqn} ... {?} k v 914 lua_replace(L, result); // o "r" {c} {fqn} ... {?} k v
928 } 915 }
929 // no need to search further at this level 916 // no need to search further at this level
930 lua_pop( L, 2); // o "r" {c} {fqn} ... {?} 917 lua_pop(L, 2); // o "r" {c} {fqn} ... {?}
931 STACK_CHECK( L, 0); 918 STACK_CHECK(L, 0);
932 break; 919 break;
933 } 920 }
934 switch( lua_type( L, -1)) // o "r" {c} {fqn} ... {?} k v 921 switch (lua_type(L, -1)) // o "r" {c} {fqn} ... {?} k v
935 { 922 {
936 default: // nil, boolean, light userdata, number and string aren't identifiable 923 default: // nil, boolean, light userdata, number and string aren't identifiable
937 break; 924 break;
938 925
939 case LUA_TTABLE: // o "r" {c} {fqn} ... {?} k {} 926 case LUA_TTABLE: // o "r" {c} {fqn} ... {?} k {}
940 STACK_CHECK( L, 2); 927 STACK_CHECK(L, 2);
941 shortest_ = discover_object_name_recur( L, shortest_, depth_); 928 shortest_ = discover_object_name_recur(L, shortest_, depth_);
942 // search in the table's metatable too 929 // search in the table's metatable too
943 if( lua_getmetatable( L, -1)) // o "r" {c} {fqn} ... {?} k {} {mt} 930 if (lua_getmetatable(L, -1)) // o "r" {c} {fqn} ... {?} k {} {mt}
944 { 931 {
945 if( lua_istable( L, -1)) 932 if( lua_istable(L, -1))
946 { 933 {
947 ++ depth_; 934 ++ depth_;
948 lua_pushliteral( L, "__metatable"); // o "r" {c} {fqn} ... {?} k {} {mt} "__metatable" 935 lua_pushliteral(L, "__metatable"); // o "r" {c} {fqn} ... {?} k {} {mt} "__metatable"
949 lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k {} {mt} 936 lua_rawseti(L, fqn, depth_); // o "r" {c} {fqn} ... {?} k {} {mt}
950 shortest_ = discover_object_name_recur( L, shortest_, depth_); 937 shortest_ = discover_object_name_recur(L, shortest_, depth_);
951 lua_pushnil( L); // o "r" {c} {fqn} ... {?} k {} {mt} nil 938 lua_pushnil(L); // o "r" {c} {fqn} ... {?} k {} {mt} nil
952 lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k {} {mt} 939 lua_rawseti(L, fqn, depth_); // o "r" {c} {fqn} ... {?} k {} {mt}
953 -- depth_; 940 -- depth_;
954 } 941 }
955 lua_pop( L, 1); // o "r" {c} {fqn} ... {?} k {} 942 lua_pop(L, 1); // o "r" {c} {fqn} ... {?} k {}
956 } 943 }
957 STACK_CHECK( L, 2); 944 STACK_CHECK(L, 2);
958 break; 945 break;
959 946
960 case LUA_TTHREAD: // o "r" {c} {fqn} ... {?} k T 947 case LUA_TTHREAD: // o "r" {c} {fqn} ... {?} k T
@@ -962,61 +949,61 @@ static int discover_object_name_recur( lua_State* L, int shortest_, int depth_)
962 break; 949 break;
963 950
964 case LUA_TUSERDATA: // o "r" {c} {fqn} ... {?} k U 951 case LUA_TUSERDATA: // o "r" {c} {fqn} ... {?} k U
965 STACK_CHECK( L, 2); 952 STACK_CHECK(L, 2);
966 // search in the object's metatable (some modules are built that way) 953 // search in the object's metatable (some modules are built that way)
967 if( lua_getmetatable( L, -1)) // o "r" {c} {fqn} ... {?} k U {mt} 954 if (lua_getmetatable(L, -1)) // o "r" {c} {fqn} ... {?} k U {mt}
968 { 955 {
969 if( lua_istable( L, -1)) 956 if (lua_istable(L, -1))
970 { 957 {
971 ++ depth_; 958 ++ depth_;
972 lua_pushliteral( L, "__metatable"); // o "r" {c} {fqn} ... {?} k U {mt} "__metatable" 959 lua_pushliteral(L, "__metatable"); // o "r" {c} {fqn} ... {?} k U {mt} "__metatable"
973 lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k U {mt} 960 lua_rawseti(L, fqn, depth_); // o "r" {c} {fqn} ... {?} k U {mt}
974 shortest_ = discover_object_name_recur( L, shortest_, depth_); 961 shortest_ = discover_object_name_recur(L, shortest_, depth_);
975 lua_pushnil( L); // o "r" {c} {fqn} ... {?} k U {mt} nil 962 lua_pushnil(L); // o "r" {c} {fqn} ... {?} k U {mt} nil
976 lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k U {mt} 963 lua_rawseti(L, fqn, depth_); // o "r" {c} {fqn} ... {?} k U {mt}
977 -- depth_; 964 -- depth_;
978 } 965 }
979 lua_pop( L, 1); // o "r" {c} {fqn} ... {?} k U 966 lua_pop(L, 1); // o "r" {c} {fqn} ... {?} k U
980 } 967 }
981 STACK_CHECK( L, 2); 968 STACK_CHECK(L, 2);
982 // search in the object's uservalues 969 // search in the object's uservalues
983 { 970 {
984 int uvi = 1; 971 int uvi = 1;
985 while( lua_getiuservalue( L, -1, uvi) != LUA_TNONE) // o "r" {c} {fqn} ... {?} k U {u} 972 while (lua_getiuservalue(L, -1, uvi) != LUA_TNONE) // o "r" {c} {fqn} ... {?} k U {u}
986 { 973 {
987 if( lua_istable( L, -1)) // if it is a table, look inside 974 if( lua_istable(L, -1)) // if it is a table, look inside
988 { 975 {
989 ++ depth_; 976 ++ depth_;
990 lua_pushliteral( L, "uservalue"); // o "r" {c} {fqn} ... {?} k v {u} "uservalue" 977 lua_pushliteral(L, "uservalue"); // o "r" {c} {fqn} ... {?} k v {u} "uservalue"
991 lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k v {u} 978 lua_rawseti(L, fqn, depth_); // o "r" {c} {fqn} ... {?} k v {u}
992 shortest_ = discover_object_name_recur( L, shortest_, depth_); 979 shortest_ = discover_object_name_recur(L, shortest_, depth_);
993 lua_pushnil( L); // o "r" {c} {fqn} ... {?} k v {u} nil 980 lua_pushnil(L); // o "r" {c} {fqn} ... {?} k v {u} nil
994 lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k v {u} 981 lua_rawseti(L, fqn, depth_); // o "r" {c} {fqn} ... {?} k v {u}
995 -- depth_; 982 -- depth_;
996 } 983 }
997 lua_pop( L, 1); // o "r" {c} {fqn} ... {?} k U 984 lua_pop(L, 1); // o "r" {c} {fqn} ... {?} k U
998 ++ uvi; 985 ++ uvi;
999 } 986 }
1000 // when lua_getiuservalue() returned LUA_TNONE, it pushed a nil. pop it now 987 // when lua_getiuservalue() returned LUA_TNONE, it pushed a nil. pop it now
1001 lua_pop( L, 1); // o "r" {c} {fqn} ... {?} k U 988 lua_pop(L, 1); // o "r" {c} {fqn} ... {?} k U
1002 } 989 }
1003 STACK_CHECK( L, 2); 990 STACK_CHECK(L, 2);
1004 break; 991 break;
1005 } 992 }
1006 // make ready for next iteration 993 // make ready for next iteration
1007 lua_pop( L, 1); // o "r" {c} {fqn} ... {?} k 994 lua_pop(L, 1); // o "r" {c} {fqn} ... {?} k
1008 // remove name from fqn stack 995 // remove name from fqn stack
1009 lua_pushnil( L); // o "r" {c} {fqn} ... {?} k nil 996 lua_pushnil(L); // o "r" {c} {fqn} ... {?} k nil
1010 lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k 997 lua_rawseti(L, fqn, depth_); // o "r" {c} {fqn} ... {?} k
1011 STACK_CHECK( L, 1); 998 STACK_CHECK(L, 1);
1012 -- depth_; 999 -- depth_;
1013 } // o "r" {c} {fqn} ... {?} 1000 } // o "r" {c} {fqn} ... {?}
1014 STACK_CHECK( L, 0); 1001 STACK_CHECK(L, 0);
1015 // remove the visited table from the cache, in case a shorter path to the searched object exists 1002 // remove the visited table from the cache, in case a shorter path to the searched object exists
1016 lua_pushvalue( L, -1); // o "r" {c} {fqn} ... {?} {?} 1003 lua_pushvalue(L, -1); // o "r" {c} {fqn} ... {?} {?}
1017 lua_pushnil( L); // o "r" {c} {fqn} ... {?} {?} nil 1004 lua_pushnil(L); // o "r" {c} {fqn} ... {?} {?} nil
1018 lua_rawset( L, cache); // o "r" {c} {fqn} ... {?} 1005 lua_rawset(L, cache); // o "r" {c} {fqn} ... {?}
1019 STACK_CHECK( L, 0); 1006 STACK_CHECK(L, 0);
1020 return shortest_; 1007 return shortest_;
1021} 1008}
1022 1009
@@ -1181,7 +1168,7 @@ static char const* vt_names[] =
1181// we have to do it that way because we can't unbalance the stack between buffer operations 1168// we have to do it that way because we can't unbalance the stack between buffer operations
1182// namely, this means we can't push a function on top of the stack *after* we initialize the buffer! 1169// namely, this means we can't push a function on top of the stack *after* we initialize the buffer!
1183// luckily, this also works with earlier Lua versions 1170// luckily, this also works with earlier Lua versions
1184static int buf_writer( lua_State* L, void const* b, size_t size, void* ud) 1171[[nodiscard]] static int buf_writer(lua_State* L, void const* b, size_t size, void* ud)
1185{ 1172{
1186 luaL_Buffer* B = (luaL_Buffer*) ud; 1173 luaL_Buffer* B = (luaL_Buffer*) ud;
1187 if( !B->L) 1174 if( !B->L)
@@ -1194,7 +1181,7 @@ static int buf_writer( lua_State* L, void const* b, size_t size, void* ud)
1194 1181
1195// ################################################################################################# 1182// #################################################################################################
1196 1183
1197static void copy_func(Universe* U, lua_State* L2, int L2_cache_i, lua_State* L, int i, LookupMode mode_, char const* upName_) 1184static void copy_func(Universe* U, Dest L2, int L2_cache_i, Source L, int i, LookupMode mode_, char const* upName_)
1198{ 1185{
1199 int n, needToPush; 1186 int n, needToPush;
1200 luaL_Buffer B; 1187 luaL_Buffer B;
@@ -1348,10 +1335,10 @@ static void copy_func(Universe* U, lua_State* L2, int L2_cache_i, lua_State* L,
1348 * 1335 *
1349 * Always pushes a function to 'L2'. 1336 * Always pushes a function to 'L2'.
1350 */ 1337 */
1351static void copy_cached_func(Universe* U, lua_State* L2, int L2_cache_i, lua_State* L, int i, LookupMode mode_, char const* upName_) 1338static void copy_cached_func(Universe* U, Dest L2, int L2_cache_i, Source L, int i, LookupMode mode_, char const* upName_)
1352{ 1339{
1353 FuncSubType funcSubType; 1340 FuncSubType funcSubType;
1354 /*lua_CFunction cfunc =*/ luaG_tocfunction( L, i, &funcSubType); // nullptr for LuaJIT-fast && bytecode functions 1341 std::ignore = luaG_tocfunction(L, i, &funcSubType); // nullptr for LuaJIT-fast && bytecode functions
1355 if( funcSubType == FST_Bytecode) 1342 if( funcSubType == FST_Bytecode)
1356 { 1343 {
1357 void* const aspointer = (void*)lua_topointer( L, i); 1344 void* const aspointer = (void*)lua_topointer( L, i);
@@ -1403,64 +1390,63 @@ static void copy_cached_func(Universe* U, lua_State* L2, int L2_cache_i, lua_Sta
1403 1390
1404// ################################################################################################# 1391// #################################################################################################
1405 1392
1406static bool push_cached_metatable(Universe* U, lua_State* L2, int L2_cache_i, lua_State* L, int i, LookupMode mode_, char const* upName_) 1393[[nodiscard]] static bool push_cached_metatable(Universe* U, Dest L2, int L2_cache_i, Source L, int i, LookupMode mode_, char const* upName_)
1407{ 1394{
1408 STACK_CHECK_START_REL(L, 0); 1395 STACK_CHECK_START_REL(L, 0);
1409 if( lua_getmetatable( L, i)) // ... mt 1396 if (!lua_getmetatable(L, i)) // ... mt
1410 { 1397 {
1411 lua_Integer const mt_id = get_mt_id( U, L, -1); // Unique id for the metatable 1398 STACK_CHECK( L, 0);
1399 return false;
1400 }
1401 STACK_CHECK(L, 1);
1412 1402
1413 STACK_CHECK_START_REL(L2, 0); 1403 lua_Integer const mt_id{ get_mt_id(U, L, -1) }; // Unique id for the metatable
1414 STACK_GROW( L2, 4); 1404
1415 // do we already know this metatable? 1405 STACK_CHECK_START_REL(L2, 0);
1416 push_registry_subtable( L2, REG_MTID); // _R[REG_MTID] 1406 STACK_GROW(L2, 4);
1417 lua_pushinteger( L2, mt_id); // _R[REG_MTID] id 1407 // do we already know this metatable?
1418 lua_rawget( L2, -2); // _R[REG_MTID] mt? 1408 push_registry_subtable(L2, REG_MTID); // _R[REG_MTID]
1419 1409 lua_pushinteger(L2, mt_id); // _R[REG_MTID] id
1420 STACK_CHECK( L2, 2); 1410 lua_rawget(L2, -2); // _R[REG_MTID] mt|nil
1421 1411 STACK_CHECK(L2, 2);
1422 if( lua_isnil( L2, -1)) 1412
1423 { // L2 did not know the metatable 1413 if (lua_isnil(L2, -1))
1424 lua_pop( L2, 1); // _R[REG_MTID] 1414 { // L2 did not know the metatable
1425 if (inter_copy_one(U, L2, L2_cache_i, L, lua_gettop( L), VT::METATABLE, mode_, upName_)) // _R[REG_MTID] mt 1415 lua_pop(L2, 1); // _R[REG_MTID]
1426 { 1416 if (!inter_copy_one(U, L2, L2_cache_i, L, lua_gettop(L), VT::METATABLE, mode_, upName_)) // _R[REG_MTID] mt?
1427 STACK_CHECK( L2, 2); 1417 {
1428 // mt_id -> metatable 1418 luaL_error(L, "Error copying a metatable"); // doesn't return
1429 lua_pushinteger( L2, mt_id); // _R[REG_MTID] mt id
1430 lua_pushvalue( L2, -2); // _R[REG_MTID] mt id mt
1431 lua_rawset( L2, -4); // _R[REG_MTID] mt
1432
1433 // metatable -> mt_id
1434 lua_pushvalue( L2, -1); // _R[REG_MTID] mt mt
1435 lua_pushinteger( L2, mt_id); // _R[REG_MTID] mt mt id
1436 lua_rawset( L2, -4); // _R[REG_MTID] mt
1437 }
1438 else
1439 {
1440 (void) luaL_error( L, "Error copying a metatable");
1441 }
1442 STACK_CHECK( L2, 2);
1443 } 1419 }
1444 lua_remove( L2, -2); // mt
1445 1420
1446 lua_pop( L, 1); // ... 1421 STACK_CHECK(L2, 2); // _R[REG_MTID] mt
1447 STACK_CHECK( L2, 1); 1422 // mt_id -> metatable
1448 STACK_CHECK( L, 0); 1423 lua_pushinteger(L2, mt_id); // _R[REG_MTID] mt id
1449 return true; 1424 lua_pushvalue(L2, -2); // _R[REG_MTID] mt id mt
1425 lua_rawset(L2, -4); // _R[REG_MTID] mt
1426
1427 // metatable -> mt_id
1428 lua_pushvalue(L2, -1); // _R[REG_MTID] mt mt
1429 lua_pushinteger(L2, mt_id); // _R[REG_MTID] mt mt id
1430 lua_rawset(L2, -4); // _R[REG_MTID] mt
1431 STACK_CHECK(L2, 2);
1450 } 1432 }
1451 STACK_CHECK( L, 0); 1433 lua_remove(L2, -2); // mt
1452 return false; 1434
1435 lua_pop(L, 1); // ...
1436 STACK_CHECK(L2, 1);
1437 STACK_CHECK(L, 0);
1438 return true;
1453} 1439}
1454 1440
1455// ################################################################################################# 1441// #################################################################################################
1456 1442
1457static void inter_copy_keyvaluepair(Universe* U, lua_State* L2, int L2_cache_i, lua_State* L, VT vt_, LookupMode mode_, char const* upName_) 1443[[nodiscard]] static void inter_copy_keyvaluepair(Universe* U, Dest L2, int L2_cache_i, Source L, VT vt_, LookupMode mode_, char const* upName_)
1458{ 1444{
1459 int val_i = lua_gettop(L); 1445 int val_i = lua_gettop(L);
1460 int key_i = val_i - 1; 1446 int key_i = val_i - 1;
1461 1447
1462 // Only basic key types are copied over; others ignored 1448 // Only basic key types are copied over; others ignored
1463 if (inter_copy_one(U, L2, 0 /*key*/, L, key_i, VT::KEY, mode_, upName_)) 1449 if (inter_copy_one(U, L2, L2_cache_i, L, key_i, VT::KEY, mode_, upName_))
1464 { 1450 {
1465 char* valPath = (char*) upName_; 1451 char* valPath = (char*) upName_;
1466 if( U->verboseErrors) 1452 if( U->verboseErrors)
@@ -1526,7 +1512,7 @@ static void inter_copy_keyvaluepair(Universe* U, lua_State* L2, int L2_cache_i,
1526*/ 1512*/
1527static constexpr UniqueKey CLONABLES_CACHE_KEY{ 0xD04EE018B3DEE8F5ull }; 1513static constexpr UniqueKey CLONABLES_CACHE_KEY{ 0xD04EE018B3DEE8F5ull };
1528 1514
1529static bool copyclone(Universe* U, lua_State* L2, int L2_cache_i, lua_State* L, int source_i_, LookupMode mode_, char const* upName_) 1515[[nodiscard]] static bool copyclone(Universe* U, Dest L2, int L2_cache_i, Source L, int source_i_, LookupMode mode_, char const* upName_)
1530{ 1516{
1531 void* const source = lua_touserdata( L, source_i_); 1517 void* const source = lua_touserdata( L, source_i_);
1532 source_i_ = lua_absindex( L, source_i_); 1518 source_i_ = lua_absindex( L, source_i_);
@@ -1612,7 +1598,10 @@ static bool copyclone(Universe* U, lua_State* L2, int L2_cache_i, lua_State* L,
1612 // assign uservalues 1598 // assign uservalues
1613 while( uvi > 0) 1599 while( uvi > 0)
1614 { 1600 {
1615 inter_copy_one(U, L2, L2_cache_i, L, lua_absindex( L, -1), VT::NORMAL, mode_, upName_); // ... u uv 1601 if (!inter_copy_one(U, L2, L2_cache_i, L, lua_absindex(L, -1), VT::NORMAL, mode_, upName_)) // ... u uv
1602 {
1603 luaL_error(L, "Cannot copy upvalue type '%s'", luaL_typename(L, -1)); // doesn't return
1604 }
1616 lua_pop( L, 1); // ... mt __lanesclone [uv]* 1605 lua_pop( L, 1); // ... mt __lanesclone [uv]*
1617 // this pops the value from the stack 1606 // this pops the value from the stack
1618 lua_setiuservalue( L2, -2, uvi); // ... u 1607 lua_setiuservalue( L2, -2, uvi); // ... u
@@ -1641,7 +1630,7 @@ static bool copyclone(Universe* U, lua_State* L2, int L2_cache_i, lua_State* L,
1641 1630
1642// ################################################################################################# 1631// #################################################################################################
1643 1632
1644static bool inter_copy_userdata(Universe* U, lua_State* L2, int L2_cache_i, lua_State* L, int i, VT vt_, LookupMode mode_, char const* upName_) 1633[[nodiscard]] static bool inter_copy_userdata(Universe* U, Dest L2, int L2_cache_i, Source L, int i, VT vt_, LookupMode mode_, char const* upName_)
1645{ 1634{
1646 STACK_CHECK_START_REL(L, 0); 1635 STACK_CHECK_START_REL(L, 0);
1647 STACK_CHECK_START_REL(L2, 0); 1636 STACK_CHECK_START_REL(L2, 0);
@@ -1653,140 +1642,141 @@ static bool inter_copy_userdata(Universe* U, lua_State* L2, int L2_cache_i, lua_
1653 // try clonable userdata first 1642 // try clonable userdata first
1654 if( copyclone( U, L2, L2_cache_i, L, i, mode_, upName_)) 1643 if( copyclone( U, L2, L2_cache_i, L, i, mode_, upName_))
1655 { 1644 {
1656 STACK_CHECK( L, 0); 1645 STACK_CHECK(L, 0);
1657 STACK_CHECK( L2, 1); 1646 STACK_CHECK(L2, 1);
1658 return true; 1647 return true;
1659 } 1648 }
1660 1649
1661 STACK_CHECK( L, 0); 1650 STACK_CHECK(L, 0);
1662 STACK_CHECK( L2, 0); 1651 STACK_CHECK(L2, 0);
1663 1652
1664 // Allow only deep userdata entities to be copied across 1653 // Allow only deep userdata entities to be copied across
1665 DEBUGSPEW_CODE( fprintf( stderr, "USERDATA\n")); 1654 DEBUGSPEW_CODE(fprintf(stderr, "USERDATA\n"));
1666 if( copydeep( U, L2, L2_cache_i, L, i, mode_, upName_)) 1655 if (copydeep(U, L2, L2_cache_i, L, i, mode_, upName_))
1667 { 1656 {
1668 STACK_CHECK( L, 0); 1657 STACK_CHECK(L, 0);
1669 STACK_CHECK( L2, 1); 1658 STACK_CHECK(L2, 1);
1670 return true; 1659 return true;
1671 } 1660 }
1672 1661
1673 STACK_CHECK( L, 0); 1662 STACK_CHECK(L, 0);
1674 STACK_CHECK( L2, 0); 1663 STACK_CHECK(L2, 0);
1675 1664
1676 // Not a deep or clonable full userdata 1665 // Not a deep or clonable full userdata
1677 if( U->demoteFullUserdata) // attempt demotion to light userdata 1666 if (U->demoteFullUserdata) // attempt demotion to light userdata
1678 { 1667 {
1679 void* lud = lua_touserdata( L, i); 1668 void* lud = lua_touserdata(L, i);
1680 lua_pushlightuserdata( L2, lud); 1669 lua_pushlightuserdata(L2, lud);
1681 } 1670 }
1682 else // raise an error 1671 else // raise an error
1683 { 1672 {
1684 (void) luaL_error( L, "can't copy non-deep full userdata across lanes"); 1673 luaL_error(L, "can't copy non-deep full userdata across lanes"); // doesn't return
1685 } 1674 }
1686 1675
1687 STACK_CHECK( L2, 1); 1676 STACK_CHECK(L2, 1);
1688 STACK_CHECK( L, 0); 1677 STACK_CHECK(L, 0);
1689 return true; 1678 return true;
1690} 1679}
1691 1680
1692// ################################################################################################# 1681// #################################################################################################
1693 1682
1694static bool inter_copy_function(Universe* U, lua_State* L2, int L2_cache_i, lua_State* L, int source_i_, VT vt_, LookupMode mode_, char const* upName_) 1683[[nodiscard]] static bool inter_copy_function(Universe* U, Dest L2, int L2_cache_i, Source L, int source_i_, VT vt_, LookupMode mode_, char const* upName_)
1695{ 1684{
1696 if (vt_ == VT::KEY) 1685 if (vt_ == VT::KEY)
1697 { 1686 {
1698 return false; 1687 return false;
1699 } 1688 }
1700 1689
1701 STACK_CHECK_START_REL(L, 0); // L (source) // L2 (destination) 1690 STACK_CHECK_START_REL(L, 0); // L (source) // L2 (destination)
1702 STACK_CHECK_START_REL(L2, 0); 1691 STACK_CHECK_START_REL(L2, 0);
1703 DEBUGSPEW_CODE( fprintf( stderr, "FUNCTION %s\n", upName_)); 1692 DEBUGSPEW_CODE(fprintf(stderr, "FUNCTION %s\n", upName_));
1704 1693
1705 if( lua_tocfunction( L, source_i_) == userdata_clone_sentinel) // we are actually copying a clonable full userdata from a keeper 1694 if (lua_tocfunction(L, source_i_) == userdata_clone_sentinel) // we are actually copying a clonable full userdata from a keeper
1706 { 1695 {
1707 // clone the full userdata again 1696 // clone the full userdata again
1708 size_t userdata_size = 0;
1709 void* source;
1710 void* clone;
1711 1697
1712 // let's see if we already restored this userdata 1698 // let's see if we already restored this userdata
1713 lua_getupvalue( L, source_i_, 2); // ... u 1699 lua_getupvalue(L, source_i_, 2); // ... u
1714 source = lua_touserdata( L, -1); 1700 void* source = lua_touserdata(L, -1);
1715 lua_pushlightuserdata( L2, source); // ... source 1701 lua_pushlightuserdata(L2, source); // ... source
1716 lua_rawget( L2, L2_cache_i); // ... u? 1702 lua_rawget(L2, L2_cache_i); // ... u?
1717 if( !lua_isnil( L2, -1)) 1703 if (!lua_isnil(L2, -1))
1718 { 1704 {
1719 lua_pop( L, 1); // ... 1705 lua_pop(L, 1); // ...
1720 STACK_CHECK( L, 0); 1706 STACK_CHECK(L, 0);
1721 STACK_CHECK( L2, 1); 1707 STACK_CHECK(L2, 1);
1722 return true; 1708 return true;
1723 } 1709 }
1724 lua_pop( L2, 1); // ... 1710 lua_pop(L2, 1); // ...
1725 1711
1726 // this function has 2 upvalues: the fqn of its metatable, and the userdata itself 1712 // this function has 2 upvalues: the fqn of its metatable, and the userdata itself
1727 lookup_table( L2, L, source_i_, mode_, upName_); // ... mt 1713 std::ignore = lookup_table(L2, L, source_i_, mode_, upName_); // ... mt
1728 // originally 'source_i_' slot was the proxy closure, but from now on it indexes the actual userdata we extracted from it 1714 // originally 'source_i_' slot was the proxy closure, but from now on it indexes the actual userdata we extracted from it
1729 source_i_ = lua_gettop( L); 1715 source_i_ = lua_gettop(L);
1730 source = lua_touserdata( L, -1); 1716 source = lua_touserdata(L, -1);
1717 void* clone{ nullptr };
1731 // get the number of bytes to allocate for the clone 1718 // get the number of bytes to allocate for the clone
1732 userdata_size = (size_t) lua_rawlen( L, -1); 1719 size_t const userdata_size{ lua_rawlen(L, -1) };
1733 { 1720 {
1734 // extract uservalues (don't transfer them yet) 1721 // extract uservalues (don't transfer them yet)
1735 int uvi = 0; 1722 int uvi = 0;
1736 while( lua_getiuservalue( L, source_i_, ++ uvi) != LUA_TNONE) {} // ... u uv 1723 while (lua_getiuservalue(L, source_i_, ++uvi) != LUA_TNONE) {} // ... u uv
1737 // when lua_getiuservalue() returned LUA_TNONE, it pushed a nil. pop it now 1724 // when lua_getiuservalue() returned LUA_TNONE, it pushed a nil. pop it now
1738 lua_pop( L, 1); // ... u [uv]* 1725 lua_pop(L, 1); // ... u [uv]*
1739 -- uvi; 1726 --uvi;
1740 STACK_CHECK( L, uvi + 1); 1727 STACK_CHECK(L, uvi + 1);
1741 // create the clone userdata with the required number of uservalue slots 1728 // create the clone userdata with the required number of uservalue slots
1742 clone = lua_newuserdatauv( L2, userdata_size, uvi); // ... mt u 1729 clone = lua_newuserdatauv(L2, userdata_size, uvi); // ... mt u
1743 // add it in the cache 1730 // add it in the cache
1744 lua_pushlightuserdata( L2, source); // ... mt u source 1731 lua_pushlightuserdata(L2, source); // ... mt u source
1745 lua_pushvalue( L2, -2); // ... mt u source u 1732 lua_pushvalue(L2, -2); // ... mt u source u
1746 lua_rawset( L2, L2_cache_i); // ... mt u 1733 lua_rawset(L2, L2_cache_i); // ... mt u
1747 // set metatable 1734 // set metatable
1748 lua_pushvalue( L2, -2); // ... mt u mt 1735 lua_pushvalue(L2, -2); // ... mt u mt
1749 lua_setmetatable( L2, -2); // ... mt u 1736 lua_setmetatable(L2, -2); // ... mt u
1750 // transfer and assign uservalues 1737 // transfer and assign uservalues
1751 while( uvi > 0) 1738 while (uvi > 0)
1752 { 1739 {
1753 inter_copy_one(U, L2, L2_cache_i, L, lua_absindex(L, -1), vt_, mode_, upName_); // ... mt u uv 1740 if (!inter_copy_one(U, L2, L2_cache_i, L, lua_absindex(L, -1), vt_, mode_, upName_)) // ... mt u uv
1754 lua_pop( L, 1); // ... u [uv]* 1741 {
1742 luaL_error(L, "Cannot copy upvalue type '%s'", luaL_typename(L, -1)); // doesn't return
1743 }
1744 lua_pop(L, 1); // ... u [uv]*
1755 // this pops the value from the stack 1745 // this pops the value from the stack
1756 lua_setiuservalue( L2, -2, uvi); // ... mt u 1746 lua_setiuservalue(L2, -2, uvi); // ... mt u
1757 -- uvi; 1747 -- uvi;
1758 } 1748 }
1759 // when we are done, all uservalues are popped from the stack, we can pop the source as well 1749 // when we are done, all uservalues are popped from the stack, we can pop the source as well
1760 lua_pop( L, 1); // ... 1750 lua_pop(L, 1); // ...
1761 STACK_CHECK( L, 0); 1751 STACK_CHECK(L, 0);
1762 STACK_CHECK( L2, 2); // ... mt u 1752 STACK_CHECK(L2, 2); // ... mt u
1763 } 1753 }
1764 // perform the custom cloning part 1754 // perform the custom cloning part
1765 lua_insert( L2, -2); // ... u mt 1755 lua_insert(L2, -2); // ... u mt
1766 // __lanesclone should always exist because we wouldn't be restoring data from a userdata_clone_sentinel closure to begin with 1756 // __lanesclone should always exist because we wouldn't be restoring data from a userdata_clone_sentinel closure to begin with
1767 lua_getfield(L2, -1, "__lanesclone"); // ... u mt __lanesclone 1757 lua_getfield(L2, -1, "__lanesclone"); // ... u mt __lanesclone
1768 lua_remove( L2, -2); // ... u __lanesclone 1758 lua_remove(L2, -2); // ... u __lanesclone
1769 lua_pushlightuserdata( L2, clone); // ... u __lanesclone clone 1759 lua_pushlightuserdata(L2, clone); // ... u __lanesclone clone
1770 lua_pushlightuserdata( L2, source); // ... u __lanesclone clone source 1760 lua_pushlightuserdata(L2, source); // ... u __lanesclone clone source
1771 lua_pushinteger( L2, userdata_size); // ... u __lanesclone clone source size 1761 lua_pushinteger(L2, userdata_size); // ... u __lanesclone clone source size
1772 // clone:__lanesclone(dest, source, size) 1762 // clone:__lanesclone(dest, source, size)
1773 lua_call( L2, 3, 0); // ... u 1763 lua_call(L2, 3, 0); // ... u
1774 } 1764 }
1775 else // regular function 1765 else // regular function
1776 { 1766 {
1777 DEBUGSPEW_CODE( fprintf( stderr, "FUNCTION %s\n", upName_)); 1767 DEBUGSPEW_CODE(fprintf( stderr, "FUNCTION %s\n", upName_));
1778 DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); 1768 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed));
1779 copy_cached_func( U, L2, L2_cache_i, L, source_i_, mode_, upName_); // ... f 1769 copy_cached_func(U, L2, L2_cache_i, L, source_i_, mode_, upName_); // ... f
1780 DEBUGSPEW_CODE( -- U->debugspew_indent_depth); 1770 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed));
1781 } 1771 }
1782 STACK_CHECK( L2, 1); 1772 STACK_CHECK(L2, 1);
1783 STACK_CHECK( L, 0); 1773 STACK_CHECK(L, 0);
1784 return true; 1774 return true;
1785} 1775}
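The clonable-userdata path above finishes by invoking the metatable's __lanesclone as clone:__lanesclone(dest, source, size). For illustration only, a minimal hedged sketch of such a metamethod for a hypothetical trivially-copyable userdata type (names are assumptions, not part of this changeset); a non-POD type would duplicate its owned resources here instead of doing a raw byte copy:

    #include <cstring> // std::memcpy

    // called by Lanes as mt.__lanesclone(dest, source, size)
    static int MyBlob_lanesclone(lua_State* L)
    {
        void* const dest = lua_touserdata(L, 1);
        void const* const source = lua_touserdata(L, 2);
        size_t const size = static_cast<size_t>(lua_tointeger(L, 3));
        std::memcpy(dest, source, size); // a byte copy suffices for a POD payload
        return 0;
    }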
1786 1776
1787// ################################################################################################# 1777// #################################################################################################
1788 1778
1789static bool inter_copy_table(Universe* U, lua_State* L2, int L2_cache_i, lua_State* L, int i, VT vt_, LookupMode mode_, char const* upName_) 1779[[nodiscard]] static bool inter_copy_table(Universe* U, Dest L2, int L2_cache_i, Source L, int i, VT vt_, LookupMode mode_, char const* upName_)
1790{ 1780{
1791 if (vt_ == VT::KEY) 1781 if (vt_ == VT::KEY)
1792 { 1782 {
@@ -1795,15 +1785,15 @@ static bool inter_copy_table(Universe* U, lua_State* L2, int L2_cache_i, lua_Sta
1795 1785
1796 STACK_CHECK_START_REL(L, 0); 1786 STACK_CHECK_START_REL(L, 0);
1797 STACK_CHECK_START_REL(L2, 0); 1787 STACK_CHECK_START_REL(L2, 0);
1798 DEBUGSPEW_CODE( fprintf( stderr, "TABLE %s\n", upName_)); 1788 DEBUGSPEW_CODE(fprintf(stderr, "TABLE %s\n", upName_));
1799 1789
1800 /* 1790 /*
1801 * First, let's try to see if this table is special (aka is it some table that we registered in our lookup databases during module registration?) 1791 * First, let's try to see if this table is special (aka is it some table that we registered in our lookup databases during module registration?)
1802 * Note that this table CAN be a module table, but we just didn't register it, in which case we'll send it through the table cloning mechanism 1792 * Note that this table CAN be a module table, but we just didn't register it, in which case we'll send it through the table cloning mechanism
1803 */ 1793 */
1804 if( lookup_table( L2, L, i, mode_, upName_)) 1794 if (lookup_table(L2, L, i, mode_, upName_))
1805 { 1795 {
1806 ASSERT_L( lua_istable( L2, -1) || (lua_tocfunction( L2, -1) == table_lookup_sentinel)); // from lookup datables // can also be table_lookup_sentinel if this is a table we know 1796 ASSERT_L(lua_istable(L2, -1) || (lua_tocfunction(L2, -1) == table_lookup_sentinel)); // from lookup data. can also be table_lookup_sentinel if this is a table we know
1807 return true; 1797 return true;
1808 } 1798 }
1809 1799
@@ -1816,33 +1806,33 @@ static bool inter_copy_table(Universe* U, lua_State* L2, int L2_cache_i, lua_Sta
1816 * Note: Even metatables need to go through this test; to detect 1806 * Note: Even metatables need to go through this test; to detect
1817 * loops such as those in required module tables (getmetatable(lanes).lanes == lanes) 1807 * loops such as those in required module tables (getmetatable(lanes).lanes == lanes)
1818 */ 1808 */
1819 if( push_cached_table( L2, L2_cache_i, L, i)) 1809 if (push_cached_table(L2, L2_cache_i, L, i))
1820 { 1810 {
1821 ASSERT_L( lua_istable( L2, -1)); // from cache 1811 ASSERT_L(lua_istable(L2, -1)); // from cache
1822 return true; 1812 return true;
1823 } 1813 }
1824 ASSERT_L( lua_istable( L2, -1)); 1814 ASSERT_L(lua_istable(L2, -1));
1825 1815
1826 STACK_GROW( L, 2); 1816 STACK_GROW(L, 2);
1827 STACK_GROW( L2, 2); 1817 STACK_GROW(L2, 2);
1828 1818
1829 lua_pushnil( L); // start iteration 1819 lua_pushnil(L); // start iteration
1830 while( lua_next( L, i)) 1820 while (lua_next(L, i))
1831 { 1821 {
1832 // need a function to prevent overflowing the stack with verboseErrors-induced alloca() 1822 // need a function to prevent overflowing the stack with verboseErrors-induced alloca()
1833 inter_copy_keyvaluepair(U, L2, L2_cache_i, L, vt_, mode_, upName_); 1823 inter_copy_keyvaluepair(U, L2, L2_cache_i, L, vt_, mode_, upName_);
1834 lua_pop( L, 1); // pop value (next round) 1824 lua_pop(L, 1); // pop value (next round)
1835 } 1825 }
1836 STACK_CHECK( L, 0); 1826 STACK_CHECK(L, 0);
1837 STACK_CHECK( L2, 1); 1827 STACK_CHECK(L2, 1);
1838 1828
1839 // Metatables are expected to be immutable, and copied only once. 1829 // Metatables are expected to be immutable, and copied only once.
1840 if( push_cached_metatable( U, L2, L2_cache_i, L, i, mode_, upName_)) // ... t mt? 1830 if (push_cached_metatable(U, L2, L2_cache_i, L, i, mode_, upName_)) // ... t mt?
1841 { 1831 {
1842 lua_setmetatable( L2, -2); // ... t 1832 lua_setmetatable(L2, -2); // ... t
1843 } 1833 }
1844 STACK_CHECK( L2, 1); 1834 STACK_CHECK(L2, 1);
1845 STACK_CHECK( L, 0); 1835 STACK_CHECK(L, 0);
1846 return true; 1836 return true;
1847} 1837}
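The lookup shortcut taken above only applies to tables previously recorded in the lookup databases. As a hedged host-side sketch (the module name is a placeholder), a table and its function entries can be recorded with populate_func_lookup_table() declared in tools.h below, so later transfers resolve by fully-qualified name instead of by cloning:

    lua_getglobal(L, "mymodule");                              // mymodule
    populate_func_lookup_table(L, lua_gettop(L), "mymodule");  // record fqn entries for the table's contents
    lua_pop(L, 1);                                             //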
1848 1838
@@ -1858,21 +1848,21 @@ static bool inter_copy_table(Universe* U, lua_State* L2, int L2_cache_i, lua_Sta
1858* 1848*
1859* Returns true if value was pushed, false if its type is non-supported. 1849* Returns true if value was pushed, false if its type is non-supported.
1860*/ 1850*/
1861bool inter_copy_one(Universe* U, lua_State* L2, int L2_cache_i, lua_State* L, int i, VT vt_, LookupMode mode_, char const* upName_) 1851[[nodiscard]] bool inter_copy_one(Universe* U, Dest L2, int L2_cache_i, Source L, int i, VT vt_, LookupMode mode_, char const* upName_)
1862{ 1852{
1863 bool ret{ true }; 1853 bool ret{ true };
1864 int val_type = lua_type( L, i); 1854 LuaType val_type{ lua_type_as_enum(L, i) };
1865 static int const pod_mask = (1 << LUA_TNIL) | (1 << LUA_TBOOLEAN) | (1 << LUA_TLIGHTUSERDATA) | (1 << LUA_TNUMBER) | (1 << LUA_TSTRING); 1855 static constexpr int pod_mask = (1 << LUA_TNIL) | (1 << LUA_TBOOLEAN) | (1 << LUA_TLIGHTUSERDATA) | (1 << LUA_TNUMBER) | (1 << LUA_TSTRING);
1866 STACK_GROW( L2, 1); 1856 STACK_GROW( L2, 1);
1867 STACK_CHECK_START_REL(L, 0); // L // L2 1857 STACK_CHECK_START_REL(L, 0); // L // L2
1868 STACK_CHECK_START_REL(L2, 0); // L // L2 1858 STACK_CHECK_START_REL(L2, 0); // L // L2
1869 1859
1870 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "inter_copy_one()\n" INDENT_END)); 1860 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "inter_copy_one()\n" INDENT_END));
1871 DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); 1861 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed));
1872 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%s %s: " INDENT_END, lua_type_names[val_type], vt_names[static_cast<int>(vt_)])); 1862 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%s %s: " INDENT_END, lua_type_names[val_type], vt_names[static_cast<int>(vt_)]));
1873 1863
1874 // Non-POD can be skipped if its metatable contains { __lanesignore = true } 1864 // Non-POD can be skipped if its metatable contains { __lanesignore = true }
1875 if( ((1 << val_type) & pod_mask) == 0) 1865 if( ((1 << static_cast<int>(val_type)) & pod_mask) == 0)
1876 { 1866 {
1877 if( lua_getmetatable( L, i)) // ... mt 1867 if( lua_getmetatable( L, i)) // ... mt
1878 { 1868 {
@@ -1880,7 +1870,7 @@ bool inter_copy_one(Universe* U, lua_State* L2, int L2_cache_i, lua_State* L, in
1880 if( lua_isboolean( L, -1) && lua_toboolean( L, -1)) 1870 if( lua_isboolean( L, -1) && lua_toboolean( L, -1))
1881 { 1871 {
1882 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "__lanesignore -> LUA_TNIL\n" INDENT_END)); 1872 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "__lanesignore -> LUA_TNIL\n" INDENT_END));
1883 val_type = LUA_TNIL; 1873 val_type = LuaType::NIL;
1884 } 1874 }
1885 lua_pop( L, 2); // ... 1875 lua_pop( L, 2); // ...
1886 } 1876 }
@@ -1892,7 +1882,7 @@ bool inter_copy_one(Universe* U, lua_State* L2, int L2_cache_i, lua_State* L, in
1892 { 1882 {
1893 /* Basic types allowed both as values, and as table keys */ 1883 /* Basic types allowed both as values, and as table keys */
1894 1884
1895 case LUA_TBOOLEAN: 1885 case LuaType::BOOLEAN:
1896 { 1886 {
1897 int const v{ lua_toboolean(L, i) }; 1887 int const v{ lua_toboolean(L, i) };
1898 DEBUGSPEW_CODE( fprintf( stderr, "%s\n", v ? "true" : "false")); 1888 DEBUGSPEW_CODE( fprintf( stderr, "%s\n", v ? "true" : "false"));
@@ -1900,7 +1890,7 @@ bool inter_copy_one(Universe* U, lua_State* L2, int L2_cache_i, lua_State* L, in
1900 } 1890 }
1901 break; 1891 break;
1902 1892
1903 case LUA_TNUMBER: 1893 case LuaType::NUMBER:
1904 /* LNUM patch support (keeping integer accuracy) */ 1894 /* LNUM patch support (keeping integer accuracy) */
1905#if defined LUA_LNUM || LUA_VERSION_NUM >= 503 1895#if defined LUA_LNUM || LUA_VERSION_NUM >= 503
1906 if( lua_isinteger( L, i)) 1896 if( lua_isinteger( L, i))
@@ -1919,7 +1909,7 @@ bool inter_copy_one(Universe* U, lua_State* L2, int L2_cache_i, lua_State* L, in
1919 } 1909 }
1920 break; 1910 break;
1921 1911
1922 case LUA_TSTRING: 1912 case LuaType::STRING:
1923 { 1913 {
1924 size_t len; 1914 size_t len;
1925 char const* s = lua_tolstring( L, i, &len); 1915 char const* s = lua_tolstring( L, i, &len);
@@ -1928,7 +1918,7 @@ bool inter_copy_one(Universe* U, lua_State* L2, int L2_cache_i, lua_State* L, in
1928 } 1918 }
1929 break; 1919 break;
1930 1920
1931 case LUA_TLIGHTUSERDATA: 1921 case LuaType::LIGHTUSERDATA:
1932 { 1922 {
1933 void* p = lua_touserdata( L, i); 1923 void* p = lua_touserdata( L, i);
1934 DEBUGSPEW_CODE( fprintf( stderr, "%p\n", p)); 1924 DEBUGSPEW_CODE( fprintf( stderr, "%p\n", p));
@@ -1938,11 +1928,11 @@ bool inter_copy_one(Universe* U, lua_State* L2, int L2_cache_i, lua_State* L, in
1938 1928
1939 /* The following types are not allowed as table keys */ 1929 /* The following types are not allowed as table keys */
1940 1930
1941 case LUA_TUSERDATA: 1931 case LuaType::USERDATA:
1942 ret = inter_copy_userdata(U, L2, L2_cache_i, L, i, vt_, mode_, upName_); 1932 ret = inter_copy_userdata(U, L2, L2_cache_i, L, i, vt_, mode_, upName_);
1943 break; 1933 break;
1944 1934
1945 case LUA_TNIL: 1935 case LuaType::NIL:
1946 if (vt_ == VT::KEY) 1936 if (vt_ == VT::KEY)
1947 { 1937 {
1948 ret = false; 1938 ret = false;
@@ -1951,29 +1941,31 @@ bool inter_copy_one(Universe* U, lua_State* L2, int L2_cache_i, lua_State* L, in
1951 lua_pushnil( L2); 1941 lua_pushnil( L2);
1952 break; 1942 break;
1953 1943
1954 case LUA_TFUNCTION: 1944 case LuaType::FUNCTION:
1955 ret = inter_copy_function(U, L2, L2_cache_i, L, i, vt_, mode_, upName_); 1945 ret = inter_copy_function(U, L2, L2_cache_i, L, i, vt_, mode_, upName_);
1956 break; 1946 break;
1957 1947
1958 case LUA_TTABLE: 1948 case LuaType::TABLE:
1959 ret = inter_copy_table(U, L2, L2_cache_i, L, i, vt_, mode_, upName_); 1949 ret = inter_copy_table(U, L2, L2_cache_i, L, i, vt_, mode_, upName_);
1960 break; 1950 break;
1961 1951
1962 /* The following types cannot be copied */ 1952 /* The following types cannot be copied */
1963 1953
1964 case 10: // LuaJIT CDATA 1954 case LuaType::CDATA:
1965 case LUA_TTHREAD: 1955 case LuaType::THREAD:
1966 ret = false; 1956 ret = false;
1967 break; 1957 break;
1968 } 1958 }
1969 1959
1970 DEBUGSPEW_CODE( -- U->debugspew_indent_depth); 1960 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed));
1971 1961
1972 STACK_CHECK( L2, ret ? 1 : 0); 1962 STACK_CHECK( L2, ret ? 1 : 0);
1973 STACK_CHECK( L, 0); 1963 STACK_CHECK( L, 0);
1974 return ret; 1964 return ret;
1975} 1965}
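The __lanesignore test above gives non-POD values an opt-out. A minimal hedged sketch: giving a full userdata (or table) sitting at the top of the source stack a metatable with __lanesignore = true makes inter_copy_one() transfer it as nil instead of refusing the copy:

    lua_newtable(L);                        // ... v mt
    lua_pushboolean(L, 1);                  // ... v mt true
    lua_setfield(L, -2, "__lanesignore");   // ... v mt
    lua_setmetatable(L, -2);                // ... v   (v now crosses as nil)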
1976 1966
1967// #################################################################################################
1968
1977/* 1969/*
1978* Akin to 'lua_xmove' but copies values between _any_ Lua states. 1970* Akin to 'lua_xmove' but copies values between _any_ Lua states.
1979* 1971*
@@ -1981,124 +1973,137 @@ bool inter_copy_one(Universe* U, lua_State* L2, int L2_cache_i, lua_State* L, in
1981* 1973*
1982* Note: Parameters are in this order ('L' = from first) to be same as 'lua_xmove'. 1974* Note: Parameters are in this order ('L' = from first) to be same as 'lua_xmove'.
1983*/ 1975*/
1984int luaG_inter_copy(Universe* U, lua_State* L, lua_State* L2, int n, LookupMode mode_) 1976[[nodiscard]] InterCopyResult luaG_inter_copy(Universe* U, Source L, Dest L2, int n, LookupMode mode_)
1985{ 1977{
1986 int top_L = lua_gettop(L); // ... {}n 1978 int const top_L{ lua_gettop(L) }; // ... {}n
1987 int top_L2 = lua_gettop(L2); // ... 1979 int const top_L2{ lua_gettop(L2) }; // ...
1988 int i, j;
1989 char tmpBuf[16]; 1980 char tmpBuf[16];
1990 char const* pBuf = U->verboseErrors ? tmpBuf : "?"; 1981 char const* pBuf{ U->verboseErrors ? tmpBuf : "?" };
1991 bool copyok{ true };
1992 1982
1993 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "luaG_inter_copy()\n" INDENT_END)); 1983 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "luaG_inter_copy()\n" INDENT_END));
1994 DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); 1984 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed));
1995 1985
1996 if( n > top_L) 1986 if (n > top_L)
1997 { 1987 {
1998 // requesting to copy more than is available? 1988 // requesting to copy more than is available?
1999 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "nothing to copy()\n" INDENT_END)); 1989 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "nothing to copy()\n" INDENT_END));
2000 DEBUGSPEW_CODE( -- U->debugspew_indent_depth); 1990 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed));
2001 return -1; 1991 return InterCopyResult::NotEnoughValues;
2002 } 1992 }
2003 1993
2004 STACK_CHECK_START_REL(L2, 0); 1994 STACK_CHECK_START_REL(L2, 0);
2005 STACK_GROW( L2, n + 1); 1995 STACK_GROW(L2, n + 1);
2006 1996
2007 /* 1997 /*
2008 * Make a cache table for the duration of this copy. Collects tables and 1998 * Make a cache table for the duration of this copy. Collects tables and
2009 * function entries, avoiding the same entries to be passed on as multiple 1999 * function entries, avoiding the same entries to be passed on as multiple
2010 * copies. ESSENTIAL i.e. for handling upvalue tables in the right manner! 2000 * copies. ESSENTIAL i.e. for handling upvalue tables in the right manner!
2011 */ 2001 */
2012 lua_newtable( L2); // ... cache 2002 lua_newtable(L2); // ... cache
2013 2003
2014 STACK_CHECK_START_REL(L, 0); 2004 STACK_CHECK_START_REL(L, 0);
2015 for( i = top_L - n + 1, j = 1; i <= top_L; ++ i, ++ j) 2005 bool copyok{ true };
2006 for (int i = top_L - n + 1, j = 1; i <= top_L; ++i, ++j)
2016 { 2007 {
2017 if( U->verboseErrors) 2008 if (U->verboseErrors)
2018 { 2009 {
2019 sprintf( tmpBuf, "arg_%d", j); 2010 sprintf(tmpBuf, "arg_%d", j);
2020 } 2011 }
2021 copyok = inter_copy_one(U, L2, top_L2 + 1, L, i, VT::NORMAL, mode_, pBuf); // ... cache {}n 2012 copyok = inter_copy_one(U, L2, top_L2 + 1, L, i, VT::NORMAL, mode_, pBuf); // ... cache {}n
2022 if( !copyok) 2013 if (!copyok)
2023 { 2014 {
2024 break; 2015 break;
2025 } 2016 }
2026 } 2017 }
2027 STACK_CHECK( L, 0); 2018 STACK_CHECK(L, 0);
2028 2019
2029 DEBUGSPEW_CODE( -- U->debugspew_indent_depth); 2020 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed));
2030 2021
2031 if( copyok) 2022 if (copyok)
2032 { 2023 {
2033 STACK_CHECK( L2, n + 1); 2024 STACK_CHECK(L2, n + 1);
2034 // Remove the cache table. Persistent caching would cause i.e. multiple 2025 // Remove the cache table. Persistent caching would cause i.e. multiple
2035 // messages passed in the same table to use the same table also in receiving end. 2026 // messages passed in the same table to use the same table also in receiving end.
2036 lua_remove( L2, top_L2 + 1); 2027 lua_remove(L2, top_L2 + 1);
2037 return 0; 2028 return InterCopyResult::Success;
2038 } 2029 }
2039 2030
2040 // error -> pop everything from the target state stack 2031 // error -> pop everything from the target state stack
2041 lua_settop( L2, top_L2); 2032 lua_settop(L2, top_L2);
2042 STACK_CHECK( L2, 0); 2033 STACK_CHECK(L2, 0);
2043 return -2; 2034 return InterCopyResult::Error;
2044} 2035}
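A hedged caller-side sketch of the new tri-state result; 'src' and 'dst' stand for Source/Dest wrappers already in hand (assumed names, not from this changeset):

    // copy the two topmost values of src onto dst
    InterCopyResult const r{ luaG_inter_copy(U, src, dst, 2, LookupMode::LaneBody) };
    if (r == InterCopyResult::NotEnoughValues)
    {
        // fewer than 2 values were available on the source stack
    }
    else if (r == InterCopyResult::Error)
    {
        // dst's stack has already been restored to its previous top
    }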
2045 2036
2037// #################################################################################################
2046 2038
2047int luaG_inter_move(Universe* U, lua_State* L, lua_State* L2, int n, LookupMode mode_) 2039[[nodiscard]] InterCopyResult luaG_inter_move(Universe* U, Source L, Dest L2, int n_, LookupMode mode_)
2048{ 2040{
2049 int ret = luaG_inter_copy( U, L, L2, n, mode_); 2041 InterCopyResult const ret{ luaG_inter_copy(U, L, L2, n_, mode_) };
2050 lua_pop( L, (int) n); 2042 lua_pop( L, n_);
2051 return ret; 2043 return ret;
2052} 2044}
2053 2045
2054int luaG_inter_copy_package( Universe* U, lua_State* L, lua_State* L2, int package_idx_, LookupMode mode_) 2046// #################################################################################################
2047
2048// transfers stuff from L->_G["package"] to L2->_G["package"]
2049// returns InterCopyResult::Success if everything is fine
2050// returns InterCopyResult::Error if pushed an error message in L
2051// else raise an error in L
2052[[nodiscard]] InterCopyResult luaG_inter_copy_package(Universe* U, Source L, Dest L2, int package_idx_, LookupMode mode_)
2055{ 2053{
2056 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "luaG_inter_copy_package()\n" INDENT_END)); 2054 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "luaG_inter_copy_package()\n" INDENT_END));
2057 DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); 2055 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed));
2058 // package 2056 // package
2059 STACK_CHECK_START_REL(L, 0); 2057 STACK_CHECK_START_REL(L, 0);
2060 STACK_CHECK_START_REL(L2, 0); 2058 STACK_CHECK_START_REL(L2, 0);
2061 package_idx_ = lua_absindex( L, package_idx_); 2059 package_idx_ = lua_absindex(L, package_idx_);
2062 if( lua_type( L, package_idx_) != LUA_TTABLE) 2060 if (lua_type(L, package_idx_) != LUA_TTABLE)
2063 { 2061 {
2064 lua_pushfstring( L, "expected package as table, got %s", luaL_typename( L, package_idx_)); 2062 lua_pushfstring(L, "expected package as table, got %s", luaL_typename(L, package_idx_));
2065 STACK_CHECK( L, 1); 2063 STACK_CHECK(L, 1);
2066 // raise the error when copying from lane to lane, else just leave it on the stack to be raised later 2064 // raise the error when copying from lane to lane, else just leave it on the stack to be raised later
2067 return (mode_ == LookupMode::LaneBody) ? lua_error(L) : 1; 2065 if (mode_ == LookupMode::LaneBody)
2066 {
2067 lua_error(L); // doesn't return
2068 }
2069 return InterCopyResult::Error;
2068 } 2070 }
2069 lua_getglobal( L2, "package"); 2071 lua_getglobal(L2, "package");
2070 if( !lua_isnil( L2, -1)) // package library not loaded: do nothing 2072 if (!lua_isnil(L2, -1)) // package library not loaded: do nothing
2071 { 2073 {
2072 int i;
2073 // package.loaders is renamed package.searchers in Lua 5.2 2074 // package.loaders is renamed package.searchers in Lua 5.2
2074 // but don't copy it anyway, as the function names change depending on the slot index! 2075 // but don't copy it anyway, as the function names change depending on the slot index!
2075 // users should provide an on_state_create function to setup custom loaders instead 2076 // users should provide an on_state_create function to setup custom loaders instead
2076 // don't copy package.preload in keeper states (they don't know how to translate functions) 2077 // don't copy package.preload in keeper states (they don't know how to translate functions)
2077 char const* entries[] = { "path", "cpath", (mode_ == LookupMode::LaneBody) ? "preload" : nullptr /*, (LUA_VERSION_NUM == 501) ? "loaders" : "searchers"*/, nullptr }; 2078 char const* entries[] = { "path", "cpath", (mode_ == LookupMode::LaneBody) ? "preload" : nullptr /*, (LUA_VERSION_NUM == 501) ? "loaders" : "searchers"*/, nullptr };
2078 for( i = 0; entries[i]; ++ i) 2079 for (char const* const entry : entries)
2079 { 2080 {
2080 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "package.%s\n" INDENT_END, entries[i])); 2081 if (!entry)
2081 lua_getfield( L, package_idx_, entries[i]);
2082 if( lua_isnil( L, -1))
2083 { 2082 {
2084 lua_pop( L, 1); 2083 continue;
2084 }
2085 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "package.%s\n" INDENT_END, entry));
2086 lua_getfield(L, package_idx_, entry);
2087 if (lua_isnil(L, -1))
2088 {
2089 lua_pop(L, 1);
2085 } 2090 }
2086 else 2091 else
2087 { 2092 {
2088 DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); 2093 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed));
2089 luaG_inter_move( U, L, L2, 1, mode_); // moves the entry to L2 2094 std::ignore = luaG_inter_move(U, L, L2, 1, mode_); // moves the entry to L2
2090 DEBUGSPEW_CODE( -- U->debugspew_indent_depth); 2095 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed));
2091 lua_setfield( L2, -2, entries[i]); // set package[entries[i]] 2096 lua_setfield(L2, -2, entry); // set package[entry]
2092 } 2097 }
2093 } 2098 }
2094 } 2099 }
2095 else 2100 else
2096 { 2101 {
2097 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "'package' not loaded, nothing to do\n" INDENT_END)); 2102 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "'package' not loaded, nothing to do\n" INDENT_END));
2098 } 2103 }
2099 lua_pop( L2, 1); 2104 lua_pop(L2, 1);
2100 STACK_CHECK( L2, 0); 2105 STACK_CHECK(L2, 0);
2101 STACK_CHECK( L, 0); 2106 STACK_CHECK(L, 0);
2102 DEBUGSPEW_CODE( -- U->debugspew_indent_depth); 2107 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed));
2103 return 0; 2108 return InterCopyResult::Success;
2104} 2109}
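A hedged sketch of the intended call site, again with U/src/dst assumed to be in hand. In LookupMode::LaneBody a bad 'package' argument raises in the source state, so a result other than Success is only returned in the other modes:

    lua_getglobal(src, "package");                          // ... package
    InterCopyResult const rc{ luaG_inter_copy_package(U, src, dst, -1, LookupMode::LaneBody) };
    lua_pop(src, 1);                                        // ...
    // on Success, package.path/cpath (and preload for lane bodies) are now set in dst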
diff --git a/src/tools.h b/src/tools.h
index c1a8534..dce7378 100644
--- a/src/tools.h
+++ b/src/tools.h
@@ -1,23 +1,21 @@
1#pragma once 1#pragma once
2 2
3#include "threading.h"
4#include "deep.h" 3#include "deep.h"
5
6#include "macros_and_utils.h" 4#include "macros_and_utils.h"
7 5
8// forwards 6// forwards
9struct Universe; 7class Universe;
10 8
11// ################################################################################################ 9// ################################################################################################
12 10
13#ifdef _DEBUG 11#ifdef _DEBUG
14void luaG_dump( lua_State* L); 12void luaG_dump(lua_State* L);
15#endif // _DEBUG 13#endif // _DEBUG
16 14
17// ################################################################################################ 15// ################################################################################################
18 16
19void push_registry_subtable_mode( lua_State* L, UniqueKey key_, const char* mode_); 17void push_registry_subtable_mode(lua_State* L, UniqueKey key_, const char* mode_);
20void push_registry_subtable( lua_State* L, UniqueKey key_); 18void push_registry_subtable(lua_State* L, UniqueKey key_);
21 19
22enum class VT 20enum class VT
23{ 21{
@@ -25,19 +23,26 @@ enum class VT
25 KEY, 23 KEY,
26 METATABLE 24 METATABLE
27}; 25};
28bool inter_copy_one(Universe* U, lua_State* L2, int L2_cache_i, lua_State* L, int i, VT vt_, LookupMode mode_, char const* upName_);
29 26
30// ################################################################################################ 27enum class InterCopyResult
28{
29 Success,
30 NotEnoughValues,
31 Error
32};
33
34[[nodiscard]] bool inter_copy_one(Universe* U, Dest L2, int L2_cache_i, Source L, int i, VT vt_, LookupMode mode_, char const* upName_);
31 35
32int luaG_inter_copy_package( Universe* U, lua_State* L, lua_State* L2, int package_idx_, LookupMode mode_); 36// ################################################################################################
33 37
34int luaG_inter_copy(Universe* U, lua_State* L, lua_State* L2, int n, LookupMode mode_); 38[[nodiscard]] InterCopyResult luaG_inter_copy_package(Universe* U, Source L, Dest L2, int package_idx_, LookupMode mode_);
35int luaG_inter_move(Universe* U, lua_State* L, lua_State* L2, int n, LookupMode mode_); 39[[nodiscard]] InterCopyResult luaG_inter_copy(Universe* U, Source L, Dest L2, int n, LookupMode mode_);
40[[nodiscard]] InterCopyResult luaG_inter_move(Universe* U, Source L, Dest L2, int n, LookupMode mode_);
36 41
37int luaG_nameof( lua_State* L); 42[[nodiscard]] int luaG_nameof(lua_State* L);
38 43
39void populate_func_lookup_table( lua_State* L, int _i, char const* _name); 44void populate_func_lookup_table(lua_State* L, int _i, char const* _name);
40void initialize_allocator_function( Universe* U, lua_State* L); 45void initialize_allocator_function(Universe* U, lua_State* L);
41 46
42// ################################################################################################ 47// ################################################################################################
43 48
diff --git a/src/uniquekey.h b/src/uniquekey.h
index e592f0a..a89ecd3 100644
--- a/src/uniquekey.h
+++ b/src/uniquekey.h
@@ -13,7 +13,7 @@ class UniqueKey
13 13
14 public: 14 public:
15 15
16 constexpr UniqueKey(uint64_t val_) 16 constexpr explicit UniqueKey(uint64_t val_)
17#if LUAJIT_FLAVOR() == 64 // building against LuaJIT headers for 64 bits, light userdata is restricted to 47 significant bits, because LuaJIT uses the other bits for internal optimizations 17#if LUAJIT_FLAVOR() == 64 // building against LuaJIT headers for 64 bits, light userdata is restricted to 47 significant bits, because LuaJIT uses the other bits for internal optimizations
18 : m_storage{ static_cast<uintptr_t>(val_ & 0x7fffffffffffull) } 18 : m_storage{ static_cast<uintptr_t>(val_ & 0x7fffffffffffull) }
19#else // LUAJIT_FLAVOR() 19#else // LUAJIT_FLAVOR()
diff --git a/src/universe.cpp b/src/universe.cpp
index 66da147..4c53987 100644
--- a/src/universe.cpp
+++ b/src/universe.cpp
@@ -43,6 +43,35 @@ static constexpr UniqueKey UNIVERSE_LIGHT_REGKEY{ 0x3663C07C742CEB81ull };
43 43
44// ################################################################################################ 44// ################################################################################################
45 45
46Universe::Universe()
47{
48 //---
49 // Linux needs SCHED_RR to change thread priorities, and that is only
50 // allowed for sudo'ers. SCHED_OTHER (default) has no priorities.
51 // SCHED_OTHER threads are always lower priority than SCHED_RR.
52 //
53 // ^-- those apply to 2.6 kernel. IF **wishful thinking** these
54 // constraints will change in the future, non-sudo priorities can
55 // be enabled also for Linux.
56 //
57#ifdef PLATFORM_LINUX
58 // If lower priorities (-2..-1) are wanted, we need to lift the main
59 // thread to SCHED_RR and 50 (medium) level. Otherwise, we're always below
60 // the launched threads (even -2).
61 //
62#ifdef LINUX_SCHED_RR
63 if (m_sudo)
64 {
65 struct sched_param sp;
66 sp.sched_priority = _PRIO_0;
67 PT_CALL(pthread_setschedparam(pthread_self(), SCHED_RR, &sp));
68 }
69#endif // LINUX_SCHED_RR
70#endif // PLATFORM_LINUX
71}
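For context, a hedged standalone POSIX sketch of the constraint described above: switching to SCHED_RR only succeeds for a sufficiently privileged process (and, on success, really does change the calling thread's policy, so this is a destructive probe):

    #include <pthread.h>
    #include <sched.h>

    static bool can_use_sched_rr()
    {
        sched_param sp{};
        sp.sched_priority = sched_get_priority_min(SCHED_RR);
        return pthread_setschedparam(pthread_self(), SCHED_RR, &sp) == 0;
    }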
72
73// ################################################################################################
74
46// only called from the master state 75// only called from the master state
47Universe* universe_create(lua_State* L) 76Universe* universe_create(lua_State* L)
48{ 77{
@@ -51,7 +80,7 @@ Universe* universe_create(lua_State* L)
51 U->Universe::Universe(); 80 U->Universe::Universe();
52 STACK_CHECK_START_REL(L, 1); 81 STACK_CHECK_START_REL(L, 1);
53 UNIVERSE_FULL_REGKEY.setValue(L, [](lua_State* L) { lua_pushvalue(L, -2); }); 82 UNIVERSE_FULL_REGKEY.setValue(L, [](lua_State* L) { lua_pushvalue(L, -2); });
54 UNIVERSE_LIGHT_REGKEY.setValue(L, [U](lua_State* L) { lua_pushlightuserdata( L, U); }); 83 UNIVERSE_LIGHT_REGKEY.setValue(L, [U](lua_State* L) { lua_pushlightuserdata(L, U); });
55 STACK_CHECK(L, 1); 84 STACK_CHECK(L, 1);
56 return U; 85 return U;
57} 86}
@@ -62,8 +91,8 @@ void universe_store(lua_State* L, Universe* U)
62{ 91{
63 ASSERT_L(!U || universe_get(L) == nullptr); 92 ASSERT_L(!U || universe_get(L) == nullptr);
64 STACK_CHECK_START_REL(L, 0); 93 STACK_CHECK_START_REL(L, 0);
65 UNIVERSE_LIGHT_REGKEY.setValue(L, [U](lua_State* L) { U ? lua_pushlightuserdata( L, U) : lua_pushnil( L); }); 94 UNIVERSE_LIGHT_REGKEY.setValue(L, [U](lua_State* L) { U ? lua_pushlightuserdata(L, U) : lua_pushnil(L); });
66 STACK_CHECK( L, 0); 95 STACK_CHECK(L, 0);
67} 96}
68 97
69// ################################################################################################ 98// ################################################################################################
@@ -72,6 +101,6 @@ Universe* universe_get(lua_State* L)
72{ 101{
73 STACK_CHECK_START_REL(L, 0); 102 STACK_CHECK_START_REL(L, 0);
74 Universe* const universe{ UNIVERSE_LIGHT_REGKEY.readLightUserDataValue<Universe>(L) }; 103 Universe* const universe{ UNIVERSE_LIGHT_REGKEY.readLightUserDataValue<Universe>(L) };
75 STACK_CHECK( L, 0); 104 STACK_CHECK(L, 0);
76 return universe; 105 return universe;
77} 106}
diff --git a/src/universe.h b/src/universe.h
index 6a65888..113ed21 100644
--- a/src/universe.h
+++ b/src/universe.h
@@ -35,7 +35,7 @@ class AllocatorDefinition
35 lua_Alloc m_allocF{ nullptr }; 35 lua_Alloc m_allocF{ nullptr };
36 void* m_allocUD{ nullptr }; 36 void* m_allocUD{ nullptr };
37 37
38 static void* operator new(size_t size_, lua_State* L) noexcept { return lua_newuserdatauv(L, size_, 0); } 38 [[nodiscard]] static void* operator new(size_t size_, lua_State* L) noexcept { return lua_newuserdatauv(L, size_, 0); }
39 // always embedded somewhere else or "in-place constructed" as a full userdata 39 // always embedded somewhere else or "in-place constructed" as a full userdata
40 // can't actually delete the operator because the compiler generates stack unwinding code that could call it in case of exception 40 // can't actually delete the operator because the compiler generates stack unwinding code that could call it in case of exception
41 static void operator delete([[maybe_unused]] void* p_, lua_State* L) { ASSERT_L(!"should never be called") }; 41 static void operator delete([[maybe_unused]] void* p_, lua_State* L) { ASSERT_L(!"should never be called") };
@@ -81,7 +81,7 @@ class ProtectedAllocator : public AllocatorDefinition
81 81
82 std::mutex m_lock; 82 std::mutex m_lock;
83 83
84 static void* protected_lua_Alloc(void* ud_, void* ptr_, size_t osize_, size_t nsize_) 84 [[nodiscard]] static void* protected_lua_Alloc(void* ud_, void* ptr_, size_t osize_, size_t nsize_)
85 { 85 {
86 ProtectedAllocator* const allocator{ static_cast<ProtectedAllocator*>(ud_) }; 86 ProtectedAllocator* const allocator{ static_cast<ProtectedAllocator*>(ud_) };
87 std::lock_guard<std::mutex> guard{ allocator->m_lock }; 87 std::lock_guard<std::mutex> guard{ allocator->m_lock };
@@ -91,7 +91,7 @@ class ProtectedAllocator : public AllocatorDefinition
91 public: 91 public:
92 92
93 // we are not like our base class: we can't be created inside a full userdata (or we would have to install a metatable and __gc handler to destroy ourselves properly) 93 // we are not like our base class: we can't be created inside a full userdata (or we would have to install a metatable and __gc handler to destroy ourselves properly)
94 static void* operator new(size_t size_, lua_State* L) noexcept = delete; 94 [[nodiscard]] static void* operator new(size_t size_, lua_State* L) noexcept = delete;
95 static void operator delete(void* p_, lua_State* L) = delete; 95 static void operator delete(void* p_, lua_State* L) = delete;
96 96
97 AllocatorDefinition makeDefinition() 97 AllocatorDefinition makeDefinition()
@@ -119,9 +119,17 @@ class ProtectedAllocator : public AllocatorDefinition
119 119
120// everything regarding the Lanes universe is stored in that global structure 120// everything regarding the Lanes universe is stored in that global structure
121// held as a full userdata in the master Lua state that required it for the first time 121// held as a full userdata in the master Lua state that required it for the first time
122// don't forget to initialize all members in LG_configure() 122class Universe
123struct Universe
124{ 123{
124 public:
125
126#ifdef PLATFORM_LINUX
127 // Linux needs to check, whether it's been run as root
128 bool const m_sudo{ geteuid() == 0 };
129#else
130 bool const m_sudo{ false };
131#endif // PLATFORM_LINUX
132
125 // for verbose errors 133 // for verbose errors
126 bool verboseErrors{ false }; 134 bool verboseErrors{ false };
127 135
@@ -155,20 +163,28 @@ struct Universe
155 // require() serialization 163 // require() serialization
156 std::recursive_mutex require_cs; 164 std::recursive_mutex require_cs;
157 165
166 // metatable unique identifiers
158 std::atomic<lua_Integer> next_mt_id{ 1 }; 167 std::atomic<lua_Integer> next_mt_id{ 1 };
159 168
160#if USE_DEBUG_SPEW() 169#if USE_DEBUG_SPEW()
161 int debugspew_indent_depth{ 0 }; 170 std::atomic<int> debugspew_indent_depth{ 0 };
162#endif // USE_DEBUG_SPEW() 171#endif // USE_DEBUG_SPEW()
163 172
164 Lane* volatile selfdestruct_first{ nullptr }; 173 Lane* volatile selfdestruct_first{ nullptr };
165 // After a lane has removed itself from the chain, it still performs some processing. 174 // After a lane has removed itself from the chain, it still performs some processing.
166 // The terminal desinit sequence should wait for all such processing to terminate before force-killing threads 175 // The terminal desinit sequence should wait for all such processing to terminate before force-killing threads
167 int volatile selfdestructing_count{ 0 }; 176 std::atomic<int> selfdestructing_count{ 0 };
177
178 Universe();
179 ~Universe() = default;
180 Universe(Universe const&) = delete;
181 Universe(Universe&&) = delete;
182 Universe& operator=(Universe const&) = delete;
183 Universe& operator=(Universe&&) = delete;
168}; 184};
169 185
170// ################################################################################################ 186// ################################################################################################
171 187
172Universe* universe_get(lua_State* L); 188[[nodiscard]] Universe* universe_get(lua_State* L);
173Universe* universe_create(lua_State* L); 189[[nodiscard]] Universe* universe_create(lua_State* L);
174void universe_store(lua_State* L, Universe* U); 190void universe_store(lua_State* L, Universe* U);
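A hedged sketch of how these accessors compose; only the state that first required Lanes creates the Universe, every other state just reads the light pointer stored alongside it:

    Universe* U = universe_get(L);   // nullptr until a Universe was created/stored for this state
    if (U == nullptr)
    {
        U = universe_create(L);      // master state: also anchors the full userdata in the registry
        lua_pop(L, 1);               // safe to drop the stack copy, the registry keeps it alive
    }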