From 930bfb287de0a38746493463016d0e4cea153ac0 Mon Sep 17 00:00:00 2001 From: Benoit Germain Date: Tue, 19 Mar 2024 15:13:45 +0100 Subject: C++ migration: changed file extensions from .c to .cpp --- src/Makefile | 2 +- src/cancel.c | 302 -------- src/cancel.cpp | 302 ++++++++ src/compat.c | 96 --- src/compat.cpp | 96 +++ src/deep.c | 501 ------------- src/deep.cpp | 501 +++++++++++++ src/keeper.c | 825 --------------------- src/keeper.cpp | 825 +++++++++++++++++++++ src/lanes.c | 2142 ----------------------------------------------------- src/lanes.cpp | 2142 +++++++++++++++++++++++++++++++++++++++++++++++++++++ src/linda.c | 945 ----------------------- src/linda.cpp | 945 +++++++++++++++++++++++ src/state.c | 442 ----------- src/state.cpp | 442 +++++++++++ src/threading.c | 1041 -------------------------- src/threading.cpp | 1041 ++++++++++++++++++++++++++ src/tools.c | 2080 --------------------------------------------------- src/tools.cpp | 2080 +++++++++++++++++++++++++++++++++++++++++++++++++++ src/universe.c | 75 -- src/universe.cpp | 75 ++ 21 files changed, 8450 insertions(+), 8450 deletions(-) delete mode 100644 src/cancel.c create mode 100644 src/cancel.cpp delete mode 100644 src/compat.c create mode 100644 src/compat.cpp delete mode 100644 src/deep.c create mode 100644 src/deep.cpp delete mode 100644 src/keeper.c create mode 100644 src/keeper.cpp delete mode 100644 src/lanes.c create mode 100644 src/lanes.cpp delete mode 100644 src/linda.c create mode 100644 src/linda.cpp delete mode 100644 src/state.c create mode 100644 src/state.cpp delete mode 100644 src/threading.c create mode 100644 src/threading.cpp delete mode 100644 src/tools.c create mode 100644 src/tools.cpp delete mode 100644 src/universe.c create mode 100644 src/universe.cpp (limited to 'src') diff --git a/src/Makefile b/src/Makefile index c4d4c30..cef4174 100644 --- a/src/Makefile +++ b/src/Makefile @@ -119,7 +119,7 @@ MODULE_DIR=$(MODULE) #--- all: $(MODULE)/core.$(_SO) -%.o: %.c *.h Makefile +%.o: %.cpp *.h Makefile # Note: Don't put $(LUA_LIBS) ahead of $^; MSYS will not like that (I think) # diff --git a/src/cancel.c b/src/cancel.c deleted file mode 100644 index 0a5adb6..0000000 --- a/src/cancel.c +++ /dev/null @@ -1,302 +0,0 @@ -/* --- --- CANCEL.C --- --- Lane cancellation support --- --- Author: Benoit Germain --- ---[[ -=============================================================================== - -Copyright (C) 2011-2019 Benoit Germain - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
- -=============================================================================== -]]-- -*/ - -#include -#include - -#include "threading.h" -#include "cancel.h" -#include "tools.h" -#include "lanes_private.h" - -// ################################################################################################ -// ################################################################################################ - -/* -* Check if the thread in question ('L') has been signalled for cancel. -* -* Called by cancellation hooks and/or pending Linda operations (because then -* the check won't affect performance). -* -* Returns TRUE if any locks are to be exited, and 'cancel_error()' called, -* to make execution of the lane end. -*/ -static inline enum e_cancel_request cancel_test( lua_State* L) -{ - Lane* const s = get_lane_from_registry( L); - // 's' is NULL for the original main state (and no-one can cancel that) - return s ? s->cancel_request : CANCEL_NONE; -} - -// ################################################################################################ - -//--- -// bool = cancel_test() -// -// Available inside the global namespace of lanes -// returns a boolean saying if a cancel request is pending -// -LUAG_FUNC( cancel_test) -{ - enum e_cancel_request test = cancel_test( L); - lua_pushboolean( L, test != CANCEL_NONE); - return 1; -} - -// ################################################################################################ -// ################################################################################################ - -static void cancel_hook( lua_State* L, lua_Debug* ar) -{ - (void)ar; - DEBUGSPEW_CODE( fprintf( stderr, "cancel_hook\n")); - if( cancel_test( L) != CANCEL_NONE) - { - lua_sethook( L, NULL, 0, 0); - cancel_error( L); - } -} - -// ################################################################################################ -// ################################################################################################ - -//--- -// = thread_cancel( lane_ud [,timeout_secs=0.0] [,force_kill_bool=false] ) -// -// The originator thread asking us specifically to cancel the other thread. -// -// 'timeout': <0: wait forever, until the lane is finished -// 0.0: just signal it to cancel, no time waited -// >0: time to wait for the lane to detect cancellation -// -// 'force_kill': if true, and lane does not detect cancellation within timeout, -// it is forcefully killed. Using this with 0.0 timeout means just kill -// (unless the lane is already finished). -// -// Returns: true if the lane was already finished (DONE/ERROR_ST/CANCELLED) or if we -// managed to cancel it. -// false if the cancellation timed out, or a kill was needed. -// - -// ################################################################################################ - -static cancel_result thread_cancel_soft( Lane* s, double secs_, bool_t wake_lindas_) -{ - s->cancel_request = CANCEL_SOFT; // it's now signaled to stop - // negative timeout: we don't want to truly abort the lane, we just want it to react to cancel_test() on its own - if( wake_lindas_) // wake the thread so that execution returns from any pending linda operation if desired - { - SIGNAL_T *waiting_on = s->waiting_on; - if( s->status == WAITING && waiting_on != NULL) - { - SIGNAL_ALL( waiting_on); - } - } - - return THREAD_WAIT( &s->thread, secs_, &s->done_signal, &s->done_lock, &s->status) ? 
CR_Cancelled : CR_Timeout; -} - -// ################################################################################################ - -static cancel_result thread_cancel_hard( lua_State* L, Lane* s, double secs_, bool_t force_, double waitkill_timeout_) -{ - cancel_result result; - - s->cancel_request = CANCEL_HARD; // it's now signaled to stop - { - SIGNAL_T *waiting_on = s->waiting_on; - if( s->status == WAITING && waiting_on != NULL) - { - SIGNAL_ALL( waiting_on); - } - } - - result = THREAD_WAIT( &s->thread, secs_, &s->done_signal, &s->done_lock, &s->status) ? CR_Cancelled : CR_Timeout; - - if( (result == CR_Timeout) && force_) - { - // Killing is asynchronous; we _will_ wait for it to be done at - // GC, to make sure the data structure can be released (alternative - // would be use of "cancellation cleanup handlers" that at least - // PThread seems to have). - // - THREAD_KILL( &s->thread); -#if THREADAPI == THREADAPI_PTHREAD - // pthread: make sure the thread is really stopped! - // note that this may block forever if the lane doesn't call a cancellation point and pthread doesn't honor PTHREAD_CANCEL_ASYNCHRONOUS - result = THREAD_WAIT( &s->thread, waitkill_timeout_, &s->done_signal, &s->done_lock, &s->status); - if( result == CR_Timeout) - { - return luaL_error( L, "force-killed lane failed to terminate within %f second%s", waitkill_timeout_, waitkill_timeout_ > 1 ? "s" : ""); - } -#else - (void) waitkill_timeout_; // unused - (void) L; // unused -#endif // THREADAPI == THREADAPI_PTHREAD - s->mstatus = KILLED; // mark 'gc' to wait for it - // note that s->status value must remain to whatever it was at the time of the kill - // because we need to know if we can lua_close() the Lua State or not. - result = CR_Killed; - } - return result; -} - -// ################################################################################################ - -cancel_result thread_cancel( lua_State* L, Lane* s, CancelOp op_, double secs_, bool_t force_, double waitkill_timeout_) -{ - // remember that lanes are not transferable: only one thread can cancel a lane, so no multithreading issue here - // We can read 's->status' without locks, but not wait for it (if Posix no PTHREAD_TIMEDJOIN) - if( s->mstatus == KILLED) - { - return CR_Killed; - } - - if( s->status >= DONE) - { - // say "ok" by default, including when lane is already done - return CR_Cancelled; - } - - // signal the linda the wake up the thread so that it can react to the cancel query - // let us hope we never land here with a pointer on a linda that has been destroyed... 
- if( op_ == CO_Soft) - { - return thread_cancel_soft( s, secs_, force_); - } - - return thread_cancel_hard( L, s, secs_, force_, waitkill_timeout_); -} - -// ################################################################################################ -// ################################################################################################ - -// > 0: the mask -// = 0: soft -// < 0: hard -static CancelOp which_op( lua_State* L, int idx_) -{ - if( lua_type( L, idx_) == LUA_TSTRING) - { - CancelOp op = CO_Invalid; - char const* str = lua_tostring( L, idx_); - if( strcmp( str, "soft") == 0) - { - op = CO_Soft; - } - else if( strcmp( str, "count") == 0) - { - op = CO_Count; - } - else if( strcmp( str, "line") == 0) - { - op = CO_Line; - } - else if( strcmp( str, "call") == 0) - { - op = CO_Call; - } - else if( strcmp( str, "ret") == 0) - { - op = CO_Ret; - } - else if( strcmp( str, "hard") == 0) - { - op = CO_Hard; - } - lua_remove( L, idx_); // argument is processed, remove it - if( op == CO_Invalid) - { - luaL_error( L, "invalid hook option %s", str); - } - return op; - } - return CO_Hard; -} -// ################################################################################################ - -// bool[,reason] = lane_h:cancel( [mode, hookcount] [, timeout] [, force [, forcekill_timeout]]) -LUAG_FUNC( thread_cancel) -{ - Lane* s = lua_toLane( L, 1); - double secs = 0.0; - CancelOp op = which_op( L, 2); // this removes the op string from the stack - - if( op > 0) // hook is requested - { - int hook_count = (int) lua_tointeger( L, 2); - lua_remove( L, 2); // argument is processed, remove it - if( hook_count < 1) - { - return luaL_error( L, "hook count cannot be < 1"); - } - lua_sethook( s->L, cancel_hook, op, hook_count); - } - - if( lua_type( L, 2) == LUA_TNUMBER) - { - secs = lua_tonumber( L, 2); - lua_remove( L, 2); // argument is processed, remove it - if( secs < 0.0) - { - return luaL_error( L, "cancel timeout cannot be < 0"); - } - } - - { - bool_t force = lua_toboolean( L, 2); // FALSE if nothing there - double forcekill_timeout = luaL_optnumber( L, 3, 0.0); - - switch( thread_cancel( L, s, op, secs, force, forcekill_timeout)) - { - case CR_Timeout: - lua_pushboolean( L, 0); - lua_pushstring( L, "timeout"); - return 2; - - case CR_Cancelled: - lua_pushboolean( L, 1); - push_thread_status( L, s); - return 2; - - case CR_Killed: - lua_pushboolean( L, 1); - push_thread_status( L, s); - return 2; - } - } - // should never happen, only here to prevent the compiler from complaining of "not all control paths returning a value" - return 0; -} diff --git a/src/cancel.cpp b/src/cancel.cpp new file mode 100644 index 0000000..0a5adb6 --- /dev/null +++ b/src/cancel.cpp @@ -0,0 +1,302 @@ +/* +-- +-- CANCEL.C +-- +-- Lane cancellation support +-- +-- Author: Benoit Germain +-- +--[[ +=============================================================================== + +Copyright (C) 2011-2019 Benoit Germain + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +=============================================================================== +]]-- +*/ + +#include +#include + +#include "threading.h" +#include "cancel.h" +#include "tools.h" +#include "lanes_private.h" + +// ################################################################################################ +// ################################################################################################ + +/* +* Check if the thread in question ('L') has been signalled for cancel. +* +* Called by cancellation hooks and/or pending Linda operations (because then +* the check won't affect performance). +* +* Returns TRUE if any locks are to be exited, and 'cancel_error()' called, +* to make execution of the lane end. +*/ +static inline enum e_cancel_request cancel_test( lua_State* L) +{ + Lane* const s = get_lane_from_registry( L); + // 's' is NULL for the original main state (and no-one can cancel that) + return s ? s->cancel_request : CANCEL_NONE; +} + +// ################################################################################################ + +//--- +// bool = cancel_test() +// +// Available inside the global namespace of lanes +// returns a boolean saying if a cancel request is pending +// +LUAG_FUNC( cancel_test) +{ + enum e_cancel_request test = cancel_test( L); + lua_pushboolean( L, test != CANCEL_NONE); + return 1; +} + +// ################################################################################################ +// ################################################################################################ + +static void cancel_hook( lua_State* L, lua_Debug* ar) +{ + (void)ar; + DEBUGSPEW_CODE( fprintf( stderr, "cancel_hook\n")); + if( cancel_test( L) != CANCEL_NONE) + { + lua_sethook( L, NULL, 0, 0); + cancel_error( L); + } +} + +// ################################################################################################ +// ################################################################################################ + +//--- +// = thread_cancel( lane_ud [,timeout_secs=0.0] [,force_kill_bool=false] ) +// +// The originator thread asking us specifically to cancel the other thread. +// +// 'timeout': <0: wait forever, until the lane is finished +// 0.0: just signal it to cancel, no time waited +// >0: time to wait for the lane to detect cancellation +// +// 'force_kill': if true, and lane does not detect cancellation within timeout, +// it is forcefully killed. Using this with 0.0 timeout means just kill +// (unless the lane is already finished). +// +// Returns: true if the lane was already finished (DONE/ERROR_ST/CANCELLED) or if we +// managed to cancel it. +// false if the cancellation timed out, or a kill was needed. 
+// + +// ################################################################################################ + +static cancel_result thread_cancel_soft( Lane* s, double secs_, bool_t wake_lindas_) +{ + s->cancel_request = CANCEL_SOFT; // it's now signaled to stop + // negative timeout: we don't want to truly abort the lane, we just want it to react to cancel_test() on its own + if( wake_lindas_) // wake the thread so that execution returns from any pending linda operation if desired + { + SIGNAL_T *waiting_on = s->waiting_on; + if( s->status == WAITING && waiting_on != NULL) + { + SIGNAL_ALL( waiting_on); + } + } + + return THREAD_WAIT( &s->thread, secs_, &s->done_signal, &s->done_lock, &s->status) ? CR_Cancelled : CR_Timeout; +} + +// ################################################################################################ + +static cancel_result thread_cancel_hard( lua_State* L, Lane* s, double secs_, bool_t force_, double waitkill_timeout_) +{ + cancel_result result; + + s->cancel_request = CANCEL_HARD; // it's now signaled to stop + { + SIGNAL_T *waiting_on = s->waiting_on; + if( s->status == WAITING && waiting_on != NULL) + { + SIGNAL_ALL( waiting_on); + } + } + + result = THREAD_WAIT( &s->thread, secs_, &s->done_signal, &s->done_lock, &s->status) ? CR_Cancelled : CR_Timeout; + + if( (result == CR_Timeout) && force_) + { + // Killing is asynchronous; we _will_ wait for it to be done at + // GC, to make sure the data structure can be released (alternative + // would be use of "cancellation cleanup handlers" that at least + // PThread seems to have). + // + THREAD_KILL( &s->thread); +#if THREADAPI == THREADAPI_PTHREAD + // pthread: make sure the thread is really stopped! + // note that this may block forever if the lane doesn't call a cancellation point and pthread doesn't honor PTHREAD_CANCEL_ASYNCHRONOUS + result = THREAD_WAIT( &s->thread, waitkill_timeout_, &s->done_signal, &s->done_lock, &s->status); + if( result == CR_Timeout) + { + return luaL_error( L, "force-killed lane failed to terminate within %f second%s", waitkill_timeout_, waitkill_timeout_ > 1 ? "s" : ""); + } +#else + (void) waitkill_timeout_; // unused + (void) L; // unused +#endif // THREADAPI == THREADAPI_PTHREAD + s->mstatus = KILLED; // mark 'gc' to wait for it + // note that s->status value must remain to whatever it was at the time of the kill + // because we need to know if we can lua_close() the Lua State or not. + result = CR_Killed; + } + return result; +} + +// ################################################################################################ + +cancel_result thread_cancel( lua_State* L, Lane* s, CancelOp op_, double secs_, bool_t force_, double waitkill_timeout_) +{ + // remember that lanes are not transferable: only one thread can cancel a lane, so no multithreading issue here + // We can read 's->status' without locks, but not wait for it (if Posix no PTHREAD_TIMEDJOIN) + if( s->mstatus == KILLED) + { + return CR_Killed; + } + + if( s->status >= DONE) + { + // say "ok" by default, including when lane is already done + return CR_Cancelled; + } + + // signal the linda the wake up the thread so that it can react to the cancel query + // let us hope we never land here with a pointer on a linda that has been destroyed... 
+ if( op_ == CO_Soft) + { + return thread_cancel_soft( s, secs_, force_); + } + + return thread_cancel_hard( L, s, secs_, force_, waitkill_timeout_); +} + +// ################################################################################################ +// ################################################################################################ + +// > 0: the mask +// = 0: soft +// < 0: hard +static CancelOp which_op( lua_State* L, int idx_) +{ + if( lua_type( L, idx_) == LUA_TSTRING) + { + CancelOp op = CO_Invalid; + char const* str = lua_tostring( L, idx_); + if( strcmp( str, "soft") == 0) + { + op = CO_Soft; + } + else if( strcmp( str, "count") == 0) + { + op = CO_Count; + } + else if( strcmp( str, "line") == 0) + { + op = CO_Line; + } + else if( strcmp( str, "call") == 0) + { + op = CO_Call; + } + else if( strcmp( str, "ret") == 0) + { + op = CO_Ret; + } + else if( strcmp( str, "hard") == 0) + { + op = CO_Hard; + } + lua_remove( L, idx_); // argument is processed, remove it + if( op == CO_Invalid) + { + luaL_error( L, "invalid hook option %s", str); + } + return op; + } + return CO_Hard; +} +// ################################################################################################ + +// bool[,reason] = lane_h:cancel( [mode, hookcount] [, timeout] [, force [, forcekill_timeout]]) +LUAG_FUNC( thread_cancel) +{ + Lane* s = lua_toLane( L, 1); + double secs = 0.0; + CancelOp op = which_op( L, 2); // this removes the op string from the stack + + if( op > 0) // hook is requested + { + int hook_count = (int) lua_tointeger( L, 2); + lua_remove( L, 2); // argument is processed, remove it + if( hook_count < 1) + { + return luaL_error( L, "hook count cannot be < 1"); + } + lua_sethook( s->L, cancel_hook, op, hook_count); + } + + if( lua_type( L, 2) == LUA_TNUMBER) + { + secs = lua_tonumber( L, 2); + lua_remove( L, 2); // argument is processed, remove it + if( secs < 0.0) + { + return luaL_error( L, "cancel timeout cannot be < 0"); + } + } + + { + bool_t force = lua_toboolean( L, 2); // FALSE if nothing there + double forcekill_timeout = luaL_optnumber( L, 3, 0.0); + + switch( thread_cancel( L, s, op, secs, force, forcekill_timeout)) + { + case CR_Timeout: + lua_pushboolean( L, 0); + lua_pushstring( L, "timeout"); + return 2; + + case CR_Cancelled: + lua_pushboolean( L, 1); + push_thread_status( L, s); + return 2; + + case CR_Killed: + lua_pushboolean( L, 1); + push_thread_status( L, s); + return 2; + } + } + // should never happen, only here to prevent the compiler from complaining of "not all control paths returning a value" + return 0; +} diff --git a/src/compat.c b/src/compat.c deleted file mode 100644 index 19159a9..0000000 --- a/src/compat.c +++ /dev/null @@ -1,96 +0,0 @@ -/* - * ############################################################################################### - * ######################################### Lua 5.1/5.2 ######################################### - * ############################################################################################### - */ -#include "compat.h" -#include "macros_and_utils.h" - -/* -** Copied from Lua 5.2 loadlib.c -*/ -#if LUA_VERSION_NUM == 501 -static int luaL_getsubtable (lua_State *L, int idx, const char *fname) -{ - lua_getfield(L, idx, fname); - if (lua_istable(L, -1)) - return 1; /* table already there */ - else - { - lua_pop(L, 1); /* remove previous result */ - idx = lua_absindex(L, idx); - lua_newtable(L); - lua_pushvalue(L, -1); /* copy to be left at top */ - lua_setfield(L, idx, fname); /* assign new table to 
field */ - return 0; /* false, because did not find table there */ - } -} - -void luaL_requiref (lua_State *L, const char *modname, lua_CFunction openf, int glb) -{ - lua_pushcfunction(L, openf); - lua_pushstring(L, modname); /* argument to open function */ - lua_call(L, 1, 1); /* open module */ - luaL_getsubtable(L, LUA_REGISTRYINDEX, LUA_LOADED_TABLE); - lua_pushvalue(L, -2); /* make copy of module (call result) */ - lua_setfield(L, -2, modname); /* _LOADED[modname] = module */ - lua_pop(L, 1); /* remove _LOADED table */ - if (glb) - { - lua_pushvalue(L, -1); /* copy of 'mod' */ - lua_setglobal(L, modname); /* _G[modname] = module */ - } -} -#endif // LUA_VERSION_NUM - -#if LUA_VERSION_NUM < 504 - -void* lua_newuserdatauv( lua_State* L, size_t sz, int nuvalue) -{ - ASSERT_L( nuvalue <= 1); - return lua_newuserdata( L, sz); -} - -int lua_getiuservalue( lua_State* L, int idx, int n) -{ - if( n > 1) - { - lua_pushnil( L); - return LUA_TNONE; - } - lua_getuservalue( L, idx); - -#if LUA_VERSION_NUM == 501 - /* default environment is not a nil (see lua_getfenv) */ - lua_getglobal(L, "package"); - if (lua_rawequal(L, -2, -1) || lua_rawequal(L, -2, LUA_GLOBALSINDEX)) - { - lua_pop(L, 2); - lua_pushnil( L); - - return LUA_TNONE; - } - lua_pop(L, 1); /* remove package */ -#endif - - return lua_type( L, -1); -} - -int lua_setiuservalue( lua_State* L, int idx, int n) -{ - if( n > 1 -#if LUA_VERSION_NUM == 501 - || lua_type( L, -1) != LUA_TTABLE -#endif - ) - { - lua_pop( L, 1); - return 0; - } - - (void) lua_setuservalue( L, idx); - return 1; // I guess anything non-0 is ok -} - -#endif // LUA_VERSION_NUM - diff --git a/src/compat.cpp b/src/compat.cpp new file mode 100644 index 0000000..19159a9 --- /dev/null +++ b/src/compat.cpp @@ -0,0 +1,96 @@ +/* + * ############################################################################################### + * ######################################### Lua 5.1/5.2 ######################################### + * ############################################################################################### + */ +#include "compat.h" +#include "macros_and_utils.h" + +/* +** Copied from Lua 5.2 loadlib.c +*/ +#if LUA_VERSION_NUM == 501 +static int luaL_getsubtable (lua_State *L, int idx, const char *fname) +{ + lua_getfield(L, idx, fname); + if (lua_istable(L, -1)) + return 1; /* table already there */ + else + { + lua_pop(L, 1); /* remove previous result */ + idx = lua_absindex(L, idx); + lua_newtable(L); + lua_pushvalue(L, -1); /* copy to be left at top */ + lua_setfield(L, idx, fname); /* assign new table to field */ + return 0; /* false, because did not find table there */ + } +} + +void luaL_requiref (lua_State *L, const char *modname, lua_CFunction openf, int glb) +{ + lua_pushcfunction(L, openf); + lua_pushstring(L, modname); /* argument to open function */ + lua_call(L, 1, 1); /* open module */ + luaL_getsubtable(L, LUA_REGISTRYINDEX, LUA_LOADED_TABLE); + lua_pushvalue(L, -2); /* make copy of module (call result) */ + lua_setfield(L, -2, modname); /* _LOADED[modname] = module */ + lua_pop(L, 1); /* remove _LOADED table */ + if (glb) + { + lua_pushvalue(L, -1); /* copy of 'mod' */ + lua_setglobal(L, modname); /* _G[modname] = module */ + } +} +#endif // LUA_VERSION_NUM + +#if LUA_VERSION_NUM < 504 + +void* lua_newuserdatauv( lua_State* L, size_t sz, int nuvalue) +{ + ASSERT_L( nuvalue <= 1); + return lua_newuserdata( L, sz); +} + +int lua_getiuservalue( lua_State* L, int idx, int n) +{ + if( n > 1) + { + lua_pushnil( L); + return LUA_TNONE; + } + 
lua_getuservalue( L, idx); + +#if LUA_VERSION_NUM == 501 + /* default environment is not a nil (see lua_getfenv) */ + lua_getglobal(L, "package"); + if (lua_rawequal(L, -2, -1) || lua_rawequal(L, -2, LUA_GLOBALSINDEX)) + { + lua_pop(L, 2); + lua_pushnil( L); + + return LUA_TNONE; + } + lua_pop(L, 1); /* remove package */ +#endif + + return lua_type( L, -1); +} + +int lua_setiuservalue( lua_State* L, int idx, int n) +{ + if( n > 1 +#if LUA_VERSION_NUM == 501 + || lua_type( L, -1) != LUA_TTABLE +#endif + ) + { + lua_pop( L, 1); + return 0; + } + + (void) lua_setuservalue( L, idx); + return 1; // I guess anything non-0 is ok +} + +#endif // LUA_VERSION_NUM + diff --git a/src/deep.c b/src/deep.c deleted file mode 100644 index 58da457..0000000 --- a/src/deep.c +++ /dev/null @@ -1,501 +0,0 @@ -/* - * DEEP.C Copyright (c) 2017, Benoit Germain - * - * Deep userdata support, separate in its own source file to help integration - * without enforcing a Lanes dependency - */ - -/* -=============================================================================== - -Copyright (C) 2002-10 Asko Kauppi - 2011-17 Benoit Germain - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - -=============================================================================== -*/ - -#include -#include -#include -#include -#include -#if !defined(__APPLE__) -#include -#endif - -#include "compat.h" -#include "deep.h" -#include "tools.h" -#include "universe.h" -#include "uniquekey.h" - -/*-- Metatable copying --*/ - -/*---=== Deep userdata ===---*/ - -/* -* 'registry[REGKEY]' is a two-way lookup table for 'idfunc's and those type's -* metatables: -* -* metatable -> idfunc -* idfunc -> metatable -*/ -// crc64/we of string "DEEP_LOOKUP_KEY" generated at http://www.nitrxgen.net/hashgen/ -static DECLARE_CONST_UNIQUE_KEY( DEEP_LOOKUP_KEY, 0x9fb9b4f3f633d83d); - -/* - * The deep proxy cache is a weak valued table listing all deep UD proxies indexed by the deep UD that they are proxying - * crc64/we of string "DEEP_PROXY_CACHE_KEY" generated at http://www.nitrxgen.net/hashgen/ -*/ -static DECLARE_CONST_UNIQUE_KEY( DEEP_PROXY_CACHE_KEY, 0x05773d6fc26be106); - -/* -* Sets up [-1]<->[-2] two-way lookups, and ensures the lookup table exists. -* Pops the both values off the stack. 
-*/ -static void set_deep_lookup( lua_State* L) -{ - STACK_GROW( L, 3); - STACK_CHECK( L, 2); // a b - push_registry_subtable( L, DEEP_LOOKUP_KEY); // a b {} - STACK_MID( L, 3); - lua_insert( L, -3); // {} a b - lua_pushvalue( L, -1); // {} a b b - lua_pushvalue( L,-3); // {} a b b a - lua_rawset( L, -5); // {} a b - lua_rawset( L, -3); // {} - lua_pop( L, 1); // - STACK_END( L, 0); -} - -/* -* Pops the key (metatable or idfunc) off the stack, and replaces with the -* deep lookup value (idfunc/metatable/nil). -*/ -static void get_deep_lookup( lua_State* L) -{ - STACK_GROW( L, 1); - STACK_CHECK( L, 1); // a - REGISTRY_GET( L, DEEP_LOOKUP_KEY); // a {} - if( !lua_isnil( L, -1)) - { - lua_insert( L, -2); // {} a - lua_rawget( L, -2); // {} b - } - lua_remove( L, -2); // a|b - STACK_END( L, 1); -} - -/* -* Return the registered ID function for 'index' (deep userdata proxy), -* or NULL if 'index' is not a deep userdata proxy. -*/ -static inline luaG_IdFunction get_idfunc( lua_State* L, int index, LookupMode mode_) -{ - // when looking inside a keeper, we are 100% sure the object is a deep userdata - if( mode_ == eLM_FromKeeper) - { - DeepPrelude** proxy = (DeepPrelude**) lua_touserdata( L, index); - // we can (and must) cast and fetch the internally stored idfunc - return (*proxy)->idfunc; - } - else - { - // essentially we are making sure that the metatable of the object we want to copy is stored in our metatable/idfunc database - // it is the only way to ensure that the userdata is indeed a deep userdata! - // of course, we could just trust the caller, but we won't - luaG_IdFunction ret; - STACK_GROW( L, 1); - STACK_CHECK( L, 0); - - if( !lua_getmetatable( L, index)) // deep ... metatable? - { - return NULL; // no metatable: can't be a deep userdata object! - } - - // replace metatable with the idfunc pointer, if it is actually a deep userdata - get_deep_lookup( L); // deep ... idfunc|nil - - ret = (luaG_IdFunction) lua_touserdata( L, -1); // NULL if not a userdata - lua_pop( L, 1); - STACK_END( L, 0); - return ret; - } -} - - -void free_deep_prelude( lua_State* L, DeepPrelude* prelude_) -{ - // Call 'idfunc( "delete", deep_ptr )' to make deep cleanup - lua_pushlightuserdata( L, prelude_); - ASSERT_L( prelude_->idfunc); - prelude_->idfunc( L, eDO_delete); -} - - -/* - * void= mt.__gc( proxy_ud ) - * - * End of life for a proxy object; reduce the deep reference count and clean it up if reaches 0. - * - */ -static int deep_userdata_gc( lua_State* L) -{ - DeepPrelude** proxy = (DeepPrelude**) lua_touserdata( L, 1); - DeepPrelude* p = *proxy; - Universe* U = universe_get( L); - int v; - - // can work without a universe if creating a deep userdata from some external C module when Lanes isn't loaded - // in that case, we are not multithreaded and locking isn't necessary anyway - if( U) MUTEX_LOCK( &U->deep_lock); - v = -- (p->refcount); - if (U) MUTEX_UNLOCK( &U->deep_lock); - - if( v == 0) - { - // retrieve wrapped __gc - lua_pushvalue( L, lua_upvalueindex( 1)); // self __gc? - if( !lua_isnil( L, -1)) - { - lua_insert( L, -2); // __gc self - lua_call( L, 1, 0); // - } - // 'idfunc' expects a clean stack to work on - lua_settop( L, 0); - free_deep_prelude( L, p); - - // top was set to 0, then userdata was pushed. "delete" might want to pop the userdata (we don't care), but should not push anything! 
- if ( lua_gettop( L) > 1) - { - luaL_error( L, "Bad idfunc(eDO_delete): should not push anything"); - } - } - *proxy = NULL; // make sure we don't use it any more, just in case - return 0; -} - - -/* - * Push a proxy userdata on the stack. - * returns NULL if ok, else some error string related to bad idfunc behavior or module require problem - * (error cannot happen with mode_ == eLM_ToKeeper) - * - * Initializes necessary structures if it's the first time 'idfunc' is being - * used in this Lua state (metatable, registring it). Otherwise, increments the - * reference count. - */ -char const* push_deep_proxy( Universe* U, lua_State* L, DeepPrelude* prelude, int nuv_, LookupMode mode_) -{ - DeepPrelude** proxy; - - // Check if a proxy already exists - push_registry_subtable_mode( L, DEEP_PROXY_CACHE_KEY, "v"); // DPC - lua_pushlightuserdata( L, prelude); // DPC deep - lua_rawget( L, -2); // DPC proxy - if ( !lua_isnil( L, -1)) - { - lua_remove( L, -2); // proxy - return NULL; - } - else - { - lua_pop( L, 1); // DPC - } - - // can work without a universe if creating a deep userdata from some external C module when Lanes isn't loaded - // in that case, we are not multithreaded and locking isn't necessary anyway - if( U) MUTEX_LOCK( &U->deep_lock); - ++ (prelude->refcount); // one more proxy pointing to this deep data - if( U) MUTEX_UNLOCK( &U->deep_lock); - - STACK_GROW( L, 7); - STACK_CHECK( L, 0); - - // a new full userdata, fitted with the specified number of uservalue slots (always 1 for Lua < 5.4) - proxy = (DeepPrelude**) lua_newuserdatauv( L, sizeof(DeepPrelude*), nuv_); // DPC proxy - ASSERT_L( proxy); - *proxy = prelude; - - // Get/create metatable for 'idfunc' (in this state) - lua_pushlightuserdata( L, (void*)(ptrdiff_t)(prelude->idfunc)); // DPC proxy idfunc - get_deep_lookup( L); // DPC proxy metatable? - - if( lua_isnil( L, -1)) // // No metatable yet. 
- { - char const* modname; - int oldtop = lua_gettop( L); // DPC proxy nil - lua_pop( L, 1); // DPC proxy - // 1 - make one and register it - if( mode_ != eLM_ToKeeper) - { - (void) prelude->idfunc( L, eDO_metatable); // DPC proxy metatable - if( lua_gettop( L) - oldtop != 0 || !lua_istable( L, -1)) - { - lua_settop( L, oldtop); // DPC proxy X - lua_pop( L, 3); // - return "Bad idfunc(eOP_metatable): unexpected pushed value"; - } - // if the metatable contains a __gc, we will call it from our own - lua_getfield( L, -1, "__gc"); // DPC proxy metatable __gc - } - else - { - // keepers need a minimal metatable that only contains our own __gc - lua_newtable( L); // DPC proxy metatable - lua_pushnil( L); // DPC proxy metatable nil - } - if( lua_isnil( L, -1)) - { - // Add our own '__gc' method - lua_pop( L, 1); // DPC proxy metatable - lua_pushcfunction( L, deep_userdata_gc); // DPC proxy metatable deep_userdata_gc - } - else - { - // Add our own '__gc' method wrapping the original - lua_pushcclosure( L, deep_userdata_gc, 1); // DPC proxy metatable deep_userdata_gc - } - lua_setfield( L, -2, "__gc"); // DPC proxy metatable - - // Memorize for later rounds - lua_pushvalue( L, -1); // DPC proxy metatable metatable - lua_pushlightuserdata( L, (void*)(ptrdiff_t)(prelude->idfunc)); // DPC proxy metatable metatable idfunc - set_deep_lookup( L); // DPC proxy metatable - - // 2 - cause the target state to require the module that exported the idfunc - // this is needed because we must make sure the shared library is still loaded as long as we hold a pointer on the idfunc - { - int oldtop_module = lua_gettop( L); - modname = (char const*) prelude->idfunc( L, eDO_module); // DPC proxy metatable - // make sure the function pushed nothing on the stack! - if( lua_gettop( L) - oldtop_module != 0) - { - lua_pop( L, 3); // - return "Bad idfunc(eOP_module): should not push anything"; - } - } - if( NULL != modname) // we actually got a module name - { - // L.registry._LOADED exists without having registered the 'package' library. - lua_getglobal( L, "require"); // DPC proxy metatable require() - // check that the module is already loaded (or being loaded, we are happy either way) - if( lua_isfunction( L, -1)) - { - lua_pushstring( L, modname); // DPC proxy metatable require() "module" - lua_getfield( L, LUA_REGISTRYINDEX, LUA_LOADED_TABLE); // DPC proxy metatable require() "module" _R._LOADED - if( lua_istable( L, -1)) - { - bool_t alreadyloaded; - lua_pushvalue( L, -2); // DPC proxy metatable require() "module" _R._LOADED "module" - lua_rawget( L, -2); // DPC proxy metatable require() "module" _R._LOADED module - alreadyloaded = lua_toboolean( L, -1); - if( !alreadyloaded) // not loaded - { - int require_result; - lua_pop( L, 2); // DPC proxy metatable require() "module" - // require "modname" - require_result = lua_pcall( L, 1, 0, 0); // DPC proxy metatable error? - if( require_result != LUA_OK) - { - // failed, return the error message - lua_pushfstring( L, "error while requiring '%s' identified by idfunc(eOP_module): ", modname); - lua_insert( L, -2); // DPC proxy metatable prefix error - lua_concat( L, 2); // DPC proxy metatable error - return lua_tostring( L, -1); - } - } - else // already loaded, we are happy - { - lua_pop( L, 4); // DPC proxy metatable - } - } - else // no L.registry._LOADED; can this ever happen? 
- { - lua_pop( L, 6); // - return "unexpected error while requiring a module identified by idfunc(eOP_module)"; - } - } - else // a module name, but no require() function :-( - { - lua_pop( L, 4); // - return "lanes receiving deep userdata should register the 'package' library"; - } - } - } - STACK_MID( L, 2); // DPC proxy metatable - ASSERT_L( lua_isuserdata( L, -2)); - ASSERT_L( lua_istable( L, -1)); - lua_setmetatable( L, -2); // DPC proxy - - // If we're here, we obviously had to create a new proxy, so cache it. - lua_pushlightuserdata( L, prelude); // DPC proxy deep - lua_pushvalue( L, -2); // DPC proxy deep proxy - lua_rawset( L, -4); // DPC proxy - lua_remove( L, -2); // proxy - ASSERT_L( lua_isuserdata( L, -1)); - STACK_END( L, 0); - return NULL; -} - -/* -* Create a deep userdata -* -* proxy_ud= deep_userdata( idfunc [, ...] ) -* -* Creates a deep userdata entry of the type defined by 'idfunc'. -* Parameters found on the stack are left as is passed on to the 'idfunc' "new" invocation. -* -* 'idfunc' must fulfill the following features: -* -* lightuserdata = idfunc( eDO_new [, ...] ) -- creates a new deep data instance -* void = idfunc( eDO_delete, lightuserdata ) -- releases a deep data instance -* tbl = idfunc( eDO_metatable ) -- gives metatable for userdata proxies -* -* Reference counting and true userdata proxying are taken care of for the -* actual data type. -* -* Types using the deep userdata system (and only those!) can be passed between -* separate Lua states via 'luaG_inter_move()'. -* -* Returns: 'proxy' userdata for accessing the deep data via 'luaG_todeep()' -*/ -int luaG_newdeepuserdata( lua_State* L, luaG_IdFunction idfunc, int nuv_) -{ - char const* errmsg; - - STACK_GROW( L, 1); - STACK_CHECK( L, 0); - { - int const oldtop = lua_gettop( L); - DeepPrelude* prelude = (DeepPrelude*) idfunc( L, eDO_new); - if( prelude == NULL) - { - return luaL_error( L, "idfunc(eDO_new) failed to create deep userdata (out of memory)"); - } - if( prelude->magic.value != DEEP_VERSION.value) - { - // just in case, don't leak the newly allocated deep userdata object - lua_pushlightuserdata( L, prelude); - idfunc( L, eDO_delete); - return luaL_error( L, "Bad idfunc(eDO_new): DEEP_VERSION is incorrect, rebuild your implementation with the latest deep implementation"); - } - prelude->refcount = 0; // 'push_deep_proxy' will lift it to 1 - prelude->idfunc = idfunc; - - if( lua_gettop( L) - oldtop != 0) - { - // just in case, don't leak the newly allocated deep userdata object - lua_pushlightuserdata( L, prelude); - idfunc( L, eDO_delete); - return luaL_error( L, "Bad idfunc(eDO_new): should not push anything on the stack"); - } - errmsg = push_deep_proxy( universe_get( L), L, prelude, nuv_, eLM_LaneBody); // proxy - if( errmsg != NULL) - { - return luaL_error( L, errmsg); - } - } - STACK_END( L, 1); - return 1; -} - - -/* -* Access deep userdata through a proxy. -* -* Reference count is not changed, and access to the deep userdata is not -* serialized. It is the module's responsibility to prevent conflicting usage. 
-*/ -void* luaG_todeep( lua_State* L, luaG_IdFunction idfunc, int index) -{ - DeepPrelude** proxy; - - STACK_CHECK( L, 0); - // ensure it is actually a deep userdata - if( get_idfunc( L, index, eLM_LaneBody) != idfunc) - { - return NULL; // no metatable, or wrong kind - } - - proxy = (DeepPrelude**) lua_touserdata( L, index); - STACK_END( L, 0); - - return *proxy; -} - - -/* - * Copy deep userdata between two separate Lua states (from L to L2) - * - * Returns: - * the id function of the copied value, or NULL for non-deep userdata - * (not copied) - */ -bool_t copydeep( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, LookupMode mode_, char const* upName_) -{ - char const* errmsg; - luaG_IdFunction idfunc = get_idfunc( L, i, mode_); - int nuv = 0; - - if( idfunc == NULL) - { - return FALSE; // not a deep userdata - } - - STACK_CHECK( L, 0); - STACK_CHECK( L2, 0); - - // extract all uservalues of the source - while( lua_getiuservalue( L, i, nuv + 1) != LUA_TNONE) // ... u [uv]* nil - { - ++ nuv; - } - // last call returned TNONE and pushed nil, that we don't need - lua_pop( L, 1); // ... u [uv]* - STACK_MID( L, nuv); - - errmsg = push_deep_proxy( U, L2, *(DeepPrelude**) lua_touserdata( L, i), nuv, mode_); // u - - // transfer all uservalues of the source in the destination - { - int const clone_i = lua_gettop( L2); - while( nuv) - { - inter_copy_one( U, L2, L2_cache_i, L, lua_absindex( L, -1), VT_NORMAL, mode_, upName_); // u uv - lua_pop( L, 1); // ... u [uv]* - // this pops the value from the stack - lua_setiuservalue( L2, clone_i, nuv); // u - -- nuv; - } - } - - STACK_END( L2, 1); - STACK_END( L, 0); - - if( errmsg != NULL) - { - // raise the error in the proper state (not the keeper) - lua_State* errL = (mode_ == eLM_FromKeeper) ? L2 : L; - luaL_error( errL, errmsg); - } - return TRUE; -} \ No newline at end of file diff --git a/src/deep.cpp b/src/deep.cpp new file mode 100644 index 0000000..58da457 --- /dev/null +++ b/src/deep.cpp @@ -0,0 +1,501 @@ +/* + * DEEP.C Copyright (c) 2017, Benoit Germain + * + * Deep userdata support, separate in its own source file to help integration + * without enforcing a Lanes dependency + */ + +/* +=============================================================================== + +Copyright (C) 2002-10 Asko Kauppi + 2011-17 Benoit Germain + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ +=============================================================================== +*/ + +#include +#include +#include +#include +#include +#if !defined(__APPLE__) +#include +#endif + +#include "compat.h" +#include "deep.h" +#include "tools.h" +#include "universe.h" +#include "uniquekey.h" + +/*-- Metatable copying --*/ + +/*---=== Deep userdata ===---*/ + +/* +* 'registry[REGKEY]' is a two-way lookup table for 'idfunc's and those type's +* metatables: +* +* metatable -> idfunc +* idfunc -> metatable +*/ +// crc64/we of string "DEEP_LOOKUP_KEY" generated at http://www.nitrxgen.net/hashgen/ +static DECLARE_CONST_UNIQUE_KEY( DEEP_LOOKUP_KEY, 0x9fb9b4f3f633d83d); + +/* + * The deep proxy cache is a weak valued table listing all deep UD proxies indexed by the deep UD that they are proxying + * crc64/we of string "DEEP_PROXY_CACHE_KEY" generated at http://www.nitrxgen.net/hashgen/ +*/ +static DECLARE_CONST_UNIQUE_KEY( DEEP_PROXY_CACHE_KEY, 0x05773d6fc26be106); + +/* +* Sets up [-1]<->[-2] two-way lookups, and ensures the lookup table exists. +* Pops the both values off the stack. +*/ +static void set_deep_lookup( lua_State* L) +{ + STACK_GROW( L, 3); + STACK_CHECK( L, 2); // a b + push_registry_subtable( L, DEEP_LOOKUP_KEY); // a b {} + STACK_MID( L, 3); + lua_insert( L, -3); // {} a b + lua_pushvalue( L, -1); // {} a b b + lua_pushvalue( L,-3); // {} a b b a + lua_rawset( L, -5); // {} a b + lua_rawset( L, -3); // {} + lua_pop( L, 1); // + STACK_END( L, 0); +} + +/* +* Pops the key (metatable or idfunc) off the stack, and replaces with the +* deep lookup value (idfunc/metatable/nil). +*/ +static void get_deep_lookup( lua_State* L) +{ + STACK_GROW( L, 1); + STACK_CHECK( L, 1); // a + REGISTRY_GET( L, DEEP_LOOKUP_KEY); // a {} + if( !lua_isnil( L, -1)) + { + lua_insert( L, -2); // {} a + lua_rawget( L, -2); // {} b + } + lua_remove( L, -2); // a|b + STACK_END( L, 1); +} + +/* +* Return the registered ID function for 'index' (deep userdata proxy), +* or NULL if 'index' is not a deep userdata proxy. +*/ +static inline luaG_IdFunction get_idfunc( lua_State* L, int index, LookupMode mode_) +{ + // when looking inside a keeper, we are 100% sure the object is a deep userdata + if( mode_ == eLM_FromKeeper) + { + DeepPrelude** proxy = (DeepPrelude**) lua_touserdata( L, index); + // we can (and must) cast and fetch the internally stored idfunc + return (*proxy)->idfunc; + } + else + { + // essentially we are making sure that the metatable of the object we want to copy is stored in our metatable/idfunc database + // it is the only way to ensure that the userdata is indeed a deep userdata! + // of course, we could just trust the caller, but we won't + luaG_IdFunction ret; + STACK_GROW( L, 1); + STACK_CHECK( L, 0); + + if( !lua_getmetatable( L, index)) // deep ... metatable? + { + return NULL; // no metatable: can't be a deep userdata object! + } + + // replace metatable with the idfunc pointer, if it is actually a deep userdata + get_deep_lookup( L); // deep ... idfunc|nil + + ret = (luaG_IdFunction) lua_touserdata( L, -1); // NULL if not a userdata + lua_pop( L, 1); + STACK_END( L, 0); + return ret; + } +} + + +void free_deep_prelude( lua_State* L, DeepPrelude* prelude_) +{ + // Call 'idfunc( "delete", deep_ptr )' to make deep cleanup + lua_pushlightuserdata( L, prelude_); + ASSERT_L( prelude_->idfunc); + prelude_->idfunc( L, eDO_delete); +} + + +/* + * void= mt.__gc( proxy_ud ) + * + * End of life for a proxy object; reduce the deep reference count and clean it up if reaches 0. 
+ * + */ +static int deep_userdata_gc( lua_State* L) +{ + DeepPrelude** proxy = (DeepPrelude**) lua_touserdata( L, 1); + DeepPrelude* p = *proxy; + Universe* U = universe_get( L); + int v; + + // can work without a universe if creating a deep userdata from some external C module when Lanes isn't loaded + // in that case, we are not multithreaded and locking isn't necessary anyway + if( U) MUTEX_LOCK( &U->deep_lock); + v = -- (p->refcount); + if (U) MUTEX_UNLOCK( &U->deep_lock); + + if( v == 0) + { + // retrieve wrapped __gc + lua_pushvalue( L, lua_upvalueindex( 1)); // self __gc? + if( !lua_isnil( L, -1)) + { + lua_insert( L, -2); // __gc self + lua_call( L, 1, 0); // + } + // 'idfunc' expects a clean stack to work on + lua_settop( L, 0); + free_deep_prelude( L, p); + + // top was set to 0, then userdata was pushed. "delete" might want to pop the userdata (we don't care), but should not push anything! + if ( lua_gettop( L) > 1) + { + luaL_error( L, "Bad idfunc(eDO_delete): should not push anything"); + } + } + *proxy = NULL; // make sure we don't use it any more, just in case + return 0; +} + + +/* + * Push a proxy userdata on the stack. + * returns NULL if ok, else some error string related to bad idfunc behavior or module require problem + * (error cannot happen with mode_ == eLM_ToKeeper) + * + * Initializes necessary structures if it's the first time 'idfunc' is being + * used in this Lua state (metatable, registring it). Otherwise, increments the + * reference count. + */ +char const* push_deep_proxy( Universe* U, lua_State* L, DeepPrelude* prelude, int nuv_, LookupMode mode_) +{ + DeepPrelude** proxy; + + // Check if a proxy already exists + push_registry_subtable_mode( L, DEEP_PROXY_CACHE_KEY, "v"); // DPC + lua_pushlightuserdata( L, prelude); // DPC deep + lua_rawget( L, -2); // DPC proxy + if ( !lua_isnil( L, -1)) + { + lua_remove( L, -2); // proxy + return NULL; + } + else + { + lua_pop( L, 1); // DPC + } + + // can work without a universe if creating a deep userdata from some external C module when Lanes isn't loaded + // in that case, we are not multithreaded and locking isn't necessary anyway + if( U) MUTEX_LOCK( &U->deep_lock); + ++ (prelude->refcount); // one more proxy pointing to this deep data + if( U) MUTEX_UNLOCK( &U->deep_lock); + + STACK_GROW( L, 7); + STACK_CHECK( L, 0); + + // a new full userdata, fitted with the specified number of uservalue slots (always 1 for Lua < 5.4) + proxy = (DeepPrelude**) lua_newuserdatauv( L, sizeof(DeepPrelude*), nuv_); // DPC proxy + ASSERT_L( proxy); + *proxy = prelude; + + // Get/create metatable for 'idfunc' (in this state) + lua_pushlightuserdata( L, (void*)(ptrdiff_t)(prelude->idfunc)); // DPC proxy idfunc + get_deep_lookup( L); // DPC proxy metatable? + + if( lua_isnil( L, -1)) // // No metatable yet. 
+ { + char const* modname; + int oldtop = lua_gettop( L); // DPC proxy nil + lua_pop( L, 1); // DPC proxy + // 1 - make one and register it + if( mode_ != eLM_ToKeeper) + { + (void) prelude->idfunc( L, eDO_metatable); // DPC proxy metatable + if( lua_gettop( L) - oldtop != 0 || !lua_istable( L, -1)) + { + lua_settop( L, oldtop); // DPC proxy X + lua_pop( L, 3); // + return "Bad idfunc(eOP_metatable): unexpected pushed value"; + } + // if the metatable contains a __gc, we will call it from our own + lua_getfield( L, -1, "__gc"); // DPC proxy metatable __gc + } + else + { + // keepers need a minimal metatable that only contains our own __gc + lua_newtable( L); // DPC proxy metatable + lua_pushnil( L); // DPC proxy metatable nil + } + if( lua_isnil( L, -1)) + { + // Add our own '__gc' method + lua_pop( L, 1); // DPC proxy metatable + lua_pushcfunction( L, deep_userdata_gc); // DPC proxy metatable deep_userdata_gc + } + else + { + // Add our own '__gc' method wrapping the original + lua_pushcclosure( L, deep_userdata_gc, 1); // DPC proxy metatable deep_userdata_gc + } + lua_setfield( L, -2, "__gc"); // DPC proxy metatable + + // Memorize for later rounds + lua_pushvalue( L, -1); // DPC proxy metatable metatable + lua_pushlightuserdata( L, (void*)(ptrdiff_t)(prelude->idfunc)); // DPC proxy metatable metatable idfunc + set_deep_lookup( L); // DPC proxy metatable + + // 2 - cause the target state to require the module that exported the idfunc + // this is needed because we must make sure the shared library is still loaded as long as we hold a pointer on the idfunc + { + int oldtop_module = lua_gettop( L); + modname = (char const*) prelude->idfunc( L, eDO_module); // DPC proxy metatable + // make sure the function pushed nothing on the stack! + if( lua_gettop( L) - oldtop_module != 0) + { + lua_pop( L, 3); // + return "Bad idfunc(eOP_module): should not push anything"; + } + } + if( NULL != modname) // we actually got a module name + { + // L.registry._LOADED exists without having registered the 'package' library. + lua_getglobal( L, "require"); // DPC proxy metatable require() + // check that the module is already loaded (or being loaded, we are happy either way) + if( lua_isfunction( L, -1)) + { + lua_pushstring( L, modname); // DPC proxy metatable require() "module" + lua_getfield( L, LUA_REGISTRYINDEX, LUA_LOADED_TABLE); // DPC proxy metatable require() "module" _R._LOADED + if( lua_istable( L, -1)) + { + bool_t alreadyloaded; + lua_pushvalue( L, -2); // DPC proxy metatable require() "module" _R._LOADED "module" + lua_rawget( L, -2); // DPC proxy metatable require() "module" _R._LOADED module + alreadyloaded = lua_toboolean( L, -1); + if( !alreadyloaded) // not loaded + { + int require_result; + lua_pop( L, 2); // DPC proxy metatable require() "module" + // require "modname" + require_result = lua_pcall( L, 1, 0, 0); // DPC proxy metatable error? + if( require_result != LUA_OK) + { + // failed, return the error message + lua_pushfstring( L, "error while requiring '%s' identified by idfunc(eOP_module): ", modname); + lua_insert( L, -2); // DPC proxy metatable prefix error + lua_concat( L, 2); // DPC proxy metatable error + return lua_tostring( L, -1); + } + } + else // already loaded, we are happy + { + lua_pop( L, 4); // DPC proxy metatable + } + } + else // no L.registry._LOADED; can this ever happen? 
+ { + lua_pop( L, 6); // + return "unexpected error while requiring a module identified by idfunc(eOP_module)"; + } + } + else // a module name, but no require() function :-( + { + lua_pop( L, 4); // + return "lanes receiving deep userdata should register the 'package' library"; + } + } + } + STACK_MID( L, 2); // DPC proxy metatable + ASSERT_L( lua_isuserdata( L, -2)); + ASSERT_L( lua_istable( L, -1)); + lua_setmetatable( L, -2); // DPC proxy + + // If we're here, we obviously had to create a new proxy, so cache it. + lua_pushlightuserdata( L, prelude); // DPC proxy deep + lua_pushvalue( L, -2); // DPC proxy deep proxy + lua_rawset( L, -4); // DPC proxy + lua_remove( L, -2); // proxy + ASSERT_L( lua_isuserdata( L, -1)); + STACK_END( L, 0); + return NULL; +} + +/* +* Create a deep userdata +* +* proxy_ud= deep_userdata( idfunc [, ...] ) +* +* Creates a deep userdata entry of the type defined by 'idfunc'. +* Parameters found on the stack are left as is passed on to the 'idfunc' "new" invocation. +* +* 'idfunc' must fulfill the following features: +* +* lightuserdata = idfunc( eDO_new [, ...] ) -- creates a new deep data instance +* void = idfunc( eDO_delete, lightuserdata ) -- releases a deep data instance +* tbl = idfunc( eDO_metatable ) -- gives metatable for userdata proxies +* +* Reference counting and true userdata proxying are taken care of for the +* actual data type. +* +* Types using the deep userdata system (and only those!) can be passed between +* separate Lua states via 'luaG_inter_move()'. +* +* Returns: 'proxy' userdata for accessing the deep data via 'luaG_todeep()' +*/ +int luaG_newdeepuserdata( lua_State* L, luaG_IdFunction idfunc, int nuv_) +{ + char const* errmsg; + + STACK_GROW( L, 1); + STACK_CHECK( L, 0); + { + int const oldtop = lua_gettop( L); + DeepPrelude* prelude = (DeepPrelude*) idfunc( L, eDO_new); + if( prelude == NULL) + { + return luaL_error( L, "idfunc(eDO_new) failed to create deep userdata (out of memory)"); + } + if( prelude->magic.value != DEEP_VERSION.value) + { + // just in case, don't leak the newly allocated deep userdata object + lua_pushlightuserdata( L, prelude); + idfunc( L, eDO_delete); + return luaL_error( L, "Bad idfunc(eDO_new): DEEP_VERSION is incorrect, rebuild your implementation with the latest deep implementation"); + } + prelude->refcount = 0; // 'push_deep_proxy' will lift it to 1 + prelude->idfunc = idfunc; + + if( lua_gettop( L) - oldtop != 0) + { + // just in case, don't leak the newly allocated deep userdata object + lua_pushlightuserdata( L, prelude); + idfunc( L, eDO_delete); + return luaL_error( L, "Bad idfunc(eDO_new): should not push anything on the stack"); + } + errmsg = push_deep_proxy( universe_get( L), L, prelude, nuv_, eLM_LaneBody); // proxy + if( errmsg != NULL) + { + return luaL_error( L, errmsg); + } + } + STACK_END( L, 1); + return 1; +} + + +/* +* Access deep userdata through a proxy. +* +* Reference count is not changed, and access to the deep userdata is not +* serialized. It is the module's responsibility to prevent conflicting usage. 
+*/ +void* luaG_todeep( lua_State* L, luaG_IdFunction idfunc, int index) +{ + DeepPrelude** proxy; + + STACK_CHECK( L, 0); + // ensure it is actually a deep userdata + if( get_idfunc( L, index, eLM_LaneBody) != idfunc) + { + return NULL; // no metatable, or wrong kind + } + + proxy = (DeepPrelude**) lua_touserdata( L, index); + STACK_END( L, 0); + + return *proxy; +} + + +/* + * Copy deep userdata between two separate Lua states (from L to L2) + * + * Returns: + * the id function of the copied value, or NULL for non-deep userdata + * (not copied) + */ +bool_t copydeep( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, LookupMode mode_, char const* upName_) +{ + char const* errmsg; + luaG_IdFunction idfunc = get_idfunc( L, i, mode_); + int nuv = 0; + + if( idfunc == NULL) + { + return FALSE; // not a deep userdata + } + + STACK_CHECK( L, 0); + STACK_CHECK( L2, 0); + + // extract all uservalues of the source + while( lua_getiuservalue( L, i, nuv + 1) != LUA_TNONE) // ... u [uv]* nil + { + ++ nuv; + } + // last call returned TNONE and pushed nil, that we don't need + lua_pop( L, 1); // ... u [uv]* + STACK_MID( L, nuv); + + errmsg = push_deep_proxy( U, L2, *(DeepPrelude**) lua_touserdata( L, i), nuv, mode_); // u + + // transfer all uservalues of the source in the destination + { + int const clone_i = lua_gettop( L2); + while( nuv) + { + inter_copy_one( U, L2, L2_cache_i, L, lua_absindex( L, -1), VT_NORMAL, mode_, upName_); // u uv + lua_pop( L, 1); // ... u [uv]* + // this pops the value from the stack + lua_setiuservalue( L2, clone_i, nuv); // u + -- nuv; + } + } + + STACK_END( L2, 1); + STACK_END( L, 0); + + if( errmsg != NULL) + { + // raise the error in the proper state (not the keeper) + lua_State* errL = (mode_ == eLM_FromKeeper) ? L2 : L; + luaL_error( errL, errmsg); + } + return TRUE; +} \ No newline at end of file diff --git a/src/keeper.c b/src/keeper.c deleted file mode 100644 index 8aa734a..0000000 --- a/src/keeper.c +++ /dev/null @@ -1,825 +0,0 @@ -/* - -- - -- KEEPER.C - -- - -- Keeper state logic - -- - -- This code is read in for each "keeper state", which are the hidden, inter- - -- mediate data stores used by Lanes inter-state communication objects. - -- - -- Author: Benoit Germain - -- - -- C implementation replacement of the original keeper.lua - -- - --[[ - =============================================================================== - - Copyright (C) 2011-2013 Benoit Germain - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - THE SOFTWARE. 
- - =============================================================================== - ]]-- - */ - -#include -#include -#include -#include -#include - -#include "keeper.h" -#include "compat.h" -#include "tools.h" -#include "state.h" -#include "universe.h" -#include "uniquekey.h" - -//################################################################################### -// Keeper implementation -//################################################################################### - -#ifndef __min -#define __min( a, b) (((a) < (b)) ? (a) : (b)) -#endif // __min - -typedef struct -{ - lua_Integer first; - lua_Integer count; - lua_Integer limit; -} keeper_fifo; - -static int const CONTENTS_TABLE = 1; - -// replaces the fifo ud by its uservalue on the stack -static keeper_fifo* prepare_fifo_access( lua_State* L, int idx_) -{ - keeper_fifo* fifo = (keeper_fifo*) lua_touserdata( L, idx_); - if( fifo != NULL) - { - idx_ = lua_absindex( L, idx_); - STACK_GROW( L, 1); - // we can replace the fifo userdata in the stack without fear of it being GCed, there are other references around - lua_getiuservalue( L, idx_, CONTENTS_TABLE); - lua_replace( L, idx_); - } - return fifo; -} - -// in: nothing -// out: { first = 1, count = 0, limit = -1} -static void fifo_new( lua_State* L) -{ - keeper_fifo* fifo; - STACK_GROW( L, 2); - // a fifo full userdata has one uservalue, the table that holds the actual fifo contents - fifo = (keeper_fifo*)lua_newuserdatauv( L, sizeof( keeper_fifo), 1); - fifo->first = 1; - fifo->count = 0; - fifo->limit = -1; - lua_newtable( L); - lua_setiuservalue( L, -2, CONTENTS_TABLE); -} - -// in: expect fifo ... on top of the stack -// out: nothing, removes all pushed values from the stack -static void fifo_push( lua_State* L, keeper_fifo* fifo_, lua_Integer count_) -{ - int const idx = lua_gettop( L) - (int) count_; - lua_Integer start = fifo_->first + fifo_->count - 1; - lua_Integer i; - // pop all additional arguments, storing them in the fifo - for( i = count_; i >= 1; -- i) - { - // store in the fifo the value at the top of the stack at the specified index, popping it from the stack - lua_rawseti( L, idx, (int)(start + i)); - } - fifo_->count += count_; -} - -// in: fifo -// out: ...|nothing -// expects exactly 1 value on the stack! -// currently only called with a count of 1, but this may change in the future -// function assumes that there is enough data in the fifo to satisfy the request -static void fifo_peek( lua_State* L, keeper_fifo* fifo_, lua_Integer count_) -{ - lua_Integer i; - STACK_GROW( L, count_); - for( i = 0; i < count_; ++ i) - { - lua_rawgeti( L, 1, (int)( fifo_->first + i)); - } -} - -// in: fifo -// out: remove the fifo from the stack, push as many items as required on the stack (function assumes they exist in sufficient number) -static void fifo_pop( lua_State* L, keeper_fifo* fifo_, lua_Integer count_) -{ - int const fifo_idx = lua_gettop( L); // ... fifo - int i; - // each iteration pushes a value on the stack! - STACK_GROW( L, count_ + 2); - // skip first item, we will push it last - for( i = 1; i < count_; ++ i) - { - int const at = (int)( fifo_->first + i); - // push item on the stack - lua_rawgeti( L, fifo_idx, at); // ... fifo val - // remove item from the fifo - lua_pushnil( L); // ... fifo val nil - lua_rawseti( L, fifo_idx, at); // ... fifo val - } - // now process first item - { - int const at = (int)( fifo_->first); - lua_rawgeti( L, fifo_idx, at); // ... fifo vals val - lua_pushnil( L); // ... fifo vals val nil - lua_rawseti( L, fifo_idx, at); // ... 
fifo vals val - lua_replace( L, fifo_idx); // ... vals - } - { - // avoid ever-growing indexes by resetting each time we detect the fifo is empty - lua_Integer const new_count = fifo_->count - count_; - fifo_->first = (new_count == 0) ? 1 : (fifo_->first + count_); - fifo_->count = new_count; - } -} - -// in: linda_ud expected at *absolute* stack slot idx -// out: fifos[ud] -// crc64/we of string "FIFOS_KEY" generated at http://www.nitrxgen.net/hashgen/ -static DECLARE_CONST_UNIQUE_KEY( FIFOS_KEY, 0xdce50bbc351cd465); -static void push_table( lua_State* L, int idx_) -{ - STACK_GROW( L, 4); - STACK_CHECK( L, 0); - idx_ = lua_absindex( L, idx_); - REGISTRY_GET( L, FIFOS_KEY); // ud fifos - lua_pushvalue( L, idx_); // ud fifos ud - lua_rawget( L, -2); // ud fifos fifos[ud] - STACK_MID( L, 2); - if( lua_isnil( L, -1)) - { - lua_pop( L, 1); // ud fifos - // add a new fifos table for this linda - lua_newtable( L); // ud fifos fifos[ud] - lua_pushvalue( L, idx_); // ud fifos fifos[ud] ud - lua_pushvalue( L, -2); // ud fifos fifos[ud] ud fifos[ud] - lua_rawset( L, -4); // ud fifos fifos[ud] - } - lua_remove( L, -2); // ud fifos[ud] - STACK_END( L, 1); -} - -int keeper_push_linda_storage( Universe* U, lua_State* L, void* ptr_, ptrdiff_t magic_) -{ - Keeper* const K = which_keeper( U->keepers, magic_); - lua_State* const KL = K ? K->L : NULL; - if( KL == NULL) return 0; - STACK_GROW( KL, 4); - STACK_CHECK( KL, 0); - REGISTRY_GET( KL, FIFOS_KEY); // fifos - lua_pushlightuserdata( KL, ptr_); // fifos ud - lua_rawget( KL, -2); // fifos storage - lua_remove( KL, -2); // storage - if( !lua_istable( KL, -1)) - { - lua_pop( KL, 1); // - STACK_MID( KL, 0); - return 0; - } - // move data from keeper to destination state KEEPER MAIN - lua_pushnil( KL); // storage nil - STACK_GROW( L, 5); - STACK_CHECK( L, 0); - lua_newtable( L); // out - while( lua_next( KL, -2)) // storage key fifo - { - keeper_fifo* fifo = prepare_fifo_access( KL, -1); // storage key fifo - lua_pushvalue( KL, -2); // storage key fifo key - luaG_inter_move( U, KL, L, 1, eLM_FromKeeper); // storage key fifo // out key - STACK_MID( L, 2); - lua_newtable( L); // out key keyout - luaG_inter_move( U, KL, L, 1, eLM_FromKeeper); // storage key // out key keyout fifo - lua_pushinteger( L, fifo->first); // out key keyout fifo first - STACK_MID( L, 5); - lua_setfield( L, -3, "first"); // out key keyout fifo - lua_pushinteger( L, fifo->count); // out key keyout fifo count - STACK_MID( L, 5); - lua_setfield( L, -3, "count"); // out key keyout fifo - lua_pushinteger( L, fifo->limit); // out key keyout fifo limit - STACK_MID( L, 5); - lua_setfield( L, -3, "limit"); // out key keyout fifo - lua_setfield( L, -2, "fifo"); // out key keyout - lua_rawset( L, -3); // out - STACK_MID( L, 1); - } - STACK_END( L, 1); - lua_pop( KL, 1); // - STACK_END( KL, 0); - return 1; -} - -// in: linda_ud -int keepercall_clear( lua_State* L) -{ - STACK_GROW( L, 3); - STACK_CHECK( L, 0); - REGISTRY_GET( L, FIFOS_KEY); // ud fifos - lua_pushvalue( L, 1); // ud fifos ud - lua_pushnil( L); // ud fifos ud nil - lua_rawset( L, -3); // ud fifos - lua_pop( L, 1); // ud - STACK_END( L, 0); - return 0; -} - - -// in: linda_ud, key, ... -// out: true|false -int keepercall_send( lua_State* L) -{ - keeper_fifo* fifo; - int n = lua_gettop( L) - 2; - push_table( L, 1); // ud key ... fifos - // get the fifo associated to this key in this linda, create it if it doesn't exist - lua_pushvalue( L, 2); // ud key ... fifos key - lua_rawget( L, -2); // ud key ... 
fifos fifo - if( lua_isnil( L, -1)) - { - lua_pop( L, 1); // ud key ... fifos - fifo_new( L); // ud key ... fifos fifo - lua_pushvalue( L, 2); // ud key ... fifos fifo key - lua_pushvalue( L, -2); // ud key ... fifos fifo key fifo - lua_rawset( L, -4); // ud key ... fifos fifo - } - lua_remove( L, -2); // ud key ... fifo - fifo = (keeper_fifo*) lua_touserdata( L, -1); - if( fifo->limit >= 0 && fifo->count + n > fifo->limit) - { - lua_settop( L, 0); // - lua_pushboolean( L, 0); // false - } - else - { - fifo = prepare_fifo_access( L, -1); - lua_replace( L, 2); // ud fifo ... - fifo_push( L, fifo, n); // ud fifo - lua_settop( L, 0); // - lua_pushboolean( L, 1); // true - } - return 1; -} - -// in: linda_ud, key [, key]? -// out: (key, val) or nothing -int keepercall_receive( lua_State* L) -{ - int top = lua_gettop( L); - int i; - push_table( L, 1); // ud keys fifos - lua_replace( L, 1); // fifos keys - for( i = 2; i <= top; ++ i) - { - keeper_fifo* fifo; - lua_pushvalue( L, i); // fifos keys key[i] - lua_rawget( L, 1); // fifos keys fifo - fifo = prepare_fifo_access( L, -1); // fifos keys fifo - if( fifo != NULL && fifo->count > 0) - { - fifo_pop( L, fifo, 1); // fifos keys val - if( !lua_isnil( L, -1)) - { - lua_replace( L, 1); // val keys - lua_settop( L, i); // val keys key[i] - if( i != 2) - { - lua_replace( L, 2); // val key keys - lua_settop( L, 2); // val key - } - lua_insert( L, 1); // key, val - return 2; - } - } - lua_settop( L, top); // data keys - } - // nothing to receive - return 0; -} - -//in: linda_ud key mincount [maxcount] -int keepercall_receive_batched( lua_State* L) -{ - lua_Integer const min_count = lua_tointeger( L, 3); - if( min_count > 0) - { - keeper_fifo* fifo; - lua_Integer const max_count = luaL_optinteger( L, 4, min_count); - lua_settop( L, 2); // ud key - lua_insert( L, 1); // key ud - push_table( L, 2); // key ud fifos - lua_remove( L, 2); // key fifos - lua_pushvalue( L, 1); // key fifos key - lua_rawget( L, 2); // key fifos fifo - lua_remove( L, 2); // key fifo - fifo = prepare_fifo_access( L, 2); // key fifo - if( fifo != NULL && fifo->count >= min_count) - { - fifo_pop( L, fifo, __min( max_count, fifo->count)); // key ... - } - else - { - lua_settop( L, 0); - } - return lua_gettop( L); - } - else - { - return 0; - } -} - -// in: linda_ud key n -// out: true or nil -int keepercall_limit( lua_State* L) -{ - keeper_fifo* fifo; - lua_Integer limit = lua_tointeger( L, 3); - push_table( L, 1); // ud key n fifos - lua_replace( L, 1); // fifos key n - lua_pop( L, 1); // fifos key - lua_pushvalue( L, -1); // fifos key key - lua_rawget( L, -3); // fifos key fifo|nil - fifo = (keeper_fifo*) lua_touserdata( L, -1); - if( fifo == NULL) - { // fifos key nil - lua_pop( L, 1); // fifos key - fifo_new( L); // fifos key fifo - fifo = (keeper_fifo*) lua_touserdata( L, -1); - lua_rawset( L, -3); // fifos - } - // remove any clutter on the stack - lua_settop( L, 0); - // return true if we decide that blocked threads waiting to write on that key should be awakened - // this is the case if we detect the key was full but it is no longer the case - if( - ((fifo->limit >= 0) && (fifo->count >= fifo->limit)) // the key was full if limited and count exceeded the previous limit - && ((limit < 0) || (fifo->count < limit)) // the key is not full if unlimited or count is lower than the new limit - ) - { - lua_pushboolean( L, 1); - } - // set the new limit - fifo->limit = limit; - // return 0 or 1 value - return lua_gettop( L); -} - -//in: linda_ud key [[val] ...] 
-//out: true or nil -int keepercall_set( lua_State* L) -{ - bool_t should_wake_writers = FALSE; - STACK_GROW( L, 6); - - // retrieve fifos associated with the linda - push_table( L, 1); // ud key [val [, ...]] fifos - lua_replace( L, 1); // fifos key [val [, ...]] - - // make sure we have a value on the stack - if( lua_gettop( L) == 2) // fifos key - { - keeper_fifo* fifo; - lua_pushvalue( L, -1); // fifos key key - lua_rawget( L, 1); // fifos key fifo|nil - // empty the fifo for the specified key: replace uservalue with a virgin table, reset counters, but leave limit unchanged! - fifo = (keeper_fifo*) lua_touserdata( L, -1); - if( fifo != NULL) // might be NULL if we set a nonexistent key to nil - { // fifos key fifo - if( fifo->limit < 0) // fifo limit value is the default (unlimited): we can totally remove it - { - lua_pop( L, 1); // fifos key - lua_pushnil( L); // fifos key nil - lua_rawset( L, -3); // fifos - } - else - { - // we create room if the fifo was full but it is no longer the case - should_wake_writers = (fifo->limit > 0) && (fifo->count >= fifo->limit); - lua_remove( L, -2); // fifos fifo - lua_newtable( L); // fifos fifo {} - lua_setiuservalue( L, -2, CONTENTS_TABLE); // fifos fifo - fifo->first = 1; - fifo->count = 0; - } - } - } - else // set/replace contents stored at the specified key? - { - lua_Integer count = lua_gettop( L) - 2; // number of items we want to store - keeper_fifo* fifo; // fifos key [val [, ...]] - lua_pushvalue( L, 2); // fifos key [val [, ...]] key - lua_rawget( L, 1); // fifos key [val [, ...]] fifo|nil - fifo = (keeper_fifo*) lua_touserdata( L, -1); - if( fifo == NULL) // can be NULL if we store a value at a new key - { // fifos key [val [, ...]] nil - // no need to wake writers in that case, because a writer can't wait on an inexistent key - lua_pop( L, 1); // fifos key [val [, ...]] - fifo_new( L); // fifos key [val [, ...]] fifo - lua_pushvalue( L, 2); // fifos key [val [, ...]] fifo key - lua_pushvalue( L, -2); // fifos key [val [, ...]] fifo key fifo - lua_rawset( L, 1); // fifos key [val [, ...]] fifo - } - else // the fifo exists, we just want to update its contents - { // fifos key [val [, ...]] fifo - // we create room if the fifo was full but it is no longer the case - should_wake_writers = (fifo->limit > 0) && (fifo->count >= fifo->limit) && (count < fifo->limit); - // empty the fifo for the specified key: replace uservalue with a virgin table, reset counters, but leave limit unchanged! - lua_newtable( L); // fifos key [val [, ...]] fifo {} - lua_setiuservalue( L, -2, CONTENTS_TABLE); // fifos key [val [, ...]] fifo - fifo->first = 1; - fifo->count = 0; - } - fifo = prepare_fifo_access( L, -1); - // move the fifo below the values we want to store - lua_insert( L, 3); // fifos key fifo [val [, ...]] - fifo_push( L, fifo, count); // fifos key fifo - } - return should_wake_writers ? (lua_pushboolean( L, 1), 1) : 0; -} - -// in: linda_ud key [count] -// out: at most values -int keepercall_get( lua_State* L) -{ - keeper_fifo* fifo; - lua_Integer count = 1; - if( lua_gettop( L) == 3) // ud key count - { - count = lua_tointeger( L, 3); - lua_pop( L, 1); // ud key - } - push_table( L, 1); // ud key fifos - lua_replace( L, 1); // fifos key - lua_rawget( L, 1); // fifos fifo - fifo = prepare_fifo_access( L, -1); // fifos fifo - if( fifo != NULL && fifo->count > 0) - { - lua_remove( L, 1); // fifo - count = __min( count, fifo->count); - // read value off the fifo - fifo_peek( L, fifo, count); // fifo ... 
- return (int) count; - } - // no fifo was ever registered for this key, or it is empty - return 0; -} - -// in: linda_ud [, key [, ...]] -int keepercall_count( lua_State* L) -{ - push_table( L, 1); // ud keys fifos - switch( lua_gettop( L)) - { - // no key is specified: return a table giving the count of all known keys - case 2: // ud fifos - lua_newtable( L); // ud fifos out - lua_replace( L, 1); // out fifos - lua_pushnil( L); // out fifos nil - while( lua_next( L, 2)) // out fifos key fifo - { - keeper_fifo* fifo = prepare_fifo_access( L, -1); // out fifos key fifo - lua_pop( L, 1); // out fifos key - lua_pushvalue( L, -1); // out fifos key key - lua_pushinteger( L, fifo->count); // out fifos key key count - lua_rawset( L, -5); // out fifos key - } - lua_pop( L, 1); // out - break; - - // 1 key is specified: return its count - case 3: // ud key fifos - { - keeper_fifo* fifo; - lua_replace( L, 1); // fifos key - lua_rawget( L, -2); // fifos fifo|nil - if( lua_isnil( L, -1)) // the key is unknown - { // fifos nil - lua_remove( L, -2); // nil - } - else // the key is known - { // fifos fifo - fifo = prepare_fifo_access( L, -1); // fifos fifo - lua_pushinteger( L, fifo->count); // fifos fifo count - lua_replace( L, -3); // count fifo - lua_pop( L, 1); // count - } - } - break; - - // a variable number of keys is specified: return a table of their counts - default: // ud keys fifos - lua_newtable( L); // ud keys fifos out - lua_replace( L, 1); // out keys fifos - // shifts all keys up in the stack. potentially slow if there are a lot of them, but then it should be bearable - lua_insert( L, 2); // out fifos keys - while( lua_gettop( L) > 2) - { - keeper_fifo* fifo; - lua_pushvalue( L, -1); // out fifos keys key - lua_rawget( L, 2); // out fifos keys fifo|nil - fifo = prepare_fifo_access( L, -1); // out fifos keys fifo|nil - lua_pop( L, 1); // out fifos keys - if( fifo != NULL) // the key is known - { - lua_pushinteger( L, fifo->count); // out fifos keys count - lua_rawset( L, 1); // out fifos keys - } - else // the key is unknown - { - lua_pop( L, 1); // out fifos keys - } - } - lua_pop( L, 1); // out - } - ASSERT_L( lua_gettop( L) == 1); - return 1; -} - -//################################################################################### -// Keeper API, accessed from linda methods -//################################################################################### - -/*---=== Keeper states ===--- -*/ - -/* -* Pool of keeper states -* -* Access to keeper states is locked (only one OS thread at a time) so the -* bigger the pool, the less chances of unnecessary waits. Lindas map to the -* keepers randomly, by a hash. -*/ - -// called as __gc for the keepers array userdata -void close_keepers( Universe* U) -{ - if( U->keepers != NULL) - { - int i; - int nbKeepers = U->keepers->nb_keepers; - // NOTE: imagine some keeper state N+1 currently holds a linda that uses another keeper N, and a _gc that will make use of it - // when keeper N+1 is closed, object is GCed, linda operation is called, which attempts to acquire keeper N, whose Lua state no longer exists - // in that case, the linda operation should do nothing. 
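The first/count bookkeeping used by fifo_push/fifo_pop above is easier to follow without the Lua stack in the way. A standalone sketch of the same arithmetic, where demo_fifo and its fixed slots[] array stand in for the fifo userdata and its uservalue contents table (limit handling and bounds checks are omitted to keep it short):

    #include <stdio.h>

    typedef struct { int first; int count; int slots[16]; } demo_fifo;

    static void demo_push( demo_fifo* f, int v)
    {
        f->slots[f->first + f->count] = v; // a new element always lands at index first + count
        f->count += 1;
    }

    static int demo_pop( demo_fifo* f)
    {
        int const v = f->slots[f->first];
        f->count -= 1;
        // same trick as fifo_pop: reset 'first' once the fifo drains, so indexes never grow unboundedly
        f->first = (f->count == 0) ? 1 : (f->first + 1);
        return v;
    }

    int main( void)
    {
        demo_fifo f = { 1, 0, { 0 } };
        demo_push( &f, 10); // stored in slots[1]
        demo_push( &f, 20); // stored in slots[2]
        {
            int const a = demo_pop( &f);
            int const b = demo_pop( &f);
            printf( "%d %d, first back to %d\n", a, b, f.first); // prints "10 20, first back to 1"
        }
        return 0;
    }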
which means that these operations must check for keeper acquisition success - // which is early-outed with a U->keepers->nbKeepers null-check - U->keepers->nb_keepers = 0; - for( i = 0; i < nbKeepers; ++ i) - { - lua_State* K = U->keepers->keeper_array[i].L; - U->keepers->keeper_array[i].L = NULL; - if( K != NULL) - { - lua_close( K); - } - else - { - // detected partial init: destroy only the mutexes that got initialized properly - nbKeepers = i; - } - } - for( i = 0; i < nbKeepers; ++ i) - { - MUTEX_FREE( &U->keepers->keeper_array[i].keeper_cs); - } - // free the keeper bookkeeping structure - { - AllocatorDefinition* const allocD = &U->internal_allocator; - (void) allocD->allocF( allocD->allocUD, U->keepers, sizeof( Keepers) + (nbKeepers - 1) * sizeof( Keeper), 0); - U->keepers = NULL; - } - } -} - -/* - * Initialize keeper states - * - * If there is a problem, returns NULL and pushes the error message on the stack - * else returns the keepers bookkeeping structure. - * - * Note: Any problems would be design flaws; the created Lua state is left - * unclosed, because it does not really matter. In production code, this - * function never fails. - * settings table is at position 1 on the stack - */ -void init_keepers( Universe* U, lua_State* L) -{ - int i; - int nb_keepers; - - STACK_CHECK( L, 0); // L K - lua_getfield( L, 1, "nb_keepers"); // nb_keepers - nb_keepers = (int) lua_tointeger( L, -1); - lua_pop( L, 1); // - if( nb_keepers < 1) - { - (void) luaL_error( L, "Bad number of keepers (%d)", nb_keepers); - } - - // Keepers contains an array of 1 s_Keeper, adjust for the actual number of keeper states - { - size_t const bytes = sizeof( Keepers) + (nb_keepers - 1) * sizeof( Keeper); - { - AllocatorDefinition* const allocD = &U->internal_allocator; - U->keepers = (Keepers*) allocD->allocF( allocD->allocUD, NULL, 0, bytes); - } - if( U->keepers == NULL) - { - (void) luaL_error( L, "init_keepers() failed while creating keeper array; out of memory"); - return; - } - memset( U->keepers, 0, bytes); - U->keepers->nb_keepers = nb_keepers; - } - for( i = 0; i < nb_keepers; ++ i) // keepersUD - { - // note that we will leak K if we raise an error later - lua_State* K = create_state( U, L); - if( K == NULL) - { - (void) luaL_error( L, "init_keepers() failed while creating keeper states; out of memory"); - return; - } - - U->keepers->keeper_array[i].L = K; - // we can trigger a GC from inside keeper_call(), where a keeper is acquired - // from there, GC can collect a linda, which would acquire the keeper again, and deadlock the thread. - // therefore, we need a recursive mutex. - MUTEX_RECURSIVE_INIT( &U->keepers->keeper_array[i].keeper_cs); - - STACK_CHECK( K, 0); - - // copy the universe pointer in the keeper itself - universe_store( K, U); - STACK_MID( K, 0); - - // make sure 'package' is initialized in keeper states, so that we have require() - // this because this is needed when transferring deep userdata object - luaL_requiref( K, "package", luaopen_package, 1); // package - lua_pop( K, 1); // - STACK_MID( K, 0); - serialize_require( DEBUGSPEW_PARAM_COMMA( U) K); - STACK_MID( K, 0); - - // copy package.path and package.cpath from the source state - lua_getglobal( L, "package"); // "..." 
keepersUD package - if( !lua_isnil( L, -1)) - { - // when copying with mode eLM_ToKeeper, error message is pushed at the top of the stack, not raised immediately - if( luaG_inter_copy_package( U, L, K, -1, eLM_ToKeeper)) - { - // if something went wrong, the error message is at the top of the stack - lua_remove( L, -2); // error_msg - (void) lua_error( L); - return; - } - } - lua_pop( L, 1); // - STACK_MID( L, 0); - - // attempt to call on_state_create(), if we have one and it is a C function - // (only support a C function because we can't transfer executable Lua code in keepers) - // will raise an error in L in case of problem - call_on_state_create( U, K, L, eLM_ToKeeper); - - // to see VM name in Decoda debugger - lua_pushfstring( K, "Keeper #%d", i + 1); // "Keeper #n" - lua_setglobal( K, "decoda_name"); // - - // create the fifos table in the keeper state - REGISTRY_SET( K, FIFOS_KEY, lua_newtable( K)); - STACK_END( K, 0); - } - STACK_END( L, 0); -} - -// should be called only when inside a keeper_acquire/keeper_release pair (see linda_protected_call) -Keeper* which_keeper(Keepers* keepers_, ptrdiff_t magic_) -{ - int const nbKeepers = keepers_->nb_keepers; - unsigned int i = (unsigned int)((magic_ >> KEEPER_MAGIC_SHIFT) % nbKeepers); - return &keepers_->keeper_array[i]; -} - -Keeper* keeper_acquire( Keepers* keepers_, ptrdiff_t magic_) -{ - int const nbKeepers = keepers_->nb_keepers; - // can be 0 if this happens during main state shutdown (lanes is being GC'ed -> no keepers) - if( nbKeepers == 0) - { - return NULL; - } - else - { - /* - * Any hashing will do that maps pointers to 0..GNbKeepers-1 - * consistently. - * - * Pointers are often aligned by 8 or so - ignore the low order bits - * have to cast to unsigned long to avoid compilation warnings about loss of data when converting pointer-to-integer - */ - unsigned int i = (unsigned int)((magic_ >> KEEPER_MAGIC_SHIFT) % nbKeepers); - Keeper* K = &keepers_->keeper_array[i]; - - MUTEX_LOCK( &K->keeper_cs); - //++ K->count; - return K; - } -} - -void keeper_release( Keeper* K) -{ - //-- K->count; - if( K) MUTEX_UNLOCK( &K->keeper_cs); -} - -void keeper_toggle_nil_sentinels( lua_State* L, int val_i_, LookupMode const mode_) -{ - int i, n = lua_gettop( L); - for( i = val_i_; i <= n; ++ i) - { - if( mode_ == eLM_ToKeeper) - { - if( lua_isnil( L, i)) - { - push_unique_key( L, NIL_SENTINEL); - lua_replace( L, i); - } - } - else - { - if( equal_unique_key( L, i, NIL_SENTINEL)) - { - lua_pushnil( L); - lua_replace( L, i); - } - } - } -} - -/* -* Call a function ('func_name') in the keeper state, and pass on the returned -* values to 'L'. -* -* 'linda': deep Linda pointer (used only as a unique table key, first parameter) -* 'starting_index': first of the rest of parameters (none if 0) -* -* Returns: number of return values (pushed to 'L') or -1 in case of error -*/ -int keeper_call( Universe* U, lua_State* K, keeper_api_t func_, lua_State* L, void* linda, uint_t starting_index) -{ - int const args = starting_index ? 
(lua_gettop( L) - starting_index + 1) : 0; - int const Ktos = lua_gettop( K); - int retvals = -1; - - STACK_GROW( K, 2); - - PUSH_KEEPER_FUNC( K, func_); - - lua_pushlightuserdata( K, linda); - - if( (args == 0) || luaG_inter_copy( U, L, K, args, eLM_ToKeeper) == 0) // L->K - { - lua_call( K, 1 + args, LUA_MULTRET); - - retvals = lua_gettop( K) - Ktos; - // note that this can raise a luaL_error while the keeper state (and its mutex) is acquired - // this may interrupt a lane, causing the destruction of the underlying OS thread - // after this, another lane making use of this keeper can get an error code from the mutex-locking function - // when attempting to grab the mutex again (WINVER <= 0x400 does this, but locks just fine, I don't know about pthread) - if( (retvals > 0) && luaG_inter_move( U, K, L, retvals, eLM_FromKeeper) != 0) // K->L - { - retvals = -1; - } - } - // whatever happens, restore the stack to where it was at the origin - lua_settop( K, Ktos); - return retvals; -} diff --git a/src/keeper.cpp b/src/keeper.cpp new file mode 100644 index 0000000..8aa734a --- /dev/null +++ b/src/keeper.cpp @@ -0,0 +1,825 @@ +/* + -- + -- KEEPER.C + -- + -- Keeper state logic + -- + -- This code is read in for each "keeper state", which are the hidden, inter- + -- mediate data stores used by Lanes inter-state communication objects. + -- + -- Author: Benoit Germain + -- + -- C implementation replacement of the original keeper.lua + -- + --[[ + =============================================================================== + + Copyright (C) 2011-2013 Benoit Germain + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. + + =============================================================================== + ]]-- + */ + +#include +#include +#include +#include +#include + +#include "keeper.h" +#include "compat.h" +#include "tools.h" +#include "state.h" +#include "universe.h" +#include "uniquekey.h" + +//################################################################################### +// Keeper implementation +//################################################################################### + +#ifndef __min +#define __min( a, b) (((a) < (b)) ? 
(a) : (b)) +#endif // __min + +typedef struct +{ + lua_Integer first; + lua_Integer count; + lua_Integer limit; +} keeper_fifo; + +static int const CONTENTS_TABLE = 1; + +// replaces the fifo ud by its uservalue on the stack +static keeper_fifo* prepare_fifo_access( lua_State* L, int idx_) +{ + keeper_fifo* fifo = (keeper_fifo*) lua_touserdata( L, idx_); + if( fifo != NULL) + { + idx_ = lua_absindex( L, idx_); + STACK_GROW( L, 1); + // we can replace the fifo userdata in the stack without fear of it being GCed, there are other references around + lua_getiuservalue( L, idx_, CONTENTS_TABLE); + lua_replace( L, idx_); + } + return fifo; +} + +// in: nothing +// out: { first = 1, count = 0, limit = -1} +static void fifo_new( lua_State* L) +{ + keeper_fifo* fifo; + STACK_GROW( L, 2); + // a fifo full userdata has one uservalue, the table that holds the actual fifo contents + fifo = (keeper_fifo*)lua_newuserdatauv( L, sizeof( keeper_fifo), 1); + fifo->first = 1; + fifo->count = 0; + fifo->limit = -1; + lua_newtable( L); + lua_setiuservalue( L, -2, CONTENTS_TABLE); +} + +// in: expect fifo ... on top of the stack +// out: nothing, removes all pushed values from the stack +static void fifo_push( lua_State* L, keeper_fifo* fifo_, lua_Integer count_) +{ + int const idx = lua_gettop( L) - (int) count_; + lua_Integer start = fifo_->first + fifo_->count - 1; + lua_Integer i; + // pop all additional arguments, storing them in the fifo + for( i = count_; i >= 1; -- i) + { + // store in the fifo the value at the top of the stack at the specified index, popping it from the stack + lua_rawseti( L, idx, (int)(start + i)); + } + fifo_->count += count_; +} + +// in: fifo +// out: ...|nothing +// expects exactly 1 value on the stack! +// currently only called with a count of 1, but this may change in the future +// function assumes that there is enough data in the fifo to satisfy the request +static void fifo_peek( lua_State* L, keeper_fifo* fifo_, lua_Integer count_) +{ + lua_Integer i; + STACK_GROW( L, count_); + for( i = 0; i < count_; ++ i) + { + lua_rawgeti( L, 1, (int)( fifo_->first + i)); + } +} + +// in: fifo +// out: remove the fifo from the stack, push as many items as required on the stack (function assumes they exist in sufficient number) +static void fifo_pop( lua_State* L, keeper_fifo* fifo_, lua_Integer count_) +{ + int const fifo_idx = lua_gettop( L); // ... fifo + int i; + // each iteration pushes a value on the stack! + STACK_GROW( L, count_ + 2); + // skip first item, we will push it last + for( i = 1; i < count_; ++ i) + { + int const at = (int)( fifo_->first + i); + // push item on the stack + lua_rawgeti( L, fifo_idx, at); // ... fifo val + // remove item from the fifo + lua_pushnil( L); // ... fifo val nil + lua_rawseti( L, fifo_idx, at); // ... fifo val + } + // now process first item + { + int const at = (int)( fifo_->first); + lua_rawgeti( L, fifo_idx, at); // ... fifo vals val + lua_pushnil( L); // ... fifo vals val nil + lua_rawseti( L, fifo_idx, at); // ... fifo vals val + lua_replace( L, fifo_idx); // ... vals + } + { + // avoid ever-growing indexes by resetting each time we detect the fifo is empty + lua_Integer const new_count = fifo_->count - count_; + fifo_->first = (new_count == 0) ? 
1 : (fifo_->first + count_); + fifo_->count = new_count; + } +} + +// in: linda_ud expected at *absolute* stack slot idx +// out: fifos[ud] +// crc64/we of string "FIFOS_KEY" generated at http://www.nitrxgen.net/hashgen/ +static DECLARE_CONST_UNIQUE_KEY( FIFOS_KEY, 0xdce50bbc351cd465); +static void push_table( lua_State* L, int idx_) +{ + STACK_GROW( L, 4); + STACK_CHECK( L, 0); + idx_ = lua_absindex( L, idx_); + REGISTRY_GET( L, FIFOS_KEY); // ud fifos + lua_pushvalue( L, idx_); // ud fifos ud + lua_rawget( L, -2); // ud fifos fifos[ud] + STACK_MID( L, 2); + if( lua_isnil( L, -1)) + { + lua_pop( L, 1); // ud fifos + // add a new fifos table for this linda + lua_newtable( L); // ud fifos fifos[ud] + lua_pushvalue( L, idx_); // ud fifos fifos[ud] ud + lua_pushvalue( L, -2); // ud fifos fifos[ud] ud fifos[ud] + lua_rawset( L, -4); // ud fifos fifos[ud] + } + lua_remove( L, -2); // ud fifos[ud] + STACK_END( L, 1); +} + +int keeper_push_linda_storage( Universe* U, lua_State* L, void* ptr_, ptrdiff_t magic_) +{ + Keeper* const K = which_keeper( U->keepers, magic_); + lua_State* const KL = K ? K->L : NULL; + if( KL == NULL) return 0; + STACK_GROW( KL, 4); + STACK_CHECK( KL, 0); + REGISTRY_GET( KL, FIFOS_KEY); // fifos + lua_pushlightuserdata( KL, ptr_); // fifos ud + lua_rawget( KL, -2); // fifos storage + lua_remove( KL, -2); // storage + if( !lua_istable( KL, -1)) + { + lua_pop( KL, 1); // + STACK_MID( KL, 0); + return 0; + } + // move data from keeper to destination state KEEPER MAIN + lua_pushnil( KL); // storage nil + STACK_GROW( L, 5); + STACK_CHECK( L, 0); + lua_newtable( L); // out + while( lua_next( KL, -2)) // storage key fifo + { + keeper_fifo* fifo = prepare_fifo_access( KL, -1); // storage key fifo + lua_pushvalue( KL, -2); // storage key fifo key + luaG_inter_move( U, KL, L, 1, eLM_FromKeeper); // storage key fifo // out key + STACK_MID( L, 2); + lua_newtable( L); // out key keyout + luaG_inter_move( U, KL, L, 1, eLM_FromKeeper); // storage key // out key keyout fifo + lua_pushinteger( L, fifo->first); // out key keyout fifo first + STACK_MID( L, 5); + lua_setfield( L, -3, "first"); // out key keyout fifo + lua_pushinteger( L, fifo->count); // out key keyout fifo count + STACK_MID( L, 5); + lua_setfield( L, -3, "count"); // out key keyout fifo + lua_pushinteger( L, fifo->limit); // out key keyout fifo limit + STACK_MID( L, 5); + lua_setfield( L, -3, "limit"); // out key keyout fifo + lua_setfield( L, -2, "fifo"); // out key keyout + lua_rawset( L, -3); // out + STACK_MID( L, 1); + } + STACK_END( L, 1); + lua_pop( KL, 1); // + STACK_END( KL, 0); + return 1; +} + +// in: linda_ud +int keepercall_clear( lua_State* L) +{ + STACK_GROW( L, 3); + STACK_CHECK( L, 0); + REGISTRY_GET( L, FIFOS_KEY); // ud fifos + lua_pushvalue( L, 1); // ud fifos ud + lua_pushnil( L); // ud fifos ud nil + lua_rawset( L, -3); // ud fifos + lua_pop( L, 1); // ud + STACK_END( L, 0); + return 0; +} + + +// in: linda_ud, key, ... +// out: true|false +int keepercall_send( lua_State* L) +{ + keeper_fifo* fifo; + int n = lua_gettop( L) - 2; + push_table( L, 1); // ud key ... fifos + // get the fifo associated to this key in this linda, create it if it doesn't exist + lua_pushvalue( L, 2); // ud key ... fifos key + lua_rawget( L, -2); // ud key ... fifos fifo + if( lua_isnil( L, -1)) + { + lua_pop( L, 1); // ud key ... fifos + fifo_new( L); // ud key ... fifos fifo + lua_pushvalue( L, 2); // ud key ... fifos fifo key + lua_pushvalue( L, -2); // ud key ... fifos fifo key fifo + lua_rawset( L, -4); // ud key ... 
fifos fifo + } + lua_remove( L, -2); // ud key ... fifo + fifo = (keeper_fifo*) lua_touserdata( L, -1); + if( fifo->limit >= 0 && fifo->count + n > fifo->limit) + { + lua_settop( L, 0); // + lua_pushboolean( L, 0); // false + } + else + { + fifo = prepare_fifo_access( L, -1); + lua_replace( L, 2); // ud fifo ... + fifo_push( L, fifo, n); // ud fifo + lua_settop( L, 0); // + lua_pushboolean( L, 1); // true + } + return 1; +} + +// in: linda_ud, key [, key]? +// out: (key, val) or nothing +int keepercall_receive( lua_State* L) +{ + int top = lua_gettop( L); + int i; + push_table( L, 1); // ud keys fifos + lua_replace( L, 1); // fifos keys + for( i = 2; i <= top; ++ i) + { + keeper_fifo* fifo; + lua_pushvalue( L, i); // fifos keys key[i] + lua_rawget( L, 1); // fifos keys fifo + fifo = prepare_fifo_access( L, -1); // fifos keys fifo + if( fifo != NULL && fifo->count > 0) + { + fifo_pop( L, fifo, 1); // fifos keys val + if( !lua_isnil( L, -1)) + { + lua_replace( L, 1); // val keys + lua_settop( L, i); // val keys key[i] + if( i != 2) + { + lua_replace( L, 2); // val key keys + lua_settop( L, 2); // val key + } + lua_insert( L, 1); // key, val + return 2; + } + } + lua_settop( L, top); // data keys + } + // nothing to receive + return 0; +} + +//in: linda_ud key mincount [maxcount] +int keepercall_receive_batched( lua_State* L) +{ + lua_Integer const min_count = lua_tointeger( L, 3); + if( min_count > 0) + { + keeper_fifo* fifo; + lua_Integer const max_count = luaL_optinteger( L, 4, min_count); + lua_settop( L, 2); // ud key + lua_insert( L, 1); // key ud + push_table( L, 2); // key ud fifos + lua_remove( L, 2); // key fifos + lua_pushvalue( L, 1); // key fifos key + lua_rawget( L, 2); // key fifos fifo + lua_remove( L, 2); // key fifo + fifo = prepare_fifo_access( L, 2); // key fifo + if( fifo != NULL && fifo->count >= min_count) + { + fifo_pop( L, fifo, __min( max_count, fifo->count)); // key ... + } + else + { + lua_settop( L, 0); + } + return lua_gettop( L); + } + else + { + return 0; + } +} + +// in: linda_ud key n +// out: true or nil +int keepercall_limit( lua_State* L) +{ + keeper_fifo* fifo; + lua_Integer limit = lua_tointeger( L, 3); + push_table( L, 1); // ud key n fifos + lua_replace( L, 1); // fifos key n + lua_pop( L, 1); // fifos key + lua_pushvalue( L, -1); // fifos key key + lua_rawget( L, -3); // fifos key fifo|nil + fifo = (keeper_fifo*) lua_touserdata( L, -1); + if( fifo == NULL) + { // fifos key nil + lua_pop( L, 1); // fifos key + fifo_new( L); // fifos key fifo + fifo = (keeper_fifo*) lua_touserdata( L, -1); + lua_rawset( L, -3); // fifos + } + // remove any clutter on the stack + lua_settop( L, 0); + // return true if we decide that blocked threads waiting to write on that key should be awakened + // this is the case if we detect the key was full but it is no longer the case + if( + ((fifo->limit >= 0) && (fifo->count >= fifo->limit)) // the key was full if limited and count exceeded the previous limit + && ((limit < 0) || (fifo->count < limit)) // the key is not full if unlimited or count is lower than the new limit + ) + { + lua_pushboolean( L, 1); + } + // set the new limit + fifo->limit = limit; + // return 0 or 1 value + return lua_gettop( L); +} + +//in: linda_ud key [[val] ...] 
+//out: true or nil +int keepercall_set( lua_State* L) +{ + bool_t should_wake_writers = FALSE; + STACK_GROW( L, 6); + + // retrieve fifos associated with the linda + push_table( L, 1); // ud key [val [, ...]] fifos + lua_replace( L, 1); // fifos key [val [, ...]] + + // make sure we have a value on the stack + if( lua_gettop( L) == 2) // fifos key + { + keeper_fifo* fifo; + lua_pushvalue( L, -1); // fifos key key + lua_rawget( L, 1); // fifos key fifo|nil + // empty the fifo for the specified key: replace uservalue with a virgin table, reset counters, but leave limit unchanged! + fifo = (keeper_fifo*) lua_touserdata( L, -1); + if( fifo != NULL) // might be NULL if we set a nonexistent key to nil + { // fifos key fifo + if( fifo->limit < 0) // fifo limit value is the default (unlimited): we can totally remove it + { + lua_pop( L, 1); // fifos key + lua_pushnil( L); // fifos key nil + lua_rawset( L, -3); // fifos + } + else + { + // we create room if the fifo was full but it is no longer the case + should_wake_writers = (fifo->limit > 0) && (fifo->count >= fifo->limit); + lua_remove( L, -2); // fifos fifo + lua_newtable( L); // fifos fifo {} + lua_setiuservalue( L, -2, CONTENTS_TABLE); // fifos fifo + fifo->first = 1; + fifo->count = 0; + } + } + } + else // set/replace contents stored at the specified key? + { + lua_Integer count = lua_gettop( L) - 2; // number of items we want to store + keeper_fifo* fifo; // fifos key [val [, ...]] + lua_pushvalue( L, 2); // fifos key [val [, ...]] key + lua_rawget( L, 1); // fifos key [val [, ...]] fifo|nil + fifo = (keeper_fifo*) lua_touserdata( L, -1); + if( fifo == NULL) // can be NULL if we store a value at a new key + { // fifos key [val [, ...]] nil + // no need to wake writers in that case, because a writer can't wait on an inexistent key + lua_pop( L, 1); // fifos key [val [, ...]] + fifo_new( L); // fifos key [val [, ...]] fifo + lua_pushvalue( L, 2); // fifos key [val [, ...]] fifo key + lua_pushvalue( L, -2); // fifos key [val [, ...]] fifo key fifo + lua_rawset( L, 1); // fifos key [val [, ...]] fifo + } + else // the fifo exists, we just want to update its contents + { // fifos key [val [, ...]] fifo + // we create room if the fifo was full but it is no longer the case + should_wake_writers = (fifo->limit > 0) && (fifo->count >= fifo->limit) && (count < fifo->limit); + // empty the fifo for the specified key: replace uservalue with a virgin table, reset counters, but leave limit unchanged! + lua_newtable( L); // fifos key [val [, ...]] fifo {} + lua_setiuservalue( L, -2, CONTENTS_TABLE); // fifos key [val [, ...]] fifo + fifo->first = 1; + fifo->count = 0; + } + fifo = prepare_fifo_access( L, -1); + // move the fifo below the values we want to store + lua_insert( L, 3); // fifos key fifo [val [, ...]] + fifo_push( L, fifo, count); // fifos key fifo + } + return should_wake_writers ? (lua_pushboolean( L, 1), 1) : 0; +} + +// in: linda_ud key [count] +// out: at most values +int keepercall_get( lua_State* L) +{ + keeper_fifo* fifo; + lua_Integer count = 1; + if( lua_gettop( L) == 3) // ud key count + { + count = lua_tointeger( L, 3); + lua_pop( L, 1); // ud key + } + push_table( L, 1); // ud key fifos + lua_replace( L, 1); // fifos key + lua_rawget( L, 1); // fifos fifo + fifo = prepare_fifo_access( L, -1); // fifos fifo + if( fifo != NULL && fifo->count > 0) + { + lua_remove( L, 1); // fifo + count = __min( count, fifo->count); + // read value off the fifo + fifo_peek( L, fifo, count); // fifo ... 
+ return (int) count; + } + // no fifo was ever registered for this key, or it is empty + return 0; +} + +// in: linda_ud [, key [, ...]] +int keepercall_count( lua_State* L) +{ + push_table( L, 1); // ud keys fifos + switch( lua_gettop( L)) + { + // no key is specified: return a table giving the count of all known keys + case 2: // ud fifos + lua_newtable( L); // ud fifos out + lua_replace( L, 1); // out fifos + lua_pushnil( L); // out fifos nil + while( lua_next( L, 2)) // out fifos key fifo + { + keeper_fifo* fifo = prepare_fifo_access( L, -1); // out fifos key fifo + lua_pop( L, 1); // out fifos key + lua_pushvalue( L, -1); // out fifos key key + lua_pushinteger( L, fifo->count); // out fifos key key count + lua_rawset( L, -5); // out fifos key + } + lua_pop( L, 1); // out + break; + + // 1 key is specified: return its count + case 3: // ud key fifos + { + keeper_fifo* fifo; + lua_replace( L, 1); // fifos key + lua_rawget( L, -2); // fifos fifo|nil + if( lua_isnil( L, -1)) // the key is unknown + { // fifos nil + lua_remove( L, -2); // nil + } + else // the key is known + { // fifos fifo + fifo = prepare_fifo_access( L, -1); // fifos fifo + lua_pushinteger( L, fifo->count); // fifos fifo count + lua_replace( L, -3); // count fifo + lua_pop( L, 1); // count + } + } + break; + + // a variable number of keys is specified: return a table of their counts + default: // ud keys fifos + lua_newtable( L); // ud keys fifos out + lua_replace( L, 1); // out keys fifos + // shifts all keys up in the stack. potentially slow if there are a lot of them, but then it should be bearable + lua_insert( L, 2); // out fifos keys + while( lua_gettop( L) > 2) + { + keeper_fifo* fifo; + lua_pushvalue( L, -1); // out fifos keys key + lua_rawget( L, 2); // out fifos keys fifo|nil + fifo = prepare_fifo_access( L, -1); // out fifos keys fifo|nil + lua_pop( L, 1); // out fifos keys + if( fifo != NULL) // the key is known + { + lua_pushinteger( L, fifo->count); // out fifos keys count + lua_rawset( L, 1); // out fifos keys + } + else // the key is unknown + { + lua_pop( L, 1); // out fifos keys + } + } + lua_pop( L, 1); // out + } + ASSERT_L( lua_gettop( L) == 1); + return 1; +} + +//################################################################################### +// Keeper API, accessed from linda methods +//################################################################################### + +/*---=== Keeper states ===--- +*/ + +/* +* Pool of keeper states +* +* Access to keeper states is locked (only one OS thread at a time) so the +* bigger the pool, the less chances of unnecessary waits. Lindas map to the +* keepers randomly, by a hash. +*/ + +// called as __gc for the keepers array userdata +void close_keepers( Universe* U) +{ + if( U->keepers != NULL) + { + int i; + int nbKeepers = U->keepers->nb_keepers; + // NOTE: imagine some keeper state N+1 currently holds a linda that uses another keeper N, and a _gc that will make use of it + // when keeper N+1 is closed, object is GCed, linda operation is called, which attempts to acquire keeper N, whose Lua state no longer exists + // in that case, the linda operation should do nothing. 
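For context, this is roughly how a linda operation is expected to drive the keeper API declared in this file, including the NULL check that the shutdown note above calls for. linda_demo_send is an illustrative name only (the real callers live in linda.c), and the sketch assumes keeper_api_t accepts the keepercall_* functions defined above:

    static int linda_demo_send( Universe* U, lua_State* L, void* linda, ptrdiff_t magic)
    {
        int pushed;
        Keeper* const K = keeper_acquire( U->keepers, magic);
        if( K == NULL)
        {
            return 0; // keepers already torn down (main state shutdown): do nothing
        }
        // run keepercall_send inside the keeper state, forwarding the caller's
        // arguments starting at stack index 2 (the linda pointer is passed separately)
        pushed = keeper_call( U, K->L, keepercall_send, L, linda, 2);
        keeper_release( K);
        return (pushed < 0) ? 0 : pushed; // -1 means the transfer failed
    }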
which means that these operations must check for keeper acquisition success + // which is early-outed with a U->keepers->nbKeepers null-check + U->keepers->nb_keepers = 0; + for( i = 0; i < nbKeepers; ++ i) + { + lua_State* K = U->keepers->keeper_array[i].L; + U->keepers->keeper_array[i].L = NULL; + if( K != NULL) + { + lua_close( K); + } + else + { + // detected partial init: destroy only the mutexes that got initialized properly + nbKeepers = i; + } + } + for( i = 0; i < nbKeepers; ++ i) + { + MUTEX_FREE( &U->keepers->keeper_array[i].keeper_cs); + } + // free the keeper bookkeeping structure + { + AllocatorDefinition* const allocD = &U->internal_allocator; + (void) allocD->allocF( allocD->allocUD, U->keepers, sizeof( Keepers) + (nbKeepers - 1) * sizeof( Keeper), 0); + U->keepers = NULL; + } + } +} + +/* + * Initialize keeper states + * + * If there is a problem, returns NULL and pushes the error message on the stack + * else returns the keepers bookkeeping structure. + * + * Note: Any problems would be design flaws; the created Lua state is left + * unclosed, because it does not really matter. In production code, this + * function never fails. + * settings table is at position 1 on the stack + */ +void init_keepers( Universe* U, lua_State* L) +{ + int i; + int nb_keepers; + + STACK_CHECK( L, 0); // L K + lua_getfield( L, 1, "nb_keepers"); // nb_keepers + nb_keepers = (int) lua_tointeger( L, -1); + lua_pop( L, 1); // + if( nb_keepers < 1) + { + (void) luaL_error( L, "Bad number of keepers (%d)", nb_keepers); + } + + // Keepers contains an array of 1 s_Keeper, adjust for the actual number of keeper states + { + size_t const bytes = sizeof( Keepers) + (nb_keepers - 1) * sizeof( Keeper); + { + AllocatorDefinition* const allocD = &U->internal_allocator; + U->keepers = (Keepers*) allocD->allocF( allocD->allocUD, NULL, 0, bytes); + } + if( U->keepers == NULL) + { + (void) luaL_error( L, "init_keepers() failed while creating keeper array; out of memory"); + return; + } + memset( U->keepers, 0, bytes); + U->keepers->nb_keepers = nb_keepers; + } + for( i = 0; i < nb_keepers; ++ i) // keepersUD + { + // note that we will leak K if we raise an error later + lua_State* K = create_state( U, L); + if( K == NULL) + { + (void) luaL_error( L, "init_keepers() failed while creating keeper states; out of memory"); + return; + } + + U->keepers->keeper_array[i].L = K; + // we can trigger a GC from inside keeper_call(), where a keeper is acquired + // from there, GC can collect a linda, which would acquire the keeper again, and deadlock the thread. + // therefore, we need a recursive mutex. + MUTEX_RECURSIVE_INIT( &U->keepers->keeper_array[i].keeper_cs); + + STACK_CHECK( K, 0); + + // copy the universe pointer in the keeper itself + universe_store( K, U); + STACK_MID( K, 0); + + // make sure 'package' is initialized in keeper states, so that we have require() + // this because this is needed when transferring deep userdata object + luaL_requiref( K, "package", luaopen_package, 1); // package + lua_pop( K, 1); // + STACK_MID( K, 0); + serialize_require( DEBUGSPEW_PARAM_COMMA( U) K); + STACK_MID( K, 0); + + // copy package.path and package.cpath from the source state + lua_getglobal( L, "package"); // "..." 
keepersUD package + if( !lua_isnil( L, -1)) + { + // when copying with mode eLM_ToKeeper, error message is pushed at the top of the stack, not raised immediately + if( luaG_inter_copy_package( U, L, K, -1, eLM_ToKeeper)) + { + // if something went wrong, the error message is at the top of the stack + lua_remove( L, -2); // error_msg + (void) lua_error( L); + return; + } + } + lua_pop( L, 1); // + STACK_MID( L, 0); + + // attempt to call on_state_create(), if we have one and it is a C function + // (only support a C function because we can't transfer executable Lua code in keepers) + // will raise an error in L in case of problem + call_on_state_create( U, K, L, eLM_ToKeeper); + + // to see VM name in Decoda debugger + lua_pushfstring( K, "Keeper #%d", i + 1); // "Keeper #n" + lua_setglobal( K, "decoda_name"); // + + // create the fifos table in the keeper state + REGISTRY_SET( K, FIFOS_KEY, lua_newtable( K)); + STACK_END( K, 0); + } + STACK_END( L, 0); +} + +// should be called only when inside a keeper_acquire/keeper_release pair (see linda_protected_call) +Keeper* which_keeper(Keepers* keepers_, ptrdiff_t magic_) +{ + int const nbKeepers = keepers_->nb_keepers; + unsigned int i = (unsigned int)((magic_ >> KEEPER_MAGIC_SHIFT) % nbKeepers); + return &keepers_->keeper_array[i]; +} + +Keeper* keeper_acquire( Keepers* keepers_, ptrdiff_t magic_) +{ + int const nbKeepers = keepers_->nb_keepers; + // can be 0 if this happens during main state shutdown (lanes is being GC'ed -> no keepers) + if( nbKeepers == 0) + { + return NULL; + } + else + { + /* + * Any hashing will do that maps pointers to 0..GNbKeepers-1 + * consistently. + * + * Pointers are often aligned by 8 or so - ignore the low order bits + * have to cast to unsigned long to avoid compilation warnings about loss of data when converting pointer-to-integer + */ + unsigned int i = (unsigned int)((magic_ >> KEEPER_MAGIC_SHIFT) % nbKeepers); + Keeper* K = &keepers_->keeper_array[i]; + + MUTEX_LOCK( &K->keeper_cs); + //++ K->count; + return K; + } +} + +void keeper_release( Keeper* K) +{ + //-- K->count; + if( K) MUTEX_UNLOCK( &K->keeper_cs); +} + +void keeper_toggle_nil_sentinels( lua_State* L, int val_i_, LookupMode const mode_) +{ + int i, n = lua_gettop( L); + for( i = val_i_; i <= n; ++ i) + { + if( mode_ == eLM_ToKeeper) + { + if( lua_isnil( L, i)) + { + push_unique_key( L, NIL_SENTINEL); + lua_replace( L, i); + } + } + else + { + if( equal_unique_key( L, i, NIL_SENTINEL)) + { + lua_pushnil( L); + lua_replace( L, i); + } + } + } +} + +/* +* Call a function ('func_name') in the keeper state, and pass on the returned +* values to 'L'. +* +* 'linda': deep Linda pointer (used only as a unique table key, first parameter) +* 'starting_index': first of the rest of parameters (none if 0) +* +* Returns: number of return values (pushed to 'L') or -1 in case of error +*/ +int keeper_call( Universe* U, lua_State* K, keeper_api_t func_, lua_State* L, void* linda, uint_t starting_index) +{ + int const args = starting_index ? 
(lua_gettop( L) - starting_index + 1) : 0; + int const Ktos = lua_gettop( K); + int retvals = -1; + + STACK_GROW( K, 2); + + PUSH_KEEPER_FUNC( K, func_); + + lua_pushlightuserdata( K, linda); + + if( (args == 0) || luaG_inter_copy( U, L, K, args, eLM_ToKeeper) == 0) // L->K + { + lua_call( K, 1 + args, LUA_MULTRET); + + retvals = lua_gettop( K) - Ktos; + // note that this can raise a luaL_error while the keeper state (and its mutex) is acquired + // this may interrupt a lane, causing the destruction of the underlying OS thread + // after this, another lane making use of this keeper can get an error code from the mutex-locking function + // when attempting to grab the mutex again (WINVER <= 0x400 does this, but locks just fine, I don't know about pthread) + if( (retvals > 0) && luaG_inter_move( U, K, L, retvals, eLM_FromKeeper) != 0) // K->L + { + retvals = -1; + } + } + // whatever happens, restore the stack to where it was at the origin + lua_settop( K, Ktos); + return retvals; +} diff --git a/src/lanes.c b/src/lanes.c deleted file mode 100644 index deee90c..0000000 --- a/src/lanes.c +++ /dev/null @@ -1,2142 +0,0 @@ -/* - * LANES.C Copyright (c) 2007-08, Asko Kauppi - * Copyright (C) 2009-19, Benoit Germain - * - * Multithreading in Lua. - * - * History: - * See CHANGES - * - * Platforms (tested internally): - * OS X (10.5.7 PowerPC/Intel) - * Linux x86 (Ubuntu 8.04) - * Win32 (Windows XP Home SP2, Visual C++ 2005/2008 Express) - * - * Platforms (tested externally): - * Win32 (MSYS) by Ross Berteig. - * - * Platforms (testers appreciated): - * Win64 - should work??? - * Linux x64 - should work - * FreeBSD - should work - * QNX - porting shouldn't be hard - * Sun Solaris - porting shouldn't be hard - * - * References: - * "Porting multithreaded applications from Win32 to Mac OS X": - * - * - * Pthreads: - * - * - * MSDN: - * - * - * - * Defines: - * -DLINUX_SCHED_RR: all threads are lifted to SCHED_RR category, to - * allow negative priorities [-3,-1] be used. Even without this, - * using priorities will require 'sudo' privileges on Linux. - * - * -DUSE_PTHREAD_TIMEDJOIN: use 'pthread_timedjoin_np()' for waiting - * for threads with a timeout. This changes the thread cleanup - * mechanism slightly (cleans up at the join, not once the thread - * has finished). May or may not be a good idea to use it. - * Available only in selected operating systems (Linux). - * - * Bugs: - * - * To-do: - * - * Make waiting threads cancellable. - * ... - */ - -/* -=============================================================================== - -Copyright (C) 2007-10 Asko Kauppi - 2011-19 Benoit Germain - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - -=============================================================================== -*/ - -#include -#include -#include -#include -#include - -#include "lanes.h" -#include "threading.h" -#include "compat.h" -#include "tools.h" -#include "state.h" -#include "universe.h" -#include "keeper.h" -#include "lanes_private.h" - -#if !(defined( PLATFORM_XBOX) || defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC)) -# include -#endif - -/* geteuid() */ -#ifdef PLATFORM_LINUX -# include -# include -#endif - -/* Do you want full call stacks, or just the line where the error happened? -* -* TBD: The full stack feature does not seem to work (try 'make error'). -*/ -#define ERROR_FULL_STACK 1 // must be either 0 or 1 as we do some index arithmetics with it! - -// intern the debug name in the specified lua state so that the pointer remains valid when the lane's state is closed -static void securize_debug_threadname( lua_State* L, Lane* s) -{ - STACK_CHECK( L, 0); - STACK_GROW( L, 3); - lua_getiuservalue( L, 1, 1); - lua_newtable( L); - // Lua 5.1 can't do 's->debug_name = lua_pushstring( L, s->debug_name);' - lua_pushstring( L, s->debug_name); - s->debug_name = lua_tostring( L, -1); - lua_rawset( L, -3); - lua_pop( L, 1); - STACK_END( L, 0); -} - -#if ERROR_FULL_STACK -static int lane_error( lua_State* L); -// crc64/we of string "STACKTRACE_REGKEY" generated at http://www.nitrxgen.net/hashgen/ -static DECLARE_CONST_UNIQUE_KEY( STACKTRACE_REGKEY, 0x534af7d3226a429f); -#endif // ERROR_FULL_STACK - -/* -* registry[FINALIZER_REG_KEY] is either nil (no finalizers) or a table -* of functions that Lanes will call after the executing 'pcall' has ended. -* -* We're NOT using the GC system for finalizer mainly because providing the -* error (and maybe stack trace) parameters to the finalizer functions would -* anyways complicate that approach. -*/ -// crc64/we of string "FINALIZER_REGKEY" generated at http://www.nitrxgen.net/hashgen/ -static DECLARE_CONST_UNIQUE_KEY( FINALIZER_REGKEY, 0x188fccb8bf348e09); - -struct s_Linda; - -/* -* Push a table stored in registry onto Lua stack. -* -* If there is no existing table, create one if 'create' is TRUE. -* -* Returns: TRUE if a table was pushed -* FALSE if no table found, not created, and nothing pushed -*/ -static bool_t push_registry_table( lua_State* L, UniqueKey key, bool_t create) -{ - STACK_GROW( L, 3); - STACK_CHECK( L, 0); - - REGISTRY_GET( L, key); // ? - if( lua_isnil( L, -1)) // nil? - { - lua_pop( L, 1); // - - if( !create) - { - return FALSE; - } - - lua_newtable( L); // t - REGISTRY_SET( L, key, lua_pushvalue( L, -2)); - } - STACK_END( L, 1); - return TRUE; // table pushed -} - -#if HAVE_LANE_TRACKING() - -// The chain is ended by '(Lane*)(-1)', not NULL: -// 'tracking_first -> ... -> ... -> (-1)' -#define TRACKING_END ((Lane *)(-1)) - -/* - * Add the lane to tracking chain; the ones still running at the end of the - * whole process will be cancelled. 
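A side note on the TRACKING_END choice above: terminating the chain with (Lane*)(-1) rather than NULL lets the code distinguish "this lane is not linked at all" (tracking_next == NULL) from "this lane is the last link of the chain" (tracking_next == TRACKING_END), which is exactly the test tracking_remove relies on. A hypothetical helper (lane_is_tracked is not part of the sources, and assumes lane tracking is compiled in) makes the distinction explicit:

    static bool_t lane_is_tracked( Lane const* s)
    {
        // never linked, or already removed: NULL; linked, even as the last element: TRACKING_END
        return (s->tracking_next != NULL) ? TRUE : FALSE;
    }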
- */ -static void tracking_add( Lane* s) -{ - - MUTEX_LOCK( &s->U->tracking_cs); - { - assert( s->tracking_next == NULL); - - s->tracking_next = s->U->tracking_first; - s->U->tracking_first = s; - } - MUTEX_UNLOCK( &s->U->tracking_cs); -} - -/* - * A free-running lane has ended; remove it from tracking chain - */ -static bool_t tracking_remove( Lane* s) -{ - bool_t found = FALSE; - MUTEX_LOCK( &s->U->tracking_cs); - { - // Make sure (within the MUTEX) that we actually are in the chain - // still (at process exit they will remove us from chain and then - // cancel/kill). - // - if( s->tracking_next != NULL) - { - Lane** ref = (Lane**) &s->U->tracking_first; - - while( *ref != TRACKING_END) - { - if( *ref == s) - { - *ref = s->tracking_next; - s->tracking_next = NULL; - found = TRUE; - break; - } - ref = (Lane**) &((*ref)->tracking_next); - } - assert( found); - } - } - MUTEX_UNLOCK( &s->U->tracking_cs); - return found; -} - -#endif // HAVE_LANE_TRACKING() - -//--- -// low-level cleanup - -static void lane_cleanup( Lane* s) -{ - // Clean up after a (finished) thread - // -#if THREADWAIT_METHOD == THREADWAIT_CONDVAR - SIGNAL_FREE( &s->done_signal); - MUTEX_FREE( &s->done_lock); -#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR - -#if HAVE_LANE_TRACKING() - if( s->U->tracking_first != NULL) - { - // Lane was cleaned up, no need to handle at process termination - tracking_remove( s); - } -#endif // HAVE_LANE_TRACKING() - - { - AllocatorDefinition* const allocD = &s->U->internal_allocator; - (void) allocD->allocF(allocD->allocUD, s, sizeof(Lane), 0); - } -} - -/* - * ############################################################################################### - * ########################################## Finalizer ########################################## - * ############################################################################################### - */ - -//--- -// void= finalizer( finalizer_func ) -// -// finalizer_func( [err, stack_tbl] ) -// -// Add a function that will be called when exiting the lane, either via -// normal return or an error. -// -LUAG_FUNC( set_finalizer) -{ - luaL_argcheck( L, lua_isfunction( L, 1), 1, "finalizer should be a function"); - luaL_argcheck( L, lua_gettop( L) == 1, 1, "too many arguments"); - // Get the current finalizer table (if any) - push_registry_table( L, FINALIZER_REGKEY, TRUE /*do create if none*/); // finalizer {finalisers} - STACK_GROW( L, 2); - lua_pushinteger( L, lua_rawlen( L, -1) + 1); // finalizer {finalisers} idx - lua_pushvalue( L, 1); // finalizer {finalisers} idx finalizer - lua_rawset( L, -3); // finalizer {finalisers} - lua_pop( L, 2); // - return 0; -} - - -//--- -// Run finalizers - if any - with the given parameters -// -// If 'rc' is nonzero, error message and stack index (the latter only when ERROR_FULL_STACK == 1) are available as: -// [-1]: stack trace (table) -// [-2]: error message (any type) -// -// Returns: -// 0 if finalizers were run without error (or there were none) -// LUA_ERRxxx return code if any of the finalizers failed -// -// TBD: should we add stack trace on failing finalizer, wouldn't be hard.. -// -static void push_stack_trace( lua_State* L, int rc_, int stk_base_); - -static int run_finalizers( lua_State* L, int lua_rc) -{ - int finalizers_index; - int n; - int err_handler_index = 0; - int rc = LUA_OK; // ... - if( !push_registry_table( L, FINALIZER_REGKEY, FALSE)) // ... finalizers? 
- { - return 0; // no finalizers - } - - STACK_GROW( L, 5); - - finalizers_index = lua_gettop( L); - -#if ERROR_FULL_STACK - lua_pushcfunction( L, lane_error); // ... finalizers lane_error - err_handler_index = lua_gettop( L); -#endif // ERROR_FULL_STACK - - for( n = (int) lua_rawlen( L, finalizers_index); n > 0; -- n) - { - int args = 0; - lua_pushinteger( L, n); // ... finalizers lane_error n - lua_rawget( L, finalizers_index); // ... finalizers lane_error finalizer - ASSERT_L( lua_isfunction( L, -1)); - if( lua_rc != LUA_OK) // we have an error message and an optional stack trace at the bottom of the stack - { - ASSERT_L( finalizers_index == 2 || finalizers_index == 3); - //char const* err_msg = lua_tostring( L, 1); - lua_pushvalue( L, 1); // ... finalizers lane_error finalizer err_msg - // note we don't always have a stack trace for example when CANCEL_ERROR, or when we got an error that doesn't call our handler, such as LUA_ERRMEM - if( finalizers_index == 3) - { - lua_pushvalue( L, 2); // ... finalizers lane_error finalizer err_msg stack_trace - } - args = finalizers_index - 1; - } - - // if no error from the main body, finalizer doesn't receive any argument, else it gets the error message and optional stack trace - rc = lua_pcall( L, args, 0, err_handler_index); // ... finalizers lane_error err_msg2? - if( rc != LUA_OK) - { - push_stack_trace( L, rc, lua_gettop( L)); - // If one finalizer fails, don't run the others. Return this - // as the 'real' error, replacing what we could have had (or not) - // from the actual code. - break; - } - // no error, proceed to next finalizer // ... finalizers lane_error - } - - if( rc != LUA_OK) - { - // ERROR_FULL_STACK accounts for the presence of lane_error on the stack - int nb_err_slots = lua_gettop( L) - finalizers_index - ERROR_FULL_STACK; - // a finalizer generated an error, this is what we leave of the stack - for( n = nb_err_slots; n > 0; -- n) - { - lua_replace( L, n); - } - // leave on the stack only the error and optional stack trace produced by the error in the finalizer - lua_settop( L, nb_err_slots); - } - else // no error from the finalizers, make sure only the original return values from the lane body remain on the stack - { - lua_settop( L, finalizers_index - 1); - } - - return rc; -} - -/* - * ############################################################################################### - * ########################################### Threads ########################################### - * ############################################################################################### - */ - -// -// Protects modifying the selfdestruct chain - -#define SELFDESTRUCT_END ((Lane*)(-1)) -// -// The chain is ended by '(Lane*)(-1)', not NULL: -// 'selfdestruct_first -> ... -> ... -> (-1)' - -/* - * Add the lane to selfdestruct chain; the ones still running at the end of the - * whole process will be cancelled. - */ -static void selfdestruct_add( Lane* s) -{ - MUTEX_LOCK( &s->U->selfdestruct_cs); - assert( s->selfdestruct_next == NULL); - - s->selfdestruct_next = s->U->selfdestruct_first; - s->U->selfdestruct_first= s; - MUTEX_UNLOCK( &s->U->selfdestruct_cs); -} - -/* - * A free-running lane has ended; remove it from selfdestruct chain - */ -static bool_t selfdestruct_remove( Lane* s) -{ - bool_t found = FALSE; - MUTEX_LOCK( &s->U->selfdestruct_cs); - { - // Make sure (within the MUTEX) that we actually are in the chain - // still (at process exit they will remove us from chain and then - // cancel/kill). 
- // - if( s->selfdestruct_next != NULL) - { - Lane** ref = (Lane**) &s->U->selfdestruct_first; - - while( *ref != SELFDESTRUCT_END ) - { - if( *ref == s) - { - *ref = s->selfdestruct_next; - s->selfdestruct_next = NULL; - // the terminal shutdown should wait until the lane is done with its lua_close() - ++ s->U->selfdestructing_count; - found = TRUE; - break; - } - ref = (Lane**) &((*ref)->selfdestruct_next); - } - assert( found); - } - } - MUTEX_UNLOCK( &s->U->selfdestruct_cs); - return found; -} - -/* -* Process end; cancel any still free-running threads -*/ -static int selfdestruct_gc( lua_State* L) -{ - Universe* U = (Universe*) lua_touserdata( L, 1); - - while( U->selfdestruct_first != SELFDESTRUCT_END) // true at most once! - { - // Signal _all_ still running threads to exit (including the timer thread) - // - MUTEX_LOCK( &U->selfdestruct_cs); - { - Lane* s = U->selfdestruct_first; - while( s != SELFDESTRUCT_END) - { - // attempt a regular unforced hard cancel with a small timeout - bool_t cancelled = THREAD_ISNULL( s->thread) || thread_cancel( L, s, CO_Hard, 0.0001, FALSE, 0.0); - // if we failed, and we know the thread is waiting on a linda - if( cancelled == FALSE && s->status == WAITING && s->waiting_on != NULL) - { - // signal the linda to wake up the thread so that it can react to the cancel query - // let us hope we never land here with a pointer on a linda that has been destroyed... - SIGNAL_T* waiting_on = s->waiting_on; - //s->waiting_on = NULL; // useful, or not? - SIGNAL_ALL( waiting_on); - } - s = s->selfdestruct_next; - } - } - MUTEX_UNLOCK( &U->selfdestruct_cs); - - // When noticing their cancel, the lanes will remove themselves from - // the selfdestruct chain. - - // TBD: Not sure if Windows (multi core) will require the timed approach, - // or single Yield. I don't have machine to test that (so leaving - // for timed approach). -- AKa 25-Oct-2008 - - // OS X 10.5 (Intel) needs more to avoid segfaults. - // - // "make test" is okay. 100's of "make require" are okay. - // - // Tested on MacBook Core Duo 2GHz and 10.5.5: - // -- AKa 25-Oct-2008 - // - { - lua_Number const shutdown_timeout = lua_tonumber( L, lua_upvalueindex( 1)); - double const t_until = now_secs() + shutdown_timeout; - - while( U->selfdestruct_first != SELFDESTRUCT_END) - { - YIELD(); // give threads time to act on their cancel - { - // count the number of cancelled thread that didn't have the time to act yet - int n = 0; - double t_now = 0.0; - MUTEX_LOCK( &U->selfdestruct_cs); - { - Lane* s = U->selfdestruct_first; - while( s != SELFDESTRUCT_END) - { - if( s->cancel_request == CANCEL_HARD) - ++ n; - s = s->selfdestruct_next; - } - } - MUTEX_UNLOCK( &U->selfdestruct_cs); - // if timeout elapsed, or we know all threads have acted, stop waiting - t_now = now_secs(); - if( n == 0 || (t_now >= t_until)) - { - DEBUGSPEW_CODE( fprintf( stderr, "%d uncancelled lane(s) remain after waiting %fs at process end.\n", n, shutdown_timeout - (t_until - t_now))); - break; - } - } - } - } - - // If some lanes are currently cleaning after themselves, wait until they are done. - // They are no longer listed in the selfdestruct chain, but they still have to lua_close(). 
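/*
 * A minimal usage sketch, assuming the public 'lanes.lua' frontend (not defined in this
 * file) and the cancel_test() global installed by lane_main() below; a well-behaved lane
 * cooperates with the shutdown logic of selfdestruct_gc() like this:
 *
 *   local lanes = require "lanes".configure()
 *   local h = lanes.gen( "*", function()
 *       while not cancel_test() do      -- poll for a pending cancel request
 *           -- do a bounded slice of work, or block in a linda call (woken on cancel)
 *       end
 *   end)()
 *   h:cancel()                          -- serviced by thread_cancel() and the code above
 */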
- while( U->selfdestructing_count > 0) - { - YIELD(); - } - - //--- - // Kill the still free running threads - // - if( U->selfdestruct_first != SELFDESTRUCT_END) - { - unsigned int n = 0; - // first thing we did was to raise the linda signals the threads were waiting on (if any) - // therefore, any well-behaved thread should be in CANCELLED state - // these are not running, and the state can be closed - MUTEX_LOCK( &U->selfdestruct_cs); - { - Lane* s = U->selfdestruct_first; - while( s != SELFDESTRUCT_END) - { - Lane* next_s = s->selfdestruct_next; - s->selfdestruct_next = NULL; // detach from selfdestruct chain - if( !THREAD_ISNULL( s->thread)) // can be NULL if previous 'soft' termination succeeded - { - THREAD_KILL( &s->thread); -#if THREADAPI == THREADAPI_PTHREAD - // pthread: make sure the thread is really stopped! - THREAD_WAIT( &s->thread, -1, &s->done_signal, &s->done_lock, &s->status); -#endif // THREADAPI == THREADAPI_PTHREAD - } - // NO lua_close() in this case because we don't know where execution of the state was interrupted - lane_cleanup( s); - s = next_s; - ++ n; - } - U->selfdestruct_first = SELFDESTRUCT_END; - } - MUTEX_UNLOCK( &U->selfdestruct_cs); - - DEBUGSPEW_CODE( fprintf( stderr, "Killed %d lane(s) at process end.\n", n)); - } - } - - // If some lanes are currently cleaning after themselves, wait until they are done. - // They are no longer listed in the selfdestruct chain, but they still have to lua_close(). - while( U->selfdestructing_count > 0) - { - YIELD(); - } - - // necessary so that calling free_deep_prelude doesn't crash because linda_id expects a linda lightuserdata at absolute slot 1 - lua_settop( L, 0); - // no need to mutex-protect this as all threads in the universe are gone at that point - if( U->timer_deep != NULL) // test ins case some early internal error prevented Lanes from creating the deep timer - { - -- U->timer_deep->refcount; // should be 0 now - free_deep_prelude( L, (DeepPrelude*) U->timer_deep); - U->timer_deep = NULL; - } - - close_keepers( U); - - // remove the protected allocator, if any - cleanup_allocator_function( U, L); - -#if HAVE_LANE_TRACKING() - MUTEX_FREE( &U->tracking_cs); -#endif // HAVE_LANE_TRACKING() - // Linked chains handling - MUTEX_FREE( &U->selfdestruct_cs); - MUTEX_FREE( &U->require_cs); - // Locks for 'tools.c' inc/dec counters - MUTEX_FREE( &U->deep_lock); - MUTEX_FREE( &U->mtid_lock); - // universe is no longer available (nor necessary) - // we need to do this in case some deep userdata objects were created before Lanes was initialized, - // as potentially they will be garbage collected after Lanes at application shutdown - universe_store( L, NULL); - return 0; -} - - -//--- -// = _single( [cores_uint=1] ) -// -// Limits the process to use only 'cores' CPU cores. To be used for performance -// testing on multicore devices. DEBUGGING ONLY! -// -LUAG_FUNC( set_singlethreaded) -{ - uint_t cores = luaG_optunsigned( L, 1, 1); - (void) cores; // prevent "unused" warning - -#ifdef PLATFORM_OSX -#ifdef _UTILBINDTHREADTOCPU - if( cores > 1) - { - return luaL_error( L, "Limiting to N>1 cores not possible"); - } - // requires 'chudInitialize()' - utilBindThreadToCPU(0); // # of CPU to run on (we cannot limit to 2..N CPUs?) 
- return 0; -#else - return luaL_error( L, "Not available: compile with _UTILBINDTHREADTOCPU"); -#endif -#else - return luaL_error( L, "not implemented"); -#endif -} - - -/* -* str= lane_error( error_val|str ) -* -* Called if there's an error in some lane; add call stack to error message -* just like 'lua.c' normally does. -* -* ".. will be called with the error message and its return value will be the -* message returned on the stack by lua_pcall." -* -* Note: Rather than modifying the error message itself, it would be better -* to provide the call stack (as string) completely separated. This would -* work great with non-string error values as well (current system does not). -* (This is NOT possible with the Lua 5.1 'lua_pcall()'; we could of course -* implement a Lanes-specific 'pcall' of our own that does this). TBD!!! :) -* --AKa 22-Jan-2009 -*/ -#if ERROR_FULL_STACK - -// crc64/we of string "EXTENDED_STACKTRACE_REGKEY" generated at http://www.nitrxgen.net/hashgen/ -static DECLARE_CONST_UNIQUE_KEY( EXTENDED_STACKTRACE_REGKEY, 0x2357c69a7c92c936); // used as registry key - -LUAG_FUNC( set_error_reporting) -{ - bool_t equal; - luaL_checktype( L, 1, LUA_TSTRING); - lua_pushliteral( L, "extended"); - equal = lua_rawequal( L, -1, 1); - lua_pop( L, 1); - if( equal) - { - goto done; - } - lua_pushliteral( L, "basic"); - equal = !lua_rawequal( L, -1, 1); - lua_pop( L, 1); - if( equal) - { - return luaL_error( L, "unsupported error reporting model"); - } -done: - REGISTRY_SET( L, EXTENDED_STACKTRACE_REGKEY, lua_pushboolean( L, equal)); - return 0; -} - -static int lane_error( lua_State* L) -{ - lua_Debug ar; - int n; - bool_t extended; - - // error message (any type) - STACK_CHECK_ABS( L, 1); // some_error - - // Don't do stack survey for cancelled lanes. - // - if( equal_unique_key( L, 1, CANCEL_ERROR)) - { - return 1; // just pass on - } - - STACK_GROW( L, 3); - REGISTRY_GET( L, EXTENDED_STACKTRACE_REGKEY); // some_error basic|extended - extended = lua_toboolean( L, -1); - lua_pop( L, 1); // some_error - - // Place stack trace at 'registry[lane_error]' for the 'lua_pcall()' - // caller to fetch. This bypasses the Lua 5.1 limitation of only one - // return value from error handler to 'lua_pcall()' caller. - - // It's adequate to push stack trace as a table. This gives the receiver - // of the stack best means to format it to their liking. Also, it allows - // us to add more stack info later, if needed. - // - // table of { "sourcefile.lua:", ... } - // - lua_newtable( L); // some_error {} - - // Best to start from level 1, but in some cases it might be a C function - // and we don't get '.currentline' for that. It's okay - just keep level - // and table index growing separate. --AKa 22-Jan-2009 - // - for( n = 1; lua_getstack( L, n, &ar); ++ n) - { - lua_getinfo( L, extended ? 
"Sln" : "Sl", &ar); - if( extended) - { - lua_newtable( L); // some_error {} {} - - lua_pushstring( L, ar.source); // some_error {} {} source - lua_setfield( L, -2, "source"); // some_error {} {} - - lua_pushinteger( L, ar.currentline); // some_error {} {} currentline - lua_setfield( L, -2, "currentline"); // some_error {} {} - - lua_pushstring( L, ar.name); // some_error {} {} name - lua_setfield( L, -2, "name"); // some_error {} {} - - lua_pushstring( L, ar.namewhat); // some_error {} {} namewhat - lua_setfield( L, -2, "namewhat"); // some_error {} {} - - lua_pushstring( L, ar.what); // some_error {} {} what - lua_setfield( L, -2, "what"); // some_error {} {} - } - else if( ar.currentline > 0) - { - lua_pushfstring( L, "%s:%d", ar.short_src, ar.currentline); // some_error {} "blah:blah" - } - else - { - lua_pushfstring( L, "%s:?", ar.short_src); // some_error {} "blah" - } - lua_rawseti( L, -2, (lua_Integer) n); // some_error {} - } - - REGISTRY_SET( L, STACKTRACE_REGKEY, lua_insert( L, -2)); // some_error - - STACK_END( L, 1); - return 1; // the untouched error value -} -#endif // ERROR_FULL_STACK - -static void push_stack_trace( lua_State* L, int rc_, int stk_base_) -{ - // Lua 5.1 error handler is limited to one return value; it stored the stack trace in the registry - switch( rc_) - { - case LUA_OK: // no error, body return values are on the stack - break; - - case LUA_ERRRUN: // cancellation or a runtime error -#if ERROR_FULL_STACK // when ERROR_FULL_STACK, we installed a handler - { - STACK_CHECK( L, 0); - // fetch the call stack table from the registry where the handler stored it - STACK_GROW( L, 1); - // yields nil if no stack was generated (in case of cancellation for example) - REGISTRY_GET( L, STACKTRACE_REGKEY); // err trace|nil - STACK_END( L, 1); - - // For cancellation the error message is CANCEL_ERROR, and a stack trace isn't placed - // For other errors, the message can be whatever was thrown, and we should have a stack trace table - ASSERT_L( lua_type( L, 1 + stk_base_) == (equal_unique_key( L, stk_base_, CANCEL_ERROR) ? LUA_TNIL : LUA_TTABLE)); - // Just leaving the stack trace table on the stack is enough to get it through to the master. - break; - } -#endif // fall through if not ERROR_FULL_STACK - - case LUA_ERRMEM: // memory allocation error (handler not called) - case LUA_ERRERR: // error while running the error handler (if any, for example an out-of-memory condition) - default: - // we should have a single value which is either a string (the error message) or CANCEL_ERROR - ASSERT_L( (lua_gettop( L) == stk_base_) && ((lua_type( L, stk_base_) == LUA_TSTRING) || equal_unique_key( L, stk_base_, CANCEL_ERROR))); - break; - } -} - -LUAG_FUNC( set_debug_threadname) -{ - DECLARE_CONST_UNIQUE_KEY( hidden_regkey, LG_set_debug_threadname); - // C s_lane structure is a light userdata upvalue - Lane* s = (Lane*) lua_touserdata( L, lua_upvalueindex( 1)); - luaL_checktype( L, -1, LUA_TSTRING); // "name" - lua_settop( L, 1); - STACK_CHECK_ABS( L, 1); - // store a hidden reference in the registry to make sure the string is kept around even if a lane decides to manually change the "decoda_name" global... 
- REGISTRY_SET( L, hidden_regkey, lua_pushvalue( L, -2)); - STACK_MID( L, 1); - s->debug_name = lua_tostring( L, -1); - // keep a direct pointer on the string - THREAD_SETNAME( s->debug_name); - // to see VM name in Decoda debugger Virtual Machine window - lua_setglobal( L, "decoda_name"); // - STACK_END( L, 0); - return 0; -} - -LUAG_FUNC( get_debug_threadname) -{ - Lane* const s = lua_toLane( L, 1); - luaL_argcheck( L, lua_gettop( L) == 1, 2, "too many arguments"); - lua_pushstring( L, s->debug_name); - return 1; -} - -LUAG_FUNC( set_thread_priority) -{ - int const prio = (int) luaL_checkinteger( L, 1); - // public Lanes API accepts a generic range -3/+3 - // that will be remapped into the platform-specific scheduler priority scheme - // On some platforms, -3 is equivalent to -2 and +3 to +2 - if( prio < THREAD_PRIO_MIN || prio > THREAD_PRIO_MAX) - { - return luaL_error( L, "priority out of range: %d..+%d (%d)", THREAD_PRIO_MIN, THREAD_PRIO_MAX, prio); - } - THREAD_SET_PRIORITY( prio); - return 0; -} - -LUAG_FUNC( set_thread_affinity) -{ - lua_Integer affinity = luaL_checkinteger( L, 1); - if( affinity <= 0) - { - return luaL_error( L, "invalid affinity (%d)", affinity); - } - THREAD_SET_AFFINITY( (unsigned int) affinity); - return 0; -} - -#if USE_DEBUG_SPEW() -// can't use direct LUA_x errcode indexing because the sequence is not the same between Lua 5.1 and 5.2 :-( -// LUA_ERRERR doesn't have the same value -struct errcode_name -{ - int code; - char const* name; -}; - -static struct errcode_name s_errcodes[] = -{ - { LUA_OK, "LUA_OK"}, - { LUA_YIELD, "LUA_YIELD"}, - { LUA_ERRRUN, "LUA_ERRRUN"}, - { LUA_ERRSYNTAX, "LUA_ERRSYNTAX"}, - { LUA_ERRMEM, "LUA_ERRMEM"}, - { LUA_ERRGCMM, "LUA_ERRGCMM"}, - { LUA_ERRERR, "LUA_ERRERR"}, -}; -static char const* get_errcode_name( int _code) -{ - int i; - for( i = 0; i < 7; ++ i) - { - if( s_errcodes[i].code == _code) - { - return s_errcodes[i].name; - } - } - return ""; -} -#endif // USE_DEBUG_SPEW() - -#if THREADWAIT_METHOD == THREADWAIT_CONDVAR // implies THREADAPI == THREADAPI_PTHREAD -static void thread_cleanup_handler( void* opaque) -{ - Lane* s= (Lane*) opaque; - MUTEX_LOCK( &s->done_lock); - s->status = CANCELLED; - SIGNAL_ONE( &s->done_signal); // wake up master (while 's->done_lock' is on) - MUTEX_UNLOCK( &s->done_lock); -} -#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR - -static THREAD_RETURN_T THREAD_CALLCONV lane_main( void* vs) -{ - Lane* s = (Lane*) vs; - int rc, rc2; - lua_State* L = s->L; - // Called with the lane function and arguments on the stack - int const nargs = lua_gettop( L) - 1; - DEBUGSPEW_CODE( Universe* U = universe_get( L)); - THREAD_MAKE_ASYNCH_CANCELLABLE(); - THREAD_CLEANUP_PUSH( thread_cleanup_handler, s); - s->status = RUNNING; // PENDING -> RUNNING - - // Tie "set_finalizer()" to the state - lua_pushcfunction( L, LG_set_finalizer); - populate_func_lookup_table( L, -1, "set_finalizer"); - lua_setglobal( L, "set_finalizer"); - - // Tie "set_debug_threadname()" to the state - // But don't register it in the lookup database because of the s_lane pointer upvalue - lua_pushlightuserdata( L, s); - lua_pushcclosure( L, LG_set_debug_threadname, 1); - lua_setglobal( L, "set_debug_threadname"); - - // Tie "cancel_test()" to the state - lua_pushcfunction( L, LG_cancel_test); - populate_func_lookup_table( L, -1, "cancel_test"); - lua_setglobal( L, "cancel_test"); - - // this could be done in lane_new before the lane body function is pushed on the stack to avoid unnecessary stack slot shifting around -#if ERROR_FULL_STACK - 
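/*
 * A minimal sketch of how a lane body would use the globals registered around this point,
 * assuming the standard Lanes setup (the tracking frontend itself lives outside this file):
 *
 *   set_debug_threadname( "worker-1")        -- name reported by lane tracking and debuggers
 *   set_error_reporting( "extended")         -- lane_error() then records one table per stack level
 *   set_finalizer( function( err) print( err) end)  -- run by run_finalizers() after the body ends
 */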
// Tie "set_error_reporting()" to the state - lua_pushcfunction( L, LG_set_error_reporting); - populate_func_lookup_table( L, -1, "set_error_reporting"); - lua_setglobal( L, "set_error_reporting"); - - STACK_GROW( L, 1); - lua_pushcfunction( L, lane_error); // func args handler - lua_insert( L, 1); // handler func args -#endif // ERROR_FULL_STACK - - rc = lua_pcall( L, nargs, LUA_MULTRET, ERROR_FULL_STACK); // retvals|err - -#if ERROR_FULL_STACK - lua_remove( L, 1); // retvals|error -# endif // ERROR_FULL_STACK - - // in case of error and if it exists, fetch stack trace from registry and push it - push_stack_trace( L, rc, 1); // retvals|error [trace] - - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "Lane %p body: %s (%s)\n" INDENT_END, L, get_errcode_name( rc), equal_unique_key( L, 1, CANCEL_ERROR) ? "cancelled" : lua_typename( L, lua_type( L, 1)))); - //STACK_DUMP(L); - // Call finalizers, if the script has set them up. - // - rc2 = run_finalizers( L, rc); - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "Lane %p finalizer: %s\n" INDENT_END, L, get_errcode_name( rc2))); - if( rc2 != LUA_OK) // Error within a finalizer! - { - // the finalizer generated an error, and left its own error message [and stack trace] on the stack - rc = rc2; // we're overruling the earlier script error or normal return - } - s->waiting_on = NULL; // just in case - if( selfdestruct_remove( s)) // check and remove (under lock!) - { - // We're a free-running thread and no-one's there to clean us up. - // - lua_close( s->L); - - MUTEX_LOCK( &s->U->selfdestruct_cs); - // done with lua_close(), terminal shutdown sequence may proceed - -- s->U->selfdestructing_count; - MUTEX_UNLOCK( &s->U->selfdestruct_cs); - - lane_cleanup( s); // s is freed at this point - } - else - { - // leave results (1..top) or error message + stack trace (1..2) on the stack - master will copy them - - enum e_status st = (rc == 0) ? DONE : equal_unique_key( L, 1, CANCEL_ERROR) ? 
CANCELLED : ERROR_ST; - - // Posix no PTHREAD_TIMEDJOIN: - // 'done_lock' protects the -> DONE|ERROR_ST|CANCELLED state change - // -#if THREADWAIT_METHOD == THREADWAIT_CONDVAR - MUTEX_LOCK( &s->done_lock); - { -#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR - s->status = st; -#if THREADWAIT_METHOD == THREADWAIT_CONDVAR - SIGNAL_ONE( &s->done_signal); // wake up master (while 's->done_lock' is on) - } - MUTEX_UNLOCK( &s->done_lock); -#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR - } - THREAD_CLEANUP_POP( FALSE); - return 0; // ignored -} - -// --- If a client wants to transfer stuff of a given module from the current state to another Lane, the module must be required -// with lanes.require, that will call the regular 'require', then populate the lookup database in the source lane -// module = lanes.require( "modname") -// upvalue[1]: _G.require -LUAG_FUNC( require) -{ - char const* name = lua_tostring( L, 1); - int const nargs = lua_gettop( L); - DEBUGSPEW_CODE( Universe* U = universe_get( L)); - STACK_CHECK( L, 0); - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lanes.require %s BEGIN\n" INDENT_END, name)); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - lua_pushvalue( L, lua_upvalueindex(1)); // "name" require - lua_insert( L, 1); // require "name" - lua_call( L, nargs, 1); // module - populate_func_lookup_table( L, -1, name); - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lanes.require %s END\n" INDENT_END, name)); - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - STACK_END( L, 0); - return 1; -} - - -// --- If a client wants to transfer stuff of a previously required module from the current state to another Lane, the module must be registered -// to populate the lookup database in the source lane (and in the destination too, of course) -// lanes.register( "modname", module) -LUAG_FUNC( register) -{ - char const* name = luaL_checkstring( L, 1); - int const mod_type = lua_type( L, 2); - // ignore extra parameters, just in case - lua_settop( L, 2); - luaL_argcheck( L, (mod_type == LUA_TTABLE) || (mod_type == LUA_TFUNCTION), 2, "unexpected module type"); - DEBUGSPEW_CODE( Universe* U = universe_get( L)); - STACK_CHECK( L, 0); // "name" mod_table - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lanes.register %s BEGIN\n" INDENT_END, name)); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - populate_func_lookup_table( L, -1, name); - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lanes.register %s END\n" INDENT_END, name)); - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - STACK_END( L, 0); - return 0; -} - -// crc64/we of string "GCCB_KEY" generated at http://www.nitrxgen.net/hashgen/ -static DECLARE_CONST_UNIQUE_KEY( GCCB_KEY, 0xcfb1f046ef074e88); - -//--- -// lane_ud = lane_new( function -// , [libs_str] -// , [priority_int=0] -// , [globals_tbl] -// , [package_tbl] -// , [required_tbl] -// , [gc_cb_func] -// [, ... args ...]) -// -// Upvalues: metatable to use for 'lane_ud' -// -LUAG_FUNC( lane_new) -{ - lua_State* L2; - Lane* s; - Lane** ud; - - char const* libs_str = lua_tostring( L, 2); - bool_t const have_priority = !lua_isnoneornil( L, 3); - int const priority = have_priority ? (int) lua_tointeger( L, 3) : THREAD_PRIO_DEFAULT; - uint_t const globals_idx = lua_isnoneornil( L, 4) ? 0 : 4; - uint_t const package_idx = lua_isnoneornil( L, 5) ? 0 : 5; - uint_t const required_idx = lua_isnoneornil( L, 6) ? 0 : 6; - uint_t const gc_cb_idx = lua_isnoneornil( L, 7) ? 
0 : 7; - -#define FIXED_ARGS 7 - int const nargs = lua_gettop(L) - FIXED_ARGS; - Universe* const U = universe_get( L); - ASSERT_L( nargs >= 0); - - // public Lanes API accepts a generic range -3/+3 - // that will be remapped into the platform-specific scheduler priority scheme - // On some platforms, -3 is equivalent to -2 and +3 to +2 - if( have_priority && (priority < THREAD_PRIO_MIN || priority > THREAD_PRIO_MAX)) - { - return luaL_error( L, "Priority out of range: %d..+%d (%d)", THREAD_PRIO_MIN, THREAD_PRIO_MAX, priority); - } - - /* --- Create and prepare the sub state --- */ - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: setup\n" INDENT_END)); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - - // populate with selected libraries at the same time - L2 = luaG_newstate( U, L, libs_str); // L // L2 - - STACK_GROW( L2, nargs + 3); // - STACK_CHECK( L2, 0); - - STACK_GROW( L, 3); // func libs priority globals package required gc_cb [... args ...] - STACK_CHECK( L, 0); - - // give a default "Lua" name to the thread to see VM name in Decoda debugger - lua_pushfstring( L2, "Lane #%p", L2); // "..." - lua_setglobal( L2, "decoda_name"); // - ASSERT_L( lua_gettop( L2) == 0); - - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: update 'package'\n" INDENT_END)); - // package - if( package_idx != 0) - { - // when copying with mode eLM_LaneBody, should raise an error in case of problem, not leave it one the stack - (void) luaG_inter_copy_package( U, L, L2, package_idx, eLM_LaneBody); - } - - // modules to require in the target lane *before* the function is transfered! - - if( required_idx != 0) - { - int nbRequired = 1; - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: require 'required' list\n" INDENT_END)); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - // should not happen, was checked in lanes.lua before calling lane_new() - if( lua_type( L, required_idx) != LUA_TTABLE) - { - return luaL_error( L, "expected required module list as a table, got %s", luaL_typename( L, required_idx)); - } - - lua_pushnil( L); // func libs priority globals package required gc_cb [... args ...] nil - while( lua_next( L, required_idx) != 0) // func libs priority globals package required gc_cb [... args ...] n "modname" - { - if( lua_type( L, -1) != LUA_TSTRING || lua_type( L, -2) != LUA_TNUMBER || lua_tonumber( L, -2) != nbRequired) - { - return luaL_error( L, "required module list should be a list of strings"); - } - else - { - // require the module in the target state, and populate the lookup table there too - size_t len; - char const* name = lua_tolstring( L, -1, &len); - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: require '%s'\n" INDENT_END, name)); - - // require the module in the target lane - lua_getglobal( L2, "require"); // require()? - if( lua_isnil( L2, -1)) - { - lua_pop( L2, 1); // - luaL_error( L, "cannot pre-require modules without loading 'package' library first"); - } - else - { - lua_pushlstring( L2, name, len); // require() name - if( lua_pcall( L2, 1, 1, 0) != LUA_OK) // ret/errcode - { - // propagate error to main state if any - luaG_inter_move( U, L2, L, 1, eLM_LaneBody); // func libs priority globals package required gc_cb [... args ...] n "modname" error - return lua_error( L); - } - // after requiring the module, register the functions it exported in our name<->function database - populate_func_lookup_table( L2, -1, name); - lua_pop( L2, 1); // - } - } - lua_pop( L, 1); // func libs priority globals package required gc_cb [... args ...] 
n - ++ nbRequired; - } // func libs priority globals package required gc_cb [... args ...] - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - } - STACK_MID( L, 0); - STACK_MID( L2, 0); // - - // Appending the specified globals to the global environment - // *after* stdlibs have been loaded and modules required, in case we transfer references to native functions they exposed... - // - if( globals_idx != 0) - { - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: transfer globals\n" INDENT_END)); - if( !lua_istable( L, globals_idx)) - { - return luaL_error( L, "Expected table, got %s", luaL_typename( L, globals_idx)); - } - - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - lua_pushnil( L); // func libs priority globals package required gc_cb [... args ...] nil - // Lua 5.2 wants us to push the globals table on the stack - lua_pushglobaltable( L2); // _G - while( lua_next( L, globals_idx)) // func libs priority globals package required gc_cb [... args ...] k v - { - luaG_inter_copy( U, L, L2, 2, eLM_LaneBody); // _G k v - // assign it in L2's globals table - lua_rawset( L2, -3); // _G - lua_pop( L, 1); // func libs priority globals package required gc_cb [... args ...] k - } // func libs priority globals package required gc_cb [... args ...] - lua_pop( L2, 1); // - - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - } - STACK_MID( L, 0); - STACK_MID( L2, 0); - - // Lane main function - if( lua_type( L, 1) == LUA_TFUNCTION) - { - int res; - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: transfer lane body\n" INDENT_END)); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - lua_pushvalue( L, 1); // func libs priority globals package required gc_cb [... args ...] func - res = luaG_inter_move( U, L, L2, 1, eLM_LaneBody); // func libs priority globals package required gc_cb [... args ...] // func - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - if( res != 0) - { - return luaL_error( L, "tried to copy unsupported types"); - } - } - else if( lua_type( L, 1) == LUA_TSTRING) - { - // compile the string - if( luaL_loadstring( L2, lua_tostring( L, 1)) != 0) // func - { - return luaL_error( L, "error when parsing lane function code"); - } - } - STACK_MID( L, 0); - STACK_MID( L2, 1); - ASSERT_L( lua_isfunction( L2, 1)); - - // revive arguments - if( nargs > 0) - { - int res; - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: transfer lane arguments\n" INDENT_END)); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - res = luaG_inter_move( U, L, L2, nargs, eLM_LaneBody); // func libs priority globals package required gc_cb // func [... args ...] 
- DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - if( res != 0) - { - return luaL_error( L, "tried to copy unsupported types"); - } - } - STACK_END( L, -nargs); - ASSERT_L( lua_gettop( L) == FIXED_ARGS); - STACK_CHECK( L, 0); - STACK_MID( L2, 1 + nargs); - - // 's' is allocated from heap, not Lua, since its life span may surpass the handle's (if free running thread) - // - // a Lane full userdata needs a single uservalue - ud = (Lane**) lua_newuserdatauv( L, sizeof( Lane*), 1); // func libs priority globals package required gc_cb lane - { - AllocatorDefinition* const allocD = &U->internal_allocator; - s = *ud = (Lane*) allocD->allocF(allocD->allocUD, NULL, 0, sizeof(Lane)); - } - if( s == NULL) - { - return luaL_error( L, "could not create lane: out of memory"); - } - - s->L = L2; - s->U = U; - s->status = PENDING; - s->waiting_on = NULL; - s->debug_name = ""; - s->cancel_request = CANCEL_NONE; - -#if THREADWAIT_METHOD == THREADWAIT_CONDVAR - MUTEX_INIT( &s->done_lock); - SIGNAL_INIT( &s->done_signal); -#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR - s->mstatus = NORMAL; - s->selfdestruct_next = NULL; -#if HAVE_LANE_TRACKING() - s->tracking_next = NULL; - if( s->U->tracking_first) - { - tracking_add( s); - } -#endif // HAVE_LANE_TRACKING() - - // Set metatable for the userdata - // - lua_pushvalue( L, lua_upvalueindex( 1)); // func libs priority globals package required gc_cb lane mt - lua_setmetatable( L, -2); // func libs priority globals package required gc_cb lane - STACK_MID( L, 1); - - // Create uservalue for the userdata - // (this is where lane body return values will be stored when the handle is indexed by a numeric key) - lua_newtable( L); // func libs cancelstep priority globals package required gc_cb lane uv - - // Store the gc_cb callback in the uservalue - if( gc_cb_idx > 0) - { - push_unique_key( L, GCCB_KEY); // func libs priority globals package required gc_cb lane uv k - lua_pushvalue( L, gc_cb_idx); // func libs priority globals package required gc_cb lane uv k gc_cb - lua_rawset( L, -3); // func libs priority globals package required gc_cb lane uv - } - - lua_setiuservalue( L, -2, 1); // func libs priority globals package required gc_cb lane - - // Store 's' in the lane's registry, for 'cancel_test()' (we do cancel tests at pending send/receive). - REGISTRY_SET( L2, CANCEL_TEST_KEY, lua_pushlightuserdata( L2, s)); // func [... args ...] - - STACK_END( L, 1); - STACK_END( L2, 1 + nargs); - - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: launching thread\n" INDENT_END)); - THREAD_CREATE( &s->thread, lane_main, s, priority); - - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - return 1; -} - - -//--- -// = thread_gc( lane_ud ) -// -// Cleanup for a thread userdata. If the thread is still executing, leave it -// alive as a free-running thread (will clean up itself). -// -// * Why NOT cancel/kill a loose thread: -// -// At least timer system uses a free-running thread, they should be handy -// and the issue of canceling/killing threads at gc is not very nice, either -// (would easily cause waits at gc cycle, which we don't want). -// -LUAG_FUNC( thread_gc) -{ - bool_t have_gc_cb = FALSE; - Lane* s = lua_toLane( L, 1); // ud - - // if there a gc callback? 
- lua_getiuservalue( L, 1, 1); // ud uservalue - push_unique_key( L, GCCB_KEY); // ud uservalue __gc - lua_rawget( L, -2); // ud uservalue gc_cb|nil - if( !lua_isnil( L, -1)) - { - lua_remove( L, -2); // ud gc_cb|nil - lua_pushstring( L, s->debug_name); // ud gc_cb name - have_gc_cb = TRUE; - } - else - { - lua_pop( L, 2); // ud - } - - // We can read 's->status' without locks, but not wait for it - // test KILLED state first, as it doesn't need to enter the selfdestruct chain - if( s->mstatus == KILLED) - { - // Make sure a kill has proceeded, before cleaning up the data structure. - // - // NO lua_close() in this case because we don't know where execution of the state was interrupted - DEBUGSPEW_CODE( fprintf( stderr, "** Joining with a killed thread (needs testing) **")); - // make sure the thread is no longer running, just like thread_join() - if(! THREAD_ISNULL( s->thread)) - { - THREAD_WAIT( &s->thread, -1, &s->done_signal, &s->done_lock, &s->status); - } - if( s->status >= DONE && s->L) - { - // we know the thread was killed while the Lua VM was not doing anything: we should be able to close it without crashing - // now, thread_cancel() will not forcefully kill a lane with s->status >= DONE, so I am not sure it can ever happen - lua_close( s->L); - s->L = 0; - // just in case, but s will be freed soon so... - s->debug_name = ""; - } - DEBUGSPEW_CODE( fprintf( stderr, "** Joined ok **")); - } - else if( s->status < DONE) - { - // still running: will have to be cleaned up later - selfdestruct_add( s); - assert( s->selfdestruct_next); - if( have_gc_cb) - { - lua_pushliteral( L, "selfdestruct"); // ud gc_cb name status - lua_call( L, 2, 0); // ud - } - return 0; - } - else if( s->L) - { - // no longer accessing the Lua VM: we can close right now - lua_close( s->L); - s->L = 0; - // just in case, but s will be freed soon so... - s->debug_name = ""; - } - - // Clean up after a (finished) thread - lane_cleanup( s); - - // do this after lane cleanup in case the callback triggers an error - if( have_gc_cb) - { - lua_pushliteral( L, "closed"); // ud gc_cb name status - lua_call( L, 2, 0); // ud - } - return 0; -} - -//--- -// str= thread_status( lane ) -// -// Returns: "pending" not started yet -// -> "running" started, doing its work.. -// <-> "waiting" blocked in a receive() -// -> "done" finished, results are there -// / "error" finished at an error, error value is there -// / "cancelled" execution cancelled by M (state gone) -// -static char const * thread_status_string( Lane* s) -{ - enum e_status st = s->status; // read just once (volatile) - char const* str = - (s->mstatus == KILLED) ? "killed" : // new to v3.3.0! - (st == PENDING) ? "pending" : - (st == RUNNING) ? "running" : // like in 'co.status()' - (st == WAITING) ? "waiting" : - (st == DONE) ? "done" : - (st == ERROR_ST) ? "error" : - (st == CANCELLED) ? "cancelled" : NULL; - return str; -} - -int push_thread_status( lua_State* L, Lane* s) -{ - char const* const str = thread_status_string( s); - ASSERT_L( str); - - lua_pushstring( L, str); - return 1; -} - - -//--- -// [...] 
| [nil, err_any, stack_tbl]= thread_join( lane_ud [, wait_secs=-1] ) -// -// timeout: returns nil -// done: returns return values (0..N) -// error: returns nil + error value [+ stack table] -// cancelled: returns nil -// -LUAG_FUNC( thread_join) -{ - Lane* const s = lua_toLane( L, 1); - double wait_secs = luaL_optnumber( L, 2, -1.0); - lua_State* L2 = s->L; - int ret; - bool_t done = THREAD_ISNULL( s->thread) || THREAD_WAIT( &s->thread, wait_secs, &s->done_signal, &s->done_lock, &s->status); - if( !done || !L2) - { - STACK_GROW( L, 2); - lua_pushnil( L); - lua_pushliteral( L, "timeout"); - return 2; - } - - STACK_CHECK( L, 0); - // Thread is DONE/ERROR_ST/CANCELLED; all ours now - - if( s->mstatus == KILLED) // OS thread was killed if thread_cancel was forced - { - // in that case, even if the thread was killed while DONE/ERROR_ST/CANCELLED, ignore regular return values - STACK_GROW( L, 2); - lua_pushnil( L); - lua_pushliteral( L, "killed"); - ret = 2; - } - else - { - Universe* U = universe_get( L); - // debug_name is a pointer to string possibly interned in the lane's state, that no longer exists when the state is closed - // so store it in the userdata uservalue at a key that can't possibly collide - securize_debug_threadname( L, s); - switch( s->status) - { - case DONE: - { - uint_t n = lua_gettop( L2); // whole L2 stack - if( (n > 0) && (luaG_inter_move( U, L2, L, n, eLM_LaneBody) != 0)) - { - return luaL_error( L, "tried to copy unsupported types"); - } - ret = n; - } - break; - - case ERROR_ST: - { - int const n = lua_gettop( L2); - STACK_GROW( L, 3); - lua_pushnil( L); - // even when ERROR_FULL_STACK, if the error is not LUA_ERRRUN, the handler wasn't called, and we only have 1 error message on the stack ... - if( luaG_inter_move( U, L2, L, n, eLM_LaneBody) != 0) // nil "err" [trace] - { - return luaL_error( L, "tried to copy unsupported types: %s", lua_tostring( L, -n)); - } - ret = 1 + n; - } - break; - - case CANCELLED: - ret = 0; - break; - - default: - DEBUGSPEW_CODE( fprintf( stderr, "Status: %d\n", s->status)); - ASSERT_L( FALSE); - ret = 0; - } - lua_close( L2); - } - s->L = 0; - STACK_END( L, ret); - return ret; -} - - -//--- -// thread_index( ud, key) -> value -// -// If key is found in the environment, return it -// If key is numeric, wait until the thread returns and populate the environment with the return values -// If the return values signal an error, propagate it -// If key is "status" return the thread status -// Else raise an error -LUAG_FUNC( thread_index) -{ - int const UD = 1; - int const KEY = 2; - int const USR = 3; - Lane* const s = lua_toLane( L, UD); - ASSERT_L( lua_gettop( L) == 2); - - STACK_GROW( L, 8); // up to 8 positions are needed in case of error propagation - - // If key is numeric, wait until the thread returns and populate the environment with the return values - if( lua_type( L, KEY) == LUA_TNUMBER) - { - // first, check that we don't already have an environment that holds the requested value - { - // If key is found in the uservalue, return it - lua_getiuservalue( L, UD, 1); - lua_pushvalue( L, KEY); - lua_rawget( L, USR); - if( !lua_isnil( L, -1)) - { - return 1; - } - lua_pop( L, 1); - } - { - // check if we already fetched the values from the thread or not - bool_t fetched; - lua_Integer key = lua_tointeger( L, KEY); - lua_pushinteger( L, 0); - lua_rawget( L, USR); - fetched = !lua_isnil( L, -1); - lua_pop( L, 1); // back to our 2 args + uservalue on the stack - if( !fetched) - { - lua_pushinteger( L, 0); - lua_pushboolean( L, 1); - 
lua_rawset( L, USR); - // wait until thread has completed - lua_pushcfunction( L, LG_thread_join); - lua_pushvalue( L, UD); - lua_call( L, 1, LUA_MULTRET); // all return values are on the stack, at slots 4+ - switch( s->status) - { - default: - if( s->mstatus != KILLED) - { - // this is an internal error, we probably never get here - lua_settop( L, 0); - lua_pushliteral( L, "Unexpected status: "); - lua_pushstring( L, thread_status_string( s)); - lua_concat( L, 2); - lua_error( L); - break; - } - // fall through if we are killed, as we got nil, "killed" on the stack - - case DONE: // got regular return values - { - int i, nvalues = lua_gettop( L) - 3; - for( i = nvalues; i > 0; -- i) - { - // pop the last element of the stack, to store it in the uservalue at its proper index - lua_rawseti( L, USR, i); - } - } - break; - - case ERROR_ST: // got 3 values: nil, errstring, callstack table - // me[-2] could carry the stack table, but even - // me[-1] is rather unnecessary (and undocumented); - // use ':join()' instead. --AKa 22-Jan-2009 - ASSERT_L( lua_isnil( L, 4) && !lua_isnil( L, 5) && lua_istable( L, 6)); - // store errstring at key -1 - lua_pushnumber( L, -1); - lua_pushvalue( L, 5); - lua_rawset( L, USR); - break; - - case CANCELLED: - // do nothing - break; - } - } - lua_settop( L, 3); // UD KEY ENV - if( key != -1) - { - lua_pushnumber( L, -1); // UD KEY ENV -1 - lua_rawget( L, USR); // UD KEY ENV "error" - if( !lua_isnil( L, -1)) // an error was stored - { - // Note: Lua 5.1 interpreter is not prepared to show - // non-string errors, so we use 'tostring()' here - // to get meaningful output. --AKa 22-Jan-2009 - // - // Also, the stack dump we get is no good; it only - // lists our internal Lanes functions. There seems - // to be no way to switch it off, though. - // - // Level 3 should show the line where 'h[x]' was read - // but this only seems to work for string messages - // (Lua 5.1.4). No idea, why. --AKa 22-Jan-2009 - lua_getmetatable( L, UD); // UD KEY ENV "error" mt - lua_getfield( L, -1, "cached_error"); // UD KEY ENV "error" mt error() - lua_getfield( L, -2, "cached_tostring"); // UD KEY ENV "error" mt error() tostring() - lua_pushvalue( L, 4); // UD KEY ENV "error" mt error() tostring() "error" - lua_call( L, 1, 1); // tostring( errstring) -- just in case // UD KEY ENV "error" mt error() "error" - lua_pushinteger( L, 3); // UD KEY ENV "error" mt error() "error" 3 - lua_call( L, 2, 0); // error( tostring( errstring), 3) // UD KEY ENV "error" mt - } - else - { - lua_pop( L, 1); // back to our 3 arguments on the stack - } - } - lua_rawgeti( L, USR, (int)key); - } - return 1; - } - if( lua_type( L, KEY) == LUA_TSTRING) - { - char const * const keystr = lua_tostring( L, KEY); - lua_settop( L, 2); // keep only our original arguments on the stack - if( strcmp( keystr, "status") == 0) - { - return push_thread_status( L, s); // push the string representing the status - } - // return UD.metatable[key] - lua_getmetatable( L, UD); // UD KEY mt - lua_replace( L, -3); // mt KEY - lua_rawget( L, -2); // mt value - // only "cancel" and "join" are registered as functions, any other string will raise an error - if( lua_iscfunction( L, -1)) - { - return 1; - } - return luaL_error( L, "can't index a lane with '%s'", keystr); - } - // unknown key - lua_getmetatable( L, UD); - lua_getfield( L, -1, "cached_error"); - lua_pushliteral( L, "Unknown key: "); - lua_pushvalue( L, KEY); - lua_concat( L, 2); - lua_call( L, 1, 0); // error( "Unknown key: " .. 
key) -> doesn't return - return 0; -} - -#if HAVE_LANE_TRACKING() -//--- -// threads() -> {}|nil -// -// Return a list of all known lanes -LUAG_FUNC( threads) -{ - int const top = lua_gettop( L); - Universe* U = universe_get( L); - - // List _all_ still running threads - // - MUTEX_LOCK( &U->tracking_cs); - if( U->tracking_first && U->tracking_first != TRACKING_END) - { - Lane* s = U->tracking_first; - int index = 0; - lua_newtable( L); // {} - while( s != TRACKING_END) - { - // insert a { name, status } tuple, so that several lanes with the same name can't clobber each other - lua_newtable( L); // {} {} - lua_pushstring( L, s->debug_name); // {} {} "name" - lua_setfield( L, -2, "name"); // {} {} - push_thread_status( L, s); // {} {} "status" - lua_setfield( L, -2, "status"); // {} {} - lua_rawseti( L, -2, ++ index); // {} - s = s->tracking_next; - } - } - MUTEX_UNLOCK( &U->tracking_cs); - return lua_gettop( L) - top; // 0 or 1 -} -#endif // HAVE_LANE_TRACKING() - -/* - * ############################################################################################### - * ######################################## Timer support ######################################## - * ############################################################################################### - */ - -/* -* secs= now_secs() -* -* Returns the current time, as seconds (millisecond resolution). -*/ -LUAG_FUNC( now_secs ) -{ - lua_pushnumber( L, now_secs() ); - return 1; -} - -/* -* wakeup_at_secs= wakeup_conv( date_tbl ) -*/ -LUAG_FUNC( wakeup_conv ) -{ - int year, month, day, hour, min, sec, isdst; - struct tm t; - memset( &t, 0, sizeof( t)); - // - // .year (four digits) - // .month (1..12) - // .day (1..31) - // .hour (0..23) - // .min (0..59) - // .sec (0..61) - // .yday (day of the year) - // .isdst (daylight saving on/off) - - STACK_CHECK( L, 0); - lua_getfield( L, 1, "year" ); year= (int)lua_tointeger(L,-1); lua_pop(L,1); - lua_getfield( L, 1, "month" ); month= (int)lua_tointeger(L,-1); lua_pop(L,1); - lua_getfield( L, 1, "day" ); day= (int)lua_tointeger(L,-1); lua_pop(L,1); - lua_getfield( L, 1, "hour" ); hour= (int)lua_tointeger(L,-1); lua_pop(L,1); - lua_getfield( L, 1, "min" ); min= (int)lua_tointeger(L,-1); lua_pop(L,1); - lua_getfield( L, 1, "sec" ); sec= (int)lua_tointeger(L,-1); lua_pop(L,1); - - // If Lua table has '.isdst' we trust that. If it does not, we'll let - // 'mktime' decide on whether the time is within DST or not (value -1). - // - lua_getfield( L, 1, "isdst" ); - isdst= lua_isboolean(L,-1) ? 
lua_toboolean(L,-1) : -1; - lua_pop(L,1); - STACK_END( L, 0); - - t.tm_year= year-1900; - t.tm_mon= month-1; // 0..11 - t.tm_mday= day; // 1..31 - t.tm_hour= hour; // 0..23 - t.tm_min= min; // 0..59 - t.tm_sec= sec; // 0..60 - t.tm_isdst= isdst; // 0/1/negative - - lua_pushnumber( L, (double) mktime( &t)); // ms=0 - return 1; -} - -/* - * ############################################################################################### - * ######################################## Module linkage ####################################### - * ############################################################################################### - */ - -extern int LG_linda( lua_State* L); -static const struct luaL_Reg lanes_functions [] = { - {"linda", LG_linda}, - {"now_secs", LG_now_secs}, - {"wakeup_conv", LG_wakeup_conv}, - {"set_thread_priority", LG_set_thread_priority}, - {"set_thread_affinity", LG_set_thread_affinity}, - {"nameof", luaG_nameof}, - {"register", LG_register}, - {"set_singlethreaded", LG_set_singlethreaded}, - {NULL, NULL} -}; - -/* - * One-time initializations - * settings table it at position 1 on the stack - * pushes an error string on the stack in case of problem - */ -static void init_once_LOCKED( void) -{ -#if (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC) - now_secs(); // initialize 'now_secs()' internal offset -#endif - -#if (defined PLATFORM_OSX) && (defined _UTILBINDTHREADTOCPU) - chudInitialize(); -#endif - - //--- - // Linux needs SCHED_RR to change thread priorities, and that is only - // allowed for sudo'ers. SCHED_OTHER (default) has no priorities. - // SCHED_OTHER threads are always lower priority than SCHED_RR. - // - // ^-- those apply to 2.6 kernel. IF **wishful thinking** these - // constraints will change in the future, non-sudo priorities can - // be enabled also for Linux. - // -#ifdef PLATFORM_LINUX - sudo = (geteuid() == 0); // we are root? - - // If lower priorities (-2..-1) are wanted, we need to lift the main - // thread to SCHED_RR and 50 (medium) level. Otherwise, we're always below - // the launched threads (even -2). - // -#ifdef LINUX_SCHED_RR - if( sudo) - { - struct sched_param sp; - sp.sched_priority = _PRIO_0; - PT_CALL( pthread_setschedparam( pthread_self(), SCHED_RR, &sp)); - } -#endif // LINUX_SCHED_RR -#endif // PLATFORM_LINUX -} - -static volatile long s_initCount = 0; - -// upvalue 1: module name -// upvalue 2: module table -// param 1: settings table -LUAG_FUNC( configure) -{ - Universe* U = universe_get( L); - bool_t const from_master_state = (U == NULL); - char const* name = luaL_checkstring( L, lua_upvalueindex( 1)); - _ASSERT_L( L, lua_type( L, 1) == LUA_TTABLE); - - /* - ** Making one-time initializations. - ** - ** When the host application is single-threaded (and all threading happens via Lanes) - ** there is no problem. But if the host is multithreaded, we need to lock around the - ** initializations. 
- */ -#if THREADAPI == THREADAPI_WINDOWS - { - static volatile int /*bool*/ go_ahead; // = 0 - if( InterlockedCompareExchange( &s_initCount, 1, 0) == 0) - { - init_once_LOCKED(); - go_ahead = 1; // let others pass - } - else - { - while( !go_ahead) { Sleep(1); } // changes threads - } - } -#else // THREADAPI == THREADAPI_PTHREAD - if( s_initCount == 0) - { - static pthread_mutex_t my_lock = PTHREAD_MUTEX_INITIALIZER; - pthread_mutex_lock( &my_lock); - { - // Recheck now that we're within the lock - // - if( s_initCount == 0) - { - init_once_LOCKED(); - s_initCount = 1; - } - } - pthread_mutex_unlock( &my_lock); - } -#endif // THREADAPI == THREADAPI_PTHREAD - - STACK_GROW( L, 4); - STACK_CHECK_ABS( L, 1); // settings - - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: lanes.configure() BEGIN\n" INDENT_END, L)); - DEBUGSPEW_CODE( if( U) ++ U->debugspew_indent_depth); - - if( U == NULL) - { - U = universe_create( L); // settings universe - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - lua_newtable( L); // settings universe mt - lua_getfield( L, 1, "shutdown_timeout"); // settings universe mt shutdown_timeout - lua_pushcclosure( L, selfdestruct_gc, 1); // settings universe mt selfdestruct_gc - lua_setfield( L, -2, "__gc"); // settings universe mt - lua_setmetatable( L, -2); // settings universe - lua_pop( L, 1); // settings - lua_getfield( L, 1, "verbose_errors"); // settings verbose_errors - U->verboseErrors = lua_toboolean( L, -1); - lua_pop( L, 1); // settings - lua_getfield( L, 1, "demote_full_userdata"); // settings demote_full_userdata - U->demoteFullUserdata = lua_toboolean( L, -1); - lua_pop( L, 1); // settings -#if HAVE_LANE_TRACKING() - MUTEX_INIT( &U->tracking_cs); - lua_getfield( L, 1, "track_lanes"); // settings track_lanes - U->tracking_first = lua_toboolean( L, -1) ? TRACKING_END : NULL; - lua_pop( L, 1); // settings -#endif // HAVE_LANE_TRACKING() - // Linked chains handling - MUTEX_INIT( &U->selfdestruct_cs); - MUTEX_RECURSIVE_INIT( &U->require_cs); - // Locks for 'tools.c' inc/dec counters - MUTEX_INIT( &U->deep_lock); - MUTEX_INIT( &U->mtid_lock); - U->selfdestruct_first = SELFDESTRUCT_END; - initialize_allocator_function( U, L); - initialize_on_state_create( U, L); - init_keepers( U, L); - STACK_MID( L, 1); - - // Initialize 'timer_deep'; a common Linda object shared by all states - lua_pushcfunction( L, LG_linda); // settings lanes.linda - lua_pushliteral( L, "lanes-timer"); // settings lanes.linda "lanes-timer" - lua_call( L, 1, 1); // settings linda - STACK_MID( L, 2); - - // Proxy userdata contents is only a 'DEEP_PRELUDE*' pointer - U->timer_deep = *(DeepPrelude**) lua_touserdata( L, -1); - // increment refcount so that this linda remains alive as long as the universe exists. 
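/*
 * A minimal host-side sketch, assuming the 'lanes.lua' frontend forwards its settings table
 * to this configure() call (the field names match the lua_getfield() reads above):
 *
 *   local lanes = require "lanes".configure{
 *       shutdown_timeout = 0.25,  -- upvalue of selfdestruct_gc()
 *       track_lanes      = true,  -- enables the tracking chain and the threads() listing
 *       verbose_errors   = false,
 *   }
 */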
- ++ U->timer_deep->refcount; - lua_pop( L, 1); // settings - } - STACK_MID( L, 1); - - // Serialize calls to 'require' from now on, also in the primary state - serialize_require( DEBUGSPEW_PARAM_COMMA( U) L); - - // Retrieve main module interface table - lua_pushvalue( L, lua_upvalueindex( 2)); // settings M - // remove configure() (this function) from the module interface - lua_pushnil( L); // settings M nil - lua_setfield( L, -2, "configure"); // settings M - // add functions to the module's table - luaG_registerlibfuncs( L, lanes_functions); -#if HAVE_LANE_TRACKING() - // register core.threads() only if settings say it should be available - if( U->tracking_first != NULL) - { - lua_pushcfunction( L, LG_threads); // settings M LG_threads() - lua_setfield( L, -2, "threads"); // settings M - } -#endif // HAVE_LANE_TRACKING() - STACK_MID( L, 2); - - { - char const* errmsg; - errmsg = push_deep_proxy( U, L, (DeepPrelude*) U->timer_deep, 0, eLM_LaneBody); // settings M timer_deep - if( errmsg != NULL) - { - return luaL_error( L, errmsg); - } - lua_setfield( L, -2, "timer_gateway"); // settings M - } - STACK_MID( L, 2); - - // prepare the metatable for threads - // contains keys: { __gc, __index, cached_error, cached_tostring, cancel, join, get_debug_threadname } - // - if( luaL_newmetatable( L, "Lane")) // settings M mt - { - lua_pushcfunction( L, LG_thread_gc); // settings M mt LG_thread_gc - lua_setfield( L, -2, "__gc"); // settings M mt - lua_pushcfunction( L, LG_thread_index); // settings M mt LG_thread_index - lua_setfield( L, -2, "__index"); // settings M mt - lua_getglobal( L, "error"); // settings M mt error - ASSERT_L( lua_isfunction( L, -1)); - lua_setfield( L, -2, "cached_error"); // settings M mt - lua_getglobal( L, "tostring"); // settings M mt tostring - ASSERT_L( lua_isfunction( L, -1)); - lua_setfield( L, -2, "cached_tostring"); // settings M mt - lua_pushcfunction( L, LG_thread_join); // settings M mt LG_thread_join - lua_setfield( L, -2, "join"); // settings M mt - lua_pushcfunction( L, LG_get_debug_threadname); // settings M mt LG_get_debug_threadname - lua_setfield( L, -2, "get_debug_threadname"); // settings M mt - lua_pushcfunction( L, LG_thread_cancel); // settings M mt LG_thread_cancel - lua_setfield( L, -2, "cancel"); // settings M mt - lua_pushliteral( L, "Lane"); // settings M mt "Lane" - lua_setfield( L, -2, "__metatable"); // settings M mt - } - - lua_pushcclosure( L, LG_lane_new, 1); // settings M lane_new - lua_setfield( L, -2, "lane_new"); // settings M - - // we can't register 'lanes.require' normally because we want to create an upvalued closure - lua_getglobal( L, "require"); // settings M require - lua_pushcclosure( L, LG_require, 1); // settings M lanes.require - lua_setfield( L, -2, "require"); // settings M - - lua_pushfstring( - L, "%d.%d.%d" - , LANES_VERSION_MAJOR, LANES_VERSION_MINOR, LANES_VERSION_PATCH - ); // settings M VERSION - lua_setfield( L, -2, "version"); // settings M - - lua_pushinteger(L, THREAD_PRIO_MAX); // settings M THREAD_PRIO_MAX - lua_setfield( L, -2, "max_prio"); // settings M - - push_unique_key( L, CANCEL_ERROR); // settings M CANCEL_ERROR - lua_setfield( L, -2, "cancel_error"); // settings M - - STACK_MID( L, 2); // reference stack contains only the function argument 'settings' - // we'll need this every time we transfer some C function from/to this state - REGISTRY_SET( L, LOOKUP_REGKEY, lua_newtable( L)); - STACK_MID( L, 2); - - // register all native functions found in that module in the transferable functions database - // 
we process it before _G because we don't want to find the module when scanning _G (this would generate longer names) - // for example in package.loaded["lanes.core"].* - populate_func_lookup_table( L, -1, name); - STACK_MID( L, 2); - - // record all existing C/JIT-fast functions - // Lua 5.2 no longer has LUA_GLOBALSINDEX: we must push globals table on the stack - if( from_master_state) - { - // don't do this when called during the initialization of a new lane, - // because we will do it after on_state_create() is called, - // and we don't want to skip _G because of caching in case globals are created then - lua_pushglobaltable( L); // settings M _G - populate_func_lookup_table( L, -1, NULL); - lua_pop( L, 1); // settings M - } - lua_pop( L, 1); // settings - - // set _R[CONFIG_REGKEY] = settings - REGISTRY_SET( L, CONFIG_REGKEY, lua_pushvalue( L, -2)); // -2 because CONFIG_REGKEY is pushed before the value itself - STACK_END( L, 1); - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: lanes.configure() END\n" INDENT_END, L)); - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - // Return the settings table - return 1; -} - -#if defined PLATFORM_WIN32 && !defined NDEBUG -#include -#include - -void signal_handler( int signal) -{ - if( signal == SIGABRT) - { - _cprintf( "caught abnormal termination!"); - abort(); - } -} - -// helper to have correct callstacks when crashing a Win32 running on 64 bits Windows -// don't forget to toggle Debug/Exceptions/Win32 in visual Studio too! -static volatile long s_ecoc_initCount = 0; -static volatile int s_ecoc_go_ahead = 0; -static void EnableCrashingOnCrashes( void) -{ - if( InterlockedCompareExchange( &s_ecoc_initCount, 1, 0) == 0) - { - typedef BOOL (WINAPI* tGetPolicy)( LPDWORD lpFlags); - typedef BOOL (WINAPI* tSetPolicy)( DWORD dwFlags); - const DWORD EXCEPTION_SWALLOWING = 0x1; - - HMODULE kernel32 = LoadLibraryA("kernel32.dll"); - tGetPolicy pGetPolicy = (tGetPolicy)GetProcAddress(kernel32, "GetProcessUserModeExceptionPolicy"); - tSetPolicy pSetPolicy = (tSetPolicy)GetProcAddress(kernel32, "SetProcessUserModeExceptionPolicy"); - if( pGetPolicy && pSetPolicy) - { - DWORD dwFlags; - if( pGetPolicy( &dwFlags)) - { - // Turn off the filter - pSetPolicy( dwFlags & ~EXCEPTION_SWALLOWING); - } - } - //typedef void (* SignalHandlerPointer)( int); - /*SignalHandlerPointer previousHandler =*/ signal( SIGABRT, signal_handler); - - s_ecoc_go_ahead = 1; // let others pass - } - else - { - while( !s_ecoc_go_ahead) { Sleep(1); } // changes threads - } -} -#endif // PLATFORM_WIN32 - -LANES_API int luaopen_lanes_core( lua_State* L) -{ -#if defined PLATFORM_WIN32 && !defined NDEBUG - EnableCrashingOnCrashes(); -#endif // defined PLATFORM_WIN32 && !defined NDEBUG - - STACK_GROW( L, 4); - STACK_CHECK( L, 0); - - // Prevent PUC-Lua/LuaJIT mismatch. 
Hopefully this works for MoonJIT too - lua_getglobal( L, "jit"); // {jit?} -#if LUAJIT_FLAVOR() == 0 - if (!lua_isnil( L, -1)) - return luaL_error( L, "Lanes is built for PUC-Lua, don't run from LuaJIT"); -#else - if (lua_isnil( L, -1)) - return luaL_error( L, "Lanes is built for LuaJIT, don't run from PUC-Lua"); -#endif - lua_pop( L, 1); // - - // Create main module interface table - // we only have 1 closure, which must be called to configure Lanes - lua_newtable( L); // M - lua_pushvalue( L, 1); // M "lanes.core" - lua_pushvalue( L, -2); // M "lanes.core" M - lua_pushcclosure( L, LG_configure, 2); // M LG_configure() - REGISTRY_GET( L, CONFIG_REGKEY); // M LG_configure() settings - if( !lua_isnil( L, -1)) // this is not the first require "lanes.core": call configure() immediately - { - lua_pushvalue( L, -1); // M LG_configure() settings settings - lua_setfield( L, -4, "settings"); // M LG_configure() settings - lua_call( L, 1, 0); // M - } - else - { - // will do nothing on first invocation, as we haven't stored settings in the registry yet - lua_setfield( L, -3, "settings"); // M LG_configure() - lua_setfield( L, -2, "configure"); // M - } - - STACK_END( L, 1); - return 1; -} - -static int default_luaopen_lanes( lua_State* L) -{ - int rc = luaL_loadfile( L, "lanes.lua") || lua_pcall( L, 0, 1, 0); - if( rc != LUA_OK) - { - return luaL_error( L, "failed to initialize embedded Lanes"); - } - return 1; -} - -// call this instead of luaopen_lanes_core() when embedding Lua and Lanes in a custom application -LANES_API void luaopen_lanes_embedded( lua_State* L, lua_CFunction _luaopen_lanes) -{ - STACK_CHECK( L, 0); - // pre-require lanes.core so that when lanes.lua calls require "lanes.core" it finds it is already loaded - luaL_requiref( L, "lanes.core", luaopen_lanes_core, 0); // ... lanes.core - lua_pop( L, 1); // ... - STACK_MID( L, 0); - // call user-provided function that runs the chunk "lanes.lua" from wherever they stored it - luaL_requiref( L, "lanes", _luaopen_lanes ? _luaopen_lanes : default_luaopen_lanes, 0); // ... lanes - STACK_END( L, 1); -} diff --git a/src/lanes.cpp b/src/lanes.cpp new file mode 100644 index 0000000..deee90c --- /dev/null +++ b/src/lanes.cpp @@ -0,0 +1,2142 @@ +/* + * LANES.C Copyright (c) 2007-08, Asko Kauppi + * Copyright (C) 2009-19, Benoit Germain + * + * Multithreading in Lua. + * + * History: + * See CHANGES + * + * Platforms (tested internally): + * OS X (10.5.7 PowerPC/Intel) + * Linux x86 (Ubuntu 8.04) + * Win32 (Windows XP Home SP2, Visual C++ 2005/2008 Express) + * + * Platforms (tested externally): + * Win32 (MSYS) by Ross Berteig. + * + * Platforms (testers appreciated): + * Win64 - should work??? + * Linux x64 - should work + * FreeBSD - should work + * QNX - porting shouldn't be hard + * Sun Solaris - porting shouldn't be hard + * + * References: + * "Porting multithreaded applications from Win32 to Mac OS X": + * + * + * Pthreads: + * + * + * MSDN: + * + * + * + * Defines: + * -DLINUX_SCHED_RR: all threads are lifted to SCHED_RR category, to + * allow negative priorities [-3,-1] be used. Even without this, + * using priorities will require 'sudo' privileges on Linux. + * + * -DUSE_PTHREAD_TIMEDJOIN: use 'pthread_timedjoin_np()' for waiting + * for threads with a timeout. This changes the thread cleanup + * mechanism slightly (cleans up at the join, not once the thread + * has finished). May or may not be a good idea to use it. + * Available only in selected operating systems (Linux). 
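// [editor's sketch] luaopen_lanes_embedded() above is the entry point for host applications
// that embed both Lua and Lanes. A minimal sketch of how a host might call it; the loader,
// the "lanes.lua" path and host_init() are illustrative assumptions, not part of this commit,
// and "lanes.h" is assumed to declare the entry point.
#include <lua.h>
#include <lualib.h>
#include <lauxlib.h>
#include "lanes.h"

static int load_embedded_lanes(lua_State* L)
{
    // run lanes.lua from wherever the host ships it (here: the working directory)
    if (luaL_loadfile(L, "lanes.lua") != LUA_OK || lua_pcall(L, 0, 1, 0) != LUA_OK)
    {
        return lua_error(L); // propagate the load/run error
    }
    return 1; // the lanes module table
}

static void host_init(lua_State* L)
{
    luaL_openlibs(L);
    // pre-requires "lanes.core", then requires "lanes" through the loader above
    luaopen_lanes_embedded(L, load_embedded_lanes);
    lua_pop(L, 1); // drop the module table left on the stack
}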
+ * + * Bugs: + * + * To-do: + * + * Make waiting threads cancellable. + * ... + */ + +/* +=============================================================================== + +Copyright (C) 2007-10 Asko Kauppi + 2011-19 Benoit Germain + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +=============================================================================== +*/ + +#include +#include +#include +#include +#include + +#include "lanes.h" +#include "threading.h" +#include "compat.h" +#include "tools.h" +#include "state.h" +#include "universe.h" +#include "keeper.h" +#include "lanes_private.h" + +#if !(defined( PLATFORM_XBOX) || defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC)) +# include +#endif + +/* geteuid() */ +#ifdef PLATFORM_LINUX +# include +# include +#endif + +/* Do you want full call stacks, or just the line where the error happened? +* +* TBD: The full stack feature does not seem to work (try 'make error'). +*/ +#define ERROR_FULL_STACK 1 // must be either 0 or 1 as we do some index arithmetics with it! + +// intern the debug name in the specified lua state so that the pointer remains valid when the lane's state is closed +static void securize_debug_threadname( lua_State* L, Lane* s) +{ + STACK_CHECK( L, 0); + STACK_GROW( L, 3); + lua_getiuservalue( L, 1, 1); + lua_newtable( L); + // Lua 5.1 can't do 's->debug_name = lua_pushstring( L, s->debug_name);' + lua_pushstring( L, s->debug_name); + s->debug_name = lua_tostring( L, -1); + lua_rawset( L, -3); + lua_pop( L, 1); + STACK_END( L, 0); +} + +#if ERROR_FULL_STACK +static int lane_error( lua_State* L); +// crc64/we of string "STACKTRACE_REGKEY" generated at http://www.nitrxgen.net/hashgen/ +static DECLARE_CONST_UNIQUE_KEY( STACKTRACE_REGKEY, 0x534af7d3226a429f); +#endif // ERROR_FULL_STACK + +/* +* registry[FINALIZER_REG_KEY] is either nil (no finalizers) or a table +* of functions that Lanes will call after the executing 'pcall' has ended. +* +* We're NOT using the GC system for finalizer mainly because providing the +* error (and maybe stack trace) parameters to the finalizer functions would +* anyways complicate that approach. +*/ +// crc64/we of string "FINALIZER_REGKEY" generated at http://www.nitrxgen.net/hashgen/ +static DECLARE_CONST_UNIQUE_KEY( FINALIZER_REGKEY, 0x188fccb8bf348e09); + +struct s_Linda; + +/* +* Push a table stored in registry onto Lua stack. +* +* If there is no existing table, create one if 'create' is TRUE. 
+* +* Returns: TRUE if a table was pushed +* FALSE if no table found, not created, and nothing pushed +*/ +static bool_t push_registry_table( lua_State* L, UniqueKey key, bool_t create) +{ + STACK_GROW( L, 3); + STACK_CHECK( L, 0); + + REGISTRY_GET( L, key); // ? + if( lua_isnil( L, -1)) // nil? + { + lua_pop( L, 1); // + + if( !create) + { + return FALSE; + } + + lua_newtable( L); // t + REGISTRY_SET( L, key, lua_pushvalue( L, -2)); + } + STACK_END( L, 1); + return TRUE; // table pushed +} + +#if HAVE_LANE_TRACKING() + +// The chain is ended by '(Lane*)(-1)', not NULL: +// 'tracking_first -> ... -> ... -> (-1)' +#define TRACKING_END ((Lane *)(-1)) + +/* + * Add the lane to tracking chain; the ones still running at the end of the + * whole process will be cancelled. + */ +static void tracking_add( Lane* s) +{ + + MUTEX_LOCK( &s->U->tracking_cs); + { + assert( s->tracking_next == NULL); + + s->tracking_next = s->U->tracking_first; + s->U->tracking_first = s; + } + MUTEX_UNLOCK( &s->U->tracking_cs); +} + +/* + * A free-running lane has ended; remove it from tracking chain + */ +static bool_t tracking_remove( Lane* s) +{ + bool_t found = FALSE; + MUTEX_LOCK( &s->U->tracking_cs); + { + // Make sure (within the MUTEX) that we actually are in the chain + // still (at process exit they will remove us from chain and then + // cancel/kill). + // + if( s->tracking_next != NULL) + { + Lane** ref = (Lane**) &s->U->tracking_first; + + while( *ref != TRACKING_END) + { + if( *ref == s) + { + *ref = s->tracking_next; + s->tracking_next = NULL; + found = TRUE; + break; + } + ref = (Lane**) &((*ref)->tracking_next); + } + assert( found); + } + } + MUTEX_UNLOCK( &s->U->tracking_cs); + return found; +} + +#endif // HAVE_LANE_TRACKING() + +//--- +// low-level cleanup + +static void lane_cleanup( Lane* s) +{ + // Clean up after a (finished) thread + // +#if THREADWAIT_METHOD == THREADWAIT_CONDVAR + SIGNAL_FREE( &s->done_signal); + MUTEX_FREE( &s->done_lock); +#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR + +#if HAVE_LANE_TRACKING() + if( s->U->tracking_first != NULL) + { + // Lane was cleaned up, no need to handle at process termination + tracking_remove( s); + } +#endif // HAVE_LANE_TRACKING() + + { + AllocatorDefinition* const allocD = &s->U->internal_allocator; + (void) allocD->allocF(allocD->allocUD, s, sizeof(Lane), 0); + } +} + +/* + * ############################################################################################### + * ########################################## Finalizer ########################################## + * ############################################################################################### + */ + +//--- +// void= finalizer( finalizer_func ) +// +// finalizer_func( [err, stack_tbl] ) +// +// Add a function that will be called when exiting the lane, either via +// normal return or an error. 
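// [editor's sketch] push_registry_table() above (used by set_finalizer just below to reach
// the per-lane finalizer list) is a get-or-create wrapper around a registry slot keyed with
// Lanes' UniqueKey machinery. With the stock Lua API (5.2+) the same idiom is commonly keyed
// by the address of a static variable; finalizers_key and the helper name are illustrative:
#include <lua.h>

static char const finalizers_key = 0; // its address serves as a unique registry key

static int push_or_create_registry_table(lua_State* L, int create)
{
    lua_rawgetp(L, LUA_REGISTRYINDEX, &finalizers_key);      // t|nil
    if (lua_isnil(L, -1))
    {
        lua_pop(L, 1);                                       //
        if (!create)
            return 0;                                        // nothing pushed
        lua_newtable(L);                                     // t
        lua_pushvalue(L, -1);                                // t t
        lua_rawsetp(L, LUA_REGISTRYINDEX, &finalizers_key);  // t
    }
    return 1;                                                // table pushed
}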
+// +LUAG_FUNC( set_finalizer) +{ + luaL_argcheck( L, lua_isfunction( L, 1), 1, "finalizer should be a function"); + luaL_argcheck( L, lua_gettop( L) == 1, 1, "too many arguments"); + // Get the current finalizer table (if any) + push_registry_table( L, FINALIZER_REGKEY, TRUE /*do create if none*/); // finalizer {finalisers} + STACK_GROW( L, 2); + lua_pushinteger( L, lua_rawlen( L, -1) + 1); // finalizer {finalisers} idx + lua_pushvalue( L, 1); // finalizer {finalisers} idx finalizer + lua_rawset( L, -3); // finalizer {finalisers} + lua_pop( L, 2); // + return 0; +} + + +//--- +// Run finalizers - if any - with the given parameters +// +// If 'rc' is nonzero, error message and stack index (the latter only when ERROR_FULL_STACK == 1) are available as: +// [-1]: stack trace (table) +// [-2]: error message (any type) +// +// Returns: +// 0 if finalizers were run without error (or there were none) +// LUA_ERRxxx return code if any of the finalizers failed +// +// TBD: should we add stack trace on failing finalizer, wouldn't be hard.. +// +static void push_stack_trace( lua_State* L, int rc_, int stk_base_); + +static int run_finalizers( lua_State* L, int lua_rc) +{ + int finalizers_index; + int n; + int err_handler_index = 0; + int rc = LUA_OK; // ... + if( !push_registry_table( L, FINALIZER_REGKEY, FALSE)) // ... finalizers? + { + return 0; // no finalizers + } + + STACK_GROW( L, 5); + + finalizers_index = lua_gettop( L); + +#if ERROR_FULL_STACK + lua_pushcfunction( L, lane_error); // ... finalizers lane_error + err_handler_index = lua_gettop( L); +#endif // ERROR_FULL_STACK + + for( n = (int) lua_rawlen( L, finalizers_index); n > 0; -- n) + { + int args = 0; + lua_pushinteger( L, n); // ... finalizers lane_error n + lua_rawget( L, finalizers_index); // ... finalizers lane_error finalizer + ASSERT_L( lua_isfunction( L, -1)); + if( lua_rc != LUA_OK) // we have an error message and an optional stack trace at the bottom of the stack + { + ASSERT_L( finalizers_index == 2 || finalizers_index == 3); + //char const* err_msg = lua_tostring( L, 1); + lua_pushvalue( L, 1); // ... finalizers lane_error finalizer err_msg + // note we don't always have a stack trace for example when CANCEL_ERROR, or when we got an error that doesn't call our handler, such as LUA_ERRMEM + if( finalizers_index == 3) + { + lua_pushvalue( L, 2); // ... finalizers lane_error finalizer err_msg stack_trace + } + args = finalizers_index - 1; + } + + // if no error from the main body, finalizer doesn't receive any argument, else it gets the error message and optional stack trace + rc = lua_pcall( L, args, 0, err_handler_index); // ... finalizers lane_error err_msg2? + if( rc != LUA_OK) + { + push_stack_trace( L, rc, lua_gettop( L)); + // If one finalizer fails, don't run the others. Return this + // as the 'real' error, replacing what we could have had (or not) + // from the actual code. + break; + } + // no error, proceed to next finalizer // ... 
finalizers lane_error + } + + if( rc != LUA_OK) + { + // ERROR_FULL_STACK accounts for the presence of lane_error on the stack + int nb_err_slots = lua_gettop( L) - finalizers_index - ERROR_FULL_STACK; + // a finalizer generated an error, this is what we leave of the stack + for( n = nb_err_slots; n > 0; -- n) + { + lua_replace( L, n); + } + // leave on the stack only the error and optional stack trace produced by the error in the finalizer + lua_settop( L, nb_err_slots); + } + else // no error from the finalizers, make sure only the original return values from the lane body remain on the stack + { + lua_settop( L, finalizers_index - 1); + } + + return rc; +} + +/* + * ############################################################################################### + * ########################################### Threads ########################################### + * ############################################################################################### + */ + +// +// Protects modifying the selfdestruct chain + +#define SELFDESTRUCT_END ((Lane*)(-1)) +// +// The chain is ended by '(Lane*)(-1)', not NULL: +// 'selfdestruct_first -> ... -> ... -> (-1)' + +/* + * Add the lane to selfdestruct chain; the ones still running at the end of the + * whole process will be cancelled. + */ +static void selfdestruct_add( Lane* s) +{ + MUTEX_LOCK( &s->U->selfdestruct_cs); + assert( s->selfdestruct_next == NULL); + + s->selfdestruct_next = s->U->selfdestruct_first; + s->U->selfdestruct_first= s; + MUTEX_UNLOCK( &s->U->selfdestruct_cs); +} + +/* + * A free-running lane has ended; remove it from selfdestruct chain + */ +static bool_t selfdestruct_remove( Lane* s) +{ + bool_t found = FALSE; + MUTEX_LOCK( &s->U->selfdestruct_cs); + { + // Make sure (within the MUTEX) that we actually are in the chain + // still (at process exit they will remove us from chain and then + // cancel/kill). + // + if( s->selfdestruct_next != NULL) + { + Lane** ref = (Lane**) &s->U->selfdestruct_first; + + while( *ref != SELFDESTRUCT_END ) + { + if( *ref == s) + { + *ref = s->selfdestruct_next; + s->selfdestruct_next = NULL; + // the terminal shutdown should wait until the lane is done with its lua_close() + ++ s->U->selfdestructing_count; + found = TRUE; + break; + } + ref = (Lane**) &((*ref)->selfdestruct_next); + } + assert( found); + } + } + MUTEX_UNLOCK( &s->U->selfdestruct_cs); + return found; +} + +/* +* Process end; cancel any still free-running threads +*/ +static int selfdestruct_gc( lua_State* L) +{ + Universe* U = (Universe*) lua_touserdata( L, 1); + + while( U->selfdestruct_first != SELFDESTRUCT_END) // true at most once! + { + // Signal _all_ still running threads to exit (including the timer thread) + // + MUTEX_LOCK( &U->selfdestruct_cs); + { + Lane* s = U->selfdestruct_first; + while( s != SELFDESTRUCT_END) + { + // attempt a regular unforced hard cancel with a small timeout + bool_t cancelled = THREAD_ISNULL( s->thread) || thread_cancel( L, s, CO_Hard, 0.0001, FALSE, 0.0); + // if we failed, and we know the thread is waiting on a linda + if( cancelled == FALSE && s->status == WAITING && s->waiting_on != NULL) + { + // signal the linda to wake up the thread so that it can react to the cancel query + // let us hope we never land here with a pointer on a linda that has been destroyed... + SIGNAL_T* waiting_on = s->waiting_on; + //s->waiting_on = NULL; // useful, or not? 
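// [editor's sketch] selfdestruct_add()/selfdestruct_remove() above (and the tracking_* pair
// earlier) rely on the pointer-to-pointer unlink idiom, with a (-1) sentinel instead of NULL
// terminating the chain. A bare, self-contained sketch of that pattern; struct node and the
// chain_* names are illustrative only, not Lanes code:
struct node { struct node* next; };
#define CHAIN_END ((struct node*)(-1))

static void chain_push(struct node** first, struct node* n)
{
    n->next = *first;          // new head points at the old head (or the sentinel)
    *first = n;
}

static int chain_remove(struct node** first, struct node* n)
{
    struct node** ref = first;
    while (*ref != CHAIN_END)
    {
        if (*ref == n)
        {
            *ref = n->next;    // unlink without tracking a separate 'previous' node
            n->next = NULL;
            return 1;          // found and removed
        }
        ref = &(*ref)->next;
    }
    return 0;                  // not in the chain
}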
+ SIGNAL_ALL( waiting_on); + } + s = s->selfdestruct_next; + } + } + MUTEX_UNLOCK( &U->selfdestruct_cs); + + // When noticing their cancel, the lanes will remove themselves from + // the selfdestruct chain. + + // TBD: Not sure if Windows (multi core) will require the timed approach, + // or single Yield. I don't have machine to test that (so leaving + // for timed approach). -- AKa 25-Oct-2008 + + // OS X 10.5 (Intel) needs more to avoid segfaults. + // + // "make test" is okay. 100's of "make require" are okay. + // + // Tested on MacBook Core Duo 2GHz and 10.5.5: + // -- AKa 25-Oct-2008 + // + { + lua_Number const shutdown_timeout = lua_tonumber( L, lua_upvalueindex( 1)); + double const t_until = now_secs() + shutdown_timeout; + + while( U->selfdestruct_first != SELFDESTRUCT_END) + { + YIELD(); // give threads time to act on their cancel + { + // count the number of cancelled thread that didn't have the time to act yet + int n = 0; + double t_now = 0.0; + MUTEX_LOCK( &U->selfdestruct_cs); + { + Lane* s = U->selfdestruct_first; + while( s != SELFDESTRUCT_END) + { + if( s->cancel_request == CANCEL_HARD) + ++ n; + s = s->selfdestruct_next; + } + } + MUTEX_UNLOCK( &U->selfdestruct_cs); + // if timeout elapsed, or we know all threads have acted, stop waiting + t_now = now_secs(); + if( n == 0 || (t_now >= t_until)) + { + DEBUGSPEW_CODE( fprintf( stderr, "%d uncancelled lane(s) remain after waiting %fs at process end.\n", n, shutdown_timeout - (t_until - t_now))); + break; + } + } + } + } + + // If some lanes are currently cleaning after themselves, wait until they are done. + // They are no longer listed in the selfdestruct chain, but they still have to lua_close(). + while( U->selfdestructing_count > 0) + { + YIELD(); + } + + //--- + // Kill the still free running threads + // + if( U->selfdestruct_first != SELFDESTRUCT_END) + { + unsigned int n = 0; + // first thing we did was to raise the linda signals the threads were waiting on (if any) + // therefore, any well-behaved thread should be in CANCELLED state + // these are not running, and the state can be closed + MUTEX_LOCK( &U->selfdestruct_cs); + { + Lane* s = U->selfdestruct_first; + while( s != SELFDESTRUCT_END) + { + Lane* next_s = s->selfdestruct_next; + s->selfdestruct_next = NULL; // detach from selfdestruct chain + if( !THREAD_ISNULL( s->thread)) // can be NULL if previous 'soft' termination succeeded + { + THREAD_KILL( &s->thread); +#if THREADAPI == THREADAPI_PTHREAD + // pthread: make sure the thread is really stopped! + THREAD_WAIT( &s->thread, -1, &s->done_signal, &s->done_lock, &s->status); +#endif // THREADAPI == THREADAPI_PTHREAD + } + // NO lua_close() in this case because we don't know where execution of the state was interrupted + lane_cleanup( s); + s = next_s; + ++ n; + } + U->selfdestruct_first = SELFDESTRUCT_END; + } + MUTEX_UNLOCK( &U->selfdestruct_cs); + + DEBUGSPEW_CODE( fprintf( stderr, "Killed %d lane(s) at process end.\n", n)); + } + } + + // If some lanes are currently cleaning after themselves, wait until they are done. + // They are no longer listed in the selfdestruct chain, but they still have to lua_close(). 
+ while( U->selfdestructing_count > 0) + { + YIELD(); + } + + // necessary so that calling free_deep_prelude doesn't crash because linda_id expects a linda lightuserdata at absolute slot 1 + lua_settop( L, 0); + // no need to mutex-protect this as all threads in the universe are gone at that point + if( U->timer_deep != NULL) // test ins case some early internal error prevented Lanes from creating the deep timer + { + -- U->timer_deep->refcount; // should be 0 now + free_deep_prelude( L, (DeepPrelude*) U->timer_deep); + U->timer_deep = NULL; + } + + close_keepers( U); + + // remove the protected allocator, if any + cleanup_allocator_function( U, L); + +#if HAVE_LANE_TRACKING() + MUTEX_FREE( &U->tracking_cs); +#endif // HAVE_LANE_TRACKING() + // Linked chains handling + MUTEX_FREE( &U->selfdestruct_cs); + MUTEX_FREE( &U->require_cs); + // Locks for 'tools.c' inc/dec counters + MUTEX_FREE( &U->deep_lock); + MUTEX_FREE( &U->mtid_lock); + // universe is no longer available (nor necessary) + // we need to do this in case some deep userdata objects were created before Lanes was initialized, + // as potentially they will be garbage collected after Lanes at application shutdown + universe_store( L, NULL); + return 0; +} + + +//--- +// = _single( [cores_uint=1] ) +// +// Limits the process to use only 'cores' CPU cores. To be used for performance +// testing on multicore devices. DEBUGGING ONLY! +// +LUAG_FUNC( set_singlethreaded) +{ + uint_t cores = luaG_optunsigned( L, 1, 1); + (void) cores; // prevent "unused" warning + +#ifdef PLATFORM_OSX +#ifdef _UTILBINDTHREADTOCPU + if( cores > 1) + { + return luaL_error( L, "Limiting to N>1 cores not possible"); + } + // requires 'chudInitialize()' + utilBindThreadToCPU(0); // # of CPU to run on (we cannot limit to 2..N CPUs?) + return 0; +#else + return luaL_error( L, "Not available: compile with _UTILBINDTHREADTOCPU"); +#endif +#else + return luaL_error( L, "not implemented"); +#endif +} + + +/* +* str= lane_error( error_val|str ) +* +* Called if there's an error in some lane; add call stack to error message +* just like 'lua.c' normally does. +* +* ".. will be called with the error message and its return value will be the +* message returned on the stack by lua_pcall." +* +* Note: Rather than modifying the error message itself, it would be better +* to provide the call stack (as string) completely separated. This would +* work great with non-string error values as well (current system does not). +* (This is NOT possible with the Lua 5.1 'lua_pcall()'; we could of course +* implement a Lanes-specific 'pcall' of our own that does this). TBD!!! 
:) +* --AKa 22-Jan-2009 +*/ +#if ERROR_FULL_STACK + +// crc64/we of string "EXTENDED_STACKTRACE_REGKEY" generated at http://www.nitrxgen.net/hashgen/ +static DECLARE_CONST_UNIQUE_KEY( EXTENDED_STACKTRACE_REGKEY, 0x2357c69a7c92c936); // used as registry key + +LUAG_FUNC( set_error_reporting) +{ + bool_t equal; + luaL_checktype( L, 1, LUA_TSTRING); + lua_pushliteral( L, "extended"); + equal = lua_rawequal( L, -1, 1); + lua_pop( L, 1); + if( equal) + { + goto done; + } + lua_pushliteral( L, "basic"); + equal = !lua_rawequal( L, -1, 1); + lua_pop( L, 1); + if( equal) + { + return luaL_error( L, "unsupported error reporting model"); + } +done: + REGISTRY_SET( L, EXTENDED_STACKTRACE_REGKEY, lua_pushboolean( L, equal)); + return 0; +} + +static int lane_error( lua_State* L) +{ + lua_Debug ar; + int n; + bool_t extended; + + // error message (any type) + STACK_CHECK_ABS( L, 1); // some_error + + // Don't do stack survey for cancelled lanes. + // + if( equal_unique_key( L, 1, CANCEL_ERROR)) + { + return 1; // just pass on + } + + STACK_GROW( L, 3); + REGISTRY_GET( L, EXTENDED_STACKTRACE_REGKEY); // some_error basic|extended + extended = lua_toboolean( L, -1); + lua_pop( L, 1); // some_error + + // Place stack trace at 'registry[lane_error]' for the 'lua_pcall()' + // caller to fetch. This bypasses the Lua 5.1 limitation of only one + // return value from error handler to 'lua_pcall()' caller. + + // It's adequate to push stack trace as a table. This gives the receiver + // of the stack best means to format it to their liking. Also, it allows + // us to add more stack info later, if needed. + // + // table of { "sourcefile.lua:", ... } + // + lua_newtable( L); // some_error {} + + // Best to start from level 1, but in some cases it might be a C function + // and we don't get '.currentline' for that. It's okay - just keep level + // and table index growing separate. --AKa 22-Jan-2009 + // + for( n = 1; lua_getstack( L, n, &ar); ++ n) + { + lua_getinfo( L, extended ? 
"Sln" : "Sl", &ar); + if( extended) + { + lua_newtable( L); // some_error {} {} + + lua_pushstring( L, ar.source); // some_error {} {} source + lua_setfield( L, -2, "source"); // some_error {} {} + + lua_pushinteger( L, ar.currentline); // some_error {} {} currentline + lua_setfield( L, -2, "currentline"); // some_error {} {} + + lua_pushstring( L, ar.name); // some_error {} {} name + lua_setfield( L, -2, "name"); // some_error {} {} + + lua_pushstring( L, ar.namewhat); // some_error {} {} namewhat + lua_setfield( L, -2, "namewhat"); // some_error {} {} + + lua_pushstring( L, ar.what); // some_error {} {} what + lua_setfield( L, -2, "what"); // some_error {} {} + } + else if( ar.currentline > 0) + { + lua_pushfstring( L, "%s:%d", ar.short_src, ar.currentline); // some_error {} "blah:blah" + } + else + { + lua_pushfstring( L, "%s:?", ar.short_src); // some_error {} "blah" + } + lua_rawseti( L, -2, (lua_Integer) n); // some_error {} + } + + REGISTRY_SET( L, STACKTRACE_REGKEY, lua_insert( L, -2)); // some_error + + STACK_END( L, 1); + return 1; // the untouched error value +} +#endif // ERROR_FULL_STACK + +static void push_stack_trace( lua_State* L, int rc_, int stk_base_) +{ + // Lua 5.1 error handler is limited to one return value; it stored the stack trace in the registry + switch( rc_) + { + case LUA_OK: // no error, body return values are on the stack + break; + + case LUA_ERRRUN: // cancellation or a runtime error +#if ERROR_FULL_STACK // when ERROR_FULL_STACK, we installed a handler + { + STACK_CHECK( L, 0); + // fetch the call stack table from the registry where the handler stored it + STACK_GROW( L, 1); + // yields nil if no stack was generated (in case of cancellation for example) + REGISTRY_GET( L, STACKTRACE_REGKEY); // err trace|nil + STACK_END( L, 1); + + // For cancellation the error message is CANCEL_ERROR, and a stack trace isn't placed + // For other errors, the message can be whatever was thrown, and we should have a stack trace table + ASSERT_L( lua_type( L, 1 + stk_base_) == (equal_unique_key( L, stk_base_, CANCEL_ERROR) ? LUA_TNIL : LUA_TTABLE)); + // Just leaving the stack trace table on the stack is enough to get it through to the master. + break; + } +#endif // fall through if not ERROR_FULL_STACK + + case LUA_ERRMEM: // memory allocation error (handler not called) + case LUA_ERRERR: // error while running the error handler (if any, for example an out-of-memory condition) + default: + // we should have a single value which is either a string (the error message) or CANCEL_ERROR + ASSERT_L( (lua_gettop( L) == stk_base_) && ((lua_type( L, stk_base_) == LUA_TSTRING) || equal_unique_key( L, stk_base_, CANCEL_ERROR))); + break; + } +} + +LUAG_FUNC( set_debug_threadname) +{ + DECLARE_CONST_UNIQUE_KEY( hidden_regkey, LG_set_debug_threadname); + // C s_lane structure is a light userdata upvalue + Lane* s = (Lane*) lua_touserdata( L, lua_upvalueindex( 1)); + luaL_checktype( L, -1, LUA_TSTRING); // "name" + lua_settop( L, 1); + STACK_CHECK_ABS( L, 1); + // store a hidden reference in the registry to make sure the string is kept around even if a lane decides to manually change the "decoda_name" global... 
+ REGISTRY_SET( L, hidden_regkey, lua_pushvalue( L, -2)); + STACK_MID( L, 1); + s->debug_name = lua_tostring( L, -1); + // keep a direct pointer on the string + THREAD_SETNAME( s->debug_name); + // to see VM name in Decoda debugger Virtual Machine window + lua_setglobal( L, "decoda_name"); // + STACK_END( L, 0); + return 0; +} + +LUAG_FUNC( get_debug_threadname) +{ + Lane* const s = lua_toLane( L, 1); + luaL_argcheck( L, lua_gettop( L) == 1, 2, "too many arguments"); + lua_pushstring( L, s->debug_name); + return 1; +} + +LUAG_FUNC( set_thread_priority) +{ + int const prio = (int) luaL_checkinteger( L, 1); + // public Lanes API accepts a generic range -3/+3 + // that will be remapped into the platform-specific scheduler priority scheme + // On some platforms, -3 is equivalent to -2 and +3 to +2 + if( prio < THREAD_PRIO_MIN || prio > THREAD_PRIO_MAX) + { + return luaL_error( L, "priority out of range: %d..+%d (%d)", THREAD_PRIO_MIN, THREAD_PRIO_MAX, prio); + } + THREAD_SET_PRIORITY( prio); + return 0; +} + +LUAG_FUNC( set_thread_affinity) +{ + lua_Integer affinity = luaL_checkinteger( L, 1); + if( affinity <= 0) + { + return luaL_error( L, "invalid affinity (%d)", affinity); + } + THREAD_SET_AFFINITY( (unsigned int) affinity); + return 0; +} + +#if USE_DEBUG_SPEW() +// can't use direct LUA_x errcode indexing because the sequence is not the same between Lua 5.1 and 5.2 :-( +// LUA_ERRERR doesn't have the same value +struct errcode_name +{ + int code; + char const* name; +}; + +static struct errcode_name s_errcodes[] = +{ + { LUA_OK, "LUA_OK"}, + { LUA_YIELD, "LUA_YIELD"}, + { LUA_ERRRUN, "LUA_ERRRUN"}, + { LUA_ERRSYNTAX, "LUA_ERRSYNTAX"}, + { LUA_ERRMEM, "LUA_ERRMEM"}, + { LUA_ERRGCMM, "LUA_ERRGCMM"}, + { LUA_ERRERR, "LUA_ERRERR"}, +}; +static char const* get_errcode_name( int _code) +{ + int i; + for( i = 0; i < 7; ++ i) + { + if( s_errcodes[i].code == _code) + { + return s_errcodes[i].name; + } + } + return ""; +} +#endif // USE_DEBUG_SPEW() + +#if THREADWAIT_METHOD == THREADWAIT_CONDVAR // implies THREADAPI == THREADAPI_PTHREAD +static void thread_cleanup_handler( void* opaque) +{ + Lane* s= (Lane*) opaque; + MUTEX_LOCK( &s->done_lock); + s->status = CANCELLED; + SIGNAL_ONE( &s->done_signal); // wake up master (while 's->done_lock' is on) + MUTEX_UNLOCK( &s->done_lock); +} +#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR + +static THREAD_RETURN_T THREAD_CALLCONV lane_main( void* vs) +{ + Lane* s = (Lane*) vs; + int rc, rc2; + lua_State* L = s->L; + // Called with the lane function and arguments on the stack + int const nargs = lua_gettop( L) - 1; + DEBUGSPEW_CODE( Universe* U = universe_get( L)); + THREAD_MAKE_ASYNCH_CANCELLABLE(); + THREAD_CLEANUP_PUSH( thread_cleanup_handler, s); + s->status = RUNNING; // PENDING -> RUNNING + + // Tie "set_finalizer()" to the state + lua_pushcfunction( L, LG_set_finalizer); + populate_func_lookup_table( L, -1, "set_finalizer"); + lua_setglobal( L, "set_finalizer"); + + // Tie "set_debug_threadname()" to the state + // But don't register it in the lookup database because of the s_lane pointer upvalue + lua_pushlightuserdata( L, s); + lua_pushcclosure( L, LG_set_debug_threadname, 1); + lua_setglobal( L, "set_debug_threadname"); + + // Tie "cancel_test()" to the state + lua_pushcfunction( L, LG_cancel_test); + populate_func_lookup_table( L, -1, "cancel_test"); + lua_setglobal( L, "cancel_test"); + + // this could be done in lane_new before the lane body function is pushed on the stack to avoid unnecessary stack slot shifting around +#if ERROR_FULL_STACK + 
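// [editor's sketch] The block that follows installs lane_error() at stack index 1 so that
// lua_pcall() can route runtime errors through it (the msgh argument below is simply
// ERROR_FULL_STACK, i.e. index 1). A self-contained sketch of that message-handler pattern,
// using luaL_traceback (Lua 5.2+) in place of the registry-based trace table; the
// traceback_handler/protected_call names are illustrative, not Lanes API:
#include <lua.h>
#include <lauxlib.h>

static int traceback_handler(lua_State* L)
{
    // append a call-stack dump to the error message (a non-string error yields a NULL msg,
    // which luaL_traceback accepts)
    luaL_traceback(L, L, lua_tostring(L, 1), 1);
    return 1;
}

static int protected_call(lua_State* L, int nargs)
{
    int const base = lua_gettop(L) - nargs;                  // index of the function to call
    lua_pushcfunction(L, traceback_handler);                 // func args... handler
    lua_insert(L, base);                                     // handler func args...
    int const rc = lua_pcall(L, nargs, LUA_MULTRET, base);   // handler results...|errmsg
    lua_remove(L, base);                                     // results...|errmsg
    return rc;
}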
// Tie "set_error_reporting()" to the state + lua_pushcfunction( L, LG_set_error_reporting); + populate_func_lookup_table( L, -1, "set_error_reporting"); + lua_setglobal( L, "set_error_reporting"); + + STACK_GROW( L, 1); + lua_pushcfunction( L, lane_error); // func args handler + lua_insert( L, 1); // handler func args +#endif // ERROR_FULL_STACK + + rc = lua_pcall( L, nargs, LUA_MULTRET, ERROR_FULL_STACK); // retvals|err + +#if ERROR_FULL_STACK + lua_remove( L, 1); // retvals|error +# endif // ERROR_FULL_STACK + + // in case of error and if it exists, fetch stack trace from registry and push it + push_stack_trace( L, rc, 1); // retvals|error [trace] + + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "Lane %p body: %s (%s)\n" INDENT_END, L, get_errcode_name( rc), equal_unique_key( L, 1, CANCEL_ERROR) ? "cancelled" : lua_typename( L, lua_type( L, 1)))); + //STACK_DUMP(L); + // Call finalizers, if the script has set them up. + // + rc2 = run_finalizers( L, rc); + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "Lane %p finalizer: %s\n" INDENT_END, L, get_errcode_name( rc2))); + if( rc2 != LUA_OK) // Error within a finalizer! + { + // the finalizer generated an error, and left its own error message [and stack trace] on the stack + rc = rc2; // we're overruling the earlier script error or normal return + } + s->waiting_on = NULL; // just in case + if( selfdestruct_remove( s)) // check and remove (under lock!) + { + // We're a free-running thread and no-one's there to clean us up. + // + lua_close( s->L); + + MUTEX_LOCK( &s->U->selfdestruct_cs); + // done with lua_close(), terminal shutdown sequence may proceed + -- s->U->selfdestructing_count; + MUTEX_UNLOCK( &s->U->selfdestruct_cs); + + lane_cleanup( s); // s is freed at this point + } + else + { + // leave results (1..top) or error message + stack trace (1..2) on the stack - master will copy them + + enum e_status st = (rc == 0) ? DONE : equal_unique_key( L, 1, CANCEL_ERROR) ? 
CANCELLED : ERROR_ST; + + // Posix no PTHREAD_TIMEDJOIN: + // 'done_lock' protects the -> DONE|ERROR_ST|CANCELLED state change + // +#if THREADWAIT_METHOD == THREADWAIT_CONDVAR + MUTEX_LOCK( &s->done_lock); + { +#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR + s->status = st; +#if THREADWAIT_METHOD == THREADWAIT_CONDVAR + SIGNAL_ONE( &s->done_signal); // wake up master (while 's->done_lock' is on) + } + MUTEX_UNLOCK( &s->done_lock); +#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR + } + THREAD_CLEANUP_POP( FALSE); + return 0; // ignored +} + +// --- If a client wants to transfer stuff of a given module from the current state to another Lane, the module must be required +// with lanes.require, that will call the regular 'require', then populate the lookup database in the source lane +// module = lanes.require( "modname") +// upvalue[1]: _G.require +LUAG_FUNC( require) +{ + char const* name = lua_tostring( L, 1); + int const nargs = lua_gettop( L); + DEBUGSPEW_CODE( Universe* U = universe_get( L)); + STACK_CHECK( L, 0); + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lanes.require %s BEGIN\n" INDENT_END, name)); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + lua_pushvalue( L, lua_upvalueindex(1)); // "name" require + lua_insert( L, 1); // require "name" + lua_call( L, nargs, 1); // module + populate_func_lookup_table( L, -1, name); + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lanes.require %s END\n" INDENT_END, name)); + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + STACK_END( L, 0); + return 1; +} + + +// --- If a client wants to transfer stuff of a previously required module from the current state to another Lane, the module must be registered +// to populate the lookup database in the source lane (and in the destination too, of course) +// lanes.register( "modname", module) +LUAG_FUNC( register) +{ + char const* name = luaL_checkstring( L, 1); + int const mod_type = lua_type( L, 2); + // ignore extra parameters, just in case + lua_settop( L, 2); + luaL_argcheck( L, (mod_type == LUA_TTABLE) || (mod_type == LUA_TFUNCTION), 2, "unexpected module type"); + DEBUGSPEW_CODE( Universe* U = universe_get( L)); + STACK_CHECK( L, 0); // "name" mod_table + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lanes.register %s BEGIN\n" INDENT_END, name)); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + populate_func_lookup_table( L, -1, name); + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lanes.register %s END\n" INDENT_END, name)); + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + STACK_END( L, 0); + return 0; +} + +// crc64/we of string "GCCB_KEY" generated at http://www.nitrxgen.net/hashgen/ +static DECLARE_CONST_UNIQUE_KEY( GCCB_KEY, 0xcfb1f046ef074e88); + +//--- +// lane_ud = lane_new( function +// , [libs_str] +// , [priority_int=0] +// , [globals_tbl] +// , [package_tbl] +// , [required_tbl] +// , [gc_cb_func] +// [, ... args ...]) +// +// Upvalues: metatable to use for 'lane_ud' +// +LUAG_FUNC( lane_new) +{ + lua_State* L2; + Lane* s; + Lane** ud; + + char const* libs_str = lua_tostring( L, 2); + bool_t const have_priority = !lua_isnoneornil( L, 3); + int const priority = have_priority ? (int) lua_tointeger( L, 3) : THREAD_PRIO_DEFAULT; + uint_t const globals_idx = lua_isnoneornil( L, 4) ? 0 : 4; + uint_t const package_idx = lua_isnoneornil( L, 5) ? 0 : 5; + uint_t const required_idx = lua_isnoneornil( L, 6) ? 0 : 6; + uint_t const gc_cb_idx = lua_isnoneornil( L, 7) ? 
0 : 7; + +#define FIXED_ARGS 7 + int const nargs = lua_gettop(L) - FIXED_ARGS; + Universe* const U = universe_get( L); + ASSERT_L( nargs >= 0); + + // public Lanes API accepts a generic range -3/+3 + // that will be remapped into the platform-specific scheduler priority scheme + // On some platforms, -3 is equivalent to -2 and +3 to +2 + if( have_priority && (priority < THREAD_PRIO_MIN || priority > THREAD_PRIO_MAX)) + { + return luaL_error( L, "Priority out of range: %d..+%d (%d)", THREAD_PRIO_MIN, THREAD_PRIO_MAX, priority); + } + + /* --- Create and prepare the sub state --- */ + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: setup\n" INDENT_END)); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + + // populate with selected libraries at the same time + L2 = luaG_newstate( U, L, libs_str); // L // L2 + + STACK_GROW( L2, nargs + 3); // + STACK_CHECK( L2, 0); + + STACK_GROW( L, 3); // func libs priority globals package required gc_cb [... args ...] + STACK_CHECK( L, 0); + + // give a default "Lua" name to the thread to see VM name in Decoda debugger + lua_pushfstring( L2, "Lane #%p", L2); // "..." + lua_setglobal( L2, "decoda_name"); // + ASSERT_L( lua_gettop( L2) == 0); + + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: update 'package'\n" INDENT_END)); + // package + if( package_idx != 0) + { + // when copying with mode eLM_LaneBody, should raise an error in case of problem, not leave it one the stack + (void) luaG_inter_copy_package( U, L, L2, package_idx, eLM_LaneBody); + } + + // modules to require in the target lane *before* the function is transfered! + + if( required_idx != 0) + { + int nbRequired = 1; + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: require 'required' list\n" INDENT_END)); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + // should not happen, was checked in lanes.lua before calling lane_new() + if( lua_type( L, required_idx) != LUA_TTABLE) + { + return luaL_error( L, "expected required module list as a table, got %s", luaL_typename( L, required_idx)); + } + + lua_pushnil( L); // func libs priority globals package required gc_cb [... args ...] nil + while( lua_next( L, required_idx) != 0) // func libs priority globals package required gc_cb [... args ...] n "modname" + { + if( lua_type( L, -1) != LUA_TSTRING || lua_type( L, -2) != LUA_TNUMBER || lua_tonumber( L, -2) != nbRequired) + { + return luaL_error( L, "required module list should be a list of strings"); + } + else + { + // require the module in the target state, and populate the lookup table there too + size_t len; + char const* name = lua_tolstring( L, -1, &len); + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: require '%s'\n" INDENT_END, name)); + + // require the module in the target lane + lua_getglobal( L2, "require"); // require()? + if( lua_isnil( L2, -1)) + { + lua_pop( L2, 1); // + luaL_error( L, "cannot pre-require modules without loading 'package' library first"); + } + else + { + lua_pushlstring( L2, name, len); // require() name + if( lua_pcall( L2, 1, 1, 0) != LUA_OK) // ret/errcode + { + // propagate error to main state if any + luaG_inter_move( U, L2, L, 1, eLM_LaneBody); // func libs priority globals package required gc_cb [... args ...] n "modname" error + return lua_error( L); + } + // after requiring the module, register the functions it exported in our name<->function database + populate_func_lookup_table( L2, -1, name); + lua_pop( L2, 1); // + } + } + lua_pop( L, 1); // func libs priority globals package required gc_cb [... args ...] 
n + ++ nbRequired; + } // func libs priority globals package required gc_cb [... args ...] + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + } + STACK_MID( L, 0); + STACK_MID( L2, 0); // + + // Appending the specified globals to the global environment + // *after* stdlibs have been loaded and modules required, in case we transfer references to native functions they exposed... + // + if( globals_idx != 0) + { + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: transfer globals\n" INDENT_END)); + if( !lua_istable( L, globals_idx)) + { + return luaL_error( L, "Expected table, got %s", luaL_typename( L, globals_idx)); + } + + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + lua_pushnil( L); // func libs priority globals package required gc_cb [... args ...] nil + // Lua 5.2 wants us to push the globals table on the stack + lua_pushglobaltable( L2); // _G + while( lua_next( L, globals_idx)) // func libs priority globals package required gc_cb [... args ...] k v + { + luaG_inter_copy( U, L, L2, 2, eLM_LaneBody); // _G k v + // assign it in L2's globals table + lua_rawset( L2, -3); // _G + lua_pop( L, 1); // func libs priority globals package required gc_cb [... args ...] k + } // func libs priority globals package required gc_cb [... args ...] + lua_pop( L2, 1); // + + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + } + STACK_MID( L, 0); + STACK_MID( L2, 0); + + // Lane main function + if( lua_type( L, 1) == LUA_TFUNCTION) + { + int res; + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: transfer lane body\n" INDENT_END)); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + lua_pushvalue( L, 1); // func libs priority globals package required gc_cb [... args ...] func + res = luaG_inter_move( U, L, L2, 1, eLM_LaneBody); // func libs priority globals package required gc_cb [... args ...] // func + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + if( res != 0) + { + return luaL_error( L, "tried to copy unsupported types"); + } + } + else if( lua_type( L, 1) == LUA_TSTRING) + { + // compile the string + if( luaL_loadstring( L2, lua_tostring( L, 1)) != 0) // func + { + return luaL_error( L, "error when parsing lane function code"); + } + } + STACK_MID( L, 0); + STACK_MID( L2, 1); + ASSERT_L( lua_isfunction( L2, 1)); + + // revive arguments + if( nargs > 0) + { + int res; + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: transfer lane arguments\n" INDENT_END)); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + res = luaG_inter_move( U, L, L2, nargs, eLM_LaneBody); // func libs priority globals package required gc_cb // func [... args ...] 
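// [editor's sketch] luaG_inter_move()/luaG_inter_copy() used above are Lanes' own deep-copy
// machinery; the stock Lua API has no call for moving arbitrary values between independent
// states. For plain scalars the manual equivalent is short (copy_scalar() is an illustrative
// helper, Lua 5.3+ for the integer check); tables, functions and full userdata are exactly
// what the Lanes machinery exists to handle:
#include <lua.h>
#include <lauxlib.h>

static void copy_scalar(lua_State* from, lua_State* to, int idx)
{
    switch (lua_type(from, idx))
    {
        case LUA_TNIL:
            lua_pushnil(to);
            break;
        case LUA_TBOOLEAN:
            lua_pushboolean(to, lua_toboolean(from, idx));
            break;
        case LUA_TNUMBER:
            if (lua_isinteger(from, idx))
                lua_pushinteger(to, lua_tointeger(from, idx));
            else
                lua_pushnumber(to, lua_tonumber(from, idx));
            break;
        case LUA_TSTRING:
        {
            size_t len;
            char const* s = lua_tolstring(from, idx, &len);  // may contain embedded zeros
            lua_pushlstring(to, s, len);
            break;
        }
        default:
            luaL_error(from, "unsupported type: %s", luaL_typename(from, idx));
    }
}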
+ DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + if( res != 0) + { + return luaL_error( L, "tried to copy unsupported types"); + } + } + STACK_END( L, -nargs); + ASSERT_L( lua_gettop( L) == FIXED_ARGS); + STACK_CHECK( L, 0); + STACK_MID( L2, 1 + nargs); + + // 's' is allocated from heap, not Lua, since its life span may surpass the handle's (if free running thread) + // + // a Lane full userdata needs a single uservalue + ud = (Lane**) lua_newuserdatauv( L, sizeof( Lane*), 1); // func libs priority globals package required gc_cb lane + { + AllocatorDefinition* const allocD = &U->internal_allocator; + s = *ud = (Lane*) allocD->allocF(allocD->allocUD, NULL, 0, sizeof(Lane)); + } + if( s == NULL) + { + return luaL_error( L, "could not create lane: out of memory"); + } + + s->L = L2; + s->U = U; + s->status = PENDING; + s->waiting_on = NULL; + s->debug_name = ""; + s->cancel_request = CANCEL_NONE; + +#if THREADWAIT_METHOD == THREADWAIT_CONDVAR + MUTEX_INIT( &s->done_lock); + SIGNAL_INIT( &s->done_signal); +#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR + s->mstatus = NORMAL; + s->selfdestruct_next = NULL; +#if HAVE_LANE_TRACKING() + s->tracking_next = NULL; + if( s->U->tracking_first) + { + tracking_add( s); + } +#endif // HAVE_LANE_TRACKING() + + // Set metatable for the userdata + // + lua_pushvalue( L, lua_upvalueindex( 1)); // func libs priority globals package required gc_cb lane mt + lua_setmetatable( L, -2); // func libs priority globals package required gc_cb lane + STACK_MID( L, 1); + + // Create uservalue for the userdata + // (this is where lane body return values will be stored when the handle is indexed by a numeric key) + lua_newtable( L); // func libs cancelstep priority globals package required gc_cb lane uv + + // Store the gc_cb callback in the uservalue + if( gc_cb_idx > 0) + { + push_unique_key( L, GCCB_KEY); // func libs priority globals package required gc_cb lane uv k + lua_pushvalue( L, gc_cb_idx); // func libs priority globals package required gc_cb lane uv k gc_cb + lua_rawset( L, -3); // func libs priority globals package required gc_cb lane uv + } + + lua_setiuservalue( L, -2, 1); // func libs priority globals package required gc_cb lane + + // Store 's' in the lane's registry, for 'cancel_test()' (we do cancel tests at pending send/receive). + REGISTRY_SET( L2, CANCEL_TEST_KEY, lua_pushlightuserdata( L2, s)); // func [... args ...] + + STACK_END( L, 1); + STACK_END( L2, 1 + nargs); + + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: launching thread\n" INDENT_END)); + THREAD_CREATE( &s->thread, lane_main, s, priority); + + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + return 1; +} + + +//--- +// = thread_gc( lane_ud ) +// +// Cleanup for a thread userdata. If the thread is still executing, leave it +// alive as a free-running thread (will clean up itself). +// +// * Why NOT cancel/kill a loose thread: +// +// At least timer system uses a free-running thread, they should be handy +// and the issue of canceling/killing threads at gc is not very nice, either +// (would easily cause waits at gc cycle, which we don't want). +// +LUAG_FUNC( thread_gc) +{ + bool_t have_gc_cb = FALSE; + Lane* s = lua_toLane( L, 1); // ud + + // if there a gc callback? 
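// [editor's sketch] The lane handle built in lane_new() above is a full userdata holding a
// heap-allocated struct pointer plus one uservalue table, and thread_gc below starts by
// reading that uservalue back. A generic sketch of the same Lua 5.4 layout; Handle,
// new_handle() and handle_gc() are illustrative names, not Lanes API:
#include <stdlib.h>
#include <lua.h>
#include <lauxlib.h>

typedef struct { int id; } Handle;

static int handle_gc(lua_State* L)
{
    Handle** ud = (Handle**) lua_touserdata(L, 1);
    lua_getiuservalue(L, 1, 1);  // the uservalue is still reachable from __gc
    lua_pop(L, 1);
    free(*ud);                   // release the heap-side object
    *ud = NULL;
    return 0;
}

static int new_handle(lua_State* L)
{
    Handle** ud = (Handle**) lua_newuserdatauv(L, sizeof(Handle*), 1);  // ud
    *ud = (Handle*) malloc(sizeof(Handle));
    if (*ud == NULL)
    {
        return luaL_error(L, "out of memory");
    }
    (*ud)->id = 42;
    lua_newtable(L);                        // ud uv
    lua_setiuservalue(L, -2, 1);            // ud    (results/callbacks can be cached here)
    if (luaL_newmetatable(L, "Handle"))     // ud mt
    {
        lua_pushcfunction(L, handle_gc);    // ud mt gc
        lua_setfield(L, -2, "__gc");        // ud mt
    }
    lua_setmetatable(L, -2);                // ud
    return 1;
}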
+ lua_getiuservalue( L, 1, 1); // ud uservalue + push_unique_key( L, GCCB_KEY); // ud uservalue __gc + lua_rawget( L, -2); // ud uservalue gc_cb|nil + if( !lua_isnil( L, -1)) + { + lua_remove( L, -2); // ud gc_cb|nil + lua_pushstring( L, s->debug_name); // ud gc_cb name + have_gc_cb = TRUE; + } + else + { + lua_pop( L, 2); // ud + } + + // We can read 's->status' without locks, but not wait for it + // test KILLED state first, as it doesn't need to enter the selfdestruct chain + if( s->mstatus == KILLED) + { + // Make sure a kill has proceeded, before cleaning up the data structure. + // + // NO lua_close() in this case because we don't know where execution of the state was interrupted + DEBUGSPEW_CODE( fprintf( stderr, "** Joining with a killed thread (needs testing) **")); + // make sure the thread is no longer running, just like thread_join() + if(! THREAD_ISNULL( s->thread)) + { + THREAD_WAIT( &s->thread, -1, &s->done_signal, &s->done_lock, &s->status); + } + if( s->status >= DONE && s->L) + { + // we know the thread was killed while the Lua VM was not doing anything: we should be able to close it without crashing + // now, thread_cancel() will not forcefully kill a lane with s->status >= DONE, so I am not sure it can ever happen + lua_close( s->L); + s->L = 0; + // just in case, but s will be freed soon so... + s->debug_name = ""; + } + DEBUGSPEW_CODE( fprintf( stderr, "** Joined ok **")); + } + else if( s->status < DONE) + { + // still running: will have to be cleaned up later + selfdestruct_add( s); + assert( s->selfdestruct_next); + if( have_gc_cb) + { + lua_pushliteral( L, "selfdestruct"); // ud gc_cb name status + lua_call( L, 2, 0); // ud + } + return 0; + } + else if( s->L) + { + // no longer accessing the Lua VM: we can close right now + lua_close( s->L); + s->L = 0; + // just in case, but s will be freed soon so... + s->debug_name = ""; + } + + // Clean up after a (finished) thread + lane_cleanup( s); + + // do this after lane cleanup in case the callback triggers an error + if( have_gc_cb) + { + lua_pushliteral( L, "closed"); // ud gc_cb name status + lua_call( L, 2, 0); // ud + } + return 0; +} + +//--- +// str= thread_status( lane ) +// +// Returns: "pending" not started yet +// -> "running" started, doing its work.. +// <-> "waiting" blocked in a receive() +// -> "done" finished, results are there +// / "error" finished at an error, error value is there +// / "cancelled" execution cancelled by M (state gone) +// +static char const * thread_status_string( Lane* s) +{ + enum e_status st = s->status; // read just once (volatile) + char const* str = + (s->mstatus == KILLED) ? "killed" : // new to v3.3.0! + (st == PENDING) ? "pending" : + (st == RUNNING) ? "running" : // like in 'co.status()' + (st == WAITING) ? "waiting" : + (st == DONE) ? "done" : + (st == ERROR_ST) ? "error" : + (st == CANCELLED) ? "cancelled" : NULL; + return str; +} + +int push_thread_status( lua_State* L, Lane* s) +{ + char const* const str = thread_status_string( s); + ASSERT_L( str); + + lua_pushstring( L, str); + return 1; +} + + +//--- +// [...] 
| [nil, err_any, stack_tbl]= thread_join( lane_ud [, wait_secs=-1] ) +// +// timeout: returns nil +// done: returns return values (0..N) +// error: returns nil + error value [+ stack table] +// cancelled: returns nil +// +LUAG_FUNC( thread_join) +{ + Lane* const s = lua_toLane( L, 1); + double wait_secs = luaL_optnumber( L, 2, -1.0); + lua_State* L2 = s->L; + int ret; + bool_t done = THREAD_ISNULL( s->thread) || THREAD_WAIT( &s->thread, wait_secs, &s->done_signal, &s->done_lock, &s->status); + if( !done || !L2) + { + STACK_GROW( L, 2); + lua_pushnil( L); + lua_pushliteral( L, "timeout"); + return 2; + } + + STACK_CHECK( L, 0); + // Thread is DONE/ERROR_ST/CANCELLED; all ours now + + if( s->mstatus == KILLED) // OS thread was killed if thread_cancel was forced + { + // in that case, even if the thread was killed while DONE/ERROR_ST/CANCELLED, ignore regular return values + STACK_GROW( L, 2); + lua_pushnil( L); + lua_pushliteral( L, "killed"); + ret = 2; + } + else + { + Universe* U = universe_get( L); + // debug_name is a pointer to string possibly interned in the lane's state, that no longer exists when the state is closed + // so store it in the userdata uservalue at a key that can't possibly collide + securize_debug_threadname( L, s); + switch( s->status) + { + case DONE: + { + uint_t n = lua_gettop( L2); // whole L2 stack + if( (n > 0) && (luaG_inter_move( U, L2, L, n, eLM_LaneBody) != 0)) + { + return luaL_error( L, "tried to copy unsupported types"); + } + ret = n; + } + break; + + case ERROR_ST: + { + int const n = lua_gettop( L2); + STACK_GROW( L, 3); + lua_pushnil( L); + // even when ERROR_FULL_STACK, if the error is not LUA_ERRRUN, the handler wasn't called, and we only have 1 error message on the stack ... + if( luaG_inter_move( U, L2, L, n, eLM_LaneBody) != 0) // nil "err" [trace] + { + return luaL_error( L, "tried to copy unsupported types: %s", lua_tostring( L, -n)); + } + ret = 1 + n; + } + break; + + case CANCELLED: + ret = 0; + break; + + default: + DEBUGSPEW_CODE( fprintf( stderr, "Status: %d\n", s->status)); + ASSERT_L( FALSE); + ret = 0; + } + lua_close( L2); + } + s->L = 0; + STACK_END( L, ret); + return ret; +} + + +//--- +// thread_index( ud, key) -> value +// +// If key is found in the environment, return it +// If key is numeric, wait until the thread returns and populate the environment with the return values +// If the return values signal an error, propagate it +// If key is "status" return the thread status +// Else raise an error +LUAG_FUNC( thread_index) +{ + int const UD = 1; + int const KEY = 2; + int const USR = 3; + Lane* const s = lua_toLane( L, UD); + ASSERT_L( lua_gettop( L) == 2); + + STACK_GROW( L, 8); // up to 8 positions are needed in case of error propagation + + // If key is numeric, wait until the thread returns and populate the environment with the return values + if( lua_type( L, KEY) == LUA_TNUMBER) + { + // first, check that we don't already have an environment that holds the requested value + { + // If key is found in the uservalue, return it + lua_getiuservalue( L, UD, 1); + lua_pushvalue( L, KEY); + lua_rawget( L, USR); + if( !lua_isnil( L, -1)) + { + return 1; + } + lua_pop( L, 1); + } + { + // check if we already fetched the values from the thread or not + bool_t fetched; + lua_Integer key = lua_tointeger( L, KEY); + lua_pushinteger( L, 0); + lua_rawget( L, USR); + fetched = !lua_isnil( L, -1); + lua_pop( L, 1); // back to our 2 args + uservalue on the stack + if( !fetched) + { + lua_pushinteger( L, 0); + lua_pushboolean( L, 1); + 
lua_rawset( L, USR); + // wait until thread has completed + lua_pushcfunction( L, LG_thread_join); + lua_pushvalue( L, UD); + lua_call( L, 1, LUA_MULTRET); // all return values are on the stack, at slots 4+ + switch( s->status) + { + default: + if( s->mstatus != KILLED) + { + // this is an internal error, we probably never get here + lua_settop( L, 0); + lua_pushliteral( L, "Unexpected status: "); + lua_pushstring( L, thread_status_string( s)); + lua_concat( L, 2); + lua_error( L); + break; + } + // fall through if we are killed, as we got nil, "killed" on the stack + + case DONE: // got regular return values + { + int i, nvalues = lua_gettop( L) - 3; + for( i = nvalues; i > 0; -- i) + { + // pop the last element of the stack, to store it in the uservalue at its proper index + lua_rawseti( L, USR, i); + } + } + break; + + case ERROR_ST: // got 3 values: nil, errstring, callstack table + // me[-2] could carry the stack table, but even + // me[-1] is rather unnecessary (and undocumented); + // use ':join()' instead. --AKa 22-Jan-2009 + ASSERT_L( lua_isnil( L, 4) && !lua_isnil( L, 5) && lua_istable( L, 6)); + // store errstring at key -1 + lua_pushnumber( L, -1); + lua_pushvalue( L, 5); + lua_rawset( L, USR); + break; + + case CANCELLED: + // do nothing + break; + } + } + lua_settop( L, 3); // UD KEY ENV + if( key != -1) + { + lua_pushnumber( L, -1); // UD KEY ENV -1 + lua_rawget( L, USR); // UD KEY ENV "error" + if( !lua_isnil( L, -1)) // an error was stored + { + // Note: Lua 5.1 interpreter is not prepared to show + // non-string errors, so we use 'tostring()' here + // to get meaningful output. --AKa 22-Jan-2009 + // + // Also, the stack dump we get is no good; it only + // lists our internal Lanes functions. There seems + // to be no way to switch it off, though. + // + // Level 3 should show the line where 'h[x]' was read + // but this only seems to work for string messages + // (Lua 5.1.4). No idea, why. --AKa 22-Jan-2009 + lua_getmetatable( L, UD); // UD KEY ENV "error" mt + lua_getfield( L, -1, "cached_error"); // UD KEY ENV "error" mt error() + lua_getfield( L, -2, "cached_tostring"); // UD KEY ENV "error" mt error() tostring() + lua_pushvalue( L, 4); // UD KEY ENV "error" mt error() tostring() "error" + lua_call( L, 1, 1); // tostring( errstring) -- just in case // UD KEY ENV "error" mt error() "error" + lua_pushinteger( L, 3); // UD KEY ENV "error" mt error() "error" 3 + lua_call( L, 2, 0); // error( tostring( errstring), 3) // UD KEY ENV "error" mt + } + else + { + lua_pop( L, 1); // back to our 3 arguments on the stack + } + } + lua_rawgeti( L, USR, (int)key); + } + return 1; + } + if( lua_type( L, KEY) == LUA_TSTRING) + { + char const * const keystr = lua_tostring( L, KEY); + lua_settop( L, 2); // keep only our original arguments on the stack + if( strcmp( keystr, "status") == 0) + { + return push_thread_status( L, s); // push the string representing the status + } + // return UD.metatable[key] + lua_getmetatable( L, UD); // UD KEY mt + lua_replace( L, -3); // mt KEY + lua_rawget( L, -2); // mt value + // only "cancel" and "join" are registered as functions, any other string will raise an error + if( lua_iscfunction( L, -1)) + { + return 1; + } + return luaL_error( L, "can't index a lane with '%s'", keystr); + } + // unknown key + lua_getmetatable( L, UD); + lua_getfield( L, -1, "cached_error"); + lua_pushliteral( L, "Unknown key: "); + lua_pushvalue( L, KEY); + lua_concat( L, 2); + lua_call( L, 1, 0); // error( "Unknown key: " .. 
key) -> doesn't return + return 0; +} + +#if HAVE_LANE_TRACKING() +//--- +// threads() -> {}|nil +// +// Return a list of all known lanes +LUAG_FUNC( threads) +{ + int const top = lua_gettop( L); + Universe* U = universe_get( L); + + // List _all_ still running threads + // + MUTEX_LOCK( &U->tracking_cs); + if( U->tracking_first && U->tracking_first != TRACKING_END) + { + Lane* s = U->tracking_first; + int index = 0; + lua_newtable( L); // {} + while( s != TRACKING_END) + { + // insert a { name, status } tuple, so that several lanes with the same name can't clobber each other + lua_newtable( L); // {} {} + lua_pushstring( L, s->debug_name); // {} {} "name" + lua_setfield( L, -2, "name"); // {} {} + push_thread_status( L, s); // {} {} "status" + lua_setfield( L, -2, "status"); // {} {} + lua_rawseti( L, -2, ++ index); // {} + s = s->tracking_next; + } + } + MUTEX_UNLOCK( &U->tracking_cs); + return lua_gettop( L) - top; // 0 or 1 +} +#endif // HAVE_LANE_TRACKING() + +/* + * ############################################################################################### + * ######################################## Timer support ######################################## + * ############################################################################################### + */ + +/* +* secs= now_secs() +* +* Returns the current time, as seconds (millisecond resolution). +*/ +LUAG_FUNC( now_secs ) +{ + lua_pushnumber( L, now_secs() ); + return 1; +} + +/* +* wakeup_at_secs= wakeup_conv( date_tbl ) +*/ +LUAG_FUNC( wakeup_conv ) +{ + int year, month, day, hour, min, sec, isdst; + struct tm t; + memset( &t, 0, sizeof( t)); + // + // .year (four digits) + // .month (1..12) + // .day (1..31) + // .hour (0..23) + // .min (0..59) + // .sec (0..61) + // .yday (day of the year) + // .isdst (daylight saving on/off) + + STACK_CHECK( L, 0); + lua_getfield( L, 1, "year" ); year= (int)lua_tointeger(L,-1); lua_pop(L,1); + lua_getfield( L, 1, "month" ); month= (int)lua_tointeger(L,-1); lua_pop(L,1); + lua_getfield( L, 1, "day" ); day= (int)lua_tointeger(L,-1); lua_pop(L,1); + lua_getfield( L, 1, "hour" ); hour= (int)lua_tointeger(L,-1); lua_pop(L,1); + lua_getfield( L, 1, "min" ); min= (int)lua_tointeger(L,-1); lua_pop(L,1); + lua_getfield( L, 1, "sec" ); sec= (int)lua_tointeger(L,-1); lua_pop(L,1); + + // If Lua table has '.isdst' we trust that. If it does not, we'll let + // 'mktime' decide on whether the time is within DST or not (value -1). + // + lua_getfield( L, 1, "isdst" ); + isdst= lua_isboolean(L,-1) ? 
lua_toboolean(L,-1) : -1; + lua_pop(L,1); + STACK_END( L, 0); + + t.tm_year= year-1900; + t.tm_mon= month-1; // 0..11 + t.tm_mday= day; // 1..31 + t.tm_hour= hour; // 0..23 + t.tm_min= min; // 0..59 + t.tm_sec= sec; // 0..60 + t.tm_isdst= isdst; // 0/1/negative + + lua_pushnumber( L, (double) mktime( &t)); // ms=0 + return 1; +} + +/* + * ############################################################################################### + * ######################################## Module linkage ####################################### + * ############################################################################################### + */ + +extern int LG_linda( lua_State* L); +static const struct luaL_Reg lanes_functions [] = { + {"linda", LG_linda}, + {"now_secs", LG_now_secs}, + {"wakeup_conv", LG_wakeup_conv}, + {"set_thread_priority", LG_set_thread_priority}, + {"set_thread_affinity", LG_set_thread_affinity}, + {"nameof", luaG_nameof}, + {"register", LG_register}, + {"set_singlethreaded", LG_set_singlethreaded}, + {NULL, NULL} +}; + +/* + * One-time initializations + * settings table it at position 1 on the stack + * pushes an error string on the stack in case of problem + */ +static void init_once_LOCKED( void) +{ +#if (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC) + now_secs(); // initialize 'now_secs()' internal offset +#endif + +#if (defined PLATFORM_OSX) && (defined _UTILBINDTHREADTOCPU) + chudInitialize(); +#endif + + //--- + // Linux needs SCHED_RR to change thread priorities, and that is only + // allowed for sudo'ers. SCHED_OTHER (default) has no priorities. + // SCHED_OTHER threads are always lower priority than SCHED_RR. + // + // ^-- those apply to 2.6 kernel. IF **wishful thinking** these + // constraints will change in the future, non-sudo priorities can + // be enabled also for Linux. + // +#ifdef PLATFORM_LINUX + sudo = (geteuid() == 0); // we are root? + + // If lower priorities (-2..-1) are wanted, we need to lift the main + // thread to SCHED_RR and 50 (medium) level. Otherwise, we're always below + // the launched threads (even -2). + // +#ifdef LINUX_SCHED_RR + if( sudo) + { + struct sched_param sp; + sp.sched_priority = _PRIO_0; + PT_CALL( pthread_setschedparam( pthread_self(), SCHED_RR, &sp)); + } +#endif // LINUX_SCHED_RR +#endif // PLATFORM_LINUX +} + +static volatile long s_initCount = 0; + +// upvalue 1: module name +// upvalue 2: module table +// param 1: settings table +LUAG_FUNC( configure) +{ + Universe* U = universe_get( L); + bool_t const from_master_state = (U == NULL); + char const* name = luaL_checkstring( L, lua_upvalueindex( 1)); + _ASSERT_L( L, lua_type( L, 1) == LUA_TTABLE); + + /* + ** Making one-time initializations. + ** + ** When the host application is single-threaded (and all threading happens via Lanes) + ** there is no problem. But if the host is multithreaded, we need to lock around the + ** initializations. 
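For comparison only, the same one-shot guarantee could be written with C++11's std::call_once; a minimal sketch, not what this file does, since the code below keeps dedicated Windows and pthread paths:

#include <mutex>

static std::once_flag s_init_flag;  // hypothetical flag, for this sketch only

static void ensure_one_time_init()
{
    // runs init_once_LOCKED() exactly once, even when several host threads
    // require "lanes.core" and call configure() concurrently
    std::call_once(s_init_flag, init_once_LOCKED);
}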
+ */ +#if THREADAPI == THREADAPI_WINDOWS + { + static volatile int /*bool*/ go_ahead; // = 0 + if( InterlockedCompareExchange( &s_initCount, 1, 0) == 0) + { + init_once_LOCKED(); + go_ahead = 1; // let others pass + } + else + { + while( !go_ahead) { Sleep(1); } // changes threads + } + } +#else // THREADAPI == THREADAPI_PTHREAD + if( s_initCount == 0) + { + static pthread_mutex_t my_lock = PTHREAD_MUTEX_INITIALIZER; + pthread_mutex_lock( &my_lock); + { + // Recheck now that we're within the lock + // + if( s_initCount == 0) + { + init_once_LOCKED(); + s_initCount = 1; + } + } + pthread_mutex_unlock( &my_lock); + } +#endif // THREADAPI == THREADAPI_PTHREAD + + STACK_GROW( L, 4); + STACK_CHECK_ABS( L, 1); // settings + + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: lanes.configure() BEGIN\n" INDENT_END, L)); + DEBUGSPEW_CODE( if( U) ++ U->debugspew_indent_depth); + + if( U == NULL) + { + U = universe_create( L); // settings universe + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + lua_newtable( L); // settings universe mt + lua_getfield( L, 1, "shutdown_timeout"); // settings universe mt shutdown_timeout + lua_pushcclosure( L, selfdestruct_gc, 1); // settings universe mt selfdestruct_gc + lua_setfield( L, -2, "__gc"); // settings universe mt + lua_setmetatable( L, -2); // settings universe + lua_pop( L, 1); // settings + lua_getfield( L, 1, "verbose_errors"); // settings verbose_errors + U->verboseErrors = lua_toboolean( L, -1); + lua_pop( L, 1); // settings + lua_getfield( L, 1, "demote_full_userdata"); // settings demote_full_userdata + U->demoteFullUserdata = lua_toboolean( L, -1); + lua_pop( L, 1); // settings +#if HAVE_LANE_TRACKING() + MUTEX_INIT( &U->tracking_cs); + lua_getfield( L, 1, "track_lanes"); // settings track_lanes + U->tracking_first = lua_toboolean( L, -1) ? TRACKING_END : NULL; + lua_pop( L, 1); // settings +#endif // HAVE_LANE_TRACKING() + // Linked chains handling + MUTEX_INIT( &U->selfdestruct_cs); + MUTEX_RECURSIVE_INIT( &U->require_cs); + // Locks for 'tools.c' inc/dec counters + MUTEX_INIT( &U->deep_lock); + MUTEX_INIT( &U->mtid_lock); + U->selfdestruct_first = SELFDESTRUCT_END; + initialize_allocator_function( U, L); + initialize_on_state_create( U, L); + init_keepers( U, L); + STACK_MID( L, 1); + + // Initialize 'timer_deep'; a common Linda object shared by all states + lua_pushcfunction( L, LG_linda); // settings lanes.linda + lua_pushliteral( L, "lanes-timer"); // settings lanes.linda "lanes-timer" + lua_call( L, 1, 1); // settings linda + STACK_MID( L, 2); + + // Proxy userdata contents is only a 'DEEP_PRELUDE*' pointer + U->timer_deep = *(DeepPrelude**) lua_touserdata( L, -1); + // increment refcount so that this linda remains alive as long as the universe exists. 
+ ++ U->timer_deep->refcount; + lua_pop( L, 1); // settings + } + STACK_MID( L, 1); + + // Serialize calls to 'require' from now on, also in the primary state + serialize_require( DEBUGSPEW_PARAM_COMMA( U) L); + + // Retrieve main module interface table + lua_pushvalue( L, lua_upvalueindex( 2)); // settings M + // remove configure() (this function) from the module interface + lua_pushnil( L); // settings M nil + lua_setfield( L, -2, "configure"); // settings M + // add functions to the module's table + luaG_registerlibfuncs( L, lanes_functions); +#if HAVE_LANE_TRACKING() + // register core.threads() only if settings say it should be available + if( U->tracking_first != NULL) + { + lua_pushcfunction( L, LG_threads); // settings M LG_threads() + lua_setfield( L, -2, "threads"); // settings M + } +#endif // HAVE_LANE_TRACKING() + STACK_MID( L, 2); + + { + char const* errmsg; + errmsg = push_deep_proxy( U, L, (DeepPrelude*) U->timer_deep, 0, eLM_LaneBody); // settings M timer_deep + if( errmsg != NULL) + { + return luaL_error( L, errmsg); + } + lua_setfield( L, -2, "timer_gateway"); // settings M + } + STACK_MID( L, 2); + + // prepare the metatable for threads + // contains keys: { __gc, __index, cached_error, cached_tostring, cancel, join, get_debug_threadname } + // + if( luaL_newmetatable( L, "Lane")) // settings M mt + { + lua_pushcfunction( L, LG_thread_gc); // settings M mt LG_thread_gc + lua_setfield( L, -2, "__gc"); // settings M mt + lua_pushcfunction( L, LG_thread_index); // settings M mt LG_thread_index + lua_setfield( L, -2, "__index"); // settings M mt + lua_getglobal( L, "error"); // settings M mt error + ASSERT_L( lua_isfunction( L, -1)); + lua_setfield( L, -2, "cached_error"); // settings M mt + lua_getglobal( L, "tostring"); // settings M mt tostring + ASSERT_L( lua_isfunction( L, -1)); + lua_setfield( L, -2, "cached_tostring"); // settings M mt + lua_pushcfunction( L, LG_thread_join); // settings M mt LG_thread_join + lua_setfield( L, -2, "join"); // settings M mt + lua_pushcfunction( L, LG_get_debug_threadname); // settings M mt LG_get_debug_threadname + lua_setfield( L, -2, "get_debug_threadname"); // settings M mt + lua_pushcfunction( L, LG_thread_cancel); // settings M mt LG_thread_cancel + lua_setfield( L, -2, "cancel"); // settings M mt + lua_pushliteral( L, "Lane"); // settings M mt "Lane" + lua_setfield( L, -2, "__metatable"); // settings M mt + } + + lua_pushcclosure( L, LG_lane_new, 1); // settings M lane_new + lua_setfield( L, -2, "lane_new"); // settings M + + // we can't register 'lanes.require' normally because we want to create an upvalued closure + lua_getglobal( L, "require"); // settings M require + lua_pushcclosure( L, LG_require, 1); // settings M lanes.require + lua_setfield( L, -2, "require"); // settings M + + lua_pushfstring( + L, "%d.%d.%d" + , LANES_VERSION_MAJOR, LANES_VERSION_MINOR, LANES_VERSION_PATCH + ); // settings M VERSION + lua_setfield( L, -2, "version"); // settings M + + lua_pushinteger(L, THREAD_PRIO_MAX); // settings M THREAD_PRIO_MAX + lua_setfield( L, -2, "max_prio"); // settings M + + push_unique_key( L, CANCEL_ERROR); // settings M CANCEL_ERROR + lua_setfield( L, -2, "cancel_error"); // settings M + + STACK_MID( L, 2); // reference stack contains only the function argument 'settings' + // we'll need this every time we transfer some C function from/to this state + REGISTRY_SET( L, LOOKUP_REGKEY, lua_newtable( L)); + STACK_MID( L, 2); + + // register all native functions found in that module in the transferable functions database + // 
we process it before _G because we don't want to find the module when scanning _G (this would generate longer names) + // for example in package.loaded["lanes.core"].* + populate_func_lookup_table( L, -1, name); + STACK_MID( L, 2); + + // record all existing C/JIT-fast functions + // Lua 5.2 no longer has LUA_GLOBALSINDEX: we must push globals table on the stack + if( from_master_state) + { + // don't do this when called during the initialization of a new lane, + // because we will do it after on_state_create() is called, + // and we don't want to skip _G because of caching in case globals are created then + lua_pushglobaltable( L); // settings M _G + populate_func_lookup_table( L, -1, NULL); + lua_pop( L, 1); // settings M + } + lua_pop( L, 1); // settings + + // set _R[CONFIG_REGKEY] = settings + REGISTRY_SET( L, CONFIG_REGKEY, lua_pushvalue( L, -2)); // -2 because CONFIG_REGKEY is pushed before the value itself + STACK_END( L, 1); + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: lanes.configure() END\n" INDENT_END, L)); + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + // Return the settings table + return 1; +} + +#if defined PLATFORM_WIN32 && !defined NDEBUG +#include +#include + +void signal_handler( int signal) +{ + if( signal == SIGABRT) + { + _cprintf( "caught abnormal termination!"); + abort(); + } +} + +// helper to have correct callstacks when crashing a Win32 running on 64 bits Windows +// don't forget to toggle Debug/Exceptions/Win32 in visual Studio too! +static volatile long s_ecoc_initCount = 0; +static volatile int s_ecoc_go_ahead = 0; +static void EnableCrashingOnCrashes( void) +{ + if( InterlockedCompareExchange( &s_ecoc_initCount, 1, 0) == 0) + { + typedef BOOL (WINAPI* tGetPolicy)( LPDWORD lpFlags); + typedef BOOL (WINAPI* tSetPolicy)( DWORD dwFlags); + const DWORD EXCEPTION_SWALLOWING = 0x1; + + HMODULE kernel32 = LoadLibraryA("kernel32.dll"); + tGetPolicy pGetPolicy = (tGetPolicy)GetProcAddress(kernel32, "GetProcessUserModeExceptionPolicy"); + tSetPolicy pSetPolicy = (tSetPolicy)GetProcAddress(kernel32, "SetProcessUserModeExceptionPolicy"); + if( pGetPolicy && pSetPolicy) + { + DWORD dwFlags; + if( pGetPolicy( &dwFlags)) + { + // Turn off the filter + pSetPolicy( dwFlags & ~EXCEPTION_SWALLOWING); + } + } + //typedef void (* SignalHandlerPointer)( int); + /*SignalHandlerPointer previousHandler =*/ signal( SIGABRT, signal_handler); + + s_ecoc_go_ahead = 1; // let others pass + } + else + { + while( !s_ecoc_go_ahead) { Sleep(1); } // changes threads + } +} +#endif // PLATFORM_WIN32 + +LANES_API int luaopen_lanes_core( lua_State* L) +{ +#if defined PLATFORM_WIN32 && !defined NDEBUG + EnableCrashingOnCrashes(); +#endif // defined PLATFORM_WIN32 && !defined NDEBUG + + STACK_GROW( L, 4); + STACK_CHECK( L, 0); + + // Prevent PUC-Lua/LuaJIT mismatch. 
Hopefully this works for MoonJIT too + lua_getglobal( L, "jit"); // {jit?} +#if LUAJIT_FLAVOR() == 0 + if (!lua_isnil( L, -1)) + return luaL_error( L, "Lanes is built for PUC-Lua, don't run from LuaJIT"); +#else + if (lua_isnil( L, -1)) + return luaL_error( L, "Lanes is built for LuaJIT, don't run from PUC-Lua"); +#endif + lua_pop( L, 1); // + + // Create main module interface table + // we only have 1 closure, which must be called to configure Lanes + lua_newtable( L); // M + lua_pushvalue( L, 1); // M "lanes.core" + lua_pushvalue( L, -2); // M "lanes.core" M + lua_pushcclosure( L, LG_configure, 2); // M LG_configure() + REGISTRY_GET( L, CONFIG_REGKEY); // M LG_configure() settings + if( !lua_isnil( L, -1)) // this is not the first require "lanes.core": call configure() immediately + { + lua_pushvalue( L, -1); // M LG_configure() settings settings + lua_setfield( L, -4, "settings"); // M LG_configure() settings + lua_call( L, 1, 0); // M + } + else + { + // will do nothing on first invocation, as we haven't stored settings in the registry yet + lua_setfield( L, -3, "settings"); // M LG_configure() + lua_setfield( L, -2, "configure"); // M + } + + STACK_END( L, 1); + return 1; +} + +static int default_luaopen_lanes( lua_State* L) +{ + int rc = luaL_loadfile( L, "lanes.lua") || lua_pcall( L, 0, 1, 0); + if( rc != LUA_OK) + { + return luaL_error( L, "failed to initialize embedded Lanes"); + } + return 1; +} + +// call this instead of luaopen_lanes_core() when embedding Lua and Lanes in a custom application +LANES_API void luaopen_lanes_embedded( lua_State* L, lua_CFunction _luaopen_lanes) +{ + STACK_CHECK( L, 0); + // pre-require lanes.core so that when lanes.lua calls require "lanes.core" it finds it is already loaded + luaL_requiref( L, "lanes.core", luaopen_lanes_core, 0); // ... lanes.core + lua_pop( L, 1); // ... + STACK_MID( L, 0); + // call user-provided function that runs the chunk "lanes.lua" from wherever they stored it + luaL_requiref( L, "lanes", _luaopen_lanes ? _luaopen_lanes : default_luaopen_lanes, 0); // ... lanes + STACK_END( L, 1); +} diff --git a/src/linda.c b/src/linda.c deleted file mode 100644 index eac6458..0000000 --- a/src/linda.c +++ /dev/null @@ -1,945 +0,0 @@ -/* - * LINDA.C Copyright (c) 2018, Benoit Germain - * - * Linda deep userdata. -*/ - -/* -=============================================================================== - -Copyright (C) 2018 benoit Germain - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
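luaopen_lanes_embedded() above is the entry point a host application calls when embedding Lua and Lanes; a minimal embedding sketch, where the header name and the lanes.lua path are assumptions and error handling is kept to the bare minimum:

#include "lua.hpp"
#include "lanes.h"  // assumed to declare LANES_API luaopen_lanes_embedded()

static int load_lanes_lua(lua_State* L)
{
    // the host decides where its copy of lanes.lua lives; this path is illustrative
    if (luaL_loadfile(L, "lanes.lua") || lua_pcall(L, 0, 1, 0))
        return luaL_error(L, "failed to initialize embedded Lanes");
    return 1;
}

void host_open_lanes(lua_State* L)
{
    luaopen_lanes_embedded(L, load_lanes_lua);  // pre-loads lanes.core, then requires "lanes"
    lua_pop(L, 1);                              // drop the returned module table if unused
}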
- -=============================================================================== -*/ - -#include -#include -#include - -#include "threading.h" -#include "compat.h" -#include "tools.h" -#include "universe.h" -#include "keeper.h" -#include "deep.h" -#include "lanes_private.h" - -/* -* Actual data is kept within a keeper state, which is hashed by the 's_Linda' -* pointer (which is same to all userdatas pointing to it). -*/ -struct s_Linda -{ - DeepPrelude prelude; // Deep userdata MUST start with this header - SIGNAL_T read_happened; - SIGNAL_T write_happened; - Universe* U; // the universe this linda belongs to - ptrdiff_t group; // a group to control keeper allocation between lindas - enum e_cancel_request simulate_cancel; - char name[1]; -}; -#define LINDA_KEEPER_HASHSEED( linda) (linda->group ? linda->group : (ptrdiff_t)linda) - -static void* linda_id( lua_State*, DeepOp); - -static inline struct s_Linda* lua_toLinda( lua_State* L, int idx_) -{ - struct s_Linda* linda = (struct s_Linda*) luaG_todeep( L, linda_id, idx_); - luaL_argcheck( L, linda != NULL, idx_, "expecting a linda object"); - return linda; -} - -static void check_key_types( lua_State* L, int start_, int end_) -{ - int i; - for( i = start_; i <= end_; ++ i) - { - int t = lua_type( L, i); - if( t == LUA_TBOOLEAN || t == LUA_TNUMBER || t == LUA_TSTRING || t == LUA_TLIGHTUSERDATA) - { - continue; - } - (void) luaL_error( L, "argument #%d: invalid key type (not a boolean, string, number or light userdata)", i); - } -} - -LUAG_FUNC( linda_protected_call) -{ - int rc = LUA_OK; - struct s_Linda* linda = lua_toLinda( L, 1); - - // acquire the keeper - Keeper* K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED(linda)); - lua_State* KL = K ? K->L : NULL; // need to do this for 'STACK_CHECK' - if( KL == NULL) return 0; - - // retrieve the actual function to be called and move it before the arguments - lua_pushvalue( L, lua_upvalueindex( 1)); - lua_insert( L, 1); - // do a protected call - rc = lua_pcall( L, lua_gettop( L) - 1, LUA_MULTRET, 0); - - // release the keeper - keeper_release( K); - - // if there was an error, forward it - if( rc != LUA_OK) - { - return lua_error( L); - } - // return whatever the actual operation provided - return lua_gettop( L); -} - -/* -* bool= linda_send( linda_ud, [timeout_secs=-1,] [linda.null,] key_num|str|bool|lightuserdata, ... ) -* -* Send one or more values to a Linda. If there is a limit, all values must fit. 
-* -* Returns: 'true' if the value was queued -* 'false' for timeout (only happens when the queue size is limited) -* nil, CANCEL_ERROR if cancelled -*/ -LUAG_FUNC( linda_send) -{ - struct s_Linda* linda = lua_toLinda( L, 1); - bool_t ret = FALSE; - enum e_cancel_request cancel = CANCEL_NONE; - int pushed; - time_d timeout = -1.0; - uint_t key_i = 2; // index of first key, if timeout not there - bool_t as_nil_sentinel; // if not NULL, send() will silently send a single nil if nothing is provided - - if( lua_type( L, 2) == LUA_TNUMBER) // we don't want to use lua_isnumber() because of autocoercion - { - timeout = SIGNAL_TIMEOUT_PREPARE( lua_tonumber( L, 2)); - ++ key_i; - } - else if( lua_isnil( L, 2)) // alternate explicit "no timeout" by passing nil before the key - { - ++ key_i; - } - - as_nil_sentinel = equal_unique_key( L, key_i, NIL_SENTINEL); - if( as_nil_sentinel) - { - // the real key to send data to is after the NIL_SENTINEL marker - ++ key_i; - } - - // make sure the key is of a valid type - check_key_types( L, key_i, key_i); - - STACK_GROW( L, 1); - - // make sure there is something to send - if( (uint_t)lua_gettop( L) == key_i) - { - if( as_nil_sentinel) - { - // send a single nil if nothing is provided - push_unique_key( L, NIL_SENTINEL); - } - else - { - return luaL_error( L, "no data to send"); - } - } - - // convert nils to some special non-nil sentinel in sent values - keeper_toggle_nil_sentinels( L, key_i + 1, eLM_ToKeeper); - - { - bool_t try_again = TRUE; - Lane* const s = get_lane_from_registry( L); - Keeper* K = which_keeper( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); - lua_State* KL = K ? K->L : NULL; // need to do this for 'STACK_CHECK' - if( KL == NULL) return 0; - STACK_CHECK( KL, 0); - for( ;;) - { - if( s != NULL) - { - cancel = s->cancel_request; - } - cancel = (cancel != CANCEL_NONE) ? 
cancel : linda->simulate_cancel; - // if user wants to cancel, or looped because of a timeout, the call returns without sending anything - if( !try_again || cancel != CANCEL_NONE) - { - pushed = 0; - break; - } - - STACK_MID( KL, 0); - pushed = keeper_call( linda->U, KL, KEEPER_API( send), L, linda, key_i); - if( pushed < 0) - { - break; - } - ASSERT_L( pushed == 1); - - ret = lua_toboolean( L, -1); - lua_pop( L, 1); - - if( ret) - { - // Wake up ALL waiting threads - SIGNAL_ALL( &linda->write_happened); - break; - } - - // instant timout to bypass the wait syscall - if( timeout == 0.0) - { - break; /* no wait; instant timeout */ - } - - // storage limit hit, wait until timeout or signalled that we should try again - { - enum e_status prev_status = ERROR_ST; // prevent 'might be used uninitialized' warnings - if( s != NULL) - { - // change status of lane to "waiting" - prev_status = s->status; // RUNNING, most likely - ASSERT_L( prev_status == RUNNING); // but check, just in case - s->status = WAITING; - ASSERT_L( s->waiting_on == NULL); - s->waiting_on = &linda->read_happened; - } - // could not send because no room: wait until some data was read before trying again, or until timeout is reached - try_again = SIGNAL_WAIT( &linda->read_happened, &K->keeper_cs, timeout); - if( s != NULL) - { - s->waiting_on = NULL; - s->status = prev_status; - } - } - } - STACK_END( KL, 0); - } - - if( pushed < 0) - { - return luaL_error( L, "tried to copy unsupported types"); - } - - switch( cancel) - { - case CANCEL_SOFT: - // if user wants to soft-cancel, the call returns lanes.cancel_error - push_unique_key( L, CANCEL_ERROR); - return 1; - - case CANCEL_HARD: - // raise an error interrupting execution only in case of hard cancel - return cancel_error( L); // raises an error and doesn't return - - default: - lua_pushboolean( L, ret); // true (success) or false (timeout) - return 1; - } -} - - -/* - * 2 modes of operation - * [val, key]= linda_receive( linda_ud, [timeout_secs_num=-1], key_num|str|bool|lightuserdata [, ...] ) - * Consumes a single value from the Linda, in any key. - * Returns: received value (which is consumed from the slot), and the key which had it - - * [val1, ... valCOUNT]= linda_receive( linda_ud, [timeout_secs_num=-1], linda.batched, key_num|str|bool|lightuserdata, min_COUNT[, max_COUNT]) - * Consumes between min_COUNT and max_COUNT values from the linda, from a single key. - * returns the actual consumed values, or nil if there weren't enough values to consume - * - */ -#define BATCH_SENTINEL "270e6c9d-280f-4983-8fee-a7ecdda01475" -LUAG_FUNC( linda_receive) -{ - struct s_Linda* linda = lua_toLinda( L, 1); - int pushed, expected_pushed_min, expected_pushed_max; - enum e_cancel_request cancel = CANCEL_NONE; - keeper_api_t keeper_receive; - - time_d timeout = -1.0; - uint_t key_i = 2; - - if( lua_type( L, 2) == LUA_TNUMBER) // we don't want to use lua_isnumber() because of autocoercion - { - timeout = SIGNAL_TIMEOUT_PREPARE( lua_tonumber( L, 2)); - ++ key_i; - } - else if( lua_isnil( L, 2)) // alternate explicit "no timeout" by passing nil before the key - { - ++ key_i; - } - - // are we in batched mode? 
- { - int is_batched; - lua_pushliteral( L, BATCH_SENTINEL); - is_batched = lua501_equal( L, key_i, -1); - lua_pop( L, 1); - if( is_batched) - { - // no need to pass linda.batched in the keeper state - ++ key_i; - // make sure the keys are of a valid type - check_key_types( L, key_i, key_i); - // receive multiple values from a single slot - keeper_receive = KEEPER_API( receive_batched); - // we expect a user-defined amount of return value - expected_pushed_min = (int)luaL_checkinteger( L, key_i + 1); - expected_pushed_max = (int)luaL_optinteger( L, key_i + 2, expected_pushed_min); - // don't forget to count the key in addition to the values - ++ expected_pushed_min; - ++ expected_pushed_max; - if( expected_pushed_min > expected_pushed_max) - { - return luaL_error( L, "batched min/max error"); - } - } - else - { - // make sure the keys are of a valid type - check_key_types( L, key_i, lua_gettop( L)); - // receive a single value, checking multiple slots - keeper_receive = KEEPER_API( receive); - // we expect a single (value, key) pair of returned values - expected_pushed_min = expected_pushed_max = 2; - } - } - - { - bool_t try_again = TRUE; - Lane* const s = get_lane_from_registry( L); - Keeper* K = which_keeper( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); - if( K == NULL) return 0; - for( ;;) - { - if( s != NULL) - { - cancel = s->cancel_request; - } - cancel = (cancel != CANCEL_NONE) ? cancel : linda->simulate_cancel; - // if user wants to cancel, or looped because of a timeout, the call returns without sending anything - if( !try_again || cancel != CANCEL_NONE) - { - pushed = 0; - break; - } - - // all arguments of receive() but the first are passed to the keeper's receive function - pushed = keeper_call( linda->U, K->L, keeper_receive, L, linda, key_i); - if( pushed < 0) - { - break; - } - if( pushed > 0) - { - ASSERT_L( pushed >= expected_pushed_min && pushed <= expected_pushed_max); - // replace sentinels with real nils - keeper_toggle_nil_sentinels( L, lua_gettop( L) - pushed, eLM_FromKeeper); - // To be done from within the 'K' locking area - // - SIGNAL_ALL( &linda->read_happened); - break; - } - - if( timeout == 0.0) - { - break; /* instant timeout */ - } - - // nothing received, wait until timeout or signalled that we should try again - { - enum e_status prev_status = ERROR_ST; // prevent 'might be used uninitialized' warnings - if( s != NULL) - { - // change status of lane to "waiting" - prev_status = s->status; // RUNNING, most likely - ASSERT_L( prev_status == RUNNING); // but check, just in case - s->status = WAITING; - ASSERT_L( s->waiting_on == NULL); - s->waiting_on = &linda->write_happened; - } - // not enough data to read: wakeup when data was sent, or when timeout is reached - try_again = SIGNAL_WAIT( &linda->write_happened, &K->keeper_cs, timeout); - if( s != NULL) - { - s->waiting_on = NULL; - s->status = prev_status; - } - } - } - } - - if( pushed < 0) - { - return luaL_error( L, "tried to copy unsupported types"); - } - - switch( cancel) - { - case CANCEL_SOFT: - // if user wants to soft-cancel, the call returns CANCEL_ERROR - push_unique_key( L, CANCEL_ERROR); - return 1; - - case CANCEL_HARD: - // raise an error interrupting execution only in case of hard cancel - return cancel_error( L); // raises an error and doesn't return - - default: - return pushed; - } -} - - -/* -* [true|lanes.cancel_error] = linda_set( linda_ud, key_num|str|bool|lightuserdata [, value [, ...]]) -* -* Set one or more value to Linda. 
-* TODO: what do we do if we set to non-nil and limit is 0? -* -* Existing slot value is replaced, and possible queued entries removed. -*/ -LUAG_FUNC( linda_set) -{ - struct s_Linda* const linda = lua_toLinda( L, 1); - int pushed; - bool_t has_value = lua_gettop( L) > 2; - - // make sure the key is of a valid type (throws an error if not the case) - check_key_types( L, 2, 2); - - { - Keeper* K = which_keeper( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); - - if( linda->simulate_cancel == CANCEL_NONE) - { - if( has_value) - { - // convert nils to some special non-nil sentinel in sent values - keeper_toggle_nil_sentinels( L, 3, eLM_ToKeeper); - } - pushed = keeper_call( linda->U, K->L, KEEPER_API( set), L, linda, 2); - if( pushed >= 0) // no error? - { - ASSERT_L( pushed == 0 || pushed == 1); - - if( has_value) - { - // we put some data in the slot, tell readers that they should wake - SIGNAL_ALL( &linda->write_happened); // To be done from within the 'K' locking area - } - if( pushed == 1) - { - // the key was full, but it is no longer the case, tell writers they should wake - ASSERT_L( lua_type( L, -1) == LUA_TBOOLEAN && lua_toboolean( L, -1) == 1); - SIGNAL_ALL( &linda->read_happened); // To be done from within the 'K' locking area - } - } - } - else // linda is cancelled - { - // do nothing and return lanes.cancel_error - push_unique_key( L, CANCEL_ERROR); - pushed = 1; - } - } - - // must trigger any error after keeper state has been released - return (pushed < 0) ? luaL_error( L, "tried to copy unsupported types") : pushed; -} - - -/* - * [val] = linda_count( linda_ud, [key [, ...]]) - * - * Get a count of the pending elements in the specified keys - */ -LUAG_FUNC( linda_count) -{ - struct s_Linda* linda = lua_toLinda( L, 1); - int pushed; - - // make sure the keys are of a valid type - check_key_types( L, 2, lua_gettop( L)); - - { - Keeper* K = which_keeper( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); - pushed = keeper_call( linda->U, K->L, KEEPER_API( count), L, linda, 2); - if( pushed < 0) - { - return luaL_error( L, "tried to count an invalid key"); - } - } - return pushed; -} - - -/* -* [val [, ...]] = linda_get( linda_ud, key_num|str|bool|lightuserdata [, count = 1]) -* -* Get one or more values from Linda. -*/ -LUAG_FUNC( linda_get) -{ - struct s_Linda* const linda = lua_toLinda( L, 1); - int pushed; - lua_Integer count = luaL_optinteger( L, 3, 1); - luaL_argcheck( L, count >= 1, 3, "count should be >= 1"); - luaL_argcheck( L, lua_gettop( L) <= 3, 4, "too many arguments"); - - // make sure the key is of a valid type (throws an error if not the case) - check_key_types( L, 2, 2); - { - Keeper* K = which_keeper( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); - - if( linda->simulate_cancel == CANCEL_NONE) - { - pushed = keeper_call( linda->U, K->L, KEEPER_API( get), L, linda, 2); - if( pushed > 0) - { - keeper_toggle_nil_sentinels( L, lua_gettop( L) - pushed, eLM_FromKeeper); - } - } - else // linda is cancelled - { - // do nothing and return lanes.cancel_error - push_unique_key( L, CANCEL_ERROR); - pushed = 1; - } - // an error can be raised if we attempt to read an unregistered function - if( pushed < 0) - { - return luaL_error( L, "tried to copy unsupported types"); - } - } - - return pushed; -} - - -/* -* [true] = linda_limit( linda_ud, key_num|str|bool|lightuserdata, int) -* -* Set limit to 1 Linda keys. 
-* Optionally wake threads waiting to write on the linda, in case the limit enables them to do so -*/ -LUAG_FUNC( linda_limit) -{ - struct s_Linda* linda = lua_toLinda( L, 1); - int pushed; - - // make sure we got 3 arguments: the linda, a key and a limit - luaL_argcheck( L, lua_gettop( L) == 3, 2, "wrong number of arguments"); - // make sure we got a numeric limit - luaL_checknumber( L, 3); - // make sure the key is of a valid type - check_key_types( L, 2, 2); - - { - Keeper* K = which_keeper( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); - - if( linda->simulate_cancel == CANCEL_NONE) - { - pushed = keeper_call( linda->U, K->L, KEEPER_API( limit), L, linda, 2); - ASSERT_L( pushed == 0 || pushed == 1); // no error, optional boolean value saying if we should wake blocked writer threads - if( pushed == 1) - { - ASSERT_L( lua_type( L, -1) == LUA_TBOOLEAN && lua_toboolean( L, -1) == 1); - SIGNAL_ALL( &linda->read_happened); // To be done from within the 'K' locking area - } - } - else // linda is cancelled - { - // do nothing and return lanes.cancel_error - push_unique_key( L, CANCEL_ERROR); - pushed = 1; - } - } - // propagate pushed boolean if any - return pushed; -} - - -/* -* (void) = linda_cancel( linda_ud, "read"|"write"|"both"|"none") -* -* Signal linda so that waiting threads wake up as if their own lane was cancelled -*/ -LUAG_FUNC( linda_cancel) -{ - struct s_Linda* linda = lua_toLinda( L, 1); - char const* who = luaL_optstring( L, 2, "both"); - - // make sure we got 3 arguments: the linda, a key and a limit - luaL_argcheck( L, lua_gettop( L) <= 2, 2, "wrong number of arguments"); - - linda->simulate_cancel = CANCEL_SOFT; - if( strcmp( who, "both") == 0) // tell everyone writers to wake up - { - SIGNAL_ALL( &linda->write_happened); - SIGNAL_ALL( &linda->read_happened); - } - else if( strcmp( who, "none") == 0) // reset flag - { - linda->simulate_cancel = CANCEL_NONE; - } - else if( strcmp( who, "read") == 0) // tell blocked readers to wake up - { - SIGNAL_ALL( &linda->write_happened); - } - else if( strcmp( who, "write") == 0) // tell blocked writers to wake up - { - SIGNAL_ALL( &linda->read_happened); - } - else - { - return luaL_error( L, "unknown wake hint '%s'", who); - } - return 0; -} - - -/* -* lightuserdata= linda_deep( linda_ud ) -* -* Return the 'deep' userdata pointer, identifying the Linda. -* -* This is needed for using Lindas as key indices (timer system needs it); -* separately created proxies of the same underlying deep object will have -* different userdata and won't be known to be essentially the same deep one -* without this. 
-*/ -LUAG_FUNC( linda_deep) -{ - struct s_Linda* linda= lua_toLinda( L, 1); - lua_pushlightuserdata( L, linda); // just the address - return 1; -} - - -/* -* string = linda:__tostring( linda_ud) -* -* Return the stringification of a linda -* -* Useful for concatenation or debugging purposes -*/ - -static int linda_tostring( lua_State* L, int idx_, bool_t opt_) -{ - struct s_Linda* linda = (struct s_Linda*) luaG_todeep( L, linda_id, idx_); - if( !opt_) - { - luaL_argcheck( L, linda, idx_, "expecting a linda object"); - } - if( linda != NULL) - { - char text[128]; - int len; - if( linda->name[0]) - len = sprintf( text, "Linda: %.*s", (int)sizeof(text) - 8, linda->name); - else - len = sprintf( text, "Linda: %p", linda); - lua_pushlstring( L, text, len); - return 1; - } - return 0; -} - -LUAG_FUNC( linda_tostring) -{ - return linda_tostring( L, 1, FALSE); -} - - -/* -* string = linda:__concat( a, b) -* -* Return the concatenation of a pair of items, one of them being a linda -* -* Useful for concatenation or debugging purposes -*/ -LUAG_FUNC( linda_concat) -{ // linda1? linda2? - bool_t atLeastOneLinda = FALSE; - // Lua semantics enforce that one of the 2 arguments is a Linda, but not necessarily both. - if( linda_tostring( L, 1, TRUE)) - { - atLeastOneLinda = TRUE; - lua_replace( L, 1); - } - if( linda_tostring( L, 2, TRUE)) - { - atLeastOneLinda = TRUE; - lua_replace( L, 2); - } - if( !atLeastOneLinda) // should not be possible - { - return luaL_error( L, "internal error: linda_concat called on non-Linda"); - } - lua_concat( L, 2); - return 1; -} - -/* - * table = linda:dump() - * return a table listing all pending data inside the linda - */ -LUAG_FUNC( linda_dump) -{ - struct s_Linda* linda = lua_toLinda( L, 1); - ASSERT_L( linda->U == universe_get( L)); - return keeper_push_linda_storage( linda->U, L, linda, LINDA_KEEPER_HASHSEED( linda)); -} - -/* - * table = linda:dump() - * return a table listing all pending data inside the linda - */ -LUAG_FUNC( linda_towatch) -{ - struct s_Linda* linda = lua_toLinda( L, 1); - int pushed; - ASSERT_L( linda->U == universe_get( L)); - pushed = keeper_push_linda_storage( linda->U, L, linda, LINDA_KEEPER_HASHSEED( linda)); - if( pushed == 0) - { - // if the linda is empty, don't return nil - pushed = linda_tostring( L, 1, FALSE); - } - return pushed; -} - -/* -* Identity function of a shared userdata object. -* -* lightuserdata= linda_id( "new" [, ...] ) -* = linda_id( "delete", lightuserdata ) -* -* Creation and cleanup of actual 'deep' objects. 'luaG_...' will wrap them into -* regular userdata proxies, per each state using the deep data. -* -* tbl= linda_id( "metatable" ) -* -* Returns a metatable for the proxy objects ('__gc' method not needed; will -* be added by 'luaG_...') -* -* string= linda_id( "module") -* -* Returns the name of the module that a state should require -* in order to keep a handle on the shared library that exported the idfunc -* -* = linda_id( str, ... ) -* -* For any other strings, the ID function must not react at all. This allows -* future extensions of the system. 
-*/ -static void* linda_id( lua_State* L, DeepOp op_) -{ - switch( op_) - { - case eDO_new: - { - struct s_Linda* s; - size_t name_len = 0; - char const* linda_name = NULL; - unsigned long linda_group = 0; - // should have a string and/or a number of the stack as parameters (name and group) - switch( lua_gettop( L)) - { - default: // 0 - break; - - case 1: // 1 parameter, either a name or a group - if( lua_type( L, -1) == LUA_TSTRING) - { - linda_name = lua_tolstring( L, -1, &name_len); - } - else - { - linda_group = (unsigned long) lua_tointeger( L, -1); - } - break; - - case 2: // 2 parameters, a name and group, in that order - linda_name = lua_tolstring( L, -2, &name_len); - linda_group = (unsigned long) lua_tointeger( L, -1); - break; - } - - /* The deep data is allocated separately of Lua stack; we might no - * longer be around when last reference to it is being released. - * One can use any memory allocation scheme. - * just don't use L's allocF because we don't know which state will get the honor of GCing the linda - */ - { - Universe* const U = universe_get(L); - AllocatorDefinition* const allocD = &U->internal_allocator; - s = (struct s_Linda*) allocD->allocF(allocD->allocUD, NULL, 0, sizeof(struct s_Linda) + name_len); // terminating 0 is already included - } - if( s) - { - s->prelude.magic.value = DEEP_VERSION.value; - SIGNAL_INIT( &s->read_happened); - SIGNAL_INIT( &s->write_happened); - s->U = universe_get( L); - s->simulate_cancel = CANCEL_NONE; - s->group = linda_group << KEEPER_MAGIC_SHIFT; - s->name[0] = 0; - memcpy( s->name, linda_name, name_len ? name_len + 1 : 0); - } - return s; - } - - case eDO_delete: - { - Keeper* K; - struct s_Linda* linda = (struct s_Linda*) lua_touserdata( L, 1); - ASSERT_L( linda); - - // Clean associated structures in the keeper state. - K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); - if( K && K->L) // can be NULL if this happens during main state shutdown (lanes is GC'ed -> no keepers -> no need to cleanup) - { - // hopefully this won't ever raise an error as we would jump to the closest pcall site while forgetting to release the keeper mutex... - keeper_call( linda->U, K->L, KEEPER_API( clear), L, linda, 0); - } - keeper_release( K); - - // There aren't any lanes waiting on these lindas, since all proxies have been gc'ed. Right? 
- SIGNAL_FREE( &linda->read_happened); - SIGNAL_FREE( &linda->write_happened); - { - Universe* const U = universe_get(L); - AllocatorDefinition* const allocD = &U->internal_allocator; - (void) allocD->allocF(allocD->allocUD, linda, sizeof(struct s_Linda) + strlen(linda->name), 0); - } - return NULL; - } - - case eDO_metatable: - { - - STACK_CHECK( L, 0); - lua_newtable( L); - // metatable is its own index - lua_pushvalue( L, -1); - lua_setfield( L, -2, "__index"); - - // protect metatable from external access - lua_pushliteral( L, "Linda"); - lua_setfield( L, -2, "__metatable"); - - lua_pushcfunction( L, LG_linda_tostring); - lua_setfield( L, -2, "__tostring"); - - // Decoda __towatch support - lua_pushcfunction( L, LG_linda_towatch); - lua_setfield( L, -2, "__towatch"); - - lua_pushcfunction( L, LG_linda_concat); - lua_setfield( L, -2, "__concat"); - - // protected calls, to ensure associated keeper is always released even in case of error - // all function are the protected call wrapper, where the actual operation is provided as upvalue - // note that this kind of thing can break function lookup as we use the function pointer here and there - - lua_pushcfunction( L, LG_linda_send); - lua_pushcclosure( L, LG_linda_protected_call, 1); - lua_setfield( L, -2, "send"); - - lua_pushcfunction( L, LG_linda_receive); - lua_pushcclosure( L, LG_linda_protected_call, 1); - lua_setfield( L, -2, "receive"); - - lua_pushcfunction( L, LG_linda_limit); - lua_pushcclosure( L, LG_linda_protected_call, 1); - lua_setfield( L, -2, "limit"); - - lua_pushcfunction( L, LG_linda_set); - lua_pushcclosure( L, LG_linda_protected_call, 1); - lua_setfield( L, -2, "set"); - - lua_pushcfunction( L, LG_linda_count); - lua_pushcclosure( L, LG_linda_protected_call, 1); - lua_setfield( L, -2, "count"); - - lua_pushcfunction( L, LG_linda_get); - lua_pushcclosure( L, LG_linda_protected_call, 1); - lua_setfield( L, -2, "get"); - - lua_pushcfunction( L, LG_linda_cancel); - lua_setfield( L, -2, "cancel"); - - lua_pushcfunction( L, LG_linda_deep); - lua_setfield( L, -2, "deep"); - - lua_pushcfunction( L, LG_linda_dump); - lua_pushcclosure( L, LG_linda_protected_call, 1); - lua_setfield( L, -2, "dump"); - - // some constants - lua_pushliteral( L, BATCH_SENTINEL); - lua_setfield( L, -2, "batched"); - - push_unique_key( L, NIL_SENTINEL); - lua_setfield( L, -2, "null"); - - STACK_END( L, 1); - return NULL; - } - - case eDO_module: - // linda is a special case because we know lanes must be loaded from the main lua state - // to be able to ever get here, so we know it will remain loaded as long a the main state is around - // in other words, forever. - default: - { - return NULL; - } - } -} - -/* - * ud = lanes.linda( [name[,group]]) - * - * returns a linda object, or raises an error if creation failed - */ -LUAG_FUNC( linda) -{ - int const top = lua_gettop( L); - luaL_argcheck( L, top <= 2, top, "too many arguments"); - if( top == 1) - { - int const t = lua_type( L, 1); - luaL_argcheck( L, t == LUA_TSTRING || t == LUA_TNUMBER, 1, "wrong parameter (should be a string or a number)"); - } - else if( top == 2) - { - luaL_checktype( L, 1, LUA_TSTRING); - luaL_checktype( L, 2, LUA_TNUMBER); - } - return luaG_newdeepuserdata( L, linda_id, 0); -} diff --git a/src/linda.cpp b/src/linda.cpp new file mode 100644 index 0000000..eac6458 --- /dev/null +++ b/src/linda.cpp @@ -0,0 +1,945 @@ +/* + * LINDA.C Copyright (c) 2018, Benoit Germain + * + * Linda deep userdata. 
+*/ + +/* +=============================================================================== + +Copyright (C) 2018 benoit Germain + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +=============================================================================== +*/ + +#include +#include +#include + +#include "threading.h" +#include "compat.h" +#include "tools.h" +#include "universe.h" +#include "keeper.h" +#include "deep.h" +#include "lanes_private.h" + +/* +* Actual data is kept within a keeper state, which is hashed by the 's_Linda' +* pointer (which is same to all userdatas pointing to it). +*/ +struct s_Linda +{ + DeepPrelude prelude; // Deep userdata MUST start with this header + SIGNAL_T read_happened; + SIGNAL_T write_happened; + Universe* U; // the universe this linda belongs to + ptrdiff_t group; // a group to control keeper allocation between lindas + enum e_cancel_request simulate_cancel; + char name[1]; +}; +#define LINDA_KEEPER_HASHSEED( linda) (linda->group ? linda->group : (ptrdiff_t)linda) + +static void* linda_id( lua_State*, DeepOp); + +static inline struct s_Linda* lua_toLinda( lua_State* L, int idx_) +{ + struct s_Linda* linda = (struct s_Linda*) luaG_todeep( L, linda_id, idx_); + luaL_argcheck( L, linda != NULL, idx_, "expecting a linda object"); + return linda; +} + +static void check_key_types( lua_State* L, int start_, int end_) +{ + int i; + for( i = start_; i <= end_; ++ i) + { + int t = lua_type( L, i); + if( t == LUA_TBOOLEAN || t == LUA_TNUMBER || t == LUA_TSTRING || t == LUA_TLIGHTUSERDATA) + { + continue; + } + (void) luaL_error( L, "argument #%d: invalid key type (not a boolean, string, number or light userdata)", i); + } +} + +LUAG_FUNC( linda_protected_call) +{ + int rc = LUA_OK; + struct s_Linda* linda = lua_toLinda( L, 1); + + // acquire the keeper + Keeper* K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED(linda)); + lua_State* KL = K ? K->L : NULL; // need to do this for 'STACK_CHECK' + if( KL == NULL) return 0; + + // retrieve the actual function to be called and move it before the arguments + lua_pushvalue( L, lua_upvalueindex( 1)); + lua_insert( L, 1); + // do a protected call + rc = lua_pcall( L, lua_gettop( L) - 1, LUA_MULTRET, 0); + + // release the keeper + keeper_release( K); + + // if there was an error, forward it + if( rc != LUA_OK) + { + return lua_error( L); + } + // return whatever the actual operation provided + return lua_gettop( L); +} + +/* +* bool= linda_send( linda_ud, [timeout_secs=-1,] [linda.null,] key_num|str|bool|lightuserdata, ... 
) +* +* Send one or more values to a Linda. If there is a limit, all values must fit. +* +* Returns: 'true' if the value was queued +* 'false' for timeout (only happens when the queue size is limited) +* nil, CANCEL_ERROR if cancelled +*/ +LUAG_FUNC( linda_send) +{ + struct s_Linda* linda = lua_toLinda( L, 1); + bool_t ret = FALSE; + enum e_cancel_request cancel = CANCEL_NONE; + int pushed; + time_d timeout = -1.0; + uint_t key_i = 2; // index of first key, if timeout not there + bool_t as_nil_sentinel; // if not NULL, send() will silently send a single nil if nothing is provided + + if( lua_type( L, 2) == LUA_TNUMBER) // we don't want to use lua_isnumber() because of autocoercion + { + timeout = SIGNAL_TIMEOUT_PREPARE( lua_tonumber( L, 2)); + ++ key_i; + } + else if( lua_isnil( L, 2)) // alternate explicit "no timeout" by passing nil before the key + { + ++ key_i; + } + + as_nil_sentinel = equal_unique_key( L, key_i, NIL_SENTINEL); + if( as_nil_sentinel) + { + // the real key to send data to is after the NIL_SENTINEL marker + ++ key_i; + } + + // make sure the key is of a valid type + check_key_types( L, key_i, key_i); + + STACK_GROW( L, 1); + + // make sure there is something to send + if( (uint_t)lua_gettop( L) == key_i) + { + if( as_nil_sentinel) + { + // send a single nil if nothing is provided + push_unique_key( L, NIL_SENTINEL); + } + else + { + return luaL_error( L, "no data to send"); + } + } + + // convert nils to some special non-nil sentinel in sent values + keeper_toggle_nil_sentinels( L, key_i + 1, eLM_ToKeeper); + + { + bool_t try_again = TRUE; + Lane* const s = get_lane_from_registry( L); + Keeper* K = which_keeper( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); + lua_State* KL = K ? K->L : NULL; // need to do this for 'STACK_CHECK' + if( KL == NULL) return 0; + STACK_CHECK( KL, 0); + for( ;;) + { + if( s != NULL) + { + cancel = s->cancel_request; + } + cancel = (cancel != CANCEL_NONE) ? 
cancel : linda->simulate_cancel; + // if user wants to cancel, or looped because of a timeout, the call returns without sending anything + if( !try_again || cancel != CANCEL_NONE) + { + pushed = 0; + break; + } + + STACK_MID( KL, 0); + pushed = keeper_call( linda->U, KL, KEEPER_API( send), L, linda, key_i); + if( pushed < 0) + { + break; + } + ASSERT_L( pushed == 1); + + ret = lua_toboolean( L, -1); + lua_pop( L, 1); + + if( ret) + { + // Wake up ALL waiting threads + SIGNAL_ALL( &linda->write_happened); + break; + } + + // instant timout to bypass the wait syscall + if( timeout == 0.0) + { + break; /* no wait; instant timeout */ + } + + // storage limit hit, wait until timeout or signalled that we should try again + { + enum e_status prev_status = ERROR_ST; // prevent 'might be used uninitialized' warnings + if( s != NULL) + { + // change status of lane to "waiting" + prev_status = s->status; // RUNNING, most likely + ASSERT_L( prev_status == RUNNING); // but check, just in case + s->status = WAITING; + ASSERT_L( s->waiting_on == NULL); + s->waiting_on = &linda->read_happened; + } + // could not send because no room: wait until some data was read before trying again, or until timeout is reached + try_again = SIGNAL_WAIT( &linda->read_happened, &K->keeper_cs, timeout); + if( s != NULL) + { + s->waiting_on = NULL; + s->status = prev_status; + } + } + } + STACK_END( KL, 0); + } + + if( pushed < 0) + { + return luaL_error( L, "tried to copy unsupported types"); + } + + switch( cancel) + { + case CANCEL_SOFT: + // if user wants to soft-cancel, the call returns lanes.cancel_error + push_unique_key( L, CANCEL_ERROR); + return 1; + + case CANCEL_HARD: + // raise an error interrupting execution only in case of hard cancel + return cancel_error( L); // raises an error and doesn't return + + default: + lua_pushboolean( L, ret); // true (success) or false (timeout) + return 1; + } +} + + +/* + * 2 modes of operation + * [val, key]= linda_receive( linda_ud, [timeout_secs_num=-1], key_num|str|bool|lightuserdata [, ...] ) + * Consumes a single value from the Linda, in any key. + * Returns: received value (which is consumed from the slot), and the key which had it + + * [val1, ... valCOUNT]= linda_receive( linda_ud, [timeout_secs_num=-1], linda.batched, key_num|str|bool|lightuserdata, min_COUNT[, max_COUNT]) + * Consumes between min_COUNT and max_COUNT values from the linda, from a single key. + * returns the actual consumed values, or nil if there weren't enough values to consume + * + */ +#define BATCH_SENTINEL "270e6c9d-280f-4983-8fee-a7ecdda01475" +LUAG_FUNC( linda_receive) +{ + struct s_Linda* linda = lua_toLinda( L, 1); + int pushed, expected_pushed_min, expected_pushed_max; + enum e_cancel_request cancel = CANCEL_NONE; + keeper_api_t keeper_receive; + + time_d timeout = -1.0; + uint_t key_i = 2; + + if( lua_type( L, 2) == LUA_TNUMBER) // we don't want to use lua_isnumber() because of autocoercion + { + timeout = SIGNAL_TIMEOUT_PREPARE( lua_tonumber( L, 2)); + ++ key_i; + } + else if( lua_isnil( L, 2)) // alternate explicit "no timeout" by passing nil before the key + { + ++ key_i; + } + + // are we in batched mode? 
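Both call shapes handled here can be exercised from a script; a small sketch assuming a configured lanes module, with results captured in tables so the exact return ordering is left to the contract documented above:

#include "lua.hpp"

static void demo_linda_receive_modes(lua_State* L)
{
    luaL_dostring(L,
        "local lanes = require 'lanes'.configure()\n"
        "local l = lanes.linda('demo')\n"
        "l:send('jobs', 'a', 'b', 'c')\n"
        "local single = { l:receive(0.5, 'jobs') }               -- one value from any listed key\n"
        "local batch = { l:receive(0.5, l.batched, 'jobs', 2) }  -- at least 2 values from 'jobs'\n");
}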
+ { + int is_batched; + lua_pushliteral( L, BATCH_SENTINEL); + is_batched = lua501_equal( L, key_i, -1); + lua_pop( L, 1); + if( is_batched) + { + // no need to pass linda.batched in the keeper state + ++ key_i; + // make sure the keys are of a valid type + check_key_types( L, key_i, key_i); + // receive multiple values from a single slot + keeper_receive = KEEPER_API( receive_batched); + // we expect a user-defined amount of return value + expected_pushed_min = (int)luaL_checkinteger( L, key_i + 1); + expected_pushed_max = (int)luaL_optinteger( L, key_i + 2, expected_pushed_min); + // don't forget to count the key in addition to the values + ++ expected_pushed_min; + ++ expected_pushed_max; + if( expected_pushed_min > expected_pushed_max) + { + return luaL_error( L, "batched min/max error"); + } + } + else + { + // make sure the keys are of a valid type + check_key_types( L, key_i, lua_gettop( L)); + // receive a single value, checking multiple slots + keeper_receive = KEEPER_API( receive); + // we expect a single (value, key) pair of returned values + expected_pushed_min = expected_pushed_max = 2; + } + } + + { + bool_t try_again = TRUE; + Lane* const s = get_lane_from_registry( L); + Keeper* K = which_keeper( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); + if( K == NULL) return 0; + for( ;;) + { + if( s != NULL) + { + cancel = s->cancel_request; + } + cancel = (cancel != CANCEL_NONE) ? cancel : linda->simulate_cancel; + // if user wants to cancel, or looped because of a timeout, the call returns without sending anything + if( !try_again || cancel != CANCEL_NONE) + { + pushed = 0; + break; + } + + // all arguments of receive() but the first are passed to the keeper's receive function + pushed = keeper_call( linda->U, K->L, keeper_receive, L, linda, key_i); + if( pushed < 0) + { + break; + } + if( pushed > 0) + { + ASSERT_L( pushed >= expected_pushed_min && pushed <= expected_pushed_max); + // replace sentinels with real nils + keeper_toggle_nil_sentinels( L, lua_gettop( L) - pushed, eLM_FromKeeper); + // To be done from within the 'K' locking area + // + SIGNAL_ALL( &linda->read_happened); + break; + } + + if( timeout == 0.0) + { + break; /* instant timeout */ + } + + // nothing received, wait until timeout or signalled that we should try again + { + enum e_status prev_status = ERROR_ST; // prevent 'might be used uninitialized' warnings + if( s != NULL) + { + // change status of lane to "waiting" + prev_status = s->status; // RUNNING, most likely + ASSERT_L( prev_status == RUNNING); // but check, just in case + s->status = WAITING; + ASSERT_L( s->waiting_on == NULL); + s->waiting_on = &linda->write_happened; + } + // not enough data to read: wakeup when data was sent, or when timeout is reached + try_again = SIGNAL_WAIT( &linda->write_happened, &K->keeper_cs, timeout); + if( s != NULL) + { + s->waiting_on = NULL; + s->status = prev_status; + } + } + } + } + + if( pushed < 0) + { + return luaL_error( L, "tried to copy unsupported types"); + } + + switch( cancel) + { + case CANCEL_SOFT: + // if user wants to soft-cancel, the call returns CANCEL_ERROR + push_unique_key( L, CANCEL_ERROR); + return 1; + + case CANCEL_HARD: + // raise an error interrupting execution only in case of hard cancel + return cancel_error( L); // raises an error and doesn't return + + default: + return pushed; + } +} + + +/* +* [true|lanes.cancel_error] = linda_set( linda_ud, key_num|str|bool|lightuserdata [, value [, ...]]) +* +* Set one or more value to Linda. 
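Seen from a script, set() overwrites rather than queues; a short sketch assuming a configured lanes module:

#include "lua.hpp"

static void demo_linda_set(lua_State* L)
{
    luaL_dostring(L,
        "local lanes = require 'lanes'.configure()\n"
        "local l = lanes.linda()\n"
        "l:set('config', 'v1', 'v2')  -- replaces anything queued under 'config' with these values\n"
        "l:set('config')              -- no value given: the slot ends up empty\n");
}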
+* TODO: what do we do if we set to non-nil and limit is 0? +* +* Existing slot value is replaced, and possible queued entries removed. +*/ +LUAG_FUNC( linda_set) +{ + struct s_Linda* const linda = lua_toLinda( L, 1); + int pushed; + bool_t has_value = lua_gettop( L) > 2; + + // make sure the key is of a valid type (throws an error if not the case) + check_key_types( L, 2, 2); + + { + Keeper* K = which_keeper( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); + + if( linda->simulate_cancel == CANCEL_NONE) + { + if( has_value) + { + // convert nils to some special non-nil sentinel in sent values + keeper_toggle_nil_sentinels( L, 3, eLM_ToKeeper); + } + pushed = keeper_call( linda->U, K->L, KEEPER_API( set), L, linda, 2); + if( pushed >= 0) // no error? + { + ASSERT_L( pushed == 0 || pushed == 1); + + if( has_value) + { + // we put some data in the slot, tell readers that they should wake + SIGNAL_ALL( &linda->write_happened); // To be done from within the 'K' locking area + } + if( pushed == 1) + { + // the key was full, but it is no longer the case, tell writers they should wake + ASSERT_L( lua_type( L, -1) == LUA_TBOOLEAN && lua_toboolean( L, -1) == 1); + SIGNAL_ALL( &linda->read_happened); // To be done from within the 'K' locking area + } + } + } + else // linda is cancelled + { + // do nothing and return lanes.cancel_error + push_unique_key( L, CANCEL_ERROR); + pushed = 1; + } + } + + // must trigger any error after keeper state has been released + return (pushed < 0) ? luaL_error( L, "tried to copy unsupported types") : pushed; +} + + +/* + * [val] = linda_count( linda_ud, [key [, ...]]) + * + * Get a count of the pending elements in the specified keys + */ +LUAG_FUNC( linda_count) +{ + struct s_Linda* linda = lua_toLinda( L, 1); + int pushed; + + // make sure the keys are of a valid type + check_key_types( L, 2, lua_gettop( L)); + + { + Keeper* K = which_keeper( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); + pushed = keeper_call( linda->U, K->L, KEEPER_API( count), L, linda, 2); + if( pushed < 0) + { + return luaL_error( L, "tried to count an invalid key"); + } + } + return pushed; +} + + +/* +* [val [, ...]] = linda_get( linda_ud, key_num|str|bool|lightuserdata [, count = 1]) +* +* Get one or more values from Linda. +*/ +LUAG_FUNC( linda_get) +{ + struct s_Linda* const linda = lua_toLinda( L, 1); + int pushed; + lua_Integer count = luaL_optinteger( L, 3, 1); + luaL_argcheck( L, count >= 1, 3, "count should be >= 1"); + luaL_argcheck( L, lua_gettop( L) <= 3, 4, "too many arguments"); + + // make sure the key is of a valid type (throws an error if not the case) + check_key_types( L, 2, 2); + { + Keeper* K = which_keeper( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); + + if( linda->simulate_cancel == CANCEL_NONE) + { + pushed = keeper_call( linda->U, K->L, KEEPER_API( get), L, linda, 2); + if( pushed > 0) + { + keeper_toggle_nil_sentinels( L, lua_gettop( L) - pushed, eLM_FromKeeper); + } + } + else // linda is cancelled + { + // do nothing and return lanes.cancel_error + push_unique_key( L, CANCEL_ERROR); + pushed = 1; + } + // an error can be raised if we attempt to read an unregistered function + if( pushed < 0) + { + return luaL_error( L, "tried to copy unsupported types"); + } + } + + return pushed; +} + + +/* +* [true] = linda_limit( linda_ud, key_num|str|bool|lightuserdata, int) +* +* Set limit to 1 Linda keys. 
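This is what a bounded queue built on limit() looks like from a script; a sketch assuming a configured lanes module, where send() times out once the limit is reached:

#include "lua.hpp"

static void demo_linda_limit(lua_State* L)
{
    luaL_dostring(L,
        "local lanes = require 'lanes'.configure()\n"
        "local l = lanes.linda()\n"
        "l:limit('jobs', 2)                -- at most 2 pending values under 'jobs'\n"
        "l:send('jobs', 1); l:send('jobs', 2)\n"
        "local ok = l:send(0, 'jobs', 3)   -- queue is full and timeout is 0: returns false\n");
}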
+* Optionally wake threads waiting to write on the linda, in case the limit enables them to do so
+*/
+LUAG_FUNC( linda_limit)
+{
+    struct s_Linda* linda = lua_toLinda( L, 1);
+    int pushed;
+
+    // make sure we got 3 arguments: the linda, a key and a limit
+    luaL_argcheck( L, lua_gettop( L) == 3, 2, "wrong number of arguments");
+    // make sure we got a numeric limit
+    luaL_checknumber( L, 3);
+    // make sure the key is of a valid type
+    check_key_types( L, 2, 2);
+
+    {
+        Keeper* K = which_keeper( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda));
+
+        if( linda->simulate_cancel == CANCEL_NONE)
+        {
+            pushed = keeper_call( linda->U, K->L, KEEPER_API( limit), L, linda, 2);
+            ASSERT_L( pushed == 0 || pushed == 1); // no error, optional boolean value saying if we should wake blocked writer threads
+            if( pushed == 1)
+            {
+                ASSERT_L( lua_type( L, -1) == LUA_TBOOLEAN && lua_toboolean( L, -1) == 1);
+                SIGNAL_ALL( &linda->read_happened); // To be done from within the 'K' locking area
+            }
+        }
+        else // linda is cancelled
+        {
+            // do nothing and return lanes.cancel_error
+            push_unique_key( L, CANCEL_ERROR);
+            pushed = 1;
+        }
+    }
+    // propagate pushed boolean if any
+    return pushed;
+}
+
+
+/*
+* (void) = linda_cancel( linda_ud, "read"|"write"|"both"|"none")
+*
+* Signal linda so that waiting threads wake up as if their own lane was cancelled
+*/
+LUAG_FUNC( linda_cancel)
+{
+    struct s_Linda* linda = lua_toLinda( L, 1);
+    char const* who = luaL_optstring( L, 2, "both");
+
+    // make sure we got at most 2 arguments: the linda and an optional wake hint
+    luaL_argcheck( L, lua_gettop( L) <= 2, 2, "wrong number of arguments");
+
+    linda->simulate_cancel = CANCEL_SOFT;
+    if( strcmp( who, "both") == 0) // tell everyone (readers and writers) to wake up
+    {
+        SIGNAL_ALL( &linda->write_happened);
+        SIGNAL_ALL( &linda->read_happened);
+    }
+    else if( strcmp( who, "none") == 0) // reset flag
+    {
+        linda->simulate_cancel = CANCEL_NONE;
+    }
+    else if( strcmp( who, "read") == 0) // tell blocked readers to wake up
+    {
+        SIGNAL_ALL( &linda->write_happened);
+    }
+    else if( strcmp( who, "write") == 0) // tell blocked writers to wake up
+    {
+        SIGNAL_ALL( &linda->read_happened);
+    }
+    else
+    {
+        return luaL_error( L, "unknown wake hint '%s'", who);
+    }
+    return 0;
+}
+
+
+/*
+* lightuserdata= linda_deep( linda_ud )
+*
+* Return the 'deep' userdata pointer, identifying the Linda.
+*
+* This is needed for using Lindas as key indices (timer system needs it);
+* separately created proxies of the same underlying deep object will have
+* different userdata and won't be known to be essentially the same deep one
+* without this.
+*/ +LUAG_FUNC( linda_deep) +{ + struct s_Linda* linda= lua_toLinda( L, 1); + lua_pushlightuserdata( L, linda); // just the address + return 1; +} + + +/* +* string = linda:__tostring( linda_ud) +* +* Return the stringification of a linda +* +* Useful for concatenation or debugging purposes +*/ + +static int linda_tostring( lua_State* L, int idx_, bool_t opt_) +{ + struct s_Linda* linda = (struct s_Linda*) luaG_todeep( L, linda_id, idx_); + if( !opt_) + { + luaL_argcheck( L, linda, idx_, "expecting a linda object"); + } + if( linda != NULL) + { + char text[128]; + int len; + if( linda->name[0]) + len = sprintf( text, "Linda: %.*s", (int)sizeof(text) - 8, linda->name); + else + len = sprintf( text, "Linda: %p", linda); + lua_pushlstring( L, text, len); + return 1; + } + return 0; +} + +LUAG_FUNC( linda_tostring) +{ + return linda_tostring( L, 1, FALSE); +} + + +/* +* string = linda:__concat( a, b) +* +* Return the concatenation of a pair of items, one of them being a linda +* +* Useful for concatenation or debugging purposes +*/ +LUAG_FUNC( linda_concat) +{ // linda1? linda2? + bool_t atLeastOneLinda = FALSE; + // Lua semantics enforce that one of the 2 arguments is a Linda, but not necessarily both. + if( linda_tostring( L, 1, TRUE)) + { + atLeastOneLinda = TRUE; + lua_replace( L, 1); + } + if( linda_tostring( L, 2, TRUE)) + { + atLeastOneLinda = TRUE; + lua_replace( L, 2); + } + if( !atLeastOneLinda) // should not be possible + { + return luaL_error( L, "internal error: linda_concat called on non-Linda"); + } + lua_concat( L, 2); + return 1; +} + +/* + * table = linda:dump() + * return a table listing all pending data inside the linda + */ +LUAG_FUNC( linda_dump) +{ + struct s_Linda* linda = lua_toLinda( L, 1); + ASSERT_L( linda->U == universe_get( L)); + return keeper_push_linda_storage( linda->U, L, linda, LINDA_KEEPER_HASHSEED( linda)); +} + +/* + * table = linda:dump() + * return a table listing all pending data inside the linda + */ +LUAG_FUNC( linda_towatch) +{ + struct s_Linda* linda = lua_toLinda( L, 1); + int pushed; + ASSERT_L( linda->U == universe_get( L)); + pushed = keeper_push_linda_storage( linda->U, L, linda, LINDA_KEEPER_HASHSEED( linda)); + if( pushed == 0) + { + // if the linda is empty, don't return nil + pushed = linda_tostring( L, 1, FALSE); + } + return pushed; +} + +/* +* Identity function of a shared userdata object. +* +* lightuserdata= linda_id( "new" [, ...] ) +* = linda_id( "delete", lightuserdata ) +* +* Creation and cleanup of actual 'deep' objects. 'luaG_...' will wrap them into +* regular userdata proxies, per each state using the deep data. +* +* tbl= linda_id( "metatable" ) +* +* Returns a metatable for the proxy objects ('__gc' method not needed; will +* be added by 'luaG_...') +* +* string= linda_id( "module") +* +* Returns the name of the module that a state should require +* in order to keep a handle on the shared library that exported the idfunc +* +* = linda_id( str, ... ) +* +* For any other strings, the ID function must not react at all. This allows +* future extensions of the system. 
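The contract just described is what linda_id implements below. As a hedged sketch of what a minimal identity function for some other deep type could look like (MyDeep, mydeep_id and the "mymodule" name are placeholders; a real idfunc must also fill in the deep prelude/version field the way linda_id does, and may use any allocation scheme except L's own allocator):

    // Illustrative skeleton only; follows the idfunc contract documented above.
    // MyDeep and "mymodule" are placeholders, not part of Lanes. Assumes <cstdlib> and the Lanes deep headers.
    struct MyDeep
    {
        // a real deep object also needs the prelude/version setup that linda_id performs (omitted here)
        int payload;
    };

    static void* mydeep_id( lua_State* L, DeepOp op_)
    {
        switch( op_)
        {
            case eDO_new: // create the shared object (any allocation scheme, but not L's allocator)
            {
                MyDeep* d = static_cast<MyDeep*>( malloc( sizeof( MyDeep)));
                if( d != nullptr)
                {
                    d->payload = 0;
                }
                return d;
            }

            case eDO_delete: // last proxy was collected: release the shared object
            {
                free( lua_touserdata( L, 1));
                return nullptr;
            }

            case eDO_metatable: // build the proxy metatable; '__gc' is added by the deep machinery
            {
                lua_newtable( L);
                lua_pushvalue( L, -1);
                lua_setfield( L, -2, "__index");
                return nullptr;
            }

            case eDO_module: // module receiving states should require(); returning it as a C string is an assumption based on the comment above
                return (void*) "mymodule";

            default: // unknown operations must be ignored, to allow future extensions
                return nullptr;
        }
    }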
+*/ +static void* linda_id( lua_State* L, DeepOp op_) +{ + switch( op_) + { + case eDO_new: + { + struct s_Linda* s; + size_t name_len = 0; + char const* linda_name = NULL; + unsigned long linda_group = 0; + // should have a string and/or a number of the stack as parameters (name and group) + switch( lua_gettop( L)) + { + default: // 0 + break; + + case 1: // 1 parameter, either a name or a group + if( lua_type( L, -1) == LUA_TSTRING) + { + linda_name = lua_tolstring( L, -1, &name_len); + } + else + { + linda_group = (unsigned long) lua_tointeger( L, -1); + } + break; + + case 2: // 2 parameters, a name and group, in that order + linda_name = lua_tolstring( L, -2, &name_len); + linda_group = (unsigned long) lua_tointeger( L, -1); + break; + } + + /* The deep data is allocated separately of Lua stack; we might no + * longer be around when last reference to it is being released. + * One can use any memory allocation scheme. + * just don't use L's allocF because we don't know which state will get the honor of GCing the linda + */ + { + Universe* const U = universe_get(L); + AllocatorDefinition* const allocD = &U->internal_allocator; + s = (struct s_Linda*) allocD->allocF(allocD->allocUD, NULL, 0, sizeof(struct s_Linda) + name_len); // terminating 0 is already included + } + if( s) + { + s->prelude.magic.value = DEEP_VERSION.value; + SIGNAL_INIT( &s->read_happened); + SIGNAL_INIT( &s->write_happened); + s->U = universe_get( L); + s->simulate_cancel = CANCEL_NONE; + s->group = linda_group << KEEPER_MAGIC_SHIFT; + s->name[0] = 0; + memcpy( s->name, linda_name, name_len ? name_len + 1 : 0); + } + return s; + } + + case eDO_delete: + { + Keeper* K; + struct s_Linda* linda = (struct s_Linda*) lua_touserdata( L, 1); + ASSERT_L( linda); + + // Clean associated structures in the keeper state. + K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); + if( K && K->L) // can be NULL if this happens during main state shutdown (lanes is GC'ed -> no keepers -> no need to cleanup) + { + // hopefully this won't ever raise an error as we would jump to the closest pcall site while forgetting to release the keeper mutex... + keeper_call( linda->U, K->L, KEEPER_API( clear), L, linda, 0); + } + keeper_release( K); + + // There aren't any lanes waiting on these lindas, since all proxies have been gc'ed. Right? 
+ SIGNAL_FREE( &linda->read_happened); + SIGNAL_FREE( &linda->write_happened); + { + Universe* const U = universe_get(L); + AllocatorDefinition* const allocD = &U->internal_allocator; + (void) allocD->allocF(allocD->allocUD, linda, sizeof(struct s_Linda) + strlen(linda->name), 0); + } + return NULL; + } + + case eDO_metatable: + { + + STACK_CHECK( L, 0); + lua_newtable( L); + // metatable is its own index + lua_pushvalue( L, -1); + lua_setfield( L, -2, "__index"); + + // protect metatable from external access + lua_pushliteral( L, "Linda"); + lua_setfield( L, -2, "__metatable"); + + lua_pushcfunction( L, LG_linda_tostring); + lua_setfield( L, -2, "__tostring"); + + // Decoda __towatch support + lua_pushcfunction( L, LG_linda_towatch); + lua_setfield( L, -2, "__towatch"); + + lua_pushcfunction( L, LG_linda_concat); + lua_setfield( L, -2, "__concat"); + + // protected calls, to ensure associated keeper is always released even in case of error + // all function are the protected call wrapper, where the actual operation is provided as upvalue + // note that this kind of thing can break function lookup as we use the function pointer here and there + + lua_pushcfunction( L, LG_linda_send); + lua_pushcclosure( L, LG_linda_protected_call, 1); + lua_setfield( L, -2, "send"); + + lua_pushcfunction( L, LG_linda_receive); + lua_pushcclosure( L, LG_linda_protected_call, 1); + lua_setfield( L, -2, "receive"); + + lua_pushcfunction( L, LG_linda_limit); + lua_pushcclosure( L, LG_linda_protected_call, 1); + lua_setfield( L, -2, "limit"); + + lua_pushcfunction( L, LG_linda_set); + lua_pushcclosure( L, LG_linda_protected_call, 1); + lua_setfield( L, -2, "set"); + + lua_pushcfunction( L, LG_linda_count); + lua_pushcclosure( L, LG_linda_protected_call, 1); + lua_setfield( L, -2, "count"); + + lua_pushcfunction( L, LG_linda_get); + lua_pushcclosure( L, LG_linda_protected_call, 1); + lua_setfield( L, -2, "get"); + + lua_pushcfunction( L, LG_linda_cancel); + lua_setfield( L, -2, "cancel"); + + lua_pushcfunction( L, LG_linda_deep); + lua_setfield( L, -2, "deep"); + + lua_pushcfunction( L, LG_linda_dump); + lua_pushcclosure( L, LG_linda_protected_call, 1); + lua_setfield( L, -2, "dump"); + + // some constants + lua_pushliteral( L, BATCH_SENTINEL); + lua_setfield( L, -2, "batched"); + + push_unique_key( L, NIL_SENTINEL); + lua_setfield( L, -2, "null"); + + STACK_END( L, 1); + return NULL; + } + + case eDO_module: + // linda is a special case because we know lanes must be loaded from the main lua state + // to be able to ever get here, so we know it will remain loaded as long a the main state is around + // in other words, forever. + default: + { + return NULL; + } + } +} + +/* + * ud = lanes.linda( [name[,group]]) + * + * returns a linda object, or raises an error if creation failed + */ +LUAG_FUNC( linda) +{ + int const top = lua_gettop( L); + luaL_argcheck( L, top <= 2, top, "too many arguments"); + if( top == 1) + { + int const t = lua_type( L, 1); + luaL_argcheck( L, t == LUA_TSTRING || t == LUA_TNUMBER, 1, "wrong parameter (should be a string or a number)"); + } + else if( top == 2) + { + luaL_checktype( L, 1, LUA_TSTRING); + luaL_checktype( L, 2, LUA_TNUMBER); + } + return luaG_newdeepuserdata( L, linda_id, 0); +} diff --git a/src/state.c b/src/state.c deleted file mode 100644 index 85ad31e..0000000 --- a/src/state.c +++ /dev/null @@ -1,442 +0,0 @@ -/* -* STATE.C -* -* Lua tools to support Lanes. 
-*/ - -/* -=============================================================================== - -Copyright (C) 2002-10 Asko Kauppi -2011-21 benoit Germain - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - -=============================================================================== -*/ - -#include -#include -#include -#include -#include -#if !defined(__APPLE__) -#include -#endif // __APPLE__ - -#include "compat.h" -#include "macros_and_utils.h" -#include "universe.h" -#include "tools.h" -#include "lanes.h" - -// ################################################################################################ - -/*---=== Serialize require ===--- -*/ - -//--- -// [val,...]= new_require( ... ) -// -// Call 'old_require' but only one lane at a time. -// -// Upvalues: [1]: original 'require' function -// -static int luaG_new_require( lua_State* L) -{ - int rc; - int const args = lua_gettop( L); // args - Universe* U = universe_get( L); - //char const* modname = luaL_checkstring( L, 1); - - STACK_GROW( L, 1); - - lua_pushvalue( L, lua_upvalueindex( 1)); // args require - lua_insert( L, 1); // require args - - // Using 'lua_pcall()' to catch errors; otherwise a failing 'require' would - // leave us locked, blocking any future 'require' calls from other lanes. - - MUTEX_LOCK( &U->require_cs); - // starting with Lua 5.4, require may return a second optional value, so we need LUA_MULTRET - rc = lua_pcall( L, args, LUA_MULTRET, 0 /*errfunc*/ ); // err|result(s) - MUTEX_UNLOCK( &U->require_cs); - - // the required module (or an error message) is left on the stack as returned value by original require function - - if( rc != LUA_OK) // LUA_ERRRUN / LUA_ERRMEM ? 
- { - return lua_error( L); - } - // should be 1 for Lua <= 5.3, 1 or 2 starting with Lua 5.4 - return lua_gettop(L); // result(s) -} - -/* -* Serialize calls to 'require', if it exists -*/ -void serialize_require( DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State* L) -{ - STACK_GROW( L, 1); - STACK_CHECK( L, 0); - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "serializing require()\n" INDENT_END)); - - // Check 'require' is there and not already wrapped; if not, do nothing - // - lua_getglobal( L, "require"); - if( lua_isfunction( L, -1) && lua_tocfunction( L, -1) != luaG_new_require) - { - // [-1]: original 'require' function - lua_pushcclosure( L, luaG_new_require, 1 /*upvalues*/); - lua_setglobal( L, "require"); - } - else - { - // [-1]: nil - lua_pop( L, 1); - } - - STACK_END( L, 0); -} - -// ################################################################################################ - -/*---=== luaG_newstate ===---*/ - -static int require_lanes_core( lua_State* L) -{ - // leaves a copy of 'lanes.core' module table on the stack - luaL_requiref( L, "lanes.core", luaopen_lanes_core, 0); - return 1; -} - - -static const luaL_Reg libs[] = -{ - { LUA_LOADLIBNAME, luaopen_package}, - { LUA_TABLIBNAME, luaopen_table}, - { LUA_STRLIBNAME, luaopen_string}, - { LUA_MATHLIBNAME, luaopen_math}, -#ifndef PLATFORM_XBOX // no os/io libs on xbox - { LUA_OSLIBNAME, luaopen_os}, - { LUA_IOLIBNAME, luaopen_io}, -#endif // PLATFORM_XBOX -#if LUA_VERSION_NUM >= 503 - { LUA_UTF8LIBNAME, luaopen_utf8}, -#endif -#if LUA_VERSION_NUM >= 502 -#ifdef luaopen_bit32 - { LUA_BITLIBNAME, luaopen_bit32}, -#endif - { LUA_COLIBNAME, luaopen_coroutine}, // Lua 5.2: coroutine is no longer a part of base! -#else // LUA_VERSION_NUM - { LUA_COLIBNAME, NULL}, // Lua 5.1: part of base package -#endif // LUA_VERSION_NUM - { LUA_DBLIBNAME, luaopen_debug}, -#if LUAJIT_FLAVOR() != 0 // building against LuaJIT headers, add some LuaJIT-specific libs -//#pragma message( "supporting JIT base libs") - { LUA_BITLIBNAME, luaopen_bit}, - { LUA_JITLIBNAME, luaopen_jit}, - { LUA_FFILIBNAME, luaopen_ffi}, -#endif // LUAJIT_FLAVOR() - -{ LUA_DBLIBNAME, luaopen_debug}, -{ "lanes.core", require_lanes_core}, // So that we can open it like any base library (possible since we have access to the init function) - // -{ "base", NULL}, // ignore "base" (already acquired it) -{ NULL, NULL } -}; - -static void open1lib( DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State* L, char const* name_, size_t len_) -{ - int i; - for( i = 0; libs[i].name; ++ i) - { - if( strncmp( name_, libs[i].name, len_) == 0) - { - lua_CFunction libfunc = libs[i].func; - name_ = libs[i].name; // note that the provided name_ doesn't necessarily ends with '\0', hence len_ - if( libfunc != NULL) - { - bool_t const isLanesCore = (libfunc == require_lanes_core) ? 
TRUE : FALSE; // don't want to create a global for "lanes.core" - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "opening %.*s library\n" INDENT_END, (int) len_, name_)); - STACK_CHECK( L, 0); - // open the library as if through require(), and create a global as well if necessary (the library table is left on the stack) - luaL_requiref( L, name_, libfunc, !isLanesCore); - // lanes.core doesn't declare a global, so scan it here and now - if( isLanesCore == TRUE) - { - populate_func_lookup_table( L, -1, name_); - } - lua_pop( L, 1); - STACK_END( L, 0); - } - break; - } - } -} - - -// just like lua_xmove, args are (from, to) -static void copy_one_time_settings( Universe* U, lua_State* L, lua_State* L2) -{ - STACK_GROW( L, 2); - STACK_CHECK( L, 0); - STACK_CHECK( L2, 0); - - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "copy_one_time_settings()\n" INDENT_END)); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - - REGISTRY_GET( L, CONFIG_REGKEY); // config - // copy settings from from source to destination registry - if( luaG_inter_move( U, L, L2, 1, eLM_LaneBody) < 0) // // config - { - (void) luaL_error( L, "failed to copy settings when loading lanes.core"); - } - // set L2:_R[CONFIG_REGKEY] = settings - REGISTRY_SET( L2, CONFIG_REGKEY, lua_insert( L2, -2)); // - STACK_END( L2, 0); - STACK_END( L, 0); - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); -} - -void initialize_on_state_create( Universe* U, lua_State* L) -{ - STACK_CHECK( L, 0); - lua_getfield( L, -1, "on_state_create"); // settings on_state_create|nil - if( !lua_isnil( L, -1)) - { - // store C function pointer in an internal variable - U->on_state_create_func = lua_tocfunction( L, -1); // settings on_state_create - if( U->on_state_create_func != NULL) - { - // make sure the function doesn't have upvalues - char const* upname = lua_getupvalue( L, -1, 1); // settings on_state_create upval? - if( upname != NULL) // should be "" for C functions with upvalues if any - { - (void) luaL_error( L, "on_state_create shouldn't have upvalues"); - } - // remove this C function from the config table so that it doesn't cause problems - // when we transfer the config table in newly created Lua states - lua_pushnil( L); // settings on_state_create nil - lua_setfield( L, -3, "on_state_create"); // settings on_state_create - } - else - { - // optim: store marker saying we have such a function in the config table - U->on_state_create_func = (lua_CFunction) initialize_on_state_create; - } - } - lua_pop( L, 1); // settings - STACK_END( L, 0); -} - -lua_State* create_state( Universe* U, lua_State* from_) -{ - lua_State* L; -#if LUAJIT_FLAVOR() == 64 - // for some reason, LuaJIT 64 bits does not support creating a state with lua_newstate... 
- L = luaL_newstate(); -#else // LUAJIT_FLAVOR() == 64 - if( U->provide_allocator != NULL) // we have a function we can call to obtain an allocator - { - lua_pushcclosure( from_, U->provide_allocator, 0); - lua_call( from_, 0, 1); - { - AllocatorDefinition* const def = (AllocatorDefinition*) lua_touserdata( from_, -1); - L = lua_newstate( def->allocF, def->allocUD); - } - lua_pop( from_, 1); - } - else - { - // reuse the allocator provided when the master state was created - L = lua_newstate( U->protected_allocator.definition.allocF, U->protected_allocator.definition.allocUD); - } -#endif // LUAJIT_FLAVOR() == 64 - - if( L == NULL) - { - (void) luaL_error( from_, "luaG_newstate() failed while creating state; out of memory"); - } - return L; -} - -void call_on_state_create( Universe* U, lua_State* L, lua_State* from_, LookupMode mode_) -{ - if( U->on_state_create_func != NULL) - { - STACK_CHECK( L, 0); - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "calling on_state_create()\n" INDENT_END)); - if( U->on_state_create_func != (lua_CFunction) initialize_on_state_create) - { - // C function: recreate a closure in the new state, bypassing the lookup scheme - lua_pushcfunction( L, U->on_state_create_func); // on_state_create() - } - else // Lua function located in the config table, copied when we opened "lanes.core" - { - if( mode_ != eLM_LaneBody) - { - // if attempting to call in a keeper state, do nothing because the function doesn't exist there - // this doesn't count as an error though - return; - } - REGISTRY_GET( L, CONFIG_REGKEY); // {} - STACK_MID( L, 1); - lua_getfield( L, -1, "on_state_create"); // {} on_state_create() - lua_remove( L, -2); // on_state_create() - } - STACK_MID( L, 1); - // capture error and raise it in caller state - if( lua_pcall( L, 0, 0, 0) != LUA_OK) - { - luaL_error( from_, "on_state_create failed: \"%s\"", lua_isstring( L, -1) ? lua_tostring( L, -1) : lua_typename( L, lua_type( L, -1))); - } - STACK_END( L, 0); - } -} - -/* -* Like 'luaL_openlibs()' but allows the set of libraries be selected -* -* NULL no libraries, not even base -* "" base library only -* "io,string" named libraries -* "*" all libraries -* -* Base ("unpack", "print" etc.) is always added, unless 'libs' is NULL. -* -* *NOT* called for keeper states! -* -*/ -lua_State* luaG_newstate( Universe* U, lua_State* from_, char const* libs_) -{ - lua_State* L = create_state( U, from_); - - STACK_GROW( L, 2); - STACK_CHECK_ABS( L, 0); - - // copy the universe as a light userdata (only the master state holds the full userdata) - // that way, if Lanes is required in this new state, we'll know we are part of this universe - universe_store( L, U); - STACK_MID( L, 0); - - // we'll need this every time we transfer some C function from/to this state - REGISTRY_SET( L, LOOKUP_REGKEY, lua_newtable( L)); - STACK_MID( L, 0); - - // neither libs (not even 'base') nor special init func: we are done - if( libs_ == NULL && U->on_state_create_func == NULL) - { - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "luaG_newstate(NULL)\n" INDENT_END)); - return L; - } - - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "luaG_newstate()\n" INDENT_END)); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - - // copy settings (for example because it may contain a Lua on_state_create function) - copy_one_time_settings( U, from_, L); - - // 'lua.c' stops GC during initialization so perhaps its a good idea. 
:) - lua_gc( L, LUA_GCSTOP, 0); - - - // Anything causes 'base' to be taken in - // - if( libs_ != NULL) - { - // special "*" case (mainly to help with LuaJIT compatibility) - // as we are called from luaopen_lanes_core() already, and that would deadlock - if( libs_[0] == '*' && libs_[1] == 0) - { - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "opening ALL standard libraries\n" INDENT_END)); - luaL_openlibs( L); - // don't forget lanes.core for regular lane states - open1lib( DEBUGSPEW_PARAM_COMMA( U) L, "lanes.core", 10); - libs_ = NULL; // done with libs - } - else - { - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "opening base library\n" INDENT_END)); -#if LUA_VERSION_NUM >= 502 - // open base library the same way as in luaL_openlibs() - luaL_requiref( L, "_G", luaopen_base, 1); - lua_pop( L, 1); -#else // LUA_VERSION_NUM - lua_pushcfunction( L, luaopen_base); - lua_pushstring( L, ""); - lua_call( L, 1, 0); -#endif // LUA_VERSION_NUM - } - } - STACK_END( L, 0); - - // scan all libraries, open them one by one - if( libs_) - { - char const* p; - unsigned int len = 0; - for( p = libs_; *p; p += len) - { - // skip delimiters ('.' can be part of name for "lanes.core") - while( *p && !isalnum( *p) && *p != '.') - ++ p; - // skip name - len = 0; - while( isalnum( p[len]) || p[len] == '.') - ++ len; - // open library - open1lib( DEBUGSPEW_PARAM_COMMA( U) L, p, len); - } - } - lua_gc( L, LUA_GCRESTART, 0); - - serialize_require( DEBUGSPEW_PARAM_COMMA( U) L); - - // call this after the base libraries are loaded and GC is restarted - // will raise an error in from_ in case of problem - call_on_state_create( U, L, from_, eLM_LaneBody); - - STACK_CHECK( L, 0); - // after all this, register everything we find in our name<->function database - lua_pushglobaltable( L); // Lua 5.2 no longer has LUA_GLOBALSINDEX: we must push globals table on the stack - populate_func_lookup_table( L, -1, NULL); - -#if 0 && USE_DEBUG_SPEW() - // dump the lookup database contents - lua_getfield( L, LUA_REGISTRYINDEX, LOOKUP_REGKEY); // {} - lua_pushnil( L); // {} nil - while( lua_next( L, -2)) // {} k v - { - lua_getglobal( L, "print"); // {} k v print - lua_pushlstring( L, debugspew_indent, U->debugspew_indent_depth); // {} k v print " " - lua_pushvalue( L, -4); // {} k v print " " k - lua_pushvalue( L, -4); // {} k v print " " k v - lua_call( L, 3, 0); // {} k v - lua_pop( L, 1); // {} k - } - lua_pop( L, 1); // {} -#endif // USE_DEBUG_SPEW() - - lua_pop( L, 1); - STACK_END( L, 0); - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - return L; -} diff --git a/src/state.cpp b/src/state.cpp new file mode 100644 index 0000000..85ad31e --- /dev/null +++ b/src/state.cpp @@ -0,0 +1,442 @@ +/* +* STATE.C +* +* Lua tools to support Lanes. +*/ + +/* +=============================================================================== + +Copyright (C) 2002-10 Asko Kauppi +2011-21 benoit Germain + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +=============================================================================== +*/ + +#include +#include +#include +#include +#include +#if !defined(__APPLE__) +#include +#endif // __APPLE__ + +#include "compat.h" +#include "macros_and_utils.h" +#include "universe.h" +#include "tools.h" +#include "lanes.h" + +// ################################################################################################ + +/*---=== Serialize require ===--- +*/ + +//--- +// [val,...]= new_require( ... ) +// +// Call 'old_require' but only one lane at a time. +// +// Upvalues: [1]: original 'require' function +// +static int luaG_new_require( lua_State* L) +{ + int rc; + int const args = lua_gettop( L); // args + Universe* U = universe_get( L); + //char const* modname = luaL_checkstring( L, 1); + + STACK_GROW( L, 1); + + lua_pushvalue( L, lua_upvalueindex( 1)); // args require + lua_insert( L, 1); // require args + + // Using 'lua_pcall()' to catch errors; otherwise a failing 'require' would + // leave us locked, blocking any future 'require' calls from other lanes. + + MUTEX_LOCK( &U->require_cs); + // starting with Lua 5.4, require may return a second optional value, so we need LUA_MULTRET + rc = lua_pcall( L, args, LUA_MULTRET, 0 /*errfunc*/ ); // err|result(s) + MUTEX_UNLOCK( &U->require_cs); + + // the required module (or an error message) is left on the stack as returned value by original require function + + if( rc != LUA_OK) // LUA_ERRRUN / LUA_ERRMEM ? 
+ { + return lua_error( L); + } + // should be 1 for Lua <= 5.3, 1 or 2 starting with Lua 5.4 + return lua_gettop(L); // result(s) +} + +/* +* Serialize calls to 'require', if it exists +*/ +void serialize_require( DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State* L) +{ + STACK_GROW( L, 1); + STACK_CHECK( L, 0); + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "serializing require()\n" INDENT_END)); + + // Check 'require' is there and not already wrapped; if not, do nothing + // + lua_getglobal( L, "require"); + if( lua_isfunction( L, -1) && lua_tocfunction( L, -1) != luaG_new_require) + { + // [-1]: original 'require' function + lua_pushcclosure( L, luaG_new_require, 1 /*upvalues*/); + lua_setglobal( L, "require"); + } + else + { + // [-1]: nil + lua_pop( L, 1); + } + + STACK_END( L, 0); +} + +// ################################################################################################ + +/*---=== luaG_newstate ===---*/ + +static int require_lanes_core( lua_State* L) +{ + // leaves a copy of 'lanes.core' module table on the stack + luaL_requiref( L, "lanes.core", luaopen_lanes_core, 0); + return 1; +} + + +static const luaL_Reg libs[] = +{ + { LUA_LOADLIBNAME, luaopen_package}, + { LUA_TABLIBNAME, luaopen_table}, + { LUA_STRLIBNAME, luaopen_string}, + { LUA_MATHLIBNAME, luaopen_math}, +#ifndef PLATFORM_XBOX // no os/io libs on xbox + { LUA_OSLIBNAME, luaopen_os}, + { LUA_IOLIBNAME, luaopen_io}, +#endif // PLATFORM_XBOX +#if LUA_VERSION_NUM >= 503 + { LUA_UTF8LIBNAME, luaopen_utf8}, +#endif +#if LUA_VERSION_NUM >= 502 +#ifdef luaopen_bit32 + { LUA_BITLIBNAME, luaopen_bit32}, +#endif + { LUA_COLIBNAME, luaopen_coroutine}, // Lua 5.2: coroutine is no longer a part of base! +#else // LUA_VERSION_NUM + { LUA_COLIBNAME, NULL}, // Lua 5.1: part of base package +#endif // LUA_VERSION_NUM + { LUA_DBLIBNAME, luaopen_debug}, +#if LUAJIT_FLAVOR() != 0 // building against LuaJIT headers, add some LuaJIT-specific libs +//#pragma message( "supporting JIT base libs") + { LUA_BITLIBNAME, luaopen_bit}, + { LUA_JITLIBNAME, luaopen_jit}, + { LUA_FFILIBNAME, luaopen_ffi}, +#endif // LUAJIT_FLAVOR() + +{ LUA_DBLIBNAME, luaopen_debug}, +{ "lanes.core", require_lanes_core}, // So that we can open it like any base library (possible since we have access to the init function) + // +{ "base", NULL}, // ignore "base" (already acquired it) +{ NULL, NULL } +}; + +static void open1lib( DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State* L, char const* name_, size_t len_) +{ + int i; + for( i = 0; libs[i].name; ++ i) + { + if( strncmp( name_, libs[i].name, len_) == 0) + { + lua_CFunction libfunc = libs[i].func; + name_ = libs[i].name; // note that the provided name_ doesn't necessarily ends with '\0', hence len_ + if( libfunc != NULL) + { + bool_t const isLanesCore = (libfunc == require_lanes_core) ? 
TRUE : FALSE; // don't want to create a global for "lanes.core" + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "opening %.*s library\n" INDENT_END, (int) len_, name_)); + STACK_CHECK( L, 0); + // open the library as if through require(), and create a global as well if necessary (the library table is left on the stack) + luaL_requiref( L, name_, libfunc, !isLanesCore); + // lanes.core doesn't declare a global, so scan it here and now + if( isLanesCore == TRUE) + { + populate_func_lookup_table( L, -1, name_); + } + lua_pop( L, 1); + STACK_END( L, 0); + } + break; + } + } +} + + +// just like lua_xmove, args are (from, to) +static void copy_one_time_settings( Universe* U, lua_State* L, lua_State* L2) +{ + STACK_GROW( L, 2); + STACK_CHECK( L, 0); + STACK_CHECK( L2, 0); + + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "copy_one_time_settings()\n" INDENT_END)); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + + REGISTRY_GET( L, CONFIG_REGKEY); // config + // copy settings from from source to destination registry + if( luaG_inter_move( U, L, L2, 1, eLM_LaneBody) < 0) // // config + { + (void) luaL_error( L, "failed to copy settings when loading lanes.core"); + } + // set L2:_R[CONFIG_REGKEY] = settings + REGISTRY_SET( L2, CONFIG_REGKEY, lua_insert( L2, -2)); // + STACK_END( L2, 0); + STACK_END( L, 0); + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); +} + +void initialize_on_state_create( Universe* U, lua_State* L) +{ + STACK_CHECK( L, 0); + lua_getfield( L, -1, "on_state_create"); // settings on_state_create|nil + if( !lua_isnil( L, -1)) + { + // store C function pointer in an internal variable + U->on_state_create_func = lua_tocfunction( L, -1); // settings on_state_create + if( U->on_state_create_func != NULL) + { + // make sure the function doesn't have upvalues + char const* upname = lua_getupvalue( L, -1, 1); // settings on_state_create upval? + if( upname != NULL) // should be "" for C functions with upvalues if any + { + (void) luaL_error( L, "on_state_create shouldn't have upvalues"); + } + // remove this C function from the config table so that it doesn't cause problems + // when we transfer the config table in newly created Lua states + lua_pushnil( L); // settings on_state_create nil + lua_setfield( L, -3, "on_state_create"); // settings on_state_create + } + else + { + // optim: store marker saying we have such a function in the config table + U->on_state_create_func = (lua_CFunction) initialize_on_state_create; + } + } + lua_pop( L, 1); // settings + STACK_END( L, 0); +} + +lua_State* create_state( Universe* U, lua_State* from_) +{ + lua_State* L; +#if LUAJIT_FLAVOR() == 64 + // for some reason, LuaJIT 64 bits does not support creating a state with lua_newstate... 
+ L = luaL_newstate(); +#else // LUAJIT_FLAVOR() == 64 + if( U->provide_allocator != NULL) // we have a function we can call to obtain an allocator + { + lua_pushcclosure( from_, U->provide_allocator, 0); + lua_call( from_, 0, 1); + { + AllocatorDefinition* const def = (AllocatorDefinition*) lua_touserdata( from_, -1); + L = lua_newstate( def->allocF, def->allocUD); + } + lua_pop( from_, 1); + } + else + { + // reuse the allocator provided when the master state was created + L = lua_newstate( U->protected_allocator.definition.allocF, U->protected_allocator.definition.allocUD); + } +#endif // LUAJIT_FLAVOR() == 64 + + if( L == NULL) + { + (void) luaL_error( from_, "luaG_newstate() failed while creating state; out of memory"); + } + return L; +} + +void call_on_state_create( Universe* U, lua_State* L, lua_State* from_, LookupMode mode_) +{ + if( U->on_state_create_func != NULL) + { + STACK_CHECK( L, 0); + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "calling on_state_create()\n" INDENT_END)); + if( U->on_state_create_func != (lua_CFunction) initialize_on_state_create) + { + // C function: recreate a closure in the new state, bypassing the lookup scheme + lua_pushcfunction( L, U->on_state_create_func); // on_state_create() + } + else // Lua function located in the config table, copied when we opened "lanes.core" + { + if( mode_ != eLM_LaneBody) + { + // if attempting to call in a keeper state, do nothing because the function doesn't exist there + // this doesn't count as an error though + return; + } + REGISTRY_GET( L, CONFIG_REGKEY); // {} + STACK_MID( L, 1); + lua_getfield( L, -1, "on_state_create"); // {} on_state_create() + lua_remove( L, -2); // on_state_create() + } + STACK_MID( L, 1); + // capture error and raise it in caller state + if( lua_pcall( L, 0, 0, 0) != LUA_OK) + { + luaL_error( from_, "on_state_create failed: \"%s\"", lua_isstring( L, -1) ? lua_tostring( L, -1) : lua_typename( L, lua_type( L, -1))); + } + STACK_END( L, 0); + } +} + +/* +* Like 'luaL_openlibs()' but allows the set of libraries be selected +* +* NULL no libraries, not even base +* "" base library only +* "io,string" named libraries +* "*" all libraries +* +* Base ("unpack", "print" etc.) is always added, unless 'libs' is NULL. +* +* *NOT* called for keeper states! +* +*/ +lua_State* luaG_newstate( Universe* U, lua_State* from_, char const* libs_) +{ + lua_State* L = create_state( U, from_); + + STACK_GROW( L, 2); + STACK_CHECK_ABS( L, 0); + + // copy the universe as a light userdata (only the master state holds the full userdata) + // that way, if Lanes is required in this new state, we'll know we are part of this universe + universe_store( L, U); + STACK_MID( L, 0); + + // we'll need this every time we transfer some C function from/to this state + REGISTRY_SET( L, LOOKUP_REGKEY, lua_newtable( L)); + STACK_MID( L, 0); + + // neither libs (not even 'base') nor special init func: we are done + if( libs_ == NULL && U->on_state_create_func == NULL) + { + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "luaG_newstate(NULL)\n" INDENT_END)); + return L; + } + + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "luaG_newstate()\n" INDENT_END)); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + + // copy settings (for example because it may contain a Lua on_state_create function) + copy_one_time_settings( U, from_, L); + + // 'lua.c' stops GC during initialization so perhaps its a good idea. 
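In the non-LuaJIT branch of create_state() above, U->provide_allocator is called with no arguments and its single result is read back as an AllocatorDefinition whose allocF/allocUD are handed to lua_newstate. A hedged sketch of such a provider (my_lua_alloc and the light-userdata return are illustrative choices; only the allocF/allocUD field names come from the code above):

    // Illustrative provider for U->provide_allocator, as consumed by create_state() above.
    #include <cstdlib>

    static void* my_lua_alloc( void* ud, void* ptr, size_t osize, size_t nsize)
    {
        (void) ud; (void) osize;               // standard lua_Alloc contract
        if( nsize == 0)
        {
            free( ptr);
            return nullptr;
        }
        return realloc( ptr, nsize);
    }

    static AllocatorDefinition my_allocator_def; // filled once, handed back as a light userdata

    static int provide_allocator( lua_State* L)
    {
        my_allocator_def.allocF = my_lua_alloc;
        my_allocator_def.allocUD = nullptr;
        lua_pushlightuserdata( L, &my_allocator_def);
        return 1;                               // create_state() reads it with lua_touserdata( from_, -1)
    }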
:) + lua_gc( L, LUA_GCSTOP, 0); + + + // Anything causes 'base' to be taken in + // + if( libs_ != NULL) + { + // special "*" case (mainly to help with LuaJIT compatibility) + // as we are called from luaopen_lanes_core() already, and that would deadlock + if( libs_[0] == '*' && libs_[1] == 0) + { + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "opening ALL standard libraries\n" INDENT_END)); + luaL_openlibs( L); + // don't forget lanes.core for regular lane states + open1lib( DEBUGSPEW_PARAM_COMMA( U) L, "lanes.core", 10); + libs_ = NULL; // done with libs + } + else + { + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "opening base library\n" INDENT_END)); +#if LUA_VERSION_NUM >= 502 + // open base library the same way as in luaL_openlibs() + luaL_requiref( L, "_G", luaopen_base, 1); + lua_pop( L, 1); +#else // LUA_VERSION_NUM + lua_pushcfunction( L, luaopen_base); + lua_pushstring( L, ""); + lua_call( L, 1, 0); +#endif // LUA_VERSION_NUM + } + } + STACK_END( L, 0); + + // scan all libraries, open them one by one + if( libs_) + { + char const* p; + unsigned int len = 0; + for( p = libs_; *p; p += len) + { + // skip delimiters ('.' can be part of name for "lanes.core") + while( *p && !isalnum( *p) && *p != '.') + ++ p; + // skip name + len = 0; + while( isalnum( p[len]) || p[len] == '.') + ++ len; + // open library + open1lib( DEBUGSPEW_PARAM_COMMA( U) L, p, len); + } + } + lua_gc( L, LUA_GCRESTART, 0); + + serialize_require( DEBUGSPEW_PARAM_COMMA( U) L); + + // call this after the base libraries are loaded and GC is restarted + // will raise an error in from_ in case of problem + call_on_state_create( U, L, from_, eLM_LaneBody); + + STACK_CHECK( L, 0); + // after all this, register everything we find in our name<->function database + lua_pushglobaltable( L); // Lua 5.2 no longer has LUA_GLOBALSINDEX: we must push globals table on the stack + populate_func_lookup_table( L, -1, NULL); + +#if 0 && USE_DEBUG_SPEW() + // dump the lookup database contents + lua_getfield( L, LUA_REGISTRYINDEX, LOOKUP_REGKEY); // {} + lua_pushnil( L); // {} nil + while( lua_next( L, -2)) // {} k v + { + lua_getglobal( L, "print"); // {} k v print + lua_pushlstring( L, debugspew_indent, U->debugspew_indent_depth); // {} k v print " " + lua_pushvalue( L, -4); // {} k v print " " k + lua_pushvalue( L, -4); // {} k v print " " k v + lua_call( L, 3, 0); // {} k v + lua_pop( L, 1); // {} k + } + lua_pop( L, 1); // {} +#endif // USE_DEBUG_SPEW() + + lua_pop( L, 1); + STACK_END( L, 0); + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + return L; +} diff --git a/src/threading.c b/src/threading.c deleted file mode 100644 index 2464d03..0000000 --- a/src/threading.c +++ /dev/null @@ -1,1041 +0,0 @@ -/* - * THREADING.C Copyright (c) 2007-08, Asko Kauppi - * Copyright (C) 2009-19, Benoit Germain - * - * Lua Lanes OS threading specific code. 
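initialize_on_state_create() and call_on_state_create() above accept either a Lua function left in the settings table or a plain C function without upvalues; the C variant is recreated directly in every new state with lua_pushcfunction and invoked with lua_pcall( L, 0, 0, 0). A hedged example of such a callback (the global name lane_banner is arbitrary; any cheap per-state setup that needs no upvalues fits here):

    // Illustrative on_state_create callback: a C function without upvalues,
    // called once in each newly created state (see call_on_state_create above).
    static int my_on_state_create( lua_State* L)
    {
        lua_pushliteral( L, "initialized by on_state_create");
        lua_setglobal( L, "lane_banner"); // arbitrary example name
        return 0;
    }

Passed in through the on_state_create setting, this is what initialize_on_state_create() captures into U->on_state_create_func.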
- * - * References: - * -*/ - -/* -=============================================================================== - -Copyright (C) 2007-10 Asko Kauppi -Copyright (C) 2009-14, Benoit Germain - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - -=============================================================================== -*/ -#if defined(__linux__) - -# ifndef _GNU_SOURCE // definition by the makefile can cause a redefinition error -# define _GNU_SOURCE // must be defined before any include -# endif // _GNU_SOURCE - -# ifdef __ANDROID__ -# include -# define LOG_TAG "LuaLanes" -# endif // __ANDROID__ - -#endif // __linux__ - -#include -#include -#include -#include -#include - -#include "threading.h" - -#if !defined( PLATFORM_XBOX) && !defined( PLATFORM_WIN32) && !defined( PLATFORM_POCKETPC) -# include -#endif // non-WIN32 timing - - -#if defined(PLATFORM_LINUX) || defined(PLATFORM_CYGWIN) -# include -# include -#endif - -/* Linux needs to check, whether it's been run as root -*/ -#ifdef PLATFORM_LINUX - volatile bool_t sudo; -#endif - -#ifdef PLATFORM_OSX -# include "threading_osx.h" -#endif - -/* Linux with older glibc (such as Debian) don't have pthread_setname_np, but have prctl -*/ -#if defined PLATFORM_LINUX -#if defined __GNU_LIBRARY__ && __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 12 -#define LINUX_USE_PTHREAD_SETNAME_NP 1 -#else // glibc without pthread_setname_np -#include -#define LINUX_USE_PTHREAD_SETNAME_NP 0 -#endif // glibc without pthread_setname_np -#endif // PLATFORM_LINUX - -#ifdef _MSC_VER -// ".. selected for automatic inline expansion" (/O2 option) -# pragma warning( disable : 4711 ) -// ".. type cast from function pointer ... to data pointer" -# pragma warning( disable : 4054 ) -#endif - -//#define THREAD_CREATE_RETRIES_MAX 20 - // loops (maybe retry forever?) - -/* -* FAIL is for unexpected API return values - essentially programming -* error in _this_ code. -*/ -#if defined( PLATFORM_XBOX) || defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC) -static void FAIL( char const* funcname, int rc) -{ -#if defined( PLATFORM_XBOX) - fprintf( stderr, "%s() failed! (%d)\n", funcname, rc ); -#else // PLATFORM_XBOX - char buf[256]; - FormatMessageA( FORMAT_MESSAGE_FROM_SYSTEM, NULL, rc, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), buf, 256, NULL); - fprintf( stderr, "%s() failed! [GetLastError() -> %d] '%s'", funcname, rc, buf); -#endif // PLATFORM_XBOX -#ifdef _MSC_VER - __debugbreak(); // give a chance to the debugger! 
-#endif // _MSC_VER - abort(); -} -#endif // win32 build - - -/* -* Returns millisecond timing (in seconds) for the current time. -* -* Note: This function should be called once in single-threaded mode in Win32, -* to get it initialized. -*/ -time_d now_secs(void) { - -#if defined( PLATFORM_XBOX) || defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC) - /* - * Windows FILETIME values are "100-nanosecond intervals since - * January 1, 1601 (UTC)" (MSDN). Well, we'd want Unix Epoch as - * the offset and it seems, so would they: - * - * - */ - SYSTEMTIME st; - FILETIME ft; - ULARGE_INTEGER uli; - static ULARGE_INTEGER uli_epoch; // Jan 1st 1970 0:0:0 - - if (uli_epoch.HighPart==0) { - st.wYear= 1970; - st.wMonth= 1; // Jan - st.wDay= 1; - st.wHour= st.wMinute= st.wSecond= st.wMilliseconds= 0; - - if (!SystemTimeToFileTime( &st, &ft )) - FAIL( "SystemTimeToFileTime", GetLastError() ); - - uli_epoch.LowPart= ft.dwLowDateTime; - uli_epoch.HighPart= ft.dwHighDateTime; - } - - GetSystemTime( &st ); // current system date/time in UTC - if (!SystemTimeToFileTime( &st, &ft )) - FAIL( "SystemTimeToFileTime", GetLastError() ); - - uli.LowPart= ft.dwLowDateTime; - uli.HighPart= ft.dwHighDateTime; - - /* 'double' has less accuracy than 64-bit int, but if it were to degrade, - * it would do so gracefully. In practice, the integer accuracy is not - * of the 100ns class but just 1ms (Windows XP). - */ -# if 1 - // >= 2.0.3 code - return (double) ((uli.QuadPart - uli_epoch.QuadPart)/10000) / 1000.0; -# elif 0 - // fix from Kriss Daniels, see: - // - // - // "seem to be getting negative numbers from the old version, probably number - // conversion clipping, this fixes it and maintains ms resolution" - // - // This was a bad fix, and caused timer test 5 sec timers to disappear. - // --AKa 25-Jan-2009 - // - return ((double)((signed)((uli.QuadPart/10000) - (uli_epoch.QuadPart/10000)))) / 1000.0; -# else - // <= 2.0.2 code - return (double)(uli.QuadPart - uli_epoch.QuadPart) / 10000000.0; -# endif -#else // !(defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC)) - struct timeval tv; - // { - // time_t tv_sec; /* seconds since Jan. 1, 1970 */ - // suseconds_t tv_usec; /* and microseconds */ - // }; - - int rc= gettimeofday( &tv, NULL /*time zone not used any more (in Linux)*/ ); - assert( rc==0 ); - - return ((double)tv.tv_sec) + ((tv.tv_usec)/1000) / 1000.0; -#endif // !(defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC)) -} - - -/* -*/ -time_d SIGNAL_TIMEOUT_PREPARE( double secs ) { - if (secs<=0.0) return secs; - else return now_secs() + secs; -} - - -#if THREADAPI == THREADAPI_PTHREAD -/* -* Prepare 'abs_secs' kind of timeout to 'timespec' format -*/ -static void prepare_timeout( struct timespec *ts, time_d abs_secs ) { - assert(ts); - assert( abs_secs >= 0.0 ); - - if (abs_secs==0.0) - abs_secs= now_secs(); - - ts->tv_sec= (time_t) floor( abs_secs ); - ts->tv_nsec= ((long)((abs_secs - ts->tv_sec) * 1000.0 +0.5)) * 1000000UL; // 1ms = 1000000ns - if (ts->tv_nsec == 1000000000UL) - { - ts->tv_nsec = 0; - ts->tv_sec = ts->tv_sec + 1; - } -} -#endif // THREADAPI == THREADAPI_PTHREAD - - -/*---=== Threading ===---*/ - -//--- -// It may be meaningful to explicitly limit the new threads' C stack size. -// We should know how much Lua needs in the C stack, all Lua side allocations -// are done in heap so they don't count. -// -// Consequence of _not_ limiting the stack is running out of virtual memory -// with 1000-5000 threads on 32-bit systems. 
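To put a number on the 32-bit concern above: with the 2 MB default stack reported below for Linux x86 (Ubuntu 7.04), 1500 threads already reserve roughly 1500 × 2 MB ≈ 3 GB of address space, which is essentially the whole usable address space of a 32-bit process before any heap allocation.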
-// -// Note: using external C modules may be affected by the stack size check. -// if having problems, set back to '0' (default stack size of the system). -// -// Win32: 64K (?) -// Win64: xxx -// -// Linux x86: 2MB Ubuntu 7.04 via 'pthread_getstacksize()' -// Linux x64: xxx -// Linux ARM: xxx -// -// OS X 10.4.9: 512K -// valid values N * 4KB -// -#ifndef _THREAD_STACK_SIZE -# if defined( PLATFORM_XBOX) || defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC) || defined( PLATFORM_CYGWIN) -# define _THREAD_STACK_SIZE 0 - // Win32: does it work with less? -# elif (defined PLATFORM_OSX) -# define _THREAD_STACK_SIZE (524288/2) // 262144 - // OS X: "make test" works on 65536 and even below - // "make perftest" works on >= 4*65536 == 262144 (not 3*65536) -# elif (defined PLATFORM_LINUX) && (defined __i386) -# define _THREAD_STACK_SIZE (2097152/16) // 131072 - // Linux x86 (Ubuntu 7.04): "make perftest" works on /16 (not on /32) -# elif (defined PLATFORM_BSD) && (defined __i386) -# define _THREAD_STACK_SIZE (1048576/8) // 131072 - // FreeBSD 6.2 SMP i386: ("gmake perftest" works on /8 (not on /16) -# endif -#endif - -#if THREADAPI == THREADAPI_WINDOWS - -#if _WIN32_WINNT < 0x0600 // CONDITION_VARIABLE aren't available - // - void MUTEX_INIT( MUTEX_T *ref ) { - *ref= CreateMutex( NULL /*security attr*/, FALSE /*not locked*/, NULL ); - if (!ref) FAIL( "CreateMutex", GetLastError() ); - } - void MUTEX_FREE( MUTEX_T *ref ) { - if (!CloseHandle(*ref)) FAIL( "CloseHandle (mutex)", GetLastError() ); - *ref= NULL; - } - void MUTEX_LOCK( MUTEX_T *ref ) - { - DWORD rc = WaitForSingleObject( *ref, INFINITE); - // ERROR_WAIT_NO_CHILDREN means a thread was killed (lane terminated because of error raised during a linda transfer for example) while having grabbed this mutex - // this is not a big problem as we will grab it just the same, so ignore this particular error - if( rc != 0 && rc != ERROR_WAIT_NO_CHILDREN) - FAIL( "WaitForSingleObject", (rc == WAIT_FAILED) ? GetLastError() : rc); - } - void MUTEX_UNLOCK( MUTEX_T *ref ) { - if (!ReleaseMutex(*ref)) - FAIL( "ReleaseMutex", GetLastError() ); - } -#endif // CONDITION_VARIABLE aren't available - -static int const gs_prio_remap[] = -{ - THREAD_PRIORITY_IDLE, - THREAD_PRIORITY_LOWEST, - THREAD_PRIORITY_BELOW_NORMAL, - THREAD_PRIORITY_NORMAL, - THREAD_PRIORITY_ABOVE_NORMAL, - THREAD_PRIORITY_HIGHEST, - THREAD_PRIORITY_TIME_CRITICAL -}; - -/* MSDN: "If you would like to use the CRT in ThreadProc, use the -_beginthreadex function instead (of CreateThread)." 
-MSDN: "you can create at most 2028 threads" -*/ -// Note: Visual C++ requires '__stdcall' where it is -void THREAD_CREATE( THREAD_T* ref, THREAD_RETURN_T (__stdcall *func)( void*), void* data, int prio /* -3..+3 */) -{ - HANDLE h = (HANDLE) _beginthreadex( NULL, // security - _THREAD_STACK_SIZE, - func, - data, - 0, // flags (0/CREATE_SUSPENDED) - NULL // thread id (not used) - ); - - if( h == NULL) // _beginthreadex returns 0L on failure instead of -1L (like _beginthread) - { - FAIL( "CreateThread", GetLastError()); - } - - if (prio != THREAD_PRIO_DEFAULT) - { - if (!SetThreadPriority( h, gs_prio_remap[prio + 3])) - { - FAIL( "SetThreadPriority", GetLastError()); - } - } - - *ref = h; -} - - -void THREAD_SET_PRIORITY( int prio) -{ - // prio range [-3,+3] was checked by the caller - if (!SetThreadPriority( GetCurrentThread(), gs_prio_remap[prio + 3])) - { - FAIL( "THREAD_SET_PRIORITY", GetLastError()); - } -} - -void THREAD_SET_AFFINITY( unsigned int aff) -{ - if( !SetThreadAffinityMask( GetCurrentThread(), aff)) - { - FAIL( "THREAD_SET_AFFINITY", GetLastError()); - } -} - -bool_t THREAD_WAIT_IMPL( THREAD_T *ref, double secs) -{ - DWORD ms = (secs<0.0) ? INFINITE : (DWORD)((secs*1000.0)+0.5); - - DWORD rc= WaitForSingleObject( *ref, ms /*timeout*/ ); - // - // (WAIT_ABANDONED) - // WAIT_OBJECT_0 success (0) - // WAIT_TIMEOUT - // WAIT_FAILED more info via GetLastError() - - if (rc == WAIT_TIMEOUT) return FALSE; - if( rc !=0) FAIL( "WaitForSingleObject", rc==WAIT_FAILED ? GetLastError() : rc); - *ref= NULL; // thread no longer usable - return TRUE; - } - // - void THREAD_KILL( THREAD_T *ref ) - { - // nonexistent on Xbox360, simply disable until a better solution is found - #if !defined( PLATFORM_XBOX) - // in theory no-one should call this as it is very dangerous (memory and mutex leaks, no notification of DLLs, etc.) - if (!TerminateThread( *ref, 0 )) FAIL("TerminateThread", GetLastError()); - #endif // PLATFORM_XBOX - *ref= NULL; - } - - void THREAD_MAKE_ASYNCH_CANCELLABLE() {} // nothing to do for windows threads, we can cancel them anytime we want - -#if !defined __GNUC__ - //see http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx - #define MS_VC_EXCEPTION 0x406D1388 - #pragma pack(push,8) - typedef struct tagTHREADNAME_INFO - { - DWORD dwType; // Must be 0x1000. - LPCSTR szName; // Pointer to name (in user addr space). - DWORD dwThreadID; // Thread ID (-1=caller thread). - DWORD dwFlags; // Reserved for future use, must be zero. 
- } THREADNAME_INFO; - #pragma pack(pop) -#endif // !__GNUC__ - - void THREAD_SETNAME( char const* _name) - { -#if !defined __GNUC__ - THREADNAME_INFO info; - info.dwType = 0x1000; - info.szName = _name; - info.dwThreadID = GetCurrentThreadId(); - info.dwFlags = 0; - - __try - { - RaiseException( MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(ULONG_PTR), (ULONG_PTR*)&info ); - } - __except(EXCEPTION_EXECUTE_HANDLER) - { - } -#endif // !__GNUC__ - } - -#if _WIN32_WINNT < 0x0600 // CONDITION_VARIABLE aren't available - - void SIGNAL_INIT( SIGNAL_T* ref) - { - InitializeCriticalSection( &ref->signalCS); - InitializeCriticalSection( &ref->countCS); - if( 0 == (ref->waitEvent = CreateEvent( 0, TRUE, FALSE, 0))) // manual-reset - FAIL( "CreateEvent", GetLastError()); - if( 0 == (ref->waitDoneEvent = CreateEvent( 0, FALSE, FALSE, 0))) // auto-reset - FAIL( "CreateEvent", GetLastError()); - ref->waitersCount = 0; - } - - void SIGNAL_FREE( SIGNAL_T* ref) - { - CloseHandle( ref->waitDoneEvent); - CloseHandle( ref->waitEvent); - DeleteCriticalSection( &ref->countCS); - DeleteCriticalSection( &ref->signalCS); - } - - bool_t SIGNAL_WAIT( SIGNAL_T* ref, MUTEX_T* mu_ref, time_d abs_secs) - { - DWORD errc; - DWORD ms; - - if( abs_secs < 0.0) - ms = INFINITE; - else if( abs_secs == 0.0) - ms = 0; - else - { - time_d msd = (abs_secs - now_secs()) * 1000.0 + 0.5; - // If the time already passed, still try once (ms==0). A short timeout - // may have turned negative or 0 because of the two time samples done. - ms = msd <= 0.0 ? 0 : (DWORD)msd; - } - - EnterCriticalSection( &ref->signalCS); - EnterCriticalSection( &ref->countCS); - ++ ref->waitersCount; - LeaveCriticalSection( &ref->countCS); - LeaveCriticalSection( &ref->signalCS); - - errc = SignalObjectAndWait( *mu_ref, ref->waitEvent, ms, FALSE); - - EnterCriticalSection( &ref->countCS); - if( 0 == -- ref->waitersCount) - { - // we're the last one leaving... - ResetEvent( ref->waitEvent); - SetEvent( ref->waitDoneEvent); - } - LeaveCriticalSection( &ref->countCS); - MUTEX_LOCK( mu_ref); - - switch( errc) - { - case WAIT_TIMEOUT: - return FALSE; - case WAIT_OBJECT_0: - return TRUE; - } - - FAIL( "SignalObjectAndWait", GetLastError()); - return FALSE; - } - - void SIGNAL_ALL( SIGNAL_T* ref) - { - DWORD errc = WAIT_OBJECT_0; - - EnterCriticalSection( &ref->signalCS); - EnterCriticalSection( &ref->countCS); - - if( ref->waitersCount > 0) - { - ResetEvent( ref->waitDoneEvent); - SetEvent( ref->waitEvent); - LeaveCriticalSection( &ref->countCS); - errc = WaitForSingleObject( ref->waitDoneEvent, INFINITE); - } - else - { - LeaveCriticalSection( &ref->countCS); - } - - LeaveCriticalSection( &ref->signalCS); - - if( WAIT_OBJECT_0 != errc) - FAIL( "WaitForSingleObject", GetLastError()); - } - -#else // CONDITION_VARIABLE are available, use them - - // - void SIGNAL_INIT( SIGNAL_T *ref ) - { - InitializeConditionVariable( ref); - } - - void SIGNAL_FREE( SIGNAL_T *ref ) - { - // nothing to do - (void)ref; - } - - bool_t SIGNAL_WAIT( SIGNAL_T *ref, MUTEX_T *mu_ref, time_d abs_secs) - { - long ms; - - if( abs_secs < 0.0) - ms = INFINITE; - else if( abs_secs == 0.0) - ms = 0; - else - { - ms = (long) ((abs_secs - now_secs())*1000.0 + 0.5); - - // If the time already passed, still try once (ms==0). A short timeout - // may have turned negative or 0 because of the two time samples done. 
- // - if( ms < 0) - ms = 0; - } - - if( !SleepConditionVariableCS( ref, mu_ref, ms)) - { - if( GetLastError() == ERROR_TIMEOUT) - { - return FALSE; - } - else - { - FAIL( "SleepConditionVariableCS", GetLastError()); - } - } - return TRUE; - } - - void SIGNAL_ONE( SIGNAL_T *ref ) - { - WakeConditionVariable( ref); - } - - void SIGNAL_ALL( SIGNAL_T *ref ) - { - WakeAllConditionVariable( ref); - } - -#endif // CONDITION_VARIABLE are available - -#else // THREADAPI == THREADAPI_PTHREAD - // PThread (Linux, OS X, ...) - // - // On OS X, user processes seem to be able to change priorities. - // On Linux, SCHED_RR and su privileges are required.. !-( - // - #include - #include - -# if (defined(__MINGW32__) || defined(__MINGW64__)) && defined pthread_attr_setschedpolicy -# if pthread_attr_setschedpolicy( A, S) == ENOTSUP - // from the mingw-w64 team: - // Well, we support pthread_setschedparam by which you can specify - // threading-policy. Nevertheless, yes we lack this function. In - // general its implementation is pretty much trivial, as on Win32 target - // just SCHED_OTHER can be supported. - #undef pthread_attr_setschedpolicy - static int pthread_attr_setschedpolicy( pthread_attr_t* attr, int policy) - { - if( policy != SCHED_OTHER) - { - return ENOTSUP; - } - return 0; - } -# endif // pthread_attr_setschedpolicy() -# endif // defined(__MINGW32__) || defined(__MINGW64__) - - static void _PT_FAIL( int rc, const char *name, const char *file, uint_t line ) { - const char *why= (rc==EINVAL) ? "EINVAL" : - (rc==EBUSY) ? "EBUSY" : - (rc==EPERM) ? "EPERM" : - (rc==ENOMEM) ? "ENOMEM" : - (rc==ESRCH) ? "ESRCH" : - (rc==ENOTSUP) ? "ENOTSUP": - //... - ""; - fprintf( stderr, "%s %d: %s failed, %d %s\n", file, line, name, rc, why ); - abort(); - } - #define PT_CALL( call ) { int rc= call; if (rc!=0) _PT_FAIL( rc, #call, __FILE__, __LINE__ ); } - // - void SIGNAL_INIT( SIGNAL_T *ref ) { - PT_CALL( pthread_cond_init(ref,NULL /*attr*/) ); - } - void SIGNAL_FREE( SIGNAL_T *ref ) { - PT_CALL( pthread_cond_destroy(ref) ); - } - // - /* - * Timeout is given as absolute since we may have fake wakeups during - * a timed out sleep. A Linda with some other key read, or just because - * PThread cond vars can wake up unwantedly. - */ - bool_t SIGNAL_WAIT( SIGNAL_T *ref, pthread_mutex_t *mu, time_d abs_secs ) { - if (abs_secs<0.0) { - PT_CALL( pthread_cond_wait( ref, mu ) ); // infinite - } else { - int rc; - struct timespec ts; - - assert( abs_secs != 0.0 ); - prepare_timeout( &ts, abs_secs ); - - rc= pthread_cond_timedwait( ref, mu, &ts ); - - if (rc==ETIMEDOUT) return FALSE; - if (rc) { _PT_FAIL( rc, "pthread_cond_timedwait()", __FILE__, __LINE__ ); } - } - return TRUE; - } - // - void SIGNAL_ONE( SIGNAL_T *ref ) { - PT_CALL( pthread_cond_signal(ref) ); // wake up ONE (or no) waiting thread - } - // - void SIGNAL_ALL( SIGNAL_T *ref ) { - PT_CALL( pthread_cond_broadcast(ref) ); // wake up ALL waiting threads - } - -// array of 7 thread priority values, hand-tuned by platform so that we offer a uniform [-3,+3] public priority range -static int const gs_prio_remap[] = -{ - // NB: PThreads priority handling is about as twisty as one can get it - // (and then some). DON*T TRUST ANYTHING YOU READ ON THE NET!!! - - //--- - // "Select the scheduling policy for the thread: one of SCHED_OTHER - // (regular, non-real-time scheduling), SCHED_RR (real-time, - // round-robin) or SCHED_FIFO (real-time, first-in first-out)." 
- // - // "Using the RR policy ensures that all threads having the same - // priority level will be scheduled equally, regardless of their activity." - // - // "For SCHED_FIFO and SCHED_RR, the only required member of the - // sched_param structure is the priority sched_priority. For SCHED_OTHER, - // the affected scheduling parameters are implementation-defined." - // - // "The priority of a thread is specified as a delta which is added to - // the priority of the process." - // - // ".. priority is an integer value, in the range from 1 to 127. - // 1 is the least-favored priority, 127 is the most-favored." - // - // "Priority level 0 cannot be used: it is reserved for the system." - // - // "When you use specify a priority of -99 in a call to - // pthread_setschedparam(), the priority of the target thread is - // lowered to the lowest possible value." - // - // ... - - // ** CONCLUSION ** - // - // PThread priorities are _hugely_ system specific, and we need at - // least OS specific settings. Hopefully, Linuxes and OS X versions - // are uniform enough, among each other... - // -# if defined PLATFORM_OSX - // AK 10-Apr-07 (OS X PowerPC 10.4.9): - // - // With SCHED_RR, 26 seems to be the "normal" priority, where setting - // it does not seem to affect the order of threads processed. - // - // With SCHED_OTHER, the range 25..32 is normal (maybe the same 26, - // but the difference is not so clear with OTHER). - // - // 'sched_get_priority_min()' and '..max()' give 15, 47 as the - // priority limits. This could imply, user mode applications won't - // be able to use values outside of that range. - // -# define _PRIO_MODE SCHED_OTHER - - // OS X 10.4.9 (PowerPC) gives ENOTSUP for process scope - //#define _PRIO_SCOPE PTHREAD_SCOPE_PROCESS - -# define _PRIO_HI 32 // seems to work (_carefully_ picked!) -# define _PRIO_0 26 // detected -# define _PRIO_LO 1 // seems to work (tested) - -# elif defined PLATFORM_LINUX - // (based on Ubuntu Linux 2.6.15 kernel) - // - // SCHED_OTHER is the default policy, but does not allow for priorities. - // SCHED_RR allows priorities, all of which (1..99) are higher than - // a thread with SCHED_OTHER policy. - // - // - // - // - // - // Manuals suggest checking #ifdef _POSIX_THREAD_PRIORITY_SCHEDULING, - // but even Ubuntu does not seem to define it. - // -# define _PRIO_MODE SCHED_RR - - // NTLP 2.5: only system scope allowed (being the basic reason why - // root privileges are required..) - //#define _PRIO_SCOPE PTHREAD_SCOPE_PROCESS - -# define _PRIO_HI 99 -# define _PRIO_0 50 -# define _PRIO_LO 1 - -# elif defined(PLATFORM_BSD) - // - // - // - // "When control over the thread scheduling is desired, then FreeBSD - // with the libpthread implementation is by far the best choice .." - // -# define _PRIO_MODE SCHED_OTHER -# define _PRIO_SCOPE PTHREAD_SCOPE_PROCESS -# define _PRIO_HI 31 -# define _PRIO_0 15 -# define _PRIO_LO 1 - -# elif defined(PLATFORM_CYGWIN) - // - // TBD: Find right values for Cygwin - // -# elif defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC) - // any other value not supported by win32-pthread as of version 2.9.1 -# define _PRIO_MODE SCHED_OTHER - - // PTHREAD_SCOPE_PROCESS not supported by win32-pthread as of version 2.9.1 - //#define _PRIO_SCOPE PTHREAD_SCOPE_SYSTEM // but do we need this at all to start with? 
- THREAD_PRIORITY_IDLE, THREAD_PRIORITY_LOWEST, THREAD_PRIORITY_BELOW_NORMAL, THREAD_PRIORITY_NORMAL, THREAD_PRIORITY_ABOVE_NORMAL, THREAD_PRIORITY_HIGHEST, THREAD_PRIORITY_TIME_CRITICAL - -# else -# error "Unknown OS: not implemented!" -# endif - -#if defined _PRIO_0 -# define _PRIO_AN (_PRIO_0 + ((_PRIO_HI-_PRIO_0)/2)) -# define _PRIO_BN (_PRIO_LO + ((_PRIO_0-_PRIO_LO)/2)) - - _PRIO_LO, _PRIO_LO, _PRIO_BN, _PRIO_0, _PRIO_AN, _PRIO_HI, _PRIO_HI -#endif // _PRIO_0 -}; - -static int select_prio(int prio /* -3..+3 */) -{ - if (prio == THREAD_PRIO_DEFAULT) - prio = 0; - // prio range [-3,+3] was checked by the caller - return gs_prio_remap[prio + 3]; -} - -void THREAD_CREATE( THREAD_T* ref, THREAD_RETURN_T (*func)( void*), void* data, int prio /* -3..+3 */) -{ - pthread_attr_t a; - bool_t const change_priority = -#ifdef PLATFORM_LINUX - sudo && // only root-privileged process can change priorities -#endif - (prio != THREAD_PRIO_DEFAULT); - - PT_CALL( pthread_attr_init( &a)); - -#ifndef PTHREAD_TIMEDJOIN - // We create a NON-JOINABLE thread. This is mainly due to the lack of - // 'pthread_timedjoin()', but does offer other benefits (s.a. earlier - // freeing of the thread's resources). - // - PT_CALL( pthread_attr_setdetachstate( &a, PTHREAD_CREATE_DETACHED)); -#endif // PTHREAD_TIMEDJOIN - - // Use this to find a system's default stack size (DEBUG) -#if 0 - { - size_t n; - pthread_attr_getstacksize( &a, &n); - fprintf( stderr, "Getstack: %u\n", (unsigned int)n); - } - // 524288 on OS X - // 2097152 on Linux x86 (Ubuntu 7.04) - // 1048576 on FreeBSD 6.2 SMP i386 -#endif // 0 - -#if defined _THREAD_STACK_SIZE && _THREAD_STACK_SIZE > 0 - PT_CALL( pthread_attr_setstacksize( &a, _THREAD_STACK_SIZE)); -#endif - - if (change_priority) - { - struct sched_param sp; - // "The specified scheduling parameters are only used if the scheduling - // parameter inheritance attribute is PTHREAD_EXPLICIT_SCHED." - // -#if !defined __ANDROID__ || ( defined __ANDROID__ && __ANDROID_API__ >= 28 ) - PT_CALL( pthread_attr_setinheritsched( &a, PTHREAD_EXPLICIT_SCHED)); -#endif - -#ifdef _PRIO_SCOPE - PT_CALL( pthread_attr_setscope( &a, _PRIO_SCOPE)); -#endif // _PRIO_SCOPE - - PT_CALL( pthread_attr_setschedpolicy( &a, _PRIO_MODE)); - - sp.sched_priority = select_prio(prio); - PT_CALL( pthread_attr_setschedparam( &a, &sp)); - } - - //--- - // Seems on OS X, _POSIX_THREAD_THREADS_MAX is some kind of system - // thread limit (not userland thread). Actual limit for us is way higher. - // PTHREAD_THREADS_MAX is not defined (even though man page refers to it!) - // -# ifndef THREAD_CREATE_RETRIES_MAX - // Don't bother with retries; a failure is a failure - // - { - int rc = pthread_create( ref, &a, func, data); - if( rc) _PT_FAIL( rc, "pthread_create()", __FILE__, __LINE__ - 1); - } -# else -# error "This code deprecated" - /* - // Wait slightly if thread creation has exchausted the system - // - { uint_t retries; - for( retries=0; retries>= 1; - } -#ifdef __ANDROID__ - PT_CALL( sched_setaffinity( pthread_self(), sizeof(cpu_set_t), &cpuset)); -#elif defined(__NetBSD__) - PT_CALL( pthread_setaffinity_np( pthread_self(), cpuset_size(cpuset), cpuset)); - cpuset_destroy( cpuset); -#else - PT_CALL( pthread_setaffinity_np( pthread_self(), sizeof(cpu_set_t), &cpuset)); -#endif -} - - /* - * Wait for a thread to finish. - * - * 'mu_ref' is a lock we should use for the waiting; initially unlocked. - * Same lock as passed to THREAD_EXIT. 
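The THREAD_SET_AFFINITY body above walks the bits of the public mask before handing the set to the platform call; since that loop is hard to read in patch form, here is an illustrative reconstruction of the intent under the glibc cpu_set_t API (not a verbatim excerpt of the patch):

// Sketch: translate a plain bitmask (bit 0 = CPU 0, bit 1 = CPU 1, ...) into a
// cpu_set_t and pin the calling thread, mirroring what THREAD_SET_AFFINITY does
// on the non-Android, non-NetBSD code path.
void set_affinity_from_mask( unsigned int aff)
{
    int cpu = 0;
    cpu_set_t cpuset;
    CPU_ZERO( &cpuset);
    while( aff != 0)
    {
        if( aff & 1)
        {
            CPU_SET( cpu, &cpuset);
        }
        ++ cpu;
        aff >>= 1;
    }
    PT_CALL( pthread_setaffinity_np( pthread_self(), sizeof(cpu_set_t), &cpuset));
}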
- * - * Returns TRUE for successful wait, FALSE for timed out - */ -bool_t THREAD_WAIT( THREAD_T *ref, double secs , SIGNAL_T *signal_ref, MUTEX_T *mu_ref, volatile enum e_status *st_ref) -{ - struct timespec ts_store; - const struct timespec *timeout= NULL; - bool_t done; - - // Do timeout counting before the locks - // -#if THREADWAIT_METHOD == THREADWAIT_TIMEOUT - if (secs>=0.0) -#else // THREADWAIT_METHOD == THREADWAIT_CONDVAR - if (secs>0.0) -#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR - { - prepare_timeout( &ts_store, now_secs()+secs ); - timeout= &ts_store; - } - -#if THREADWAIT_METHOD == THREADWAIT_TIMEOUT - /* Thread is joinable - */ - if (!timeout) { - PT_CALL( pthread_join( *ref, NULL /*ignore exit value*/ )); - done= TRUE; - } else { - int rc= PTHREAD_TIMEDJOIN( *ref, NULL, timeout ); - if ((rc!=0) && (rc!=ETIMEDOUT)) { - _PT_FAIL( rc, "PTHREAD_TIMEDJOIN", __FILE__, __LINE__-2 ); - } - done= rc==0; - } -#else // THREADWAIT_METHOD == THREADWAIT_CONDVAR - /* Since we've set the thread up as PTHREAD_CREATE_DETACHED, we cannot - * join with it. Use the cond.var. - */ - (void) ref; // unused - MUTEX_LOCK( mu_ref ); - - // 'secs'==0.0 does not need to wait, just take the current status - // within the 'mu_ref' locks - // - if (secs != 0.0) { - while( *st_ref < DONE ) { - if (!timeout) { - PT_CALL( pthread_cond_wait( signal_ref, mu_ref )); - } else { - int rc= pthread_cond_timedwait( signal_ref, mu_ref, timeout ); - if (rc==ETIMEDOUT) break; - if (rc!=0) _PT_FAIL( rc, "pthread_cond_timedwait", __FILE__, __LINE__-2 ); - } - } - } - done= *st_ref >= DONE; // DONE|ERROR_ST|CANCELLED - - MUTEX_UNLOCK( mu_ref ); -#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR - return done; - } - // - void THREAD_KILL( THREAD_T *ref ) { -#ifdef __ANDROID__ - __android_log_print(ANDROID_LOG_WARN, LOG_TAG, "Cannot kill thread!"); -#else - pthread_cancel( *ref ); -#endif - } - - void THREAD_MAKE_ASYNCH_CANCELLABLE() - { -#ifdef __ANDROID__ - __android_log_print(ANDROID_LOG_WARN, LOG_TAG, "Cannot make thread async cancellable!"); -#else - // that's the default, but just in case... - pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL); - // we want cancellation to take effect immediately if possible, instead of waiting for a cancellation point (which is the default) - pthread_setcanceltype( PTHREAD_CANCEL_ASYNCHRONOUS, NULL); -#endif - } - - void THREAD_SETNAME( char const* _name) - { - // exact API to set the thread name is platform-dependant - // if you need to fix the build, or if you know how to fill a hole, tell me (bnt.germain@gmail.com) so that I can submit the fix in github. 
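THREAD_WAIT above returns TRUE once the lane reached a terminal status (DONE, ERROR_ST or CANCELLED) and FALSE on timeout, with a negative 'secs' meaning wait forever. A hedged caller-side sketch; the Lane field names (thread, done_signal, done_lock, status) follow the way the rest of the Lanes sources call this function, but the join_or_kill helper itself is invented for illustration:

// Sketch: give a lane a couple of seconds to finish on its own, then fall back
// to THREAD_KILL as a last resort (with all the caveats the comments above list).
void join_or_kill( Lane* s)
{
    if( !THREAD_WAIT( &s->thread, 2.0, &s->done_signal, &s->done_lock, &s->status))
    {
        THREAD_KILL( &s->thread); // timed out: forceful termination
    }
}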
-#if defined PLATFORM_BSD && !defined __NetBSD__ - pthread_set_name_np( pthread_self(), _name); -#elif defined PLATFORM_BSD && defined __NetBSD__ - pthread_setname_np( pthread_self(), "%s", (void *)_name); -#elif defined PLATFORM_LINUX - #if LINUX_USE_PTHREAD_SETNAME_NP - pthread_setname_np( pthread_self(), _name); - #else // LINUX_USE_PTHREAD_SETNAME_NP - prctl(PR_SET_NAME, _name, 0, 0, 0); - #endif // LINUX_USE_PTHREAD_SETNAME_NP -#elif defined PLATFORM_QNX || defined PLATFORM_CYGWIN - pthread_setname_np( pthread_self(), _name); -#elif defined PLATFORM_OSX - pthread_setname_np(_name); -#elif defined PLATFORM_WIN32 || defined PLATFORM_POCKETPC - PT_CALL( pthread_setname_np( pthread_self(), _name)); -#endif - } -#endif // THREADAPI == THREADAPI_PTHREAD diff --git a/src/threading.cpp b/src/threading.cpp new file mode 100644 index 0000000..2464d03 --- /dev/null +++ b/src/threading.cpp @@ -0,0 +1,1041 @@ +/* + * THREADING.C Copyright (c) 2007-08, Asko Kauppi + * Copyright (C) 2009-19, Benoit Germain + * + * Lua Lanes OS threading specific code. + * + * References: + * +*/ + +/* +=============================================================================== + +Copyright (C) 2007-10 Asko Kauppi +Copyright (C) 2009-14, Benoit Germain + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ +=============================================================================== +*/ +#if defined(__linux__) + +# ifndef _GNU_SOURCE // definition by the makefile can cause a redefinition error +# define _GNU_SOURCE // must be defined before any include +# endif // _GNU_SOURCE + +# ifdef __ANDROID__ +# include +# define LOG_TAG "LuaLanes" +# endif // __ANDROID__ + +#endif // __linux__ + +#include +#include +#include +#include +#include + +#include "threading.h" + +#if !defined( PLATFORM_XBOX) && !defined( PLATFORM_WIN32) && !defined( PLATFORM_POCKETPC) +# include +#endif // non-WIN32 timing + + +#if defined(PLATFORM_LINUX) || defined(PLATFORM_CYGWIN) +# include +# include +#endif + +/* Linux needs to check, whether it's been run as root +*/ +#ifdef PLATFORM_LINUX + volatile bool_t sudo; +#endif + +#ifdef PLATFORM_OSX +# include "threading_osx.h" +#endif + +/* Linux with older glibc (such as Debian) don't have pthread_setname_np, but have prctl +*/ +#if defined PLATFORM_LINUX +#if defined __GNU_LIBRARY__ && __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 12 +#define LINUX_USE_PTHREAD_SETNAME_NP 1 +#else // glibc without pthread_setname_np +#include +#define LINUX_USE_PTHREAD_SETNAME_NP 0 +#endif // glibc without pthread_setname_np +#endif // PLATFORM_LINUX + +#ifdef _MSC_VER +// ".. selected for automatic inline expansion" (/O2 option) +# pragma warning( disable : 4711 ) +// ".. type cast from function pointer ... to data pointer" +# pragma warning( disable : 4054 ) +#endif + +//#define THREAD_CREATE_RETRIES_MAX 20 + // loops (maybe retry forever?) + +/* +* FAIL is for unexpected API return values - essentially programming +* error in _this_ code. +*/ +#if defined( PLATFORM_XBOX) || defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC) +static void FAIL( char const* funcname, int rc) +{ +#if defined( PLATFORM_XBOX) + fprintf( stderr, "%s() failed! (%d)\n", funcname, rc ); +#else // PLATFORM_XBOX + char buf[256]; + FormatMessageA( FORMAT_MESSAGE_FROM_SYSTEM, NULL, rc, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), buf, 256, NULL); + fprintf( stderr, "%s() failed! [GetLastError() -> %d] '%s'", funcname, rc, buf); +#endif // PLATFORM_XBOX +#ifdef _MSC_VER + __debugbreak(); // give a chance to the debugger! +#endif // _MSC_VER + abort(); +} +#endif // win32 build + + +/* +* Returns millisecond timing (in seconds) for the current time. +* +* Note: This function should be called once in single-threaded mode in Win32, +* to get it initialized. +*/ +time_d now_secs(void) { + +#if defined( PLATFORM_XBOX) || defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC) + /* + * Windows FILETIME values are "100-nanosecond intervals since + * January 1, 1601 (UTC)" (MSDN). Well, we'd want Unix Epoch as + * the offset and it seems, so would they: + * + * + */ + SYSTEMTIME st; + FILETIME ft; + ULARGE_INTEGER uli; + static ULARGE_INTEGER uli_epoch; // Jan 1st 1970 0:0:0 + + if (uli_epoch.HighPart==0) { + st.wYear= 1970; + st.wMonth= 1; // Jan + st.wDay= 1; + st.wHour= st.wMinute= st.wSecond= st.wMilliseconds= 0; + + if (!SystemTimeToFileTime( &st, &ft )) + FAIL( "SystemTimeToFileTime", GetLastError() ); + + uli_epoch.LowPart= ft.dwLowDateTime; + uli_epoch.HighPart= ft.dwHighDateTime; + } + + GetSystemTime( &st ); // current system date/time in UTC + if (!SystemTimeToFileTime( &st, &ft )) + FAIL( "SystemTimeToFileTime", GetLastError() ); + + uli.LowPart= ft.dwLowDateTime; + uli.HighPart= ft.dwHighDateTime; + + /* 'double' has less accuracy than 64-bit int, but if it were to degrade, + * it would do so gracefully. 
In practice, the integer accuracy is not + * of the 100ns class but just 1ms (Windows XP). + */ +# if 1 + // >= 2.0.3 code + return (double) ((uli.QuadPart - uli_epoch.QuadPart)/10000) / 1000.0; +# elif 0 + // fix from Kriss Daniels, see: + // + // + // "seem to be getting negative numbers from the old version, probably number + // conversion clipping, this fixes it and maintains ms resolution" + // + // This was a bad fix, and caused timer test 5 sec timers to disappear. + // --AKa 25-Jan-2009 + // + return ((double)((signed)((uli.QuadPart/10000) - (uli_epoch.QuadPart/10000)))) / 1000.0; +# else + // <= 2.0.2 code + return (double)(uli.QuadPart - uli_epoch.QuadPart) / 10000000.0; +# endif +#else // !(defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC)) + struct timeval tv; + // { + // time_t tv_sec; /* seconds since Jan. 1, 1970 */ + // suseconds_t tv_usec; /* and microseconds */ + // }; + + int rc= gettimeofday( &tv, NULL /*time zone not used any more (in Linux)*/ ); + assert( rc==0 ); + + return ((double)tv.tv_sec) + ((tv.tv_usec)/1000) / 1000.0; +#endif // !(defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC)) +} + + +/* +*/ +time_d SIGNAL_TIMEOUT_PREPARE( double secs ) { + if (secs<=0.0) return secs; + else return now_secs() + secs; +} + + +#if THREADAPI == THREADAPI_PTHREAD +/* +* Prepare 'abs_secs' kind of timeout to 'timespec' format +*/ +static void prepare_timeout( struct timespec *ts, time_d abs_secs ) { + assert(ts); + assert( abs_secs >= 0.0 ); + + if (abs_secs==0.0) + abs_secs= now_secs(); + + ts->tv_sec= (time_t) floor( abs_secs ); + ts->tv_nsec= ((long)((abs_secs - ts->tv_sec) * 1000.0 +0.5)) * 1000000UL; // 1ms = 1000000ns + if (ts->tv_nsec == 1000000000UL) + { + ts->tv_nsec = 0; + ts->tv_sec = ts->tv_sec + 1; + } +} +#endif // THREADAPI == THREADAPI_PTHREAD + + +/*---=== Threading ===---*/ + +//--- +// It may be meaningful to explicitly limit the new threads' C stack size. +// We should know how much Lua needs in the C stack, all Lua side allocations +// are done in heap so they don't count. +// +// Consequence of _not_ limiting the stack is running out of virtual memory +// with 1000-5000 threads on 32-bit systems. +// +// Note: using external C modules may be affected by the stack size check. +// if having problems, set back to '0' (default stack size of the system). +// +// Win32: 64K (?) +// Win64: xxx +// +// Linux x86: 2MB Ubuntu 7.04 via 'pthread_getstacksize()' +// Linux x64: xxx +// Linux ARM: xxx +// +// OS X 10.4.9: 512K +// valid values N * 4KB +// +#ifndef _THREAD_STACK_SIZE +# if defined( PLATFORM_XBOX) || defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC) || defined( PLATFORM_CYGWIN) +# define _THREAD_STACK_SIZE 0 + // Win32: does it work with less? 
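prepare_timeout() above rounds the fractional part of the absolute deadline to the nearest millisecond and carries a whole second if that rounding reaches 10^9 ns. Two worked values (picked arbitrarily) plus the typical call pattern:

// abs_secs = 1000.2504  ->  tv_sec = 1000, tv_nsec = 250000000  (250.4 ms rounds to 250 ms)
// abs_secs = 1000.99999 ->  rounding yields 1000 ms, i.e. tv_nsec would reach
//                           1000000000, so the carry applies: tv_sec = 1001, tv_nsec = 0
struct timespec ts;
prepare_timeout( &ts, now_secs() + 5.0); // absolute deadline five seconds from now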
+# elif (defined PLATFORM_OSX) +# define _THREAD_STACK_SIZE (524288/2) // 262144 + // OS X: "make test" works on 65536 and even below + // "make perftest" works on >= 4*65536 == 262144 (not 3*65536) +# elif (defined PLATFORM_LINUX) && (defined __i386) +# define _THREAD_STACK_SIZE (2097152/16) // 131072 + // Linux x86 (Ubuntu 7.04): "make perftest" works on /16 (not on /32) +# elif (defined PLATFORM_BSD) && (defined __i386) +# define _THREAD_STACK_SIZE (1048576/8) // 131072 + // FreeBSD 6.2 SMP i386: ("gmake perftest" works on /8 (not on /16) +# endif +#endif + +#if THREADAPI == THREADAPI_WINDOWS + +#if _WIN32_WINNT < 0x0600 // CONDITION_VARIABLE aren't available + // + void MUTEX_INIT( MUTEX_T *ref ) { + *ref= CreateMutex( NULL /*security attr*/, FALSE /*not locked*/, NULL ); + if (!ref) FAIL( "CreateMutex", GetLastError() ); + } + void MUTEX_FREE( MUTEX_T *ref ) { + if (!CloseHandle(*ref)) FAIL( "CloseHandle (mutex)", GetLastError() ); + *ref= NULL; + } + void MUTEX_LOCK( MUTEX_T *ref ) + { + DWORD rc = WaitForSingleObject( *ref, INFINITE); + // ERROR_WAIT_NO_CHILDREN means a thread was killed (lane terminated because of error raised during a linda transfer for example) while having grabbed this mutex + // this is not a big problem as we will grab it just the same, so ignore this particular error + if( rc != 0 && rc != ERROR_WAIT_NO_CHILDREN) + FAIL( "WaitForSingleObject", (rc == WAIT_FAILED) ? GetLastError() : rc); + } + void MUTEX_UNLOCK( MUTEX_T *ref ) { + if (!ReleaseMutex(*ref)) + FAIL( "ReleaseMutex", GetLastError() ); + } +#endif // CONDITION_VARIABLE aren't available + +static int const gs_prio_remap[] = +{ + THREAD_PRIORITY_IDLE, + THREAD_PRIORITY_LOWEST, + THREAD_PRIORITY_BELOW_NORMAL, + THREAD_PRIORITY_NORMAL, + THREAD_PRIORITY_ABOVE_NORMAL, + THREAD_PRIORITY_HIGHEST, + THREAD_PRIORITY_TIME_CRITICAL +}; + +/* MSDN: "If you would like to use the CRT in ThreadProc, use the +_beginthreadex function instead (of CreateThread)." +MSDN: "you can create at most 2028 threads" +*/ +// Note: Visual C++ requires '__stdcall' where it is +void THREAD_CREATE( THREAD_T* ref, THREAD_RETURN_T (__stdcall *func)( void*), void* data, int prio /* -3..+3 */) +{ + HANDLE h = (HANDLE) _beginthreadex( NULL, // security + _THREAD_STACK_SIZE, + func, + data, + 0, // flags (0/CREATE_SUSPENDED) + NULL // thread id (not used) + ); + + if( h == NULL) // _beginthreadex returns 0L on failure instead of -1L (like _beginthread) + { + FAIL( "CreateThread", GetLastError()); + } + + if (prio != THREAD_PRIO_DEFAULT) + { + if (!SetThreadPriority( h, gs_prio_remap[prio + 3])) + { + FAIL( "SetThreadPriority", GetLastError()); + } + } + + *ref = h; +} + + +void THREAD_SET_PRIORITY( int prio) +{ + // prio range [-3,+3] was checked by the caller + if (!SetThreadPriority( GetCurrentThread(), gs_prio_remap[prio + 3])) + { + FAIL( "THREAD_SET_PRIORITY", GetLastError()); + } +} + +void THREAD_SET_AFFINITY( unsigned int aff) +{ + if( !SetThreadAffinityMask( GetCurrentThread(), aff)) + { + FAIL( "THREAD_SET_AFFINITY", GetLastError()); + } +} + +bool_t THREAD_WAIT_IMPL( THREAD_T *ref, double secs) +{ + DWORD ms = (secs<0.0) ? INFINITE : (DWORD)((secs*1000.0)+0.5); + + DWORD rc= WaitForSingleObject( *ref, ms /*timeout*/ ); + // + // (WAIT_ABANDONED) + // WAIT_OBJECT_0 success (0) + // WAIT_TIMEOUT + // WAIT_FAILED more info via GetLastError() + + if (rc == WAIT_TIMEOUT) return FALSE; + if( rc !=0) FAIL( "WaitForSingleObject", rc==WAIT_FAILED ? 
GetLastError() : rc); + *ref= NULL; // thread no longer usable + return TRUE; + } + // + void THREAD_KILL( THREAD_T *ref ) + { + // nonexistent on Xbox360, simply disable until a better solution is found + #if !defined( PLATFORM_XBOX) + // in theory no-one should call this as it is very dangerous (memory and mutex leaks, no notification of DLLs, etc.) + if (!TerminateThread( *ref, 0 )) FAIL("TerminateThread", GetLastError()); + #endif // PLATFORM_XBOX + *ref= NULL; + } + + void THREAD_MAKE_ASYNCH_CANCELLABLE() {} // nothing to do for windows threads, we can cancel them anytime we want + +#if !defined __GNUC__ + //see http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx + #define MS_VC_EXCEPTION 0x406D1388 + #pragma pack(push,8) + typedef struct tagTHREADNAME_INFO + { + DWORD dwType; // Must be 0x1000. + LPCSTR szName; // Pointer to name (in user addr space). + DWORD dwThreadID; // Thread ID (-1=caller thread). + DWORD dwFlags; // Reserved for future use, must be zero. + } THREADNAME_INFO; + #pragma pack(pop) +#endif // !__GNUC__ + + void THREAD_SETNAME( char const* _name) + { +#if !defined __GNUC__ + THREADNAME_INFO info; + info.dwType = 0x1000; + info.szName = _name; + info.dwThreadID = GetCurrentThreadId(); + info.dwFlags = 0; + + __try + { + RaiseException( MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(ULONG_PTR), (ULONG_PTR*)&info ); + } + __except(EXCEPTION_EXECUTE_HANDLER) + { + } +#endif // !__GNUC__ + } + +#if _WIN32_WINNT < 0x0600 // CONDITION_VARIABLE aren't available + + void SIGNAL_INIT( SIGNAL_T* ref) + { + InitializeCriticalSection( &ref->signalCS); + InitializeCriticalSection( &ref->countCS); + if( 0 == (ref->waitEvent = CreateEvent( 0, TRUE, FALSE, 0))) // manual-reset + FAIL( "CreateEvent", GetLastError()); + if( 0 == (ref->waitDoneEvent = CreateEvent( 0, FALSE, FALSE, 0))) // auto-reset + FAIL( "CreateEvent", GetLastError()); + ref->waitersCount = 0; + } + + void SIGNAL_FREE( SIGNAL_T* ref) + { + CloseHandle( ref->waitDoneEvent); + CloseHandle( ref->waitEvent); + DeleteCriticalSection( &ref->countCS); + DeleteCriticalSection( &ref->signalCS); + } + + bool_t SIGNAL_WAIT( SIGNAL_T* ref, MUTEX_T* mu_ref, time_d abs_secs) + { + DWORD errc; + DWORD ms; + + if( abs_secs < 0.0) + ms = INFINITE; + else if( abs_secs == 0.0) + ms = 0; + else + { + time_d msd = (abs_secs - now_secs()) * 1000.0 + 0.5; + // If the time already passed, still try once (ms==0). A short timeout + // may have turned negative or 0 because of the two time samples done. + ms = msd <= 0.0 ? 0 : (DWORD)msd; + } + + EnterCriticalSection( &ref->signalCS); + EnterCriticalSection( &ref->countCS); + ++ ref->waitersCount; + LeaveCriticalSection( &ref->countCS); + LeaveCriticalSection( &ref->signalCS); + + errc = SignalObjectAndWait( *mu_ref, ref->waitEvent, ms, FALSE); + + EnterCriticalSection( &ref->countCS); + if( 0 == -- ref->waitersCount) + { + // we're the last one leaving... 
+ ResetEvent( ref->waitEvent); + SetEvent( ref->waitDoneEvent); + } + LeaveCriticalSection( &ref->countCS); + MUTEX_LOCK( mu_ref); + + switch( errc) + { + case WAIT_TIMEOUT: + return FALSE; + case WAIT_OBJECT_0: + return TRUE; + } + + FAIL( "SignalObjectAndWait", GetLastError()); + return FALSE; + } + + void SIGNAL_ALL( SIGNAL_T* ref) + { + DWORD errc = WAIT_OBJECT_0; + + EnterCriticalSection( &ref->signalCS); + EnterCriticalSection( &ref->countCS); + + if( ref->waitersCount > 0) + { + ResetEvent( ref->waitDoneEvent); + SetEvent( ref->waitEvent); + LeaveCriticalSection( &ref->countCS); + errc = WaitForSingleObject( ref->waitDoneEvent, INFINITE); + } + else + { + LeaveCriticalSection( &ref->countCS); + } + + LeaveCriticalSection( &ref->signalCS); + + if( WAIT_OBJECT_0 != errc) + FAIL( "WaitForSingleObject", GetLastError()); + } + +#else // CONDITION_VARIABLE are available, use them + + // + void SIGNAL_INIT( SIGNAL_T *ref ) + { + InitializeConditionVariable( ref); + } + + void SIGNAL_FREE( SIGNAL_T *ref ) + { + // nothing to do + (void)ref; + } + + bool_t SIGNAL_WAIT( SIGNAL_T *ref, MUTEX_T *mu_ref, time_d abs_secs) + { + long ms; + + if( abs_secs < 0.0) + ms = INFINITE; + else if( abs_secs == 0.0) + ms = 0; + else + { + ms = (long) ((abs_secs - now_secs())*1000.0 + 0.5); + + // If the time already passed, still try once (ms==0). A short timeout + // may have turned negative or 0 because of the two time samples done. + // + if( ms < 0) + ms = 0; + } + + if( !SleepConditionVariableCS( ref, mu_ref, ms)) + { + if( GetLastError() == ERROR_TIMEOUT) + { + return FALSE; + } + else + { + FAIL( "SleepConditionVariableCS", GetLastError()); + } + } + return TRUE; + } + + void SIGNAL_ONE( SIGNAL_T *ref ) + { + WakeConditionVariable( ref); + } + + void SIGNAL_ALL( SIGNAL_T *ref ) + { + WakeAllConditionVariable( ref); + } + +#endif // CONDITION_VARIABLE are available + +#else // THREADAPI == THREADAPI_PTHREAD + // PThread (Linux, OS X, ...) + // + // On OS X, user processes seem to be able to change priorities. + // On Linux, SCHED_RR and su privileges are required.. !-( + // + #include + #include + +# if (defined(__MINGW32__) || defined(__MINGW64__)) && defined pthread_attr_setschedpolicy +# if pthread_attr_setschedpolicy( A, S) == ENOTSUP + // from the mingw-w64 team: + // Well, we support pthread_setschedparam by which you can specify + // threading-policy. Nevertheless, yes we lack this function. In + // general its implementation is pretty much trivial, as on Win32 target + // just SCHED_OTHER can be supported. + #undef pthread_attr_setschedpolicy + static int pthread_attr_setschedpolicy( pthread_attr_t* attr, int policy) + { + if( policy != SCHED_OTHER) + { + return ENOTSUP; + } + return 0; + } +# endif // pthread_attr_setschedpolicy() +# endif // defined(__MINGW32__) || defined(__MINGW64__) + + static void _PT_FAIL( int rc, const char *name, const char *file, uint_t line ) { + const char *why= (rc==EINVAL) ? "EINVAL" : + (rc==EBUSY) ? "EBUSY" : + (rc==EPERM) ? "EPERM" : + (rc==ENOMEM) ? "ENOMEM" : + (rc==ESRCH) ? "ESRCH" : + (rc==ENOTSUP) ? "ENOTSUP": + //... 
+ ""; + fprintf( stderr, "%s %d: %s failed, %d %s\n", file, line, name, rc, why ); + abort(); + } + #define PT_CALL( call ) { int rc= call; if (rc!=0) _PT_FAIL( rc, #call, __FILE__, __LINE__ ); } + // + void SIGNAL_INIT( SIGNAL_T *ref ) { + PT_CALL( pthread_cond_init(ref,NULL /*attr*/) ); + } + void SIGNAL_FREE( SIGNAL_T *ref ) { + PT_CALL( pthread_cond_destroy(ref) ); + } + // + /* + * Timeout is given as absolute since we may have fake wakeups during + * a timed out sleep. A Linda with some other key read, or just because + * PThread cond vars can wake up unwantedly. + */ + bool_t SIGNAL_WAIT( SIGNAL_T *ref, pthread_mutex_t *mu, time_d abs_secs ) { + if (abs_secs<0.0) { + PT_CALL( pthread_cond_wait( ref, mu ) ); // infinite + } else { + int rc; + struct timespec ts; + + assert( abs_secs != 0.0 ); + prepare_timeout( &ts, abs_secs ); + + rc= pthread_cond_timedwait( ref, mu, &ts ); + + if (rc==ETIMEDOUT) return FALSE; + if (rc) { _PT_FAIL( rc, "pthread_cond_timedwait()", __FILE__, __LINE__ ); } + } + return TRUE; + } + // + void SIGNAL_ONE( SIGNAL_T *ref ) { + PT_CALL( pthread_cond_signal(ref) ); // wake up ONE (or no) waiting thread + } + // + void SIGNAL_ALL( SIGNAL_T *ref ) { + PT_CALL( pthread_cond_broadcast(ref) ); // wake up ALL waiting threads + } + +// array of 7 thread priority values, hand-tuned by platform so that we offer a uniform [-3,+3] public priority range +static int const gs_prio_remap[] = +{ + // NB: PThreads priority handling is about as twisty as one can get it + // (and then some). DON*T TRUST ANYTHING YOU READ ON THE NET!!! + + //--- + // "Select the scheduling policy for the thread: one of SCHED_OTHER + // (regular, non-real-time scheduling), SCHED_RR (real-time, + // round-robin) or SCHED_FIFO (real-time, first-in first-out)." + // + // "Using the RR policy ensures that all threads having the same + // priority level will be scheduled equally, regardless of their activity." + // + // "For SCHED_FIFO and SCHED_RR, the only required member of the + // sched_param structure is the priority sched_priority. For SCHED_OTHER, + // the affected scheduling parameters are implementation-defined." + // + // "The priority of a thread is specified as a delta which is added to + // the priority of the process." + // + // ".. priority is an integer value, in the range from 1 to 127. + // 1 is the least-favored priority, 127 is the most-favored." + // + // "Priority level 0 cannot be used: it is reserved for the system." + // + // "When you use specify a priority of -99 in a call to + // pthread_setschedparam(), the priority of the target thread is + // lowered to the lowest possible value." + // + // ... + + // ** CONCLUSION ** + // + // PThread priorities are _hugely_ system specific, and we need at + // least OS specific settings. Hopefully, Linuxes and OS X versions + // are uniform enough, among each other... + // +# if defined PLATFORM_OSX + // AK 10-Apr-07 (OS X PowerPC 10.4.9): + // + // With SCHED_RR, 26 seems to be the "normal" priority, where setting + // it does not seem to affect the order of threads processed. + // + // With SCHED_OTHER, the range 25..32 is normal (maybe the same 26, + // but the difference is not so clear with OTHER). + // + // 'sched_get_priority_min()' and '..max()' give 15, 47 as the + // priority limits. This could imply, user mode applications won't + // be able to use values outside of that range. 
+ // +# define _PRIO_MODE SCHED_OTHER + + // OS X 10.4.9 (PowerPC) gives ENOTSUP for process scope + //#define _PRIO_SCOPE PTHREAD_SCOPE_PROCESS + +# define _PRIO_HI 32 // seems to work (_carefully_ picked!) +# define _PRIO_0 26 // detected +# define _PRIO_LO 1 // seems to work (tested) + +# elif defined PLATFORM_LINUX + // (based on Ubuntu Linux 2.6.15 kernel) + // + // SCHED_OTHER is the default policy, but does not allow for priorities. + // SCHED_RR allows priorities, all of which (1..99) are higher than + // a thread with SCHED_OTHER policy. + // + // + // + // + // + // Manuals suggest checking #ifdef _POSIX_THREAD_PRIORITY_SCHEDULING, + // but even Ubuntu does not seem to define it. + // +# define _PRIO_MODE SCHED_RR + + // NTLP 2.5: only system scope allowed (being the basic reason why + // root privileges are required..) + //#define _PRIO_SCOPE PTHREAD_SCOPE_PROCESS + +# define _PRIO_HI 99 +# define _PRIO_0 50 +# define _PRIO_LO 1 + +# elif defined(PLATFORM_BSD) + // + // + // + // "When control over the thread scheduling is desired, then FreeBSD + // with the libpthread implementation is by far the best choice .." + // +# define _PRIO_MODE SCHED_OTHER +# define _PRIO_SCOPE PTHREAD_SCOPE_PROCESS +# define _PRIO_HI 31 +# define _PRIO_0 15 +# define _PRIO_LO 1 + +# elif defined(PLATFORM_CYGWIN) + // + // TBD: Find right values for Cygwin + // +# elif defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC) + // any other value not supported by win32-pthread as of version 2.9.1 +# define _PRIO_MODE SCHED_OTHER + + // PTHREAD_SCOPE_PROCESS not supported by win32-pthread as of version 2.9.1 + //#define _PRIO_SCOPE PTHREAD_SCOPE_SYSTEM // but do we need this at all to start with? + THREAD_PRIORITY_IDLE, THREAD_PRIORITY_LOWEST, THREAD_PRIORITY_BELOW_NORMAL, THREAD_PRIORITY_NORMAL, THREAD_PRIORITY_ABOVE_NORMAL, THREAD_PRIORITY_HIGHEST, THREAD_PRIORITY_TIME_CRITICAL + +# else +# error "Unknown OS: not implemented!" +# endif + +#if defined _PRIO_0 +# define _PRIO_AN (_PRIO_0 + ((_PRIO_HI-_PRIO_0)/2)) +# define _PRIO_BN (_PRIO_LO + ((_PRIO_0-_PRIO_LO)/2)) + + _PRIO_LO, _PRIO_LO, _PRIO_BN, _PRIO_0, _PRIO_AN, _PRIO_HI, _PRIO_HI +#endif // _PRIO_0 +}; + +static int select_prio(int prio /* -3..+3 */) +{ + if (prio == THREAD_PRIO_DEFAULT) + prio = 0; + // prio range [-3,+3] was checked by the caller + return gs_prio_remap[prio + 3]; +} + +void THREAD_CREATE( THREAD_T* ref, THREAD_RETURN_T (*func)( void*), void* data, int prio /* -3..+3 */) +{ + pthread_attr_t a; + bool_t const change_priority = +#ifdef PLATFORM_LINUX + sudo && // only root-privileged process can change priorities +#endif + (prio != THREAD_PRIO_DEFAULT); + + PT_CALL( pthread_attr_init( &a)); + +#ifndef PTHREAD_TIMEDJOIN + // We create a NON-JOINABLE thread. This is mainly due to the lack of + // 'pthread_timedjoin()', but does offer other benefits (s.a. earlier + // freeing of the thread's resources). 
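To make the [-3,+3] remapping concrete: with the Linux values above (_PRIO_LO = 1, _PRIO_0 = 50, _PRIO_HI = 99), the derived midpoints and the resulting table work out as follows:

// _PRIO_BN = 1  + (50 - 1)  / 2 = 25   (between "low" and "normal")
// _PRIO_AN = 50 + (99 - 50) / 2 = 74   (between "normal" and "high")
//
// gs_prio_remap[] = {  1,  1, 25, 50, 74, 99, 99 }
//    public prio      -3  -2  -1   0  +1  +2  +3
//
// so select_prio( +1) == gs_prio_remap[ +1 + 3] == 74 (a SCHED_RR priority)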
+ // + PT_CALL( pthread_attr_setdetachstate( &a, PTHREAD_CREATE_DETACHED)); +#endif // PTHREAD_TIMEDJOIN + + // Use this to find a system's default stack size (DEBUG) +#if 0 + { + size_t n; + pthread_attr_getstacksize( &a, &n); + fprintf( stderr, "Getstack: %u\n", (unsigned int)n); + } + // 524288 on OS X + // 2097152 on Linux x86 (Ubuntu 7.04) + // 1048576 on FreeBSD 6.2 SMP i386 +#endif // 0 + +#if defined _THREAD_STACK_SIZE && _THREAD_STACK_SIZE > 0 + PT_CALL( pthread_attr_setstacksize( &a, _THREAD_STACK_SIZE)); +#endif + + if (change_priority) + { + struct sched_param sp; + // "The specified scheduling parameters are only used if the scheduling + // parameter inheritance attribute is PTHREAD_EXPLICIT_SCHED." + // +#if !defined __ANDROID__ || ( defined __ANDROID__ && __ANDROID_API__ >= 28 ) + PT_CALL( pthread_attr_setinheritsched( &a, PTHREAD_EXPLICIT_SCHED)); +#endif + +#ifdef _PRIO_SCOPE + PT_CALL( pthread_attr_setscope( &a, _PRIO_SCOPE)); +#endif // _PRIO_SCOPE + + PT_CALL( pthread_attr_setschedpolicy( &a, _PRIO_MODE)); + + sp.sched_priority = select_prio(prio); + PT_CALL( pthread_attr_setschedparam( &a, &sp)); + } + + //--- + // Seems on OS X, _POSIX_THREAD_THREADS_MAX is some kind of system + // thread limit (not userland thread). Actual limit for us is way higher. + // PTHREAD_THREADS_MAX is not defined (even though man page refers to it!) + // +# ifndef THREAD_CREATE_RETRIES_MAX + // Don't bother with retries; a failure is a failure + // + { + int rc = pthread_create( ref, &a, func, data); + if( rc) _PT_FAIL( rc, "pthread_create()", __FILE__, __LINE__ - 1); + } +# else +# error "This code deprecated" + /* + // Wait slightly if thread creation has exchausted the system + // + { uint_t retries; + for( retries=0; retries>= 1; + } +#ifdef __ANDROID__ + PT_CALL( sched_setaffinity( pthread_self(), sizeof(cpu_set_t), &cpuset)); +#elif defined(__NetBSD__) + PT_CALL( pthread_setaffinity_np( pthread_self(), cpuset_size(cpuset), cpuset)); + cpuset_destroy( cpuset); +#else + PT_CALL( pthread_setaffinity_np( pthread_self(), sizeof(cpu_set_t), &cpuset)); +#endif +} + + /* + * Wait for a thread to finish. + * + * 'mu_ref' is a lock we should use for the waiting; initially unlocked. + * Same lock as passed to THREAD_EXIT. + * + * Returns TRUE for successful wait, FALSE for timed out + */ +bool_t THREAD_WAIT( THREAD_T *ref, double secs , SIGNAL_T *signal_ref, MUTEX_T *mu_ref, volatile enum e_status *st_ref) +{ + struct timespec ts_store; + const struct timespec *timeout= NULL; + bool_t done; + + // Do timeout counting before the locks + // +#if THREADWAIT_METHOD == THREADWAIT_TIMEOUT + if (secs>=0.0) +#else // THREADWAIT_METHOD == THREADWAIT_CONDVAR + if (secs>0.0) +#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR + { + prepare_timeout( &ts_store, now_secs()+secs ); + timeout= &ts_store; + } + +#if THREADWAIT_METHOD == THREADWAIT_TIMEOUT + /* Thread is joinable + */ + if (!timeout) { + PT_CALL( pthread_join( *ref, NULL /*ignore exit value*/ )); + done= TRUE; + } else { + int rc= PTHREAD_TIMEDJOIN( *ref, NULL, timeout ); + if ((rc!=0) && (rc!=ETIMEDOUT)) { + _PT_FAIL( rc, "PTHREAD_TIMEDJOIN", __FILE__, __LINE__-2 ); + } + done= rc==0; + } +#else // THREADWAIT_METHOD == THREADWAIT_CONDVAR + /* Since we've set the thread up as PTHREAD_CREATE_DETACHED, we cannot + * join with it. Use the cond.var. 
+ */ + (void) ref; // unused + MUTEX_LOCK( mu_ref ); + + // 'secs'==0.0 does not need to wait, just take the current status + // within the 'mu_ref' locks + // + if (secs != 0.0) { + while( *st_ref < DONE ) { + if (!timeout) { + PT_CALL( pthread_cond_wait( signal_ref, mu_ref )); + } else { + int rc= pthread_cond_timedwait( signal_ref, mu_ref, timeout ); + if (rc==ETIMEDOUT) break; + if (rc!=0) _PT_FAIL( rc, "pthread_cond_timedwait", __FILE__, __LINE__-2 ); + } + } + } + done= *st_ref >= DONE; // DONE|ERROR_ST|CANCELLED + + MUTEX_UNLOCK( mu_ref ); +#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR + return done; + } + // + void THREAD_KILL( THREAD_T *ref ) { +#ifdef __ANDROID__ + __android_log_print(ANDROID_LOG_WARN, LOG_TAG, "Cannot kill thread!"); +#else + pthread_cancel( *ref ); +#endif + } + + void THREAD_MAKE_ASYNCH_CANCELLABLE() + { +#ifdef __ANDROID__ + __android_log_print(ANDROID_LOG_WARN, LOG_TAG, "Cannot make thread async cancellable!"); +#else + // that's the default, but just in case... + pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL); + // we want cancellation to take effect immediately if possible, instead of waiting for a cancellation point (which is the default) + pthread_setcanceltype( PTHREAD_CANCEL_ASYNCHRONOUS, NULL); +#endif + } + + void THREAD_SETNAME( char const* _name) + { + // exact API to set the thread name is platform-dependant + // if you need to fix the build, or if you know how to fill a hole, tell me (bnt.germain@gmail.com) so that I can submit the fix in github. +#if defined PLATFORM_BSD && !defined __NetBSD__ + pthread_set_name_np( pthread_self(), _name); +#elif defined PLATFORM_BSD && defined __NetBSD__ + pthread_setname_np( pthread_self(), "%s", (void *)_name); +#elif defined PLATFORM_LINUX + #if LINUX_USE_PTHREAD_SETNAME_NP + pthread_setname_np( pthread_self(), _name); + #else // LINUX_USE_PTHREAD_SETNAME_NP + prctl(PR_SET_NAME, _name, 0, 0, 0); + #endif // LINUX_USE_PTHREAD_SETNAME_NP +#elif defined PLATFORM_QNX || defined PLATFORM_CYGWIN + pthread_setname_np( pthread_self(), _name); +#elif defined PLATFORM_OSX + pthread_setname_np(_name); +#elif defined PLATFORM_WIN32 || defined PLATFORM_POCKETPC + PT_CALL( pthread_setname_np( pthread_self(), _name)); +#endif + } +#endif // THREADAPI == THREADAPI_PTHREAD diff --git a/src/tools.c b/src/tools.c deleted file mode 100644 index 6f4a06a..0000000 --- a/src/tools.c +++ /dev/null @@ -1,2080 +0,0 @@ -/* - * TOOLS.C Copyright (c) 2002-10, Asko Kauppi - * - * Lua tools to support Lanes. -*/ - -/* -=============================================================================== - -Copyright (C) 2002-10 Asko Kauppi - 2011-17 benoit Germain - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - -=============================================================================== -*/ - -#include -#include -#include -#include -#include -#if !defined(__APPLE__) -#include -#endif // __APPLE__ - -#include "tools.h" -#include "compat.h" -#include "universe.h" -#include "keeper.h" -#include "lanes.h" -#include "uniquekey.h" - -// functions implemented in deep.c -extern bool_t copydeep( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, LookupMode mode_, char const* upName_); -extern void push_registry_subtable( lua_State* L, UniqueKey key_); - -DEBUGSPEW_CODE( char const* debugspew_indent = "----+----!----+----!----+----!----+----!----+----!----+----!----+----!----+"); - - -// ################################################################################################ - -/* - * Does what the original 'push_registry_subtable' function did, but adds an optional mode argument to it - */ -void push_registry_subtable_mode( lua_State* L, UniqueKey key_, const char* mode_) -{ - STACK_GROW( L, 3); - STACK_CHECK( L, 0); - - REGISTRY_GET( L, key_); // {}|nil - STACK_MID( L, 1); - - if( lua_isnil( L, -1)) - { - lua_pop( L, 1); // - lua_newtable( L); // {} - // _R[key_] = {} - REGISTRY_SET( L, key_, lua_pushvalue( L, -2)); // {} - STACK_MID( L, 1); - - // Set its metatable if requested - if( mode_) - { - lua_newtable( L); // {} mt - lua_pushliteral( L, "__mode"); // {} mt "__mode" - lua_pushstring( L, mode_); // {} mt "__mode" mode - lua_rawset( L, -3); // {} mt - lua_setmetatable( L, -2); // {} - } - } - STACK_END( L, 1); - ASSERT_L( lua_istable( L, -1)); -} - -// ################################################################################################ - -/* - * Push a registry subtable (keyed by unique 'key_') onto the stack. - * If the subtable does not exist, it is created and chained. - */ -void push_registry_subtable( lua_State* L, UniqueKey key_) -{ - push_registry_subtable_mode( L, key_, NULL); -} - -// ################################################################################################ - -/*---=== luaG_dump ===---*/ -#ifdef _DEBUG -void luaG_dump( lua_State* L) -{ - int top = lua_gettop( L); - int i; - - fprintf( stderr, "\n\tDEBUG STACK:\n"); - - if( top == 0) - fprintf( stderr, "\t(none)\n"); - - for( i = 1; i <= top; ++ i) - { - int type = lua_type( L, i); - - fprintf( stderr, "\t[%d]= (%s) ", i, lua_typename( L, type)); - - // Print item contents here... - // - // Note: this requires 'tostring()' to be defined. If it is NOT, - // enable it for more debugging. 
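A typical use of push_registry_subtable_mode() above is a registry-anchored cache whose entries may be collected; a minimal sketch, assuming an invented key constant (SOME_CACHE_REGKEY and its value are illustrative only, not defined in this patch):

// key value invented purely for the example
static DECLARE_CONST_UNIQUE_KEY( SOME_CACHE_REGKEY, 0x0123456789abcdef);

void push_weak_value_cache( lua_State* L)
{
    // fetch or lazily create _R[SOME_CACHE_REGKEY], with __mode = "v" so cached
    // values don't outlive their last external reference; leaves it on the stack
    push_registry_subtable_mode( L, SOME_CACHE_REGKEY, "v");
}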
- // - STACK_CHECK( L, 0); - STACK_GROW( L, 2); - - lua_getglobal( L, "tostring"); - // - // [-1]: tostring function, or nil - - if( !lua_isfunction( L, -1)) - { - fprintf( stderr, "('tostring' not available)"); - } - else - { - lua_pushvalue( L, i); - lua_call( L, 1 /*args*/, 1 /*retvals*/); - - // Don't trust the string contents - // - fprintf( stderr, "%s", lua_tostring( L, -1)); - } - lua_pop( L, 1); - STACK_END( L, 0); - fprintf( stderr, "\n"); - } - fprintf( stderr, "\n"); -} -#endif // _DEBUG - -// ################################################################################################ - -// same as PUC-Lua l_alloc -static void* libc_lua_Alloc(void* ud, void* ptr, size_t osize, size_t nsize) -{ - (void)ud; (void)osize; /* not used */ - if (nsize == 0) - { - free(ptr); - return NULL; - } - else - { - return realloc(ptr, nsize); - } -} - -static void* protected_lua_Alloc( void *ud, void *ptr, size_t osize, size_t nsize) -{ - void* p; - ProtectedAllocator* s = (ProtectedAllocator*) ud; - MUTEX_LOCK( &s->lock); - p = s->definition.allocF( s->definition.allocUD, ptr, osize, nsize); - MUTEX_UNLOCK( &s->lock); - return p; -} - -static int luaG_provide_protected_allocator( lua_State* L) -{ - Universe* U = universe_get( L); - AllocatorDefinition* const def = (AllocatorDefinition*) lua_newuserdatauv( L, sizeof(AllocatorDefinition), 0); - def->allocF = protected_lua_Alloc; - def->allocUD = &U->protected_allocator; - return 1; -} - -// called once at the creation of the universe (therefore L is the master Lua state everything originates from) -// Do I need to disable this when compiling for LuaJIT to prevent issues? -void initialize_allocator_function( Universe* U, lua_State* L) -{ - STACK_CHECK( L, 0); - lua_getfield( L, -1, "allocator"); // settings allocator|nil|"protected" - if( !lua_isnil( L, -1)) - { - // store C function pointer in an internal variable - U->provide_allocator = lua_tocfunction( L, -1); // settings allocator - if( U->provide_allocator != NULL) - { - // make sure the function doesn't have upvalues - char const* upname = lua_getupvalue( L, -1, 1); // settings allocator upval? 
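For context on how the provide_allocator pointer stored here gets consumed later: when Lanes creates a keeper or lane state, it can call the stored C function to obtain an AllocatorDefinition and feed it to lua_newstate. A rough sketch of that call pattern; the create_state_with_allocator name is invented, and the real call site lives in state.c rather than in this hunk:

// Sketch: consume U->provide_allocator the way a state-creation helper would.
static lua_State* create_state_with_allocator( Universe* U, lua_State* from_)
{
    if( U->provide_allocator != NULL)
    {
        AllocatorDefinition* def;
        lua_State* L;
        lua_pushcclosure( from_, U->provide_allocator, 0);
        lua_call( from_, 0, 1);                                   // def (full userdata)
        def = (AllocatorDefinition*) lua_touserdata( from_, -1);
        L = lua_newstate( def->allocF, def->allocUD);             // new state uses it
        lua_pop( from_, 1);
        return L;
    }
    return luaL_newstate(); // fall back to the default allocator
}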
- if( upname != NULL) // should be "" for C functions with upvalues if any - { - (void) luaL_error( L, "config.allocator() shouldn't have upvalues"); - } - // remove this C function from the config table so that it doesn't cause problems - // when we transfer the config table in newly created Lua states - lua_pushnil( L); // settings allocator nil - lua_setfield( L, -3, "allocator"); // settings allocator - } - else if( lua_type( L, -1) == LUA_TSTRING) // should be "protected" - { - // initialize all we need for the protected allocator - MUTEX_INIT( &U->protected_allocator.lock); // the mutex - // and the original allocator to call from inside protection by the mutex - U->protected_allocator.definition.allocF = lua_getallocf( L, &U->protected_allocator.definition.allocUD); - // before a state is created, this function will be called to obtain the allocator - U->provide_allocator = luaG_provide_protected_allocator; - - lua_setallocf( L, protected_lua_Alloc, &U->protected_allocator); - } - } - else - { - // initialize the mutex even if we are not going to use it, because cleanup_allocator_function will deinitialize it - MUTEX_INIT( &U->protected_allocator.lock); - // just grab whatever allocator was provided to lua_newstate - U->protected_allocator.definition.allocF = lua_getallocf( L, &U->protected_allocator.definition.allocUD); - } - lua_pop( L, 1); // settings - STACK_MID(L, 0); - - lua_getfield( L, -1, "internal_allocator"); // settings "libc"|"allocator" - { - char const* allocator = lua_tostring( L, -1); - if (strcmp(allocator, "libc") == 0) - { - U->internal_allocator.allocF = libc_lua_Alloc; - U->internal_allocator.allocUD = NULL; - } - else - { - U->internal_allocator = U->protected_allocator.definition; - } - } - lua_pop( L, 1); // settings - STACK_END( L, 0); -} - -void cleanup_allocator_function( Universe* U, lua_State* L) -{ - // remove the protected allocator, if any - if( U->protected_allocator.definition.allocF != NULL) - { - // install the non-protected allocator - lua_setallocf( L, U->protected_allocator.definition.allocF, U->protected_allocator.definition.allocUD); - // release the mutex - MUTEX_FREE( &U->protected_allocator.lock); - } -} - -// ################################################################################################ - -static int dummy_writer( lua_State* L, void const* p, size_t sz, void* ud) -{ - (void)L; (void)p; (void)sz; (void) ud; // unused - return 666; -} - - -/* - * differentiation between C, bytecode and JIT-fast functions - * - * - * +----------+------------+----------+ - * | bytecode | C function | JIT-fast | - * +-----------------+----------+------------+----------+ - * | lua_topointer | | | | - * +-----------------+----------+------------+----------+ - * | lua_tocfunction | NULL | | NULL | - * +-----------------+----------+------------+----------+ - * | lua_dump | 666 | 1 | 1 | - * +-----------------+----------+------------+----------+ - */ - -typedef enum -{ - FST_Bytecode, - FST_Native, - FST_FastJIT -} FuncSubType; - -FuncSubType luaG_getfuncsubtype( lua_State *L, int _i) -{ - if( lua_tocfunction( L, _i)) - { - return FST_Native; - } - { - int mustpush = 0, dumpres; - if( lua_absindex( L, _i) != lua_gettop( L)) - { - lua_pushvalue( L, _i); - mustpush = 1; - } - // the provided writer fails with code 666 - // therefore, anytime we get 666, this means that lua_dump() attempted a dump - // all other cases mean this is either a C or LuaJIT-fast function - dumpres = lua504_dump( L, dummy_writer, NULL, 0); - lua_pop( L, mustpush); - if( 
dumpres == 666) - { - return FST_Bytecode; - } - } - return FST_FastJIT; -} - -static lua_CFunction luaG_tocfunction( lua_State *L, int _i, FuncSubType *_out) -{ - lua_CFunction p = lua_tocfunction( L, _i); - *_out = luaG_getfuncsubtype( L, _i); - return p; -} - -// crc64/we of string "LOOKUPCACHE_REGKEY" generated at http://www.nitrxgen.net/hashgen/ -static DECLARE_CONST_UNIQUE_KEY( LOOKUPCACHE_REGKEY, 0x837a68dfc6fcb716); - -// inspired from tconcat() in ltablib.c -static char const* luaG_pushFQN( lua_State* L, int t, int last, size_t* length) -{ - int i = 1; - luaL_Buffer b; - STACK_CHECK( L, 0); - // Lua 5.4 pushes &b as light userdata on the stack. be aware of it... - luaL_buffinit( L, &b); // ... {} ... &b? - for( ; i < last; ++ i) - { - lua_rawgeti( L, t, i); - luaL_addvalue( &b); - luaL_addlstring(&b, "/", 1); - } - if( i == last) // add last value (if interval was not empty) - { - lua_rawgeti( L, t, i); - luaL_addvalue( &b); - } - // &b is popped at that point (-> replaced by the result) - luaL_pushresult( &b); // ... {} ... "" - STACK_END( L, 1); - return lua_tolstring( L, -1, length); -} - -/* - * receives 2 arguments: a name k and an object o - * add two entries ["fully.qualified.name"] = o - * and [o] = "fully.qualified.name" - * where is either a table or a function - * if we already had an entry of type [o] = ..., replace the name if the new one is shorter - * pops the processed object from the stack - */ -static void update_lookup_entry( DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State* L, int _ctx_base, int _depth) -{ - // slot 1 in the stack contains the table that receives everything we found - int const dest = _ctx_base; - // slot 2 contains a table that, when concatenated, produces the fully qualified name of scanned elements in the table provided at slot _i - int const fqn = _ctx_base + 1; - - size_t prevNameLength, newNameLength; - char const* prevName; - DEBUGSPEW_CODE( char const *newName); - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "update_lookup_entry()\n" INDENT_END)); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - - STACK_CHECK( L, 0); - // first, raise an error if the function is already known - lua_pushvalue( L, -1); // ... {bfc} k o o - lua_rawget( L, dest); // ... {bfc} k o name? - prevName = lua_tolstring( L, -1, &prevNameLength); // NULL if we got nil (first encounter of this object) - // push name in fqn stack (note that concatenation will crash if name is a not string or a number) - lua_pushvalue( L, -3); // ... {bfc} k o name? k - ASSERT_L( lua_type( L, -1) == LUA_TNUMBER || lua_type( L, -1) == LUA_TSTRING); - ++ _depth; - lua_rawseti( L, fqn, _depth); // ... {bfc} k o name? - // generate name - DEBUGSPEW_CODE( newName =) luaG_pushFQN( L, fqn, _depth, &newNameLength); // ... {bfc} k o name? "f.q.n" - // Lua 5.2 introduced a hash randomizer seed which causes table iteration to yield a different key order - // on different VMs even when the tables are populated the exact same way. - // When Lua is built with compatibility options (such as LUA_COMPAT_ALL), - // this causes several base libraries to register functions under multiple names. - // This, with the randomizer, can cause the first generated name of an object to be different on different VMs, - // which breaks function transfer. - // Also, nothing prevents any external module from exposing a given object under several names, so... 
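luaG_pushFQN() above joins the first 'last' entries of the fqn table with '/' separators. A small worked illustration (the table contents are arbitrary):

// If the table at index t holds { "lanes", "timer", "set_timer" } then
//   luaG_pushFQN( L, t, 3, &len)  pushes "lanes/timer/set_timer"  (len == 21)
//   luaG_pushFQN( L, t, 1, &len)  pushes "lanes"                  (len == 5)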
- // Therefore, when we encounter an object for which a name was previously registered, we need to select the names - // based on some sorting order so that we end up with the same name in all databases whatever order the table walk yielded - if( prevName != NULL && (prevNameLength < newNameLength || lua_lessthan( L, -2, -1))) - { - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%s '%s' remained named '%s'\n" INDENT_END, lua_typename( L, lua_type( L, -3)), newName, prevName)); - // the previous name is 'smaller' than the one we just generated: keep it! - lua_pop( L, 3); // ... {bfc} k - } - else - { - // the name we generated is either the first one, or a better fit for our purposes - if( prevName) - { - // clear the previous name for the database to avoid clutter - lua_insert( L, -2); // ... {bfc} k o "f.q.n" prevName - // t[prevName] = nil - lua_pushnil( L); // ... {bfc} k o "f.q.n" prevName nil - lua_rawset( L, dest); // ... {bfc} k o "f.q.n" - } - else - { - lua_remove( L, -2); // ... {bfc} k o "f.q.n" - } - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%s '%s'\n" INDENT_END, lua_typename( L, lua_type( L, -2)), newName)); - // prepare the stack for database feed - lua_pushvalue( L, -1); // ... {bfc} k o "f.q.n" "f.q.n" - lua_pushvalue( L, -3); // ... {bfc} k o "f.q.n" "f.q.n" o - ASSERT_L( lua_rawequal( L, -1, -4)); - ASSERT_L( lua_rawequal( L, -2, -3)); - // t["f.q.n"] = o - lua_rawset( L, dest); // ... {bfc} k o "f.q.n" - // t[o] = "f.q.n" - lua_rawset( L, dest); // ... {bfc} k - // remove table name from fqn stack - lua_pushnil( L); // ... {bfc} k nil - lua_rawseti( L, fqn, _depth); // ... {bfc} k - } - -- _depth; - STACK_END( L, -1); - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); -} - -static void populate_func_lookup_table_recur( DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State* L, int _ctx_base, int _i, int _depth) -{ - lua_Integer visit_count; - // slot 2 contains a table that, when concatenated, produces the fully qualified name of scanned elements in the table provided at slot _i - int const fqn = _ctx_base + 1; - // slot 3 contains a cache that stores all already visited tables to avoid infinite recursion loops - int const cache = _ctx_base + 2; - // we need to remember subtables to process them after functions encountered at the current depth (breadth-first search) - int const breadth_first_cache = lua_gettop( L) + 1; - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "populate_func_lookup_table_recur()\n" INDENT_END)); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - - STACK_GROW( L, 6); - // slot _i contains a table where we search for functions (or a full userdata with a metatable) - STACK_CHECK( L, 0); // ... {_i} - - // if object is a userdata, replace it by its metatable - if( lua_type( L, _i) == LUA_TUSERDATA) - { - lua_getmetatable( L, _i); // ... {_i} mt - lua_replace( L, _i); // ... {_i} - } - - // if table is already visited, we are done - lua_pushvalue( L, _i); // ... {_i} {} - lua_rawget( L, cache); // ... {_i} nil|n - visit_count = lua_tointeger( L, -1); // 0 if nil, else n - lua_pop( L, 1); // ... {_i} - STACK_MID( L, 0); - if( visit_count > 0) - { - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "already visited\n" INDENT_END)); - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - return; - } - - // remember we visited this table (1-visit count) - lua_pushvalue( L, _i); // ... {_i} {} - lua_pushinteger( L, visit_count + 1); // ... {_i} {} 1 - lua_rawset( L, cache); // ... 
{_i} - STACK_MID( L, 0); - - // this table is at breadth_first_cache index - lua_newtable( L); // ... {_i} {bfc} - ASSERT_L( lua_gettop( L) == breadth_first_cache); - // iterate over all entries in the processed table - lua_pushnil( L); // ... {_i} {bfc} nil - while( lua_next( L, _i) != 0) // ... {_i} {bfc} k v - { - // just for debug, not actually needed - //char const* key = (lua_type( L, -2) == LUA_TSTRING) ? lua_tostring( L, -2) : "not a string"; - // subtable: process it recursively - if( lua_istable( L, -1)) // ... {_i} {bfc} k {} - { - // increment visit count to make sure we will actually scan it at this recursive level - lua_pushvalue( L, -1); // ... {_i} {bfc} k {} {} - lua_pushvalue( L, -1); // ... {_i} {bfc} k {} {} {} - lua_rawget( L, cache); // ... {_i} {bfc} k {} {} n? - visit_count = lua_tointeger( L, -1) + 1; // 1 if we got nil, else n+1 - lua_pop( L, 1); // ... {_i} {bfc} k {} {} - lua_pushinteger( L, visit_count); // ... {_i} {bfc} k {} {} n - lua_rawset( L, cache); // ... {_i} {bfc} k {} - // store the table in the breadth-first cache - lua_pushvalue( L, -2); // ... {_i} {bfc} k {} k - lua_pushvalue( L, -2); // ... {_i} {bfc} k {} k {} - lua_rawset( L, breadth_first_cache); // ... {_i} {bfc} k {} - // generate a name, and if we already had one name, keep whichever is the shorter - update_lookup_entry( DEBUGSPEW_PARAM_COMMA( U) L, _ctx_base, _depth); // ... {_i} {bfc} k - } - else if( lua_isfunction( L, -1) && (luaG_getfuncsubtype( L, -1) != FST_Bytecode)) // ... {_i} {bfc} k func - { - // generate a name, and if we already had one name, keep whichever is the shorter - update_lookup_entry( DEBUGSPEW_PARAM_COMMA( U) L, _ctx_base, _depth); // ... {_i} {bfc} k - } - else - { - lua_pop( L, 1); // ... {_i} {bfc} k - } - STACK_MID( L, 2); - } - // now process the tables we encountered at that depth - ++ _depth; - lua_pushnil( L); // ... {_i} {bfc} nil - while( lua_next( L, breadth_first_cache) != 0) // ... {_i} {bfc} k {} - { - DEBUGSPEW_CODE( char const* key = (lua_type( L, -2) == LUA_TSTRING) ? lua_tostring( L, -2) : "not a string"); - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "table '%s'\n" INDENT_END, key)); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - // un-visit this table in case we do need to process it - lua_pushvalue( L, -1); // ... {_i} {bfc} k {} {} - lua_rawget( L, cache); // ... {_i} {bfc} k {} n - ASSERT_L( lua_type( L, -1) == LUA_TNUMBER); - visit_count = lua_tointeger( L, -1) - 1; - lua_pop( L, 1); // ... {_i} {bfc} k {} - lua_pushvalue( L, -1); // ... {_i} {bfc} k {} {} - if( visit_count > 0) - { - lua_pushinteger( L, visit_count); // ... {_i} {bfc} k {} {} n - } - else - { - lua_pushnil( L); // ... {_i} {bfc} k {} {} nil - } - lua_rawset( L, cache); // ... {_i} {bfc} k {} - // push table name in fqn stack (note that concatenation will crash if name is a not string!) - lua_pushvalue( L, -2); // ... {_i} {bfc} k {} k - lua_rawseti( L, fqn, _depth); // ... {_i} {bfc} k {} - populate_func_lookup_table_recur( DEBUGSPEW_PARAM_COMMA( U) L, _ctx_base, lua_gettop( L), _depth); - lua_pop( L, 1); // ... {_i} {bfc} k - STACK_MID( L, 2); - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - } - // remove table name from fqn stack - lua_pushnil( L); // ... {_i} {bfc} nil - lua_rawseti( L, fqn, _depth); // ... {_i} {bfc} - -- _depth; - // we are done with our cache - lua_pop( L, 1); // ... {_i} - STACK_END( L, 0); - // we are done // ... 
{_i} {bfc} - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); -} - -/* - * create a "fully.qualified.name" <-> function equivalence database - */ -void populate_func_lookup_table( lua_State* L, int _i, char const* name_) -{ - int const ctx_base = lua_gettop( L) + 1; - int const in_base = lua_absindex( L, _i); - int start_depth = 0; - DEBUGSPEW_CODE( Universe* U = universe_get( L)); - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: populate_func_lookup_table('%s')\n" INDENT_END, L, name_ ? name_ : "NULL")); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - STACK_GROW( L, 3); - STACK_CHECK( L, 0); - REGISTRY_GET( L, LOOKUP_REGKEY); // {} - STACK_MID( L, 1); - ASSERT_L( lua_istable( L, -1)); - if( lua_type( L, in_base) == LUA_TFUNCTION) // for example when a module is a simple function - { - name_ = name_ ? name_ : "NULL"; - lua_pushvalue( L, in_base); // {} f - lua_pushstring( L, name_); // {} f _name - lua_rawset( L, -3); // {} - lua_pushstring( L, name_); // {} _name - lua_pushvalue( L, in_base); // {} _name f - lua_rawset( L, -3); // {} - lua_pop( L, 1); // - } - else if( lua_type( L, in_base) == LUA_TTABLE) - { - lua_newtable( L); // {} {fqn} - if( name_) - { - STACK_MID( L, 2); - lua_pushstring( L, name_); // {} {fqn} "name" - // generate a name, and if we already had one name, keep whichever is the shorter - lua_pushvalue( L, in_base); // {} {fqn} "name" t - update_lookup_entry( DEBUGSPEW_PARAM_COMMA( U) L, ctx_base, start_depth); // {} {fqn} "name" - // don't forget to store the name at the bottom of the fqn stack - ++ start_depth; - lua_rawseti( L, -2, start_depth); // {} {fqn} - STACK_MID( L, 2); - } - // retrieve the cache, create it if we haven't done it yet - REGISTRY_GET( L, LOOKUPCACHE_REGKEY); // {} {fqn} {cache}? - if( lua_isnil( L, -1)) - { - lua_pop( L, 1); // {} {fqn} - lua_newtable( L); // {} {fqn} {cache} - REGISTRY_SET( L, LOOKUPCACHE_REGKEY, lua_pushvalue( L, -2)); - STACK_MID( L, 3); - } - // process everything we find in that table, filling in lookup data for all functions and tables we see there - populate_func_lookup_table_recur( DEBUGSPEW_PARAM_COMMA( U) L, ctx_base, in_base, start_depth); - lua_pop( L, 3); - } - else - { - lua_pop( L, 1); // - (void) luaL_error( L, "unsupported module type %s", lua_typename( L, lua_type( L, in_base))); - } - STACK_END( L, 0); - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); -} - -/*---=== Inter-state copying ===---*/ - -// crc64/we of string "REG_MTID" generated at http://www.nitrxgen.net/hashgen/ -static DECLARE_CONST_UNIQUE_KEY( REG_MTID, 0x2e68f9b4751584dc); - -/* -* Get a unique ID for metatable at [i]. -*/ -static lua_Integer get_mt_id( Universe* U, lua_State* L, int i) -{ - lua_Integer id; - - i = lua_absindex( L, i); - - STACK_GROW( L, 3); - - STACK_CHECK( L, 0); - push_registry_subtable( L, REG_MTID); // ... _R[REG_MTID] - lua_pushvalue( L, i); // ... _R[REG_MTID] {mt} - lua_rawget( L, -2); // ... _R[REG_MTID] mtk? - - id = lua_tointeger( L, -1); // 0 for nil - lua_pop( L, 1); // ... _R[REG_MTID] - STACK_MID( L, 1); - - if( id == 0) - { - MUTEX_LOCK( &U->mtid_lock); - id = ++ U->last_mt_id; - MUTEX_UNLOCK( &U->mtid_lock); - - /* Create two-way references: id_uint <-> table - */ - lua_pushvalue( L, i); // ... _R[REG_MTID] {mt} - lua_pushinteger( L, id); // ... _R[REG_MTID] {mt} id - lua_rawset( L, -3); // ... _R[REG_MTID] - - lua_pushinteger( L, id); // ... _R[REG_MTID] id - lua_pushvalue( L, i); // ... _R[REG_MTID] id {mt} - lua_rawset( L, -3); // ... _R[REG_MTID] - } - lua_pop( L, 1); // ... 
- - STACK_END( L, 0); - - return id; -} - -// function sentinel used to transfer native functions from/to keeper states -static int func_lookup_sentinel( lua_State* L) -{ - return luaL_error( L, "function lookup sentinel for %s, should never be called", lua_tostring( L, lua_upvalueindex( 1))); -} - - -// function sentinel used to transfer native table from/to keeper states -static int table_lookup_sentinel( lua_State* L) -{ - return luaL_error( L, "table lookup sentinel for %s, should never be called", lua_tostring( L, lua_upvalueindex( 1))); -} - -// function sentinel used to transfer cloned full userdata from/to keeper states -static int userdata_clone_sentinel( lua_State* L) -{ - return luaL_error( L, "userdata clone sentinel for %s, should never be called", lua_tostring( L, lua_upvalueindex( 1))); -} - -/* - * retrieve the name of a function/table in the lookup database - */ -static char const* find_lookup_name( lua_State* L, uint_t i, LookupMode mode_, char const* upName_, size_t* len_) -{ - DEBUGSPEW_CODE( Universe* const U = universe_get( L)); - char const* fqn; - ASSERT_L( lua_isfunction( L, i) || lua_istable( L, i)); // ... v ... - STACK_CHECK( L, 0); - STACK_GROW( L, 3); // up to 3 slots are necessary on error - if( mode_ == eLM_FromKeeper) - { - lua_CFunction f = lua_tocfunction( L, i); // should *always* be func_lookup_sentinel or table_lookup_sentinel! - if( f == func_lookup_sentinel || f == table_lookup_sentinel || f == userdata_clone_sentinel) - { - lua_getupvalue( L, i, 1); // ... v ... "f.q.n" - } - else - { - // if this is not a sentinel, this is some user-created table we wanted to lookup - ASSERT_L( NULL == f && lua_istable( L, i)); - // push anything that will convert to NULL string - lua_pushnil( L); // ... v ... nil - } - } - else - { - // fetch the name from the source state's lookup table - REGISTRY_GET( L, LOOKUP_REGKEY); // ... v ... {} - STACK_MID( L, 1); - ASSERT_L( lua_istable( L, -1)); - lua_pushvalue( L, i); // ... v ... {} v - lua_rawget( L, -2); // ... v ... {} "f.q.n" - } - fqn = lua_tolstring( L, -1, len_); - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "function [C] %s \n" INDENT_END, fqn)); - // popping doesn't invalidate the pointer since this is an interned string gotten from the lookup database - lua_pop( L, (mode_ == eLM_FromKeeper) ? 1 : 2); // ... v ... - STACK_MID( L, 0); - if( NULL == fqn && !lua_istable( L, i)) // raise an error if we try to send an unknown function (but not for tables) - { - char const *from, *typewhat, *what, *gotchaA, *gotchaB; - // try to discover the name of the function we want to send - lua_getglobal( L, "decoda_name"); // ... v ... decoda_name - from = lua_tostring( L, -1); - lua_pushcfunction( L, luaG_nameof); // ... v ... decoda_name luaG_nameof - lua_pushvalue( L, i); // ... v ... decoda_name luaG_nameof t - lua_call( L, 1, 2); // ... v ... decoda_name "type" "name"|nil - typewhat = (lua_type( L, -2) == LUA_TSTRING) ? lua_tostring( L, -2) : luaL_typename( L, -2); - // second return value can be nil if the table was not found - // probable reason: the function was removed from the source Lua state before Lanes was required. - if( lua_isnil( L, -1)) - { - gotchaA = " referenced by"; - gotchaB = "\n(did you remove it from the source Lua state before requiring Lanes?)"; - what = upName_; - } - else - { - gotchaA = ""; - gotchaB = ""; - what = (lua_type( L, -1) == LUA_TSTRING) ? 
lua_tostring( L, -1) : luaL_typename( L, -1); - } - (void) luaL_error( L, "%s%s '%s' not found in %s origin transfer database.%s", typewhat, gotchaA, what, from ? from : "main", gotchaB); - *len_ = 0; - return NULL; - } - STACK_END( L, 0); - return fqn; -} - - -/* - * Push a looked-up table, or nothing if we found nothing - */ -static bool_t lookup_table( lua_State* L2, lua_State* L, uint_t i, LookupMode mode_, char const* upName_) -{ - // get the name of the table we want to send - size_t len; - char const* fqn = find_lookup_name( L, i, mode_, upName_, &len); - if( NULL == fqn) // name not found, it is some user-created table - { - return FALSE; - } - // push the equivalent table in the destination's stack, retrieved from the lookup table - STACK_CHECK( L2, 0); // L // L2 - STACK_GROW( L2, 3); // up to 3 slots are necessary on error - switch( mode_) - { - default: // shouldn't happen, in theory... - (void) luaL_error( L, "internal error: unknown lookup mode"); - return FALSE; - - case eLM_ToKeeper: - // push a sentinel closure that holds the lookup name as upvalue - lua_pushlstring( L2, fqn, len); // "f.q.n" - lua_pushcclosure( L2, table_lookup_sentinel, 1); // f - break; - - case eLM_LaneBody: - case eLM_FromKeeper: - REGISTRY_GET( L2, LOOKUP_REGKEY); // {} - STACK_MID( L2, 1); - ASSERT_L( lua_istable( L2, -1)); - lua_pushlstring( L2, fqn, len); // {} "f.q.n" - lua_rawget( L2, -2); // {} t - // we accept destination lookup failures in the case of transfering the Lanes body function (this will result in the source table being cloned instead) - // but not when we extract something out of a keeper, as there is nothing to clone! - if( lua_isnil( L2, -1) && mode_ == eLM_LaneBody) - { - lua_pop( L2, 2); // - STACK_MID( L2, 0); - return FALSE; - } - else if( !lua_istable( L2, -1)) - { - char const* from, *to; - lua_getglobal( L, "decoda_name"); // ... t ... decoda_name - from = lua_tostring( L, -1); - lua_pop( L, 1); // ... t ... - lua_getglobal( L2, "decoda_name"); // {} t decoda_name - to = lua_tostring( L2, -1); - lua_pop( L2, 1); // {} t - // when mode_ == eLM_FromKeeper, L is a keeper state and L2 is not, therefore L2 is the state where we want to raise the error - (void) luaL_error( - (mode_ == eLM_FromKeeper) ? L2 : L - , "INTERNAL ERROR IN %s: table '%s' not found in %s destination transfer database." - , from ? from : "main" - , fqn - , to ? to : "main" - ); - return FALSE; - } - lua_remove( L2, -2); // t - break; - } - STACK_END( L2, 1); - return TRUE; -} - - -/* - * Check if we've already copied the same table from 'L', and - * reuse the old copy. This allows table upvalues shared by multiple - * local functions to point to the same table, also in the target. - * - * Always pushes a table to 'L2'. - * - * Returns TRUE if the table was cached (no need to fill it!); FALSE if - * it's a virgin. - */ -static bool_t push_cached_table( lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i) -{ - bool_t not_found_in_cache; // L2 - DECLARE_CONST_UNIQUE_KEY( p, lua_topointer( L, i)); - - ASSERT_L( L2_cache_i != 0); - STACK_GROW( L2, 3); - STACK_CHECK( L2, 0); - - // We don't need to use the from state ('L') in ID since the life span - // is only for the duration of a copy (both states are locked). - // push a light userdata uniquely representing the table - push_unique_key( L2, p); // ... p - - //fprintf( stderr, "<< ID: %s >>\n", lua_tostring( L2, -1)); - - lua_rawget( L2, L2_cache_i); // ... 
{cached|nil} - not_found_in_cache = lua_isnil( L2, -1); - if( not_found_in_cache) - { - lua_pop( L2, 1); // ... - lua_newtable( L2); // ... {} - push_unique_key( L2, p); // ... {} p - lua_pushvalue( L2, -2); // ... {} p {} - lua_rawset( L2, L2_cache_i); // ... {} - } - STACK_END( L2, 1); - ASSERT_L( lua_istable( L2, -1)); - return !not_found_in_cache; -} - - -/* - * Return some name helping to identify an object - */ -static int discover_object_name_recur( lua_State* L, int shortest_, int depth_) -{ - int const what = 1; // o "r" {c} {fqn} ... {?} - int const result = 2; - int const cache = 3; - int const fqn = 4; - // no need to scan this table if the name we will discover is longer than one we already know - if( shortest_ <= depth_ + 1) - { - return shortest_; - } - STACK_GROW( L, 3); - STACK_CHECK( L, 0); - // stack top contains the table to search in - lua_pushvalue( L, -1); // o "r" {c} {fqn} ... {?} {?} - lua_rawget( L, cache); // o "r" {c} {fqn} ... {?} nil/1 - // if table is already visited, we are done - if( !lua_isnil( L, -1)) - { - lua_pop( L, 1); // o "r" {c} {fqn} ... {?} - return shortest_; - } - // examined table is not in the cache, add it now - lua_pop( L, 1); // o "r" {c} {fqn} ... {?} - lua_pushvalue( L, -1); // o "r" {c} {fqn} ... {?} {?} - lua_pushinteger( L, 1); // o "r" {c} {fqn} ... {?} {?} 1 - lua_rawset( L, cache); // o "r" {c} {fqn} ... {?} - // scan table contents - lua_pushnil( L); // o "r" {c} {fqn} ... {?} nil - while( lua_next( L, -2)) // o "r" {c} {fqn} ... {?} k v - { - //char const *const strKey = (lua_type( L, -2) == LUA_TSTRING) ? lua_tostring( L, -2) : NULL; // only for debugging - //lua_Number const numKey = (lua_type( L, -2) == LUA_TNUMBER) ? lua_tonumber( L, -2) : -6666; // only for debugging - STACK_MID( L, 2); - // append key name to fqn stack - ++ depth_; - lua_pushvalue( L, -2); // o "r" {c} {fqn} ... {?} k v k - lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k v - if( lua_rawequal( L, -1, what)) // is it what we are looking for? - { - STACK_MID( L, 2); - // update shortest name - if( depth_ < shortest_) - { - shortest_ = depth_; - luaG_pushFQN( L, fqn, depth_, NULL); // o "r" {c} {fqn} ... {?} k v "fqn" - lua_replace( L, result); // o "r" {c} {fqn} ... {?} k v - } - // no need to search further at this level - lua_pop( L, 2); // o "r" {c} {fqn} ... {?} - STACK_MID( L, 0); - break; - } - switch( lua_type( L, -1)) // o "r" {c} {fqn} ... {?} k v - { - default: // nil, boolean, light userdata, number and string aren't identifiable - break; - - case LUA_TTABLE: // o "r" {c} {fqn} ... {?} k {} - STACK_MID( L, 2); - shortest_ = discover_object_name_recur( L, shortest_, depth_); - // search in the table's metatable too - if( lua_getmetatable( L, -1)) // o "r" {c} {fqn} ... {?} k {} {mt} - { - if( lua_istable( L, -1)) - { - ++ depth_; - lua_pushliteral( L, "__metatable"); // o "r" {c} {fqn} ... {?} k {} {mt} "__metatable" - lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k {} {mt} - shortest_ = discover_object_name_recur( L, shortest_, depth_); - lua_pushnil( L); // o "r" {c} {fqn} ... {?} k {} {mt} nil - lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k {} {mt} - -- depth_; - } - lua_pop( L, 1); // o "r" {c} {fqn} ... {?} k {} - } - STACK_MID( L, 2); - break; - - case LUA_TTHREAD: // o "r" {c} {fqn} ... {?} k T - // TODO: explore the thread's stack frame looking for our culprit? - break; - - case LUA_TUSERDATA: // o "r" {c} {fqn} ... 
{?} k U - STACK_MID( L, 2); - // search in the object's metatable (some modules are built that way) - if( lua_getmetatable( L, -1)) // o "r" {c} {fqn} ... {?} k U {mt} - { - if( lua_istable( L, -1)) - { - ++ depth_; - lua_pushliteral( L, "__metatable"); // o "r" {c} {fqn} ... {?} k U {mt} "__metatable" - lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k U {mt} - shortest_ = discover_object_name_recur( L, shortest_, depth_); - lua_pushnil( L); // o "r" {c} {fqn} ... {?} k U {mt} nil - lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k U {mt} - -- depth_; - } - lua_pop( L, 1); // o "r" {c} {fqn} ... {?} k U - } - STACK_MID( L, 2); - // search in the object's uservalues - { - int uvi = 1; - while( lua_getiuservalue( L, -1, uvi) != LUA_TNONE) // o "r" {c} {fqn} ... {?} k U {u} - { - if( lua_istable( L, -1)) // if it is a table, look inside - { - ++ depth_; - lua_pushliteral( L, "uservalue"); // o "r" {c} {fqn} ... {?} k v {u} "uservalue" - lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k v {u} - shortest_ = discover_object_name_recur( L, shortest_, depth_); - lua_pushnil( L); // o "r" {c} {fqn} ... {?} k v {u} nil - lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k v {u} - -- depth_; - } - lua_pop( L, 1); // o "r" {c} {fqn} ... {?} k U - ++ uvi; - } - // when lua_getiuservalue() returned LUA_TNONE, it pushed a nil. pop it now - lua_pop( L, 1); // o "r" {c} {fqn} ... {?} k U - } - STACK_MID( L, 2); - break; - } - // make ready for next iteration - lua_pop( L, 1); // o "r" {c} {fqn} ... {?} k - // remove name from fqn stack - lua_pushnil( L); // o "r" {c} {fqn} ... {?} k nil - lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k - STACK_MID( L, 1); - -- depth_; - } // o "r" {c} {fqn} ... {?} - STACK_MID( L, 0); - // remove the visited table from the cache, in case a shorter path to the searched object exists - lua_pushvalue( L, -1); // o "r" {c} {fqn} ... {?} {?} - lua_pushnil( L); // o "r" {c} {fqn} ... {?} {?} nil - lua_rawset( L, cache); // o "r" {c} {fqn} ... {?} - STACK_END( L, 0); - return shortest_; -} - - -/* - * "type", "name" = lanes.nameof( o) - */ -int luaG_nameof( lua_State* L) -{ - int what = lua_gettop( L); - if( what > 1) - { - luaL_argerror( L, what, "too many arguments."); - } - - // nil, boolean, light userdata, number and string aren't identifiable - if( lua_type( L, 1) < LUA_TTABLE) - { - lua_pushstring( L, luaL_typename( L, 1)); // o "type" - lua_insert( L, -2); // "type" o - return 2; - } - - STACK_GROW( L, 4); - STACK_CHECK( L, 0); - // this slot will contain the shortest name we found when we are done - lua_pushnil( L); // o nil - // push a cache that will contain all already visited tables - lua_newtable( L); // o nil {c} - // push a table whose contents are strings that, when concatenated, produce unique name - lua_newtable( L); // o nil {c} {fqn} - lua_pushliteral( L, "_G"); // o nil {c} {fqn} "_G" - lua_rawseti( L, -2, 1); // o nil {c} {fqn} - // this is where we start the search - lua_pushglobaltable( L); // o nil {c} {fqn} _G - (void) discover_object_name_recur( L, 6666, 1); - if( lua_isnil( L, 2)) // try again with registry, just in case... 
- { - lua_pop( L, 1); // o nil {c} {fqn} - lua_pushliteral( L, "_R"); // o nil {c} {fqn} "_R" - lua_rawseti( L, -2, 1); // o nil {c} {fqn} - lua_pushvalue( L, LUA_REGISTRYINDEX); // o nil {c} {fqn} _R - (void) discover_object_name_recur( L, 6666, 1); - } - lua_pop( L, 3); // o "result" - STACK_END( L, 1); - lua_pushstring( L, luaL_typename( L, 1)); // o "result" "type" - lua_replace( L, -3); // "type" "result" - return 2; -} - - -/* - * Push a looked-up native/LuaJIT function. - */ -static void lookup_native_func( lua_State* L2, lua_State* L, uint_t i, LookupMode mode_, char const* upName_) -{ - // get the name of the function we want to send - size_t len; - char const* fqn = find_lookup_name( L, i, mode_, upName_, &len); - // push the equivalent function in the destination's stack, retrieved from the lookup table - STACK_CHECK( L2, 0); // L // L2 - STACK_GROW( L2, 3); // up to 3 slots are necessary on error - switch( mode_) - { - default: // shouldn't happen, in theory... - (void) luaL_error( L, "internal error: unknown lookup mode"); - return; - - case eLM_ToKeeper: - // push a sentinel closure that holds the lookup name as upvalue - lua_pushlstring( L2, fqn, len); // "f.q.n" - lua_pushcclosure( L2, func_lookup_sentinel, 1); // f - break; - - case eLM_LaneBody: - case eLM_FromKeeper: - REGISTRY_GET( L2, LOOKUP_REGKEY); // {} - STACK_MID( L2, 1); - ASSERT_L( lua_istable( L2, -1)); - lua_pushlstring( L2, fqn, len); // {} "f.q.n" - lua_rawget( L2, -2); // {} f - // nil means we don't know how to transfer stuff: user should do something - // anything other than function or table should not happen! - if( !lua_isfunction( L2, -1) && !lua_istable( L2, -1)) - { - char const* from, * to; - lua_getglobal( L, "decoda_name"); // ... f ... decoda_name - from = lua_tostring( L, -1); - lua_pop( L, 1); // ... f ... - lua_getglobal( L2, "decoda_name"); // {} f decoda_name - to = lua_tostring( L2, -1); - lua_pop( L2, 1); // {} f - // when mode_ == eLM_FromKeeper, L is a keeper state and L2 is not, therefore L2 is the state where we want to raise the error - (void) luaL_error( - (mode_ == eLM_FromKeeper) ? L2 : L - , "%s%s: function '%s' not found in %s destination transfer database." - , lua_isnil( L2, -1) ? "" : "INTERNAL ERROR IN " - , from ? from : "main" - , fqn - , to ? to : "main" - ); - return; - } - lua_remove( L2, -2); // f - break; - - /* keep it in case I need it someday, who knows... - case eLM_RawFunctions: - { - int n; - char const* upname; - lua_CFunction f = lua_tocfunction( L, i); - // copy upvalues - for( n = 0; (upname = lua_getupvalue( L, i, 1 + n)) != NULL; ++ n) - { - luaG_inter_move( U, L, L2, 1, mode_); // [up[,up ...]] - } - lua_pushcclosure( L2, f, n); // - } - break; - */ - } - STACK_END( L2, 1); -} - - -/* - * Copy a function over, which has not been found in the cache. 
- * L2 has the cache key for this function at the top of the stack -*/ - -#if USE_DEBUG_SPEW() -static char const* lua_type_names[] = -{ - "LUA_TNIL" - , "LUA_TBOOLEAN" - , "LUA_TLIGHTUSERDATA" - , "LUA_TNUMBER" - , "LUA_TSTRING" - , "LUA_TTABLE" - , "LUA_TFUNCTION" - , "LUA_TUSERDATA" - , "LUA_TTHREAD" - , "" // not really a type - , "LUA_TJITCDATA" // LuaJIT specific -}; -static char const* vt_names[] = -{ - "VT_NORMAL" - , "VT_KEY" - , "VT_METATABLE" -}; -#endif // USE_DEBUG_SPEW() - -// Lua 5.4.3 style of dumping (see lstrlib.c) -// we have to do it that way because we can't unbalance the stack between buffer operations -// namely, this means we can't push a function on top of the stack *after* we initialize the buffer! -// luckily, this also works with earlier Lua versions -static int buf_writer( lua_State* L, void const* b, size_t size, void* ud) -{ - luaL_Buffer* B = (luaL_Buffer*) ud; - if( !B->L) - { - luaL_buffinit( L, B); - } - luaL_addlstring( B, (char const*) b, size); - return 0; -} - -static void copy_func( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, LookupMode mode_, char const* upName_) -{ - int n, needToPush; - luaL_Buffer B; - B.L = NULL; - - ASSERT_L( L2_cache_i != 0); // ... {cache} ... p - STACK_GROW( L, 2); - STACK_CHECK( L, 0); - - - // 'lua_dump()' needs the function at top of stack - // if already on top of the stack, no need to push again - needToPush = (i != (uint_t)lua_gettop( L)); - if( needToPush) - { - lua_pushvalue( L, i); // ... f - } - - // - // "value returned is the error code returned by the last call - // to the writer" (and we only return 0) - // not sure this could ever fail but for memory shortage reasons - // last parameter is Lua 5.4-specific (no stripping) - if( lua504_dump( L, buf_writer, &B, 0) != 0) - { - luaL_error( L, "internal error: function dump failed."); - } - - // pushes dumped string on 'L' - luaL_pushresult( &B); // ... f b - - // if not pushed, no need to pop - if( needToPush) - { - lua_remove( L, -2); // ... b - } - - // transfer the bytecode, then the upvalues, to create a similar closure - { - char const* name = NULL; - - #if LOG_FUNC_INFO - // "To get information about a function you push it onto the - // stack and start the what string with the character '>'." - // - { - lua_Debug ar; - lua_pushvalue( L, i); // ... b f - // fills 'name' 'namewhat' and 'linedefined', pops function - lua_getinfo( L, ">nS", &ar); // ... b - name = ar.namewhat; - fprintf( stderr, INDENT_BEGIN "FNAME: %s @ %d\n", i, s_indent, ar.short_src, ar.linedefined); // just gives NULL - } - #endif // LOG_FUNC_INFO - { - size_t sz; - char const* s = lua_tolstring( L, -1, &sz); // ... b - ASSERT_L( s && sz); - STACK_GROW( L2, 2); - // Note: Line numbers seem to be taken precisely from the - // original function. 'name' is not used since the chunk - // is precompiled (it seems...). - // - // TBD: Can we get the function's original name through, as well? - // - if( luaL_loadbuffer( L2, s, sz, name) != 0) // ... {cache} ... p function - { - // chunk is precompiled so only LUA_ERRMEM can happen - // "Otherwise, it pushes an error message" - // - STACK_GROW( L, 1); - luaL_error( L, "%s: %s", upName_, lua_tostring( L2, -1)); - } - // remove the dumped string - lua_pop( L, 1); // ... - // now set the cache as soon as we can. - // this is necessary if one of the function's upvalues references it indirectly - // we need to find it in the cache even if it isn't fully transfered yet - lua_insert( L2, -2); // ... {cache} ... 
function p - lua_pushvalue( L2, -2); // ... {cache} ... function p function - // cache[p] = function - lua_rawset( L2, L2_cache_i); // ... {cache} ... function - } - STACK_MID( L, 0); - - /* push over any upvalues; references to this function will come from - * cache so we don't end up in eternal loop. - * Lua5.2 and Lua5.3: one of the upvalues is _ENV, which we don't want to copy! - * instead, the function shall have LUA_RIDX_GLOBALS taken in the destination state! - */ - { - char const* upname; -#if LUA_VERSION_NUM >= 502 - // Starting with Lua 5.2, each Lua function gets its environment as one of its upvalues (named LUA_ENV, aka "_ENV" by default) - // Generally this is LUA_RIDX_GLOBALS, which we don't want to copy from the source to the destination state... - // -> if we encounter an upvalue equal to the global table in the source, bind it to the destination's global table - lua_pushglobaltable( L); // ... _G -#endif // LUA_VERSION_NUM - for( n = 0; (upname = lua_getupvalue( L, i, 1 + n)) != NULL; ++ n) - { // ... _G up[n] - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "UPNAME[%d]: %s -> " INDENT_END, n, upname)); -#if LUA_VERSION_NUM >= 502 - if( lua_rawequal( L, -1, -2)) // is the upvalue equal to the global table? - { - DEBUGSPEW_CODE( fprintf( stderr, "pushing destination global scope\n")); - lua_pushglobaltable( L2); // ... {cache} ... function - } - else -#endif // LUA_VERSION_NUM - { - DEBUGSPEW_CODE( fprintf( stderr, "copying value\n")); - if( !inter_copy_one( U, L2, L2_cache_i, L, lua_gettop( L), VT_NORMAL, mode_, upname)) // ... {cache} ... function - { - luaL_error( L, "Cannot copy upvalue type '%s'", luaL_typename( L, -1)); - } - } - lua_pop( L, 1); // ... _G - } -#if LUA_VERSION_NUM >= 502 - lua_pop( L, 1); // ... -#endif // LUA_VERSION_NUM - } - // L2: function + 'n' upvalues (>=0) - - STACK_MID( L, 0); - - // Set upvalues (originally set to 'nil' by 'lua_load') - { - int func_index = lua_gettop( L2) - n; - for( ; n > 0; -- n) - { - char const* rc = lua_setupvalue( L2, func_index, n); // ... {cache} ... function - // - // "assigns the value at the top of the stack to the upvalue and returns its name. - // It also pops the value from the stack." - - ASSERT_L( rc); // not having enough slots? - } - // once all upvalues have been set we are left - // with the function at the top of the stack // ... {cache} ... function - } - } - STACK_END( L, 0); -} - -/* - * Check if we've already copied the same function from 'L', and reuse the old - * copy. - * - * Always pushes a function to 'L2'. - */ -static void copy_cached_func( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, LookupMode mode_, char const* upName_) -{ - FuncSubType funcSubType; - /*lua_CFunction cfunc =*/ luaG_tocfunction( L, i, &funcSubType); // NULL for LuaJIT-fast && bytecode functions - if( funcSubType == FST_Bytecode) - { - void* const aspointer = (void*)lua_topointer( L, i); - // TBD: Merge this and same code for tables - ASSERT_L( L2_cache_i != 0); - - STACK_GROW( L2, 2); - - // L2_cache[id_str]= function - // - STACK_CHECK( L2, 0); - - // We don't need to use the from state ('L') in ID since the life span - // is only for the duration of a copy (both states are locked). - // - - // push a light userdata uniquely representing the function - lua_pushlightuserdata( L2, aspointer); // ... {cache} ... p - - //fprintf( stderr, "<< ID: %s >>\n", lua_tostring( L2, -1)); - - lua_pushvalue( L2, -1); // ... {cache} ... p p - lua_rawget( L2, L2_cache_i); // ... {cache} ... 
p function|nil|true - - if( lua_isnil( L2, -1)) // function is unknown - { - lua_pop( L2, 1); // ... {cache} ... p - - // Set to 'true' for the duration of creation; need to find self-references - // via upvalues - // - // pushes a copy of the func, stores a reference in the cache - copy_func( U, L2, L2_cache_i, L, i, mode_, upName_); // ... {cache} ... function - } - else // found function in the cache - { - lua_remove( L2, -2); // ... {cache} ... function - } - STACK_END( L2, 1); - ASSERT_L( lua_isfunction( L2, -1)); - } - else // function is native/LuaJIT: no need to cache - { - lookup_native_func( L2, L, i, mode_, upName_); // ... {cache} ... function - // if the function was in fact a lookup sentinel, we can either get a function or a table here - ASSERT_L( lua_isfunction( L2, -1) || lua_istable( L2, -1)); - } -} - -static bool_t push_cached_metatable( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, enum eLookupMode mode_, char const* upName_) -{ - STACK_CHECK( L, 0); - if( lua_getmetatable( L, i)) // ... mt - { - lua_Integer const mt_id = get_mt_id( U, L, -1); // Unique id for the metatable - - STACK_CHECK( L2, 0); - STACK_GROW( L2, 4); - // do we already know this metatable? - push_registry_subtable( L2, REG_MTID); // _R[REG_MTID] - lua_pushinteger( L2, mt_id); // _R[REG_MTID] id - lua_rawget( L2, -2); // _R[REG_MTID] mt? - - STACK_MID( L2, 2); - - if( lua_isnil( L2, -1)) - { // L2 did not know the metatable - lua_pop( L2, 1); // _R[REG_MTID] - if( inter_copy_one( U, L2, L2_cache_i, L, lua_gettop( L), VT_METATABLE, mode_, upName_)) // _R[REG_MTID] mt - { - STACK_MID( L2, 2); - // mt_id -> metatable - lua_pushinteger( L2, mt_id); // _R[REG_MTID] mt id - lua_pushvalue( L2, -2); // _R[REG_MTID] mt id mt - lua_rawset( L2, -4); // _R[REG_MTID] mt - - // metatable -> mt_id - lua_pushvalue( L2, -1); // _R[REG_MTID] mt mt - lua_pushinteger( L2, mt_id); // _R[REG_MTID] mt mt id - lua_rawset( L2, -4); // _R[REG_MTID] mt - } - else - { - (void) luaL_error( L, "Error copying a metatable"); - } - STACK_MID( L2, 2); - } - lua_remove( L2, -2); // mt - - lua_pop( L, 1); // ... 
- STACK_END( L2, 1); - STACK_MID( L, 0); - return TRUE; - } - STACK_END( L, 0); - return FALSE; -} - -static void inter_copy_keyvaluepair( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, enum e_vt vt, LookupMode mode_, char const* upName_) -{ - uint_t val_i = lua_gettop( L); - uint_t key_i = val_i - 1; - - // Only basic key types are copied over; others ignored - if( inter_copy_one( U, L2, 0 /*key*/, L, key_i, VT_KEY, mode_, upName_)) - { - char* valPath = (char*) upName_; - if( U->verboseErrors) - { - // for debug purposes, let's try to build a useful name - if( lua_type( L, key_i) == LUA_TSTRING) - { - char const* key = lua_tostring( L, key_i); - size_t const keyRawLen = lua_rawlen( L, key_i); - size_t const bufLen = strlen( upName_) + keyRawLen + 2; - valPath = (char*) alloca( bufLen); - sprintf( valPath, "%s.%*s", upName_, (int) keyRawLen, key); - key = NULL; - } -#if defined LUA_LNUM || LUA_VERSION_NUM >= 503 - else if( lua_isinteger( L, key_i)) - { - lua_Integer key = lua_tointeger( L, key_i); - valPath = (char*) alloca( strlen( upName_) + 32 + 3); - sprintf( valPath, "%s[" LUA_INTEGER_FMT "]", upName_, key); - } -#endif // defined LUA_LNUM || LUA_VERSION_NUM >= 503 - else if( lua_type( L, key_i) == LUA_TNUMBER) - { - lua_Number key = lua_tonumber( L, key_i); - valPath = (char*) alloca( strlen( upName_) + 32 + 3); - sprintf( valPath, "%s[" LUA_NUMBER_FMT "]", upName_, key); - } - else if( lua_type( L, key_i) == LUA_TLIGHTUSERDATA) - { - void* key = lua_touserdata( L, key_i); - valPath = (char*) alloca( strlen( upName_) + 16 + 5); - sprintf( valPath, "%s[U:%p]", upName_, key); - } - else if( lua_type( L, key_i) == LUA_TBOOLEAN) - { - int key = lua_toboolean( L, key_i); - valPath = (char*) alloca( strlen( upName_) + 8); - sprintf( valPath, "%s[%s]", upName_, key ? "true" : "false"); - } - } - /* - * Contents of metatables are copied with cache checking; - * important to detect loops. - */ - if( inter_copy_one( U, L2, L2_cache_i, L, val_i, VT_NORMAL, mode_, valPath)) - { - ASSERT_L( lua_istable( L2, -3)); - lua_rawset( L2, -3); // add to table (pops key & val) - } - else - { - luaL_error( L, "Unable to copy %s entry '%s' because of value is of type '%s'", (vt == VT_NORMAL) ? "table" : "metatable", valPath, luaL_typename( L, val_i)); - } - } -} - -/* -* The clone cache is a weak valued table listing all clones, indexed by their userdatapointer -* fnv164 of string "CLONABLES_CACHE_KEY" generated at https://www.pelock.com/products/hash-calculator -*/ -static DECLARE_CONST_UNIQUE_KEY( CLONABLES_CACHE_KEY, 0xD04EE018B3DEE8F5); - -static bool_t copyclone( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t source_i_, LookupMode mode_, char const* upName_) -{ - void* const source = lua_touserdata( L, source_i_); - source_i_ = lua_absindex( L, source_i_); - - STACK_CHECK( L, 0); // L (source) // L2 (destination) - STACK_CHECK( L2, 0); - - // Check if the source was already cloned during this copy - lua_pushlightuserdata( L2, source); // ... source - lua_rawget( L2, L2_cache_i); // ... clone? - if ( !lua_isnil( L2, -1)) - { - STACK_MID( L2, 1); - return TRUE; - } - else - { - lua_pop( L2, 1); // ... - } - STACK_MID( L2, 0); - - // no metatable? -> not clonable - if( !lua_getmetatable( L, source_i_)) // ... mt? - { - STACK_MID( L, 0); - return FALSE; - } - - // no __lanesclone? -> not clonable - lua_getfield( L, -1, "__lanesclone"); // ... mt __lanesclone? - if( lua_isnil( L, -1)) - { - lua_pop( L, 2); // ... 
- STACK_MID( L, 0); - return FALSE; - } - - // we need to copy over the uservalues of the userdata as well - { - int const mt = lua_absindex( L, -2); // ... mt __lanesclone - size_t const userdata_size = (size_t) lua_rawlen( L, source_i_); - void* clone = NULL; - // extract all the uservalues, but don't transfer them yet - int uvi = 0; - while( lua_getiuservalue( L, source_i_, ++ uvi) != LUA_TNONE) {} // ... mt __lanesclone [uv]+ nil - // when lua_getiuservalue() returned LUA_TNONE, it pushed a nil. pop it now - lua_pop( L, 1); // ... mt __lanesclone [uv]+ - -- uvi; - // create the clone userdata with the required number of uservalue slots - clone = lua_newuserdatauv( L2, userdata_size, uvi); // ... u - // copy the metatable in the target state, and give it to the clone we put there - if( inter_copy_one( U, L2, L2_cache_i, L, mt, VT_NORMAL, mode_, upName_)) // ... u mt|sentinel - { - if( eLM_ToKeeper == mode_) // ... u sentinel - { - ASSERT_L( lua_tocfunction( L2, -1) == table_lookup_sentinel); - // we want to create a new closure with a 'clone sentinel' function, where the upvalues are the userdata and the metatable fqn - lua_getupvalue( L2, -1, 1); // ... u sentinel fqn - lua_remove( L2, -2); // ... u fqn - lua_insert( L2, -2); // ... fqn u - lua_pushcclosure( L2, userdata_clone_sentinel, 2); // ... userdata_clone_sentinel - } - else // from keeper or direct // ... u mt - { - ASSERT_L( lua_istable( L2, -1)); - lua_setmetatable( L2, -2); // ... u - } - STACK_MID( L2, 1); - } - else - { - (void) luaL_error( L, "Error copying a metatable"); - } - // first, add the entry in the cache (at this point it is either the actual userdata or the keeper sentinel - lua_pushlightuserdata( L2, source); // ... u source - lua_pushvalue( L2, -2); // ... u source u - lua_rawset( L2, L2_cache_i); // ... u - // make sure we have the userdata now - if( eLM_ToKeeper == mode_) // ... userdata_clone_sentinel - { - lua_getupvalue( L2, -1, 2); // ... userdata_clone_sentinel u - } - // assign uservalues - while( uvi > 0) - { - inter_copy_one( U, L2, L2_cache_i, L, lua_absindex( L, -1), VT_NORMAL, mode_, upName_); // ... u uv - lua_pop( L, 1); // ... mt __lanesclone [uv]* - // this pops the value from the stack - lua_setiuservalue( L2, -2, uvi); // ... u - -- uvi; - } - // when we are done, all uservalues are popped from the source stack, and we want only the single transferred value in the destination - if( eLM_ToKeeper == mode_) // ... userdata_clone_sentinel u - { - lua_pop( L2, 1); // ... userdata_clone_sentinel - } - STACK_MID( L2, 1); - STACK_MID( L, 2); - // call cloning function in source state to perform the actual memory cloning - lua_pushlightuserdata( L, clone); // ... mt __lanesclone clone - lua_pushlightuserdata( L, source); // ... mt __lanesclone clone source - lua_pushinteger( L, userdata_size); // ... mt __lanesclone clone source size - lua_call( L, 3, 0); // ... mt - STACK_MID( L, 1); - } - - STACK_END( L2, 1); - lua_pop( L, 1); // ... 
- STACK_END( L, 0); - return TRUE; -} - -static bool_t inter_copy_userdata( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, enum e_vt vt, LookupMode mode_, char const* upName_) -{ - STACK_CHECK( L, 0); - STACK_CHECK( L2, 0); - if( vt == VT_KEY) - { - return FALSE; - } - - // try clonable userdata first - if( copyclone( U, L2, L2_cache_i, L, i, mode_, upName_)) - { - STACK_MID( L, 0); - STACK_MID( L2, 1); - return TRUE; - } - - STACK_MID( L, 0); - STACK_MID( L2, 0); - - // Allow only deep userdata entities to be copied across - DEBUGSPEW_CODE( fprintf( stderr, "USERDATA\n")); - if( copydeep( U, L2, L2_cache_i, L, i, mode_, upName_)) - { - STACK_MID( L, 0); - STACK_MID( L2, 1); - return TRUE; - } - - STACK_MID( L, 0); - STACK_MID( L2, 0); - - // Not a deep or clonable full userdata - if( U->demoteFullUserdata) // attempt demotion to light userdata - { - void* lud = lua_touserdata( L, i); - lua_pushlightuserdata( L2, lud); - } - else // raise an error - { - (void) luaL_error( L, "can't copy non-deep full userdata across lanes"); - } - - STACK_END( L2, 1); - STACK_END( L, 0); - return TRUE; -} - -static bool_t inter_copy_function( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t source_i_, enum e_vt vt, LookupMode mode_, char const* upName_) -{ - if( vt == VT_KEY) - { - return FALSE; - } - - STACK_CHECK( L, 0); // L (source) // L2 (destination) - STACK_CHECK( L2, 0); - DEBUGSPEW_CODE( fprintf( stderr, "FUNCTION %s\n", upName_)); - - if( lua_tocfunction( L, source_i_) == userdata_clone_sentinel) // we are actually copying a clonable full userdata from a keeper - { - // clone the full userdata again - size_t userdata_size = 0; - void* source; - void* clone; - - // let's see if we already restored this userdata - lua_getupvalue( L, source_i_, 2); // ... u - source = lua_touserdata( L, -1); - lua_pushlightuserdata( L2, source); // ... source - lua_rawget( L2, L2_cache_i); // ... u? - if( !lua_isnil( L2, -1)) - { - lua_pop( L, 1); // ... - STACK_MID( L, 0); - STACK_MID( L2, 1); - return TRUE; - } - lua_pop( L2, 1); // ... - - // this function has 2 upvalues: the fqn of its metatable, and the userdata itself - lookup_table( L2, L, source_i_, mode_, upName_); // ... mt - // originally 'source_i_' slot was the proxy closure, but from now on it indexes the actual userdata we extracted from it - source_i_ = lua_gettop( L); - source = lua_touserdata( L, -1); - // get the number of bytes to allocate for the clone - userdata_size = (size_t) lua_rawlen( L, -1); - { - // extract uservalues (don't transfer them yet) - int uvi = 0; - while( lua_getiuservalue( L, source_i_, ++ uvi) != LUA_TNONE) {} // ... u uv - // when lua_getiuservalue() returned LUA_TNONE, it pushed a nil. pop it now - lua_pop( L, 1); // ... u [uv]* - -- uvi; - STACK_MID( L, uvi + 1); - // create the clone userdata with the required number of uservalue slots - clone = lua_newuserdatauv( L2, userdata_size, uvi); // ... mt u - // add it in the cache - lua_pushlightuserdata( L2, source); // ... mt u source - lua_pushvalue( L2, -2); // ... mt u source u - lua_rawset( L2, L2_cache_i); // ... mt u - // set metatable - lua_pushvalue( L2, -2); // ... mt u mt - lua_setmetatable( L2, -2); // ... mt u - // transfer and assign uservalues - while( uvi > 0) - { - inter_copy_one( U, L2, L2_cache_i, L, lua_absindex( L, -1), vt, mode_, upName_); // ... mt u uv - lua_pop( L, 1); // ... u [uv]* - // this pops the value from the stack - lua_setiuservalue( L2, -2, uvi); // ... 
mt u - -- uvi; - } - // when we are done, all uservalues are popped from the stack, we can pop the source as well - lua_pop( L, 1); // ... - STACK_MID( L, 0); - STACK_MID( L2, 2); // ... mt u - } - // perform the custom cloning part - lua_insert( L2, -2); // ... u mt - // __lanesclone should always exist because we wouldn't be restoring data from a userdata_clone_sentinel closure to begin with - lua_getfield(L2, -1, "__lanesclone"); // ... u mt __lanesclone - lua_remove( L2, -2); // ... u __lanesclone - lua_pushlightuserdata( L2, clone); // ... u __lanesclone clone - lua_pushlightuserdata( L2, source); // ... u __lanesclone clone source - lua_pushinteger( L2, userdata_size); // ... u __lanesclone clone source size - // clone:__lanesclone(dest, source, size) - lua_call( L2, 3, 0); // ... u - } - else // regular function - { - DEBUGSPEW_CODE( fprintf( stderr, "FUNCTION %s\n", upName_)); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - copy_cached_func( U, L2, L2_cache_i, L, source_i_, mode_, upName_); // ... f - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - } - STACK_END( L2, 1); - STACK_END( L, 0); - return TRUE; -} - -static bool_t inter_copy_table( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, enum e_vt vt, LookupMode mode_, char const* upName_) -{ - if( vt == VT_KEY) - { - return FALSE; - } - - STACK_CHECK( L, 0); - STACK_CHECK( L2, 0); - DEBUGSPEW_CODE( fprintf( stderr, "TABLE %s\n", upName_)); - - /* - * First, let's try to see if this table is special (aka is it some table that we registered in our lookup databases during module registration?) - * Note that this table CAN be a module table, but we just didn't register it, in which case we'll send it through the table cloning mechanism - */ - if( lookup_table( L2, L, i, mode_, upName_)) - { - ASSERT_L( lua_istable( L2, -1) || (lua_tocfunction( L2, -1) == table_lookup_sentinel)); // from lookup datables // can also be table_lookup_sentinel if this is a table we know - return TRUE; - } - - /* Check if we've already copied the same table from 'L' (during this transmission), and - * reuse the old copy. This allows table upvalues shared by multiple - * local functions to point to the same table, also in the target. - * Also, this takes care of cyclic tables and multiple references - * to the same subtable. - * - * Note: Even metatables need to go through this test; to detect - * loops such as those in required module tables (getmetatable(lanes).lanes == lanes) - */ - if( push_cached_table( L2, L2_cache_i, L, i)) - { - ASSERT_L( lua_istable( L2, -1)); // from cache - return TRUE; - } - ASSERT_L( lua_istable( L2, -1)); - - STACK_GROW( L, 2); - STACK_GROW( L2, 2); - - lua_pushnil( L); // start iteration - while( lua_next( L, i)) - { - // need a function to prevent overflowing the stack with verboseErrors-induced alloca() - inter_copy_keyvaluepair( U, L2, L2_cache_i, L, vt, mode_, upName_); - lua_pop( L, 1); // pop value (next round) - } - STACK_MID( L, 0); - STACK_MID( L2, 1); - - // Metatables are expected to be immutable, and copied only once. - if( push_cached_metatable( U, L2, L2_cache_i, L, i, mode_, upName_)) // ... t mt? - { - lua_setmetatable( L2, -2); // ... t - } - STACK_END( L2, 1); - STACK_END( L, 0); - return TRUE; -} - -/* -* Copies a value from 'L' state (at index 'i') to 'L2' state. Does not remove -* the original value. -* -* NOTE: Both the states must be solely in the current OS thread's possession. -* -* 'i' is an absolute index (no -1, ...) 
-* -* Returns TRUE if value was pushed, FALSE if its type is non-supported. -*/ -bool_t inter_copy_one( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, enum e_vt vt, LookupMode mode_, char const* upName_) -{ - bool_t ret = TRUE; - int val_type = lua_type( L, i); - static int const pod_mask = (1 << LUA_TNIL) | (1 << LUA_TBOOLEAN) | (1 << LUA_TLIGHTUSERDATA) | (1 << LUA_TNUMBER) | (1 << LUA_TSTRING); - STACK_GROW( L2, 1); - STACK_CHECK( L, 0); // L // L2 - STACK_CHECK( L2, 0); // L // L2 - - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "inter_copy_one()\n" INDENT_END)); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%s %s: " INDENT_END, lua_type_names[val_type], vt_names[vt])); - - // Non-POD can be skipped if its metatable contains { __lanesignore = true } - if( ((1 << val_type) & pod_mask) == 0) - { - if( lua_getmetatable( L, i)) // ... mt - { - lua_getfield( L, -1, "__lanesignore"); // ... mt ignore? - if( lua_isboolean( L, -1) && lua_toboolean( L, -1)) - { - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "__lanesignore -> LUA_TNIL\n" INDENT_END)); - val_type = LUA_TNIL; - } - lua_pop( L, 2); // ... - } - } - STACK_MID( L, 0); - - /* Lets push nil to L2 if the object should be ignored */ - switch( val_type) - { - /* Basic types allowed both as values, and as table keys */ - - case LUA_TBOOLEAN: - { - bool_t v = lua_toboolean( L, i); - DEBUGSPEW_CODE( fprintf( stderr, "%s\n", v ? "true" : "false")); - lua_pushboolean( L2, v); - } - break; - - case LUA_TNUMBER: - /* LNUM patch support (keeping integer accuracy) */ -#if defined LUA_LNUM || LUA_VERSION_NUM >= 503 - if( lua_isinteger( L, i)) - { - lua_Integer v = lua_tointeger( L, i); - DEBUGSPEW_CODE( fprintf( stderr, LUA_INTEGER_FMT "\n", v)); - lua_pushinteger( L2, v); - break; - } - else -#endif // defined LUA_LNUM || LUA_VERSION_NUM >= 503 - { - lua_Number v = lua_tonumber( L, i); - DEBUGSPEW_CODE( fprintf( stderr, LUA_NUMBER_FMT "\n", v)); - lua_pushnumber( L2, v); - } - break; - - case LUA_TSTRING: - { - size_t len; - char const* s = lua_tolstring( L, i, &len); - DEBUGSPEW_CODE( fprintf( stderr, "'%s'\n", s)); - lua_pushlstring( L2, s, len); - } - break; - - case LUA_TLIGHTUSERDATA: - { - void* p = lua_touserdata( L, i); - DEBUGSPEW_CODE( fprintf( stderr, "%p\n", p)); - lua_pushlightuserdata( L2, p); - } - break; - - /* The following types are not allowed as table keys */ - - case LUA_TUSERDATA: - ret = inter_copy_userdata( U, L2, L2_cache_i, L, i, vt, mode_, upName_); - break; - - case LUA_TNIL: - if( vt == VT_KEY) - { - ret = FALSE; - break; - } - lua_pushnil( L2); - break; - - case LUA_TFUNCTION: - ret = inter_copy_function( U, L2, L2_cache_i, L, i, vt, mode_, upName_); - break; - - case LUA_TTABLE: - ret = inter_copy_table( U, L2, L2_cache_i, L, i, vt, mode_, upName_); - break; - - /* The following types cannot be copied */ - - case 10: // LuaJIT CDATA - case LUA_TTHREAD: - ret = FALSE; - break; - } - - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - - STACK_END( L2, ret ? 1 : 0); - STACK_END( L, 0); - return ret; -} - -/* -* Akin to 'lua_xmove' but copies values between _any_ Lua states. -* -* NOTE: Both the states must be solely in the current OS thread's posession. -* -* Note: Parameters are in this order ('L' = from first) to be same as 'lua_xmove'. -*/ -int luaG_inter_copy( Universe* U, lua_State* L, lua_State* L2, uint_t n, LookupMode mode_) -{ - uint_t top_L = lua_gettop( L); // ... {}n - uint_t top_L2 = lua_gettop( L2); // ... 
- uint_t i, j; - char tmpBuf[16]; - char const* pBuf = U->verboseErrors ? tmpBuf : "?"; - bool_t copyok = TRUE; - - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "luaG_inter_copy()\n" INDENT_END)); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - - if( n > top_L) - { - // requesting to copy more than is available? - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "nothing to copy()\n" INDENT_END)); - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - return -1; - } - - STACK_CHECK( L2, 0); - STACK_GROW( L2, n + 1); - - /* - * Make a cache table for the duration of this copy. Collects tables and - * function entries, avoiding the same entries to be passed on as multiple - * copies. ESSENTIAL i.e. for handling upvalue tables in the right manner! - */ - lua_newtable( L2); // ... cache - - STACK_CHECK( L, 0); - for( i = top_L - n + 1, j = 1; i <= top_L; ++ i, ++ j) - { - if( U->verboseErrors) - { - sprintf( tmpBuf, "arg_%d", j); - } - copyok = inter_copy_one( U, L2, top_L2 + 1, L, i, VT_NORMAL, mode_, pBuf); // ... cache {}n - if( !copyok) - { - break; - } - } - STACK_END( L, 0); - - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - - if( copyok) - { - STACK_MID( L2, n + 1); - // Remove the cache table. Persistent caching would cause i.e. multiple - // messages passed in the same table to use the same table also in receiving end. - lua_remove( L2, top_L2 + 1); - return 0; - } - - // error -> pop everything from the target state stack - lua_settop( L2, top_L2); - STACK_END( L2, 0); - return -2; -} - - -int luaG_inter_move( Universe* U, lua_State* L, lua_State* L2, uint_t n, LookupMode mode_) -{ - int ret = luaG_inter_copy( U, L, L2, n, mode_); - lua_pop( L, (int) n); - return ret; -} - -int luaG_inter_copy_package( Universe* U, lua_State* L, lua_State* L2, int package_idx_, LookupMode mode_) -{ - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "luaG_inter_copy_package()\n" INDENT_END)); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - // package - STACK_CHECK( L, 0); - STACK_CHECK( L2, 0); - package_idx_ = lua_absindex( L, package_idx_); - if( lua_type( L, package_idx_) != LUA_TTABLE) - { - lua_pushfstring( L, "expected package as table, got %s", luaL_typename( L, package_idx_)); - STACK_MID( L, 1); - // raise the error when copying from lane to lane, else just leave it on the stack to be raised later - return ( mode_ == eLM_LaneBody) ? lua_error( L) : 1; - } - lua_getglobal( L2, "package"); - if( !lua_isnil( L2, -1)) // package library not loaded: do nothing - { - int i; - // package.loaders is renamed package.searchers in Lua 5.2 - // but don't copy it anyway, as the function names change depending on the slot index! - // users should provide an on_state_create function to setup custom loaders instead - // don't copy package.preload in keeper states (they don't know how to translate functions) - char const* entries[] = { "path", "cpath", (mode_ == eLM_LaneBody) ? "preload" : NULL/*, (LUA_VERSION_NUM == 501) ? 
"loaders" : "searchers"*/, NULL}; - for( i = 0; entries[i]; ++ i) - { - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "package.%s\n" INDENT_END, entries[i])); - lua_getfield( L, package_idx_, entries[i]); - if( lua_isnil( L, -1)) - { - lua_pop( L, 1); - } - else - { - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - luaG_inter_move( U, L, L2, 1, mode_); // moves the entry to L2 - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - lua_setfield( L2, -2, entries[i]); // set package[entries[i]] - } - } - } - else - { - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "'package' not loaded, nothing to do\n" INDENT_END)); - } - lua_pop( L2, 1); - STACK_END( L2, 0); - STACK_END( L, 0); - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - return 0; -} diff --git a/src/tools.cpp b/src/tools.cpp new file mode 100644 index 0000000..6f4a06a --- /dev/null +++ b/src/tools.cpp @@ -0,0 +1,2080 @@ +/* + * TOOLS.C Copyright (c) 2002-10, Asko Kauppi + * + * Lua tools to support Lanes. +*/ + +/* +=============================================================================== + +Copyright (C) 2002-10 Asko Kauppi + 2011-17 benoit Germain + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ +=============================================================================== +*/ + +#include +#include +#include +#include +#include +#if !defined(__APPLE__) +#include +#endif // __APPLE__ + +#include "tools.h" +#include "compat.h" +#include "universe.h" +#include "keeper.h" +#include "lanes.h" +#include "uniquekey.h" + +// functions implemented in deep.c +extern bool_t copydeep( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, LookupMode mode_, char const* upName_); +extern void push_registry_subtable( lua_State* L, UniqueKey key_); + +DEBUGSPEW_CODE( char const* debugspew_indent = "----+----!----+----!----+----!----+----!----+----!----+----!----+----!----+"); + + +// ################################################################################################ + +/* + * Does what the original 'push_registry_subtable' function did, but adds an optional mode argument to it + */ +void push_registry_subtable_mode( lua_State* L, UniqueKey key_, const char* mode_) +{ + STACK_GROW( L, 3); + STACK_CHECK( L, 0); + + REGISTRY_GET( L, key_); // {}|nil + STACK_MID( L, 1); + + if( lua_isnil( L, -1)) + { + lua_pop( L, 1); // + lua_newtable( L); // {} + // _R[key_] = {} + REGISTRY_SET( L, key_, lua_pushvalue( L, -2)); // {} + STACK_MID( L, 1); + + // Set its metatable if requested + if( mode_) + { + lua_newtable( L); // {} mt + lua_pushliteral( L, "__mode"); // {} mt "__mode" + lua_pushstring( L, mode_); // {} mt "__mode" mode + lua_rawset( L, -3); // {} mt + lua_setmetatable( L, -2); // {} + } + } + STACK_END( L, 1); + ASSERT_L( lua_istable( L, -1)); +} + +// ################################################################################################ + +/* + * Push a registry subtable (keyed by unique 'key_') onto the stack. + * If the subtable does not exist, it is created and chained. + */ +void push_registry_subtable( lua_State* L, UniqueKey key_) +{ + push_registry_subtable_mode( L, key_, NULL); +} + +// ################################################################################################ + +/*---=== luaG_dump ===---*/ +#ifdef _DEBUG +void luaG_dump( lua_State* L) +{ + int top = lua_gettop( L); + int i; + + fprintf( stderr, "\n\tDEBUG STACK:\n"); + + if( top == 0) + fprintf( stderr, "\t(none)\n"); + + for( i = 1; i <= top; ++ i) + { + int type = lua_type( L, i); + + fprintf( stderr, "\t[%d]= (%s) ", i, lua_typename( L, type)); + + // Print item contents here... + // + // Note: this requires 'tostring()' to be defined. If it is NOT, + // enable it for more debugging. 
+ // + STACK_CHECK( L, 0); + STACK_GROW( L, 2); + + lua_getglobal( L, "tostring"); + // + // [-1]: tostring function, or nil + + if( !lua_isfunction( L, -1)) + { + fprintf( stderr, "('tostring' not available)"); + } + else + { + lua_pushvalue( L, i); + lua_call( L, 1 /*args*/, 1 /*retvals*/); + + // Don't trust the string contents + // + fprintf( stderr, "%s", lua_tostring( L, -1)); + } + lua_pop( L, 1); + STACK_END( L, 0); + fprintf( stderr, "\n"); + } + fprintf( stderr, "\n"); +} +#endif // _DEBUG + +// ################################################################################################ + +// same as PUC-Lua l_alloc +static void* libc_lua_Alloc(void* ud, void* ptr, size_t osize, size_t nsize) +{ + (void)ud; (void)osize; /* not used */ + if (nsize == 0) + { + free(ptr); + return NULL; + } + else + { + return realloc(ptr, nsize); + } +} + +static void* protected_lua_Alloc( void *ud, void *ptr, size_t osize, size_t nsize) +{ + void* p; + ProtectedAllocator* s = (ProtectedAllocator*) ud; + MUTEX_LOCK( &s->lock); + p = s->definition.allocF( s->definition.allocUD, ptr, osize, nsize); + MUTEX_UNLOCK( &s->lock); + return p; +} + +static int luaG_provide_protected_allocator( lua_State* L) +{ + Universe* U = universe_get( L); + AllocatorDefinition* const def = (AllocatorDefinition*) lua_newuserdatauv( L, sizeof(AllocatorDefinition), 0); + def->allocF = protected_lua_Alloc; + def->allocUD = &U->protected_allocator; + return 1; +} + +// called once at the creation of the universe (therefore L is the master Lua state everything originates from) +// Do I need to disable this when compiling for LuaJIT to prevent issues? +void initialize_allocator_function( Universe* U, lua_State* L) +{ + STACK_CHECK( L, 0); + lua_getfield( L, -1, "allocator"); // settings allocator|nil|"protected" + if( !lua_isnil( L, -1)) + { + // store C function pointer in an internal variable + U->provide_allocator = lua_tocfunction( L, -1); // settings allocator + if( U->provide_allocator != NULL) + { + // make sure the function doesn't have upvalues + char const* upname = lua_getupvalue( L, -1, 1); // settings allocator upval? 
+ if( upname != NULL) // should be "" for C functions with upvalues if any + { + (void) luaL_error( L, "config.allocator() shouldn't have upvalues"); + } + // remove this C function from the config table so that it doesn't cause problems + // when we transfer the config table in newly created Lua states + lua_pushnil( L); // settings allocator nil + lua_setfield( L, -3, "allocator"); // settings allocator + } + else if( lua_type( L, -1) == LUA_TSTRING) // should be "protected" + { + // initialize all we need for the protected allocator + MUTEX_INIT( &U->protected_allocator.lock); // the mutex + // and the original allocator to call from inside protection by the mutex + U->protected_allocator.definition.allocF = lua_getallocf( L, &U->protected_allocator.definition.allocUD); + // before a state is created, this function will be called to obtain the allocator + U->provide_allocator = luaG_provide_protected_allocator; + + lua_setallocf( L, protected_lua_Alloc, &U->protected_allocator); + } + } + else + { + // initialize the mutex even if we are not going to use it, because cleanup_allocator_function will deinitialize it + MUTEX_INIT( &U->protected_allocator.lock); + // just grab whatever allocator was provided to lua_newstate + U->protected_allocator.definition.allocF = lua_getallocf( L, &U->protected_allocator.definition.allocUD); + } + lua_pop( L, 1); // settings + STACK_MID(L, 0); + + lua_getfield( L, -1, "internal_allocator"); // settings "libc"|"allocator" + { + char const* allocator = lua_tostring( L, -1); + if (strcmp(allocator, "libc") == 0) + { + U->internal_allocator.allocF = libc_lua_Alloc; + U->internal_allocator.allocUD = NULL; + } + else + { + U->internal_allocator = U->protected_allocator.definition; + } + } + lua_pop( L, 1); // settings + STACK_END( L, 0); +} + +void cleanup_allocator_function( Universe* U, lua_State* L) +{ + // remove the protected allocator, if any + if( U->protected_allocator.definition.allocF != NULL) + { + // install the non-protected allocator + lua_setallocf( L, U->protected_allocator.definition.allocF, U->protected_allocator.definition.allocUD); + // release the mutex + MUTEX_FREE( &U->protected_allocator.lock); + } +} + +// ################################################################################################ + +static int dummy_writer( lua_State* L, void const* p, size_t sz, void* ud) +{ + (void)L; (void)p; (void)sz; (void) ud; // unused + return 666; +} + + +/* + * differentiation between C, bytecode and JIT-fast functions + * + * + * +----------+------------+----------+ + * | bytecode | C function | JIT-fast | + * +-----------------+----------+------------+----------+ + * | lua_topointer | | | | + * +-----------------+----------+------------+----------+ + * | lua_tocfunction | NULL | | NULL | + * +-----------------+----------+------------+----------+ + * | lua_dump | 666 | 1 | 1 | + * +-----------------+----------+------------+----------+ + */ + +typedef enum +{ + FST_Bytecode, + FST_Native, + FST_FastJIT +} FuncSubType; + +FuncSubType luaG_getfuncsubtype( lua_State *L, int _i) +{ + if( lua_tocfunction( L, _i)) + { + return FST_Native; + } + { + int mustpush = 0, dumpres; + if( lua_absindex( L, _i) != lua_gettop( L)) + { + lua_pushvalue( L, _i); + mustpush = 1; + } + // the provided writer fails with code 666 + // therefore, anytime we get 666, this means that lua_dump() attempted a dump + // all other cases mean this is either a C or LuaJIT-fast function + dumpres = lua504_dump( L, dummy_writer, NULL, 0); + lua_pop( L, mustpush); + if( 
dumpres == 666) + { + return FST_Bytecode; + } + } + return FST_FastJIT; +} + +static lua_CFunction luaG_tocfunction( lua_State *L, int _i, FuncSubType *_out) +{ + lua_CFunction p = lua_tocfunction( L, _i); + *_out = luaG_getfuncsubtype( L, _i); + return p; +} + +// crc64/we of string "LOOKUPCACHE_REGKEY" generated at http://www.nitrxgen.net/hashgen/ +static DECLARE_CONST_UNIQUE_KEY( LOOKUPCACHE_REGKEY, 0x837a68dfc6fcb716); + +// inspired from tconcat() in ltablib.c +static char const* luaG_pushFQN( lua_State* L, int t, int last, size_t* length) +{ + int i = 1; + luaL_Buffer b; + STACK_CHECK( L, 0); + // Lua 5.4 pushes &b as light userdata on the stack. be aware of it... + luaL_buffinit( L, &b); // ... {} ... &b? + for( ; i < last; ++ i) + { + lua_rawgeti( L, t, i); + luaL_addvalue( &b); + luaL_addlstring(&b, "/", 1); + } + if( i == last) // add last value (if interval was not empty) + { + lua_rawgeti( L, t, i); + luaL_addvalue( &b); + } + // &b is popped at that point (-> replaced by the result) + luaL_pushresult( &b); // ... {} ... "" + STACK_END( L, 1); + return lua_tolstring( L, -1, length); +} + +/* + * receives 2 arguments: a name k and an object o + * add two entries ["fully.qualified.name"] = o + * and [o] = "fully.qualified.name" + * where is either a table or a function + * if we already had an entry of type [o] = ..., replace the name if the new one is shorter + * pops the processed object from the stack + */ +static void update_lookup_entry( DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State* L, int _ctx_base, int _depth) +{ + // slot 1 in the stack contains the table that receives everything we found + int const dest = _ctx_base; + // slot 2 contains a table that, when concatenated, produces the fully qualified name of scanned elements in the table provided at slot _i + int const fqn = _ctx_base + 1; + + size_t prevNameLength, newNameLength; + char const* prevName; + DEBUGSPEW_CODE( char const *newName); + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "update_lookup_entry()\n" INDENT_END)); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + + STACK_CHECK( L, 0); + // first, raise an error if the function is already known + lua_pushvalue( L, -1); // ... {bfc} k o o + lua_rawget( L, dest); // ... {bfc} k o name? + prevName = lua_tolstring( L, -1, &prevNameLength); // NULL if we got nil (first encounter of this object) + // push name in fqn stack (note that concatenation will crash if name is a not string or a number) + lua_pushvalue( L, -3); // ... {bfc} k o name? k + ASSERT_L( lua_type( L, -1) == LUA_TNUMBER || lua_type( L, -1) == LUA_TSTRING); + ++ _depth; + lua_rawseti( L, fqn, _depth); // ... {bfc} k o name? + // generate name + DEBUGSPEW_CODE( newName =) luaG_pushFQN( L, fqn, _depth, &newNameLength); // ... {bfc} k o name? "f.q.n" + // Lua 5.2 introduced a hash randomizer seed which causes table iteration to yield a different key order + // on different VMs even when the tables are populated the exact same way. + // When Lua is built with compatibility options (such as LUA_COMPAT_ALL), + // this causes several base libraries to register functions under multiple names. + // This, with the randomizer, can cause the first generated name of an object to be different on different VMs, + // which breaks function transfer. + // Also, nothing prevents any external module from exposing a given object under several names, so... 
+ // Therefore, when we encounter an object for which a name was previously registered, we need to select the names + // based on some sorting order so that we end up with the same name in all databases whatever order the table walk yielded + if( prevName != NULL && (prevNameLength < newNameLength || lua_lessthan( L, -2, -1))) + { + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%s '%s' remained named '%s'\n" INDENT_END, lua_typename( L, lua_type( L, -3)), newName, prevName)); + // the previous name is 'smaller' than the one we just generated: keep it! + lua_pop( L, 3); // ... {bfc} k + } + else + { + // the name we generated is either the first one, or a better fit for our purposes + if( prevName) + { + // clear the previous name for the database to avoid clutter + lua_insert( L, -2); // ... {bfc} k o "f.q.n" prevName + // t[prevName] = nil + lua_pushnil( L); // ... {bfc} k o "f.q.n" prevName nil + lua_rawset( L, dest); // ... {bfc} k o "f.q.n" + } + else + { + lua_remove( L, -2); // ... {bfc} k o "f.q.n" + } + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%s '%s'\n" INDENT_END, lua_typename( L, lua_type( L, -2)), newName)); + // prepare the stack for database feed + lua_pushvalue( L, -1); // ... {bfc} k o "f.q.n" "f.q.n" + lua_pushvalue( L, -3); // ... {bfc} k o "f.q.n" "f.q.n" o + ASSERT_L( lua_rawequal( L, -1, -4)); + ASSERT_L( lua_rawequal( L, -2, -3)); + // t["f.q.n"] = o + lua_rawset( L, dest); // ... {bfc} k o "f.q.n" + // t[o] = "f.q.n" + lua_rawset( L, dest); // ... {bfc} k + // remove table name from fqn stack + lua_pushnil( L); // ... {bfc} k nil + lua_rawseti( L, fqn, _depth); // ... {bfc} k + } + -- _depth; + STACK_END( L, -1); + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); +} + +static void populate_func_lookup_table_recur( DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State* L, int _ctx_base, int _i, int _depth) +{ + lua_Integer visit_count; + // slot 2 contains a table that, when concatenated, produces the fully qualified name of scanned elements in the table provided at slot _i + int const fqn = _ctx_base + 1; + // slot 3 contains a cache that stores all already visited tables to avoid infinite recursion loops + int const cache = _ctx_base + 2; + // we need to remember subtables to process them after functions encountered at the current depth (breadth-first search) + int const breadth_first_cache = lua_gettop( L) + 1; + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "populate_func_lookup_table_recur()\n" INDENT_END)); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + + STACK_GROW( L, 6); + // slot _i contains a table where we search for functions (or a full userdata with a metatable) + STACK_CHECK( L, 0); // ... {_i} + + // if object is a userdata, replace it by its metatable + if( lua_type( L, _i) == LUA_TUSERDATA) + { + lua_getmetatable( L, _i); // ... {_i} mt + lua_replace( L, _i); // ... {_i} + } + + // if table is already visited, we are done + lua_pushvalue( L, _i); // ... {_i} {} + lua_rawget( L, cache); // ... {_i} nil|n + visit_count = lua_tointeger( L, -1); // 0 if nil, else n + lua_pop( L, 1); // ... {_i} + STACK_MID( L, 0); + if( visit_count > 0) + { + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "already visited\n" INDENT_END)); + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + return; + } + + // remember we visited this table (1-visit count) + lua_pushvalue( L, _i); // ... {_i} {} + lua_pushinteger( L, visit_count + 1); // ... {_i} {} 1 + lua_rawset( L, cache); // ... 
{_i} + STACK_MID( L, 0); + + // this table is at breadth_first_cache index + lua_newtable( L); // ... {_i} {bfc} + ASSERT_L( lua_gettop( L) == breadth_first_cache); + // iterate over all entries in the processed table + lua_pushnil( L); // ... {_i} {bfc} nil + while( lua_next( L, _i) != 0) // ... {_i} {bfc} k v + { + // just for debug, not actually needed + //char const* key = (lua_type( L, -2) == LUA_TSTRING) ? lua_tostring( L, -2) : "not a string"; + // subtable: process it recursively + if( lua_istable( L, -1)) // ... {_i} {bfc} k {} + { + // increment visit count to make sure we will actually scan it at this recursive level + lua_pushvalue( L, -1); // ... {_i} {bfc} k {} {} + lua_pushvalue( L, -1); // ... {_i} {bfc} k {} {} {} + lua_rawget( L, cache); // ... {_i} {bfc} k {} {} n? + visit_count = lua_tointeger( L, -1) + 1; // 1 if we got nil, else n+1 + lua_pop( L, 1); // ... {_i} {bfc} k {} {} + lua_pushinteger( L, visit_count); // ... {_i} {bfc} k {} {} n + lua_rawset( L, cache); // ... {_i} {bfc} k {} + // store the table in the breadth-first cache + lua_pushvalue( L, -2); // ... {_i} {bfc} k {} k + lua_pushvalue( L, -2); // ... {_i} {bfc} k {} k {} + lua_rawset( L, breadth_first_cache); // ... {_i} {bfc} k {} + // generate a name, and if we already had one name, keep whichever is the shorter + update_lookup_entry( DEBUGSPEW_PARAM_COMMA( U) L, _ctx_base, _depth); // ... {_i} {bfc} k + } + else if( lua_isfunction( L, -1) && (luaG_getfuncsubtype( L, -1) != FST_Bytecode)) // ... {_i} {bfc} k func + { + // generate a name, and if we already had one name, keep whichever is the shorter + update_lookup_entry( DEBUGSPEW_PARAM_COMMA( U) L, _ctx_base, _depth); // ... {_i} {bfc} k + } + else + { + lua_pop( L, 1); // ... {_i} {bfc} k + } + STACK_MID( L, 2); + } + // now process the tables we encountered at that depth + ++ _depth; + lua_pushnil( L); // ... {_i} {bfc} nil + while( lua_next( L, breadth_first_cache) != 0) // ... {_i} {bfc} k {} + { + DEBUGSPEW_CODE( char const* key = (lua_type( L, -2) == LUA_TSTRING) ? lua_tostring( L, -2) : "not a string"); + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "table '%s'\n" INDENT_END, key)); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + // un-visit this table in case we do need to process it + lua_pushvalue( L, -1); // ... {_i} {bfc} k {} {} + lua_rawget( L, cache); // ... {_i} {bfc} k {} n + ASSERT_L( lua_type( L, -1) == LUA_TNUMBER); + visit_count = lua_tointeger( L, -1) - 1; + lua_pop( L, 1); // ... {_i} {bfc} k {} + lua_pushvalue( L, -1); // ... {_i} {bfc} k {} {} + if( visit_count > 0) + { + lua_pushinteger( L, visit_count); // ... {_i} {bfc} k {} {} n + } + else + { + lua_pushnil( L); // ... {_i} {bfc} k {} {} nil + } + lua_rawset( L, cache); // ... {_i} {bfc} k {} + // push table name in fqn stack (note that concatenation will crash if name is a not string!) + lua_pushvalue( L, -2); // ... {_i} {bfc} k {} k + lua_rawseti( L, fqn, _depth); // ... {_i} {bfc} k {} + populate_func_lookup_table_recur( DEBUGSPEW_PARAM_COMMA( U) L, _ctx_base, lua_gettop( L), _depth); + lua_pop( L, 1); // ... {_i} {bfc} k + STACK_MID( L, 2); + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + } + // remove table name from fqn stack + lua_pushnil( L); // ... {_i} {bfc} nil + lua_rawseti( L, fqn, _depth); // ... {_i} {bfc} + -- _depth; + // we are done with our cache + lua_pop( L, 1); // ... {_i} + STACK_END( L, 0); + // we are done // ... 
{_i} {bfc} + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); +} + +/* + * create a "fully.qualified.name" <-> function equivalence database + */ +void populate_func_lookup_table( lua_State* L, int _i, char const* name_) +{ + int const ctx_base = lua_gettop( L) + 1; + int const in_base = lua_absindex( L, _i); + int start_depth = 0; + DEBUGSPEW_CODE( Universe* U = universe_get( L)); + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: populate_func_lookup_table('%s')\n" INDENT_END, L, name_ ? name_ : "NULL")); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + STACK_GROW( L, 3); + STACK_CHECK( L, 0); + REGISTRY_GET( L, LOOKUP_REGKEY); // {} + STACK_MID( L, 1); + ASSERT_L( lua_istable( L, -1)); + if( lua_type( L, in_base) == LUA_TFUNCTION) // for example when a module is a simple function + { + name_ = name_ ? name_ : "NULL"; + lua_pushvalue( L, in_base); // {} f + lua_pushstring( L, name_); // {} f _name + lua_rawset( L, -3); // {} + lua_pushstring( L, name_); // {} _name + lua_pushvalue( L, in_base); // {} _name f + lua_rawset( L, -3); // {} + lua_pop( L, 1); // + } + else if( lua_type( L, in_base) == LUA_TTABLE) + { + lua_newtable( L); // {} {fqn} + if( name_) + { + STACK_MID( L, 2); + lua_pushstring( L, name_); // {} {fqn} "name" + // generate a name, and if we already had one name, keep whichever is the shorter + lua_pushvalue( L, in_base); // {} {fqn} "name" t + update_lookup_entry( DEBUGSPEW_PARAM_COMMA( U) L, ctx_base, start_depth); // {} {fqn} "name" + // don't forget to store the name at the bottom of the fqn stack + ++ start_depth; + lua_rawseti( L, -2, start_depth); // {} {fqn} + STACK_MID( L, 2); + } + // retrieve the cache, create it if we haven't done it yet + REGISTRY_GET( L, LOOKUPCACHE_REGKEY); // {} {fqn} {cache}? + if( lua_isnil( L, -1)) + { + lua_pop( L, 1); // {} {fqn} + lua_newtable( L); // {} {fqn} {cache} + REGISTRY_SET( L, LOOKUPCACHE_REGKEY, lua_pushvalue( L, -2)); + STACK_MID( L, 3); + } + // process everything we find in that table, filling in lookup data for all functions and tables we see there + populate_func_lookup_table_recur( DEBUGSPEW_PARAM_COMMA( U) L, ctx_base, in_base, start_depth); + lua_pop( L, 3); + } + else + { + lua_pop( L, 1); // + (void) luaL_error( L, "unsupported module type %s", lua_typename( L, lua_type( L, in_base))); + } + STACK_END( L, 0); + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); +} + +/*---=== Inter-state copying ===---*/ + +// crc64/we of string "REG_MTID" generated at http://www.nitrxgen.net/hashgen/ +static DECLARE_CONST_UNIQUE_KEY( REG_MTID, 0x2e68f9b4751584dc); + +/* +* Get a unique ID for metatable at [i]. +*/ +static lua_Integer get_mt_id( Universe* U, lua_State* L, int i) +{ + lua_Integer id; + + i = lua_absindex( L, i); + + STACK_GROW( L, 3); + + STACK_CHECK( L, 0); + push_registry_subtable( L, REG_MTID); // ... _R[REG_MTID] + lua_pushvalue( L, i); // ... _R[REG_MTID] {mt} + lua_rawget( L, -2); // ... _R[REG_MTID] mtk? + + id = lua_tointeger( L, -1); // 0 for nil + lua_pop( L, 1); // ... _R[REG_MTID] + STACK_MID( L, 1); + + if( id == 0) + { + MUTEX_LOCK( &U->mtid_lock); + id = ++ U->last_mt_id; + MUTEX_UNLOCK( &U->mtid_lock); + + /* Create two-way references: id_uint <-> table + */ + lua_pushvalue( L, i); // ... _R[REG_MTID] {mt} + lua_pushinteger( L, id); // ... _R[REG_MTID] {mt} id + lua_rawset( L, -3); // ... _R[REG_MTID] + + lua_pushinteger( L, id); // ... _R[REG_MTID] id + lua_pushvalue( L, i); // ... _R[REG_MTID] id {mt} + lua_rawset( L, -3); // ... _R[REG_MTID] + } + lua_pop( L, 1); // ... 
+ + STACK_END( L, 0); + + return id; +} + +// function sentinel used to transfer native functions from/to keeper states +static int func_lookup_sentinel( lua_State* L) +{ + return luaL_error( L, "function lookup sentinel for %s, should never be called", lua_tostring( L, lua_upvalueindex( 1))); +} + + +// function sentinel used to transfer native table from/to keeper states +static int table_lookup_sentinel( lua_State* L) +{ + return luaL_error( L, "table lookup sentinel for %s, should never be called", lua_tostring( L, lua_upvalueindex( 1))); +} + +// function sentinel used to transfer cloned full userdata from/to keeper states +static int userdata_clone_sentinel( lua_State* L) +{ + return luaL_error( L, "userdata clone sentinel for %s, should never be called", lua_tostring( L, lua_upvalueindex( 1))); +} + +/* + * retrieve the name of a function/table in the lookup database + */ +static char const* find_lookup_name( lua_State* L, uint_t i, LookupMode mode_, char const* upName_, size_t* len_) +{ + DEBUGSPEW_CODE( Universe* const U = universe_get( L)); + char const* fqn; + ASSERT_L( lua_isfunction( L, i) || lua_istable( L, i)); // ... v ... + STACK_CHECK( L, 0); + STACK_GROW( L, 3); // up to 3 slots are necessary on error + if( mode_ == eLM_FromKeeper) + { + lua_CFunction f = lua_tocfunction( L, i); // should *always* be func_lookup_sentinel or table_lookup_sentinel! + if( f == func_lookup_sentinel || f == table_lookup_sentinel || f == userdata_clone_sentinel) + { + lua_getupvalue( L, i, 1); // ... v ... "f.q.n" + } + else + { + // if this is not a sentinel, this is some user-created table we wanted to lookup + ASSERT_L( NULL == f && lua_istable( L, i)); + // push anything that will convert to NULL string + lua_pushnil( L); // ... v ... nil + } + } + else + { + // fetch the name from the source state's lookup table + REGISTRY_GET( L, LOOKUP_REGKEY); // ... v ... {} + STACK_MID( L, 1); + ASSERT_L( lua_istable( L, -1)); + lua_pushvalue( L, i); // ... v ... {} v + lua_rawget( L, -2); // ... v ... {} "f.q.n" + } + fqn = lua_tolstring( L, -1, len_); + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "function [C] %s \n" INDENT_END, fqn)); + // popping doesn't invalidate the pointer since this is an interned string gotten from the lookup database + lua_pop( L, (mode_ == eLM_FromKeeper) ? 1 : 2); // ... v ... + STACK_MID( L, 0); + if( NULL == fqn && !lua_istable( L, i)) // raise an error if we try to send an unknown function (but not for tables) + { + char const *from, *typewhat, *what, *gotchaA, *gotchaB; + // try to discover the name of the function we want to send + lua_getglobal( L, "decoda_name"); // ... v ... decoda_name + from = lua_tostring( L, -1); + lua_pushcfunction( L, luaG_nameof); // ... v ... decoda_name luaG_nameof + lua_pushvalue( L, i); // ... v ... decoda_name luaG_nameof t + lua_call( L, 1, 2); // ... v ... decoda_name "type" "name"|nil + typewhat = (lua_type( L, -2) == LUA_TSTRING) ? lua_tostring( L, -2) : luaL_typename( L, -2); + // second return value can be nil if the table was not found + // probable reason: the function was removed from the source Lua state before Lanes was required. + if( lua_isnil( L, -1)) + { + gotchaA = " referenced by"; + gotchaB = "\n(did you remove it from the source Lua state before requiring Lanes?)"; + what = upName_; + } + else + { + gotchaA = ""; + gotchaB = ""; + what = (lua_type( L, -1) == LUA_TSTRING) ? 
lua_tostring( L, -1) : luaL_typename( L, -1); + } + (void) luaL_error( L, "%s%s '%s' not found in %s origin transfer database.%s", typewhat, gotchaA, what, from ? from : "main", gotchaB); + *len_ = 0; + return NULL; + } + STACK_END( L, 0); + return fqn; +} + + +/* + * Push a looked-up table, or nothing if we found nothing + */ +static bool_t lookup_table( lua_State* L2, lua_State* L, uint_t i, LookupMode mode_, char const* upName_) +{ + // get the name of the table we want to send + size_t len; + char const* fqn = find_lookup_name( L, i, mode_, upName_, &len); + if( NULL == fqn) // name not found, it is some user-created table + { + return FALSE; + } + // push the equivalent table in the destination's stack, retrieved from the lookup table + STACK_CHECK( L2, 0); // L // L2 + STACK_GROW( L2, 3); // up to 3 slots are necessary on error + switch( mode_) + { + default: // shouldn't happen, in theory... + (void) luaL_error( L, "internal error: unknown lookup mode"); + return FALSE; + + case eLM_ToKeeper: + // push a sentinel closure that holds the lookup name as upvalue + lua_pushlstring( L2, fqn, len); // "f.q.n" + lua_pushcclosure( L2, table_lookup_sentinel, 1); // f + break; + + case eLM_LaneBody: + case eLM_FromKeeper: + REGISTRY_GET( L2, LOOKUP_REGKEY); // {} + STACK_MID( L2, 1); + ASSERT_L( lua_istable( L2, -1)); + lua_pushlstring( L2, fqn, len); // {} "f.q.n" + lua_rawget( L2, -2); // {} t + // we accept destination lookup failures in the case of transfering the Lanes body function (this will result in the source table being cloned instead) + // but not when we extract something out of a keeper, as there is nothing to clone! + if( lua_isnil( L2, -1) && mode_ == eLM_LaneBody) + { + lua_pop( L2, 2); // + STACK_MID( L2, 0); + return FALSE; + } + else if( !lua_istable( L2, -1)) + { + char const* from, *to; + lua_getglobal( L, "decoda_name"); // ... t ... decoda_name + from = lua_tostring( L, -1); + lua_pop( L, 1); // ... t ... + lua_getglobal( L2, "decoda_name"); // {} t decoda_name + to = lua_tostring( L2, -1); + lua_pop( L2, 1); // {} t + // when mode_ == eLM_FromKeeper, L is a keeper state and L2 is not, therefore L2 is the state where we want to raise the error + (void) luaL_error( + (mode_ == eLM_FromKeeper) ? L2 : L + , "INTERNAL ERROR IN %s: table '%s' not found in %s destination transfer database." + , from ? from : "main" + , fqn + , to ? to : "main" + ); + return FALSE; + } + lua_remove( L2, -2); // t + break; + } + STACK_END( L2, 1); + return TRUE; +} + + +/* + * Check if we've already copied the same table from 'L', and + * reuse the old copy. This allows table upvalues shared by multiple + * local functions to point to the same table, also in the target. + * + * Always pushes a table to 'L2'. + * + * Returns TRUE if the table was cached (no need to fill it!); FALSE if + * it's a virgin. + */ +static bool_t push_cached_table( lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i) +{ + bool_t not_found_in_cache; // L2 + DECLARE_CONST_UNIQUE_KEY( p, lua_topointer( L, i)); + + ASSERT_L( L2_cache_i != 0); + STACK_GROW( L2, 3); + STACK_CHECK( L2, 0); + + // We don't need to use the from state ('L') in ID since the life span + // is only for the duration of a copy (both states are locked). + // push a light userdata uniquely representing the table + push_unique_key( L2, p); // ... p + + //fprintf( stderr, "<< ID: %s >>\n", lua_tostring( L2, -1)); + + lua_rawget( L2, L2_cache_i); // ... 
{cached|nil} + not_found_in_cache = lua_isnil( L2, -1); + if( not_found_in_cache) + { + lua_pop( L2, 1); // ... + lua_newtable( L2); // ... {} + push_unique_key( L2, p); // ... {} p + lua_pushvalue( L2, -2); // ... {} p {} + lua_rawset( L2, L2_cache_i); // ... {} + } + STACK_END( L2, 1); + ASSERT_L( lua_istable( L2, -1)); + return !not_found_in_cache; +} + + +/* + * Return some name helping to identify an object + */ +static int discover_object_name_recur( lua_State* L, int shortest_, int depth_) +{ + int const what = 1; // o "r" {c} {fqn} ... {?} + int const result = 2; + int const cache = 3; + int const fqn = 4; + // no need to scan this table if the name we will discover is longer than one we already know + if( shortest_ <= depth_ + 1) + { + return shortest_; + } + STACK_GROW( L, 3); + STACK_CHECK( L, 0); + // stack top contains the table to search in + lua_pushvalue( L, -1); // o "r" {c} {fqn} ... {?} {?} + lua_rawget( L, cache); // o "r" {c} {fqn} ... {?} nil/1 + // if table is already visited, we are done + if( !lua_isnil( L, -1)) + { + lua_pop( L, 1); // o "r" {c} {fqn} ... {?} + return shortest_; + } + // examined table is not in the cache, add it now + lua_pop( L, 1); // o "r" {c} {fqn} ... {?} + lua_pushvalue( L, -1); // o "r" {c} {fqn} ... {?} {?} + lua_pushinteger( L, 1); // o "r" {c} {fqn} ... {?} {?} 1 + lua_rawset( L, cache); // o "r" {c} {fqn} ... {?} + // scan table contents + lua_pushnil( L); // o "r" {c} {fqn} ... {?} nil + while( lua_next( L, -2)) // o "r" {c} {fqn} ... {?} k v + { + //char const *const strKey = (lua_type( L, -2) == LUA_TSTRING) ? lua_tostring( L, -2) : NULL; // only for debugging + //lua_Number const numKey = (lua_type( L, -2) == LUA_TNUMBER) ? lua_tonumber( L, -2) : -6666; // only for debugging + STACK_MID( L, 2); + // append key name to fqn stack + ++ depth_; + lua_pushvalue( L, -2); // o "r" {c} {fqn} ... {?} k v k + lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k v + if( lua_rawequal( L, -1, what)) // is it what we are looking for? + { + STACK_MID( L, 2); + // update shortest name + if( depth_ < shortest_) + { + shortest_ = depth_; + luaG_pushFQN( L, fqn, depth_, NULL); // o "r" {c} {fqn} ... {?} k v "fqn" + lua_replace( L, result); // o "r" {c} {fqn} ... {?} k v + } + // no need to search further at this level + lua_pop( L, 2); // o "r" {c} {fqn} ... {?} + STACK_MID( L, 0); + break; + } + switch( lua_type( L, -1)) // o "r" {c} {fqn} ... {?} k v + { + default: // nil, boolean, light userdata, number and string aren't identifiable + break; + + case LUA_TTABLE: // o "r" {c} {fqn} ... {?} k {} + STACK_MID( L, 2); + shortest_ = discover_object_name_recur( L, shortest_, depth_); + // search in the table's metatable too + if( lua_getmetatable( L, -1)) // o "r" {c} {fqn} ... {?} k {} {mt} + { + if( lua_istable( L, -1)) + { + ++ depth_; + lua_pushliteral( L, "__metatable"); // o "r" {c} {fqn} ... {?} k {} {mt} "__metatable" + lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k {} {mt} + shortest_ = discover_object_name_recur( L, shortest_, depth_); + lua_pushnil( L); // o "r" {c} {fqn} ... {?} k {} {mt} nil + lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k {} {mt} + -- depth_; + } + lua_pop( L, 1); // o "r" {c} {fqn} ... {?} k {} + } + STACK_MID( L, 2); + break; + + case LUA_TTHREAD: // o "r" {c} {fqn} ... {?} k T + // TODO: explore the thread's stack frame looking for our culprit? + break; + + case LUA_TUSERDATA: // o "r" {c} {fqn} ... 
{?} k U + STACK_MID( L, 2); + // search in the object's metatable (some modules are built that way) + if( lua_getmetatable( L, -1)) // o "r" {c} {fqn} ... {?} k U {mt} + { + if( lua_istable( L, -1)) + { + ++ depth_; + lua_pushliteral( L, "__metatable"); // o "r" {c} {fqn} ... {?} k U {mt} "__metatable" + lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k U {mt} + shortest_ = discover_object_name_recur( L, shortest_, depth_); + lua_pushnil( L); // o "r" {c} {fqn} ... {?} k U {mt} nil + lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k U {mt} + -- depth_; + } + lua_pop( L, 1); // o "r" {c} {fqn} ... {?} k U + } + STACK_MID( L, 2); + // search in the object's uservalues + { + int uvi = 1; + while( lua_getiuservalue( L, -1, uvi) != LUA_TNONE) // o "r" {c} {fqn} ... {?} k U {u} + { + if( lua_istable( L, -1)) // if it is a table, look inside + { + ++ depth_; + lua_pushliteral( L, "uservalue"); // o "r" {c} {fqn} ... {?} k v {u} "uservalue" + lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k v {u} + shortest_ = discover_object_name_recur( L, shortest_, depth_); + lua_pushnil( L); // o "r" {c} {fqn} ... {?} k v {u} nil + lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k v {u} + -- depth_; + } + lua_pop( L, 1); // o "r" {c} {fqn} ... {?} k U + ++ uvi; + } + // when lua_getiuservalue() returned LUA_TNONE, it pushed a nil. pop it now + lua_pop( L, 1); // o "r" {c} {fqn} ... {?} k U + } + STACK_MID( L, 2); + break; + } + // make ready for next iteration + lua_pop( L, 1); // o "r" {c} {fqn} ... {?} k + // remove name from fqn stack + lua_pushnil( L); // o "r" {c} {fqn} ... {?} k nil + lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k + STACK_MID( L, 1); + -- depth_; + } // o "r" {c} {fqn} ... {?} + STACK_MID( L, 0); + // remove the visited table from the cache, in case a shorter path to the searched object exists + lua_pushvalue( L, -1); // o "r" {c} {fqn} ... {?} {?} + lua_pushnil( L); // o "r" {c} {fqn} ... {?} {?} nil + lua_rawset( L, cache); // o "r" {c} {fqn} ... {?} + STACK_END( L, 0); + return shortest_; +} + + +/* + * "type", "name" = lanes.nameof( o) + */ +int luaG_nameof( lua_State* L) +{ + int what = lua_gettop( L); + if( what > 1) + { + luaL_argerror( L, what, "too many arguments."); + } + + // nil, boolean, light userdata, number and string aren't identifiable + if( lua_type( L, 1) < LUA_TTABLE) + { + lua_pushstring( L, luaL_typename( L, 1)); // o "type" + lua_insert( L, -2); // "type" o + return 2; + } + + STACK_GROW( L, 4); + STACK_CHECK( L, 0); + // this slot will contain the shortest name we found when we are done + lua_pushnil( L); // o nil + // push a cache that will contain all already visited tables + lua_newtable( L); // o nil {c} + // push a table whose contents are strings that, when concatenated, produce unique name + lua_newtable( L); // o nil {c} {fqn} + lua_pushliteral( L, "_G"); // o nil {c} {fqn} "_G" + lua_rawseti( L, -2, 1); // o nil {c} {fqn} + // this is where we start the search + lua_pushglobaltable( L); // o nil {c} {fqn} _G + (void) discover_object_name_recur( L, 6666, 1); + if( lua_isnil( L, 2)) // try again with registry, just in case... 
+ { + lua_pop( L, 1); // o nil {c} {fqn} + lua_pushliteral( L, "_R"); // o nil {c} {fqn} "_R" + lua_rawseti( L, -2, 1); // o nil {c} {fqn} + lua_pushvalue( L, LUA_REGISTRYINDEX); // o nil {c} {fqn} _R + (void) discover_object_name_recur( L, 6666, 1); + } + lua_pop( L, 3); // o "result" + STACK_END( L, 1); + lua_pushstring( L, luaL_typename( L, 1)); // o "result" "type" + lua_replace( L, -3); // "type" "result" + return 2; +} + + +/* + * Push a looked-up native/LuaJIT function. + */ +static void lookup_native_func( lua_State* L2, lua_State* L, uint_t i, LookupMode mode_, char const* upName_) +{ + // get the name of the function we want to send + size_t len; + char const* fqn = find_lookup_name( L, i, mode_, upName_, &len); + // push the equivalent function in the destination's stack, retrieved from the lookup table + STACK_CHECK( L2, 0); // L // L2 + STACK_GROW( L2, 3); // up to 3 slots are necessary on error + switch( mode_) + { + default: // shouldn't happen, in theory... + (void) luaL_error( L, "internal error: unknown lookup mode"); + return; + + case eLM_ToKeeper: + // push a sentinel closure that holds the lookup name as upvalue + lua_pushlstring( L2, fqn, len); // "f.q.n" + lua_pushcclosure( L2, func_lookup_sentinel, 1); // f + break; + + case eLM_LaneBody: + case eLM_FromKeeper: + REGISTRY_GET( L2, LOOKUP_REGKEY); // {} + STACK_MID( L2, 1); + ASSERT_L( lua_istable( L2, -1)); + lua_pushlstring( L2, fqn, len); // {} "f.q.n" + lua_rawget( L2, -2); // {} f + // nil means we don't know how to transfer stuff: user should do something + // anything other than function or table should not happen! + if( !lua_isfunction( L2, -1) && !lua_istable( L2, -1)) + { + char const* from, * to; + lua_getglobal( L, "decoda_name"); // ... f ... decoda_name + from = lua_tostring( L, -1); + lua_pop( L, 1); // ... f ... + lua_getglobal( L2, "decoda_name"); // {} f decoda_name + to = lua_tostring( L2, -1); + lua_pop( L2, 1); // {} f + // when mode_ == eLM_FromKeeper, L is a keeper state and L2 is not, therefore L2 is the state where we want to raise the error + (void) luaL_error( + (mode_ == eLM_FromKeeper) ? L2 : L + , "%s%s: function '%s' not found in %s destination transfer database." + , lua_isnil( L2, -1) ? "" : "INTERNAL ERROR IN " + , from ? from : "main" + , fqn + , to ? to : "main" + ); + return; + } + lua_remove( L2, -2); // f + break; + + /* keep it in case I need it someday, who knows... + case eLM_RawFunctions: + { + int n; + char const* upname; + lua_CFunction f = lua_tocfunction( L, i); + // copy upvalues + for( n = 0; (upname = lua_getupvalue( L, i, 1 + n)) != NULL; ++ n) + { + luaG_inter_move( U, L, L2, 1, mode_); // [up[,up ...]] + } + lua_pushcclosure( L2, f, n); // + } + break; + */ + } + STACK_END( L2, 1); +} + + +/* + * Copy a function over, which has not been found in the cache. 
+ * L2 has the cache key for this function at the top of the stack +*/ + +#if USE_DEBUG_SPEW() +static char const* lua_type_names[] = +{ + "LUA_TNIL" + , "LUA_TBOOLEAN" + , "LUA_TLIGHTUSERDATA" + , "LUA_TNUMBER" + , "LUA_TSTRING" + , "LUA_TTABLE" + , "LUA_TFUNCTION" + , "LUA_TUSERDATA" + , "LUA_TTHREAD" + , "" // not really a type + , "LUA_TJITCDATA" // LuaJIT specific +}; +static char const* vt_names[] = +{ + "VT_NORMAL" + , "VT_KEY" + , "VT_METATABLE" +}; +#endif // USE_DEBUG_SPEW() + +// Lua 5.4.3 style of dumping (see lstrlib.c) +// we have to do it that way because we can't unbalance the stack between buffer operations +// namely, this means we can't push a function on top of the stack *after* we initialize the buffer! +// luckily, this also works with earlier Lua versions +static int buf_writer( lua_State* L, void const* b, size_t size, void* ud) +{ + luaL_Buffer* B = (luaL_Buffer*) ud; + if( !B->L) + { + luaL_buffinit( L, B); + } + luaL_addlstring( B, (char const*) b, size); + return 0; +} + +static void copy_func( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, LookupMode mode_, char const* upName_) +{ + int n, needToPush; + luaL_Buffer B; + B.L = NULL; + + ASSERT_L( L2_cache_i != 0); // ... {cache} ... p + STACK_GROW( L, 2); + STACK_CHECK( L, 0); + + + // 'lua_dump()' needs the function at top of stack + // if already on top of the stack, no need to push again + needToPush = (i != (uint_t)lua_gettop( L)); + if( needToPush) + { + lua_pushvalue( L, i); // ... f + } + + // + // "value returned is the error code returned by the last call + // to the writer" (and we only return 0) + // not sure this could ever fail but for memory shortage reasons + // last parameter is Lua 5.4-specific (no stripping) + if( lua504_dump( L, buf_writer, &B, 0) != 0) + { + luaL_error( L, "internal error: function dump failed."); + } + + // pushes dumped string on 'L' + luaL_pushresult( &B); // ... f b + + // if not pushed, no need to pop + if( needToPush) + { + lua_remove( L, -2); // ... b + } + + // transfer the bytecode, then the upvalues, to create a similar closure + { + char const* name = NULL; + + #if LOG_FUNC_INFO + // "To get information about a function you push it onto the + // stack and start the what string with the character '>'." + // + { + lua_Debug ar; + lua_pushvalue( L, i); // ... b f + // fills 'name' 'namewhat' and 'linedefined', pops function + lua_getinfo( L, ">nS", &ar); // ... b + name = ar.namewhat; + fprintf( stderr, INDENT_BEGIN "FNAME: %s @ %d\n", i, s_indent, ar.short_src, ar.linedefined); // just gives NULL + } + #endif // LOG_FUNC_INFO + { + size_t sz; + char const* s = lua_tolstring( L, -1, &sz); // ... b + ASSERT_L( s && sz); + STACK_GROW( L2, 2); + // Note: Line numbers seem to be taken precisely from the + // original function. 'name' is not used since the chunk + // is precompiled (it seems...). + // + // TBD: Can we get the function's original name through, as well? + // + if( luaL_loadbuffer( L2, s, sz, name) != 0) // ... {cache} ... p function + { + // chunk is precompiled so only LUA_ERRMEM can happen + // "Otherwise, it pushes an error message" + // + STACK_GROW( L, 1); + luaL_error( L, "%s: %s", upName_, lua_tostring( L2, -1)); + } + // remove the dumped string + lua_pop( L, 1); // ... + // now set the cache as soon as we can. + // this is necessary if one of the function's upvalues references it indirectly + // we need to find it in the cache even if it isn't fully transfered yet + lua_insert( L2, -2); // ... {cache} ... 
function p + lua_pushvalue( L2, -2); // ... {cache} ... function p function + // cache[p] = function + lua_rawset( L2, L2_cache_i); // ... {cache} ... function + } + STACK_MID( L, 0); + + /* push over any upvalues; references to this function will come from + * cache so we don't end up in eternal loop. + * Lua5.2 and Lua5.3: one of the upvalues is _ENV, which we don't want to copy! + * instead, the function shall have LUA_RIDX_GLOBALS taken in the destination state! + */ + { + char const* upname; +#if LUA_VERSION_NUM >= 502 + // Starting with Lua 5.2, each Lua function gets its environment as one of its upvalues (named LUA_ENV, aka "_ENV" by default) + // Generally this is LUA_RIDX_GLOBALS, which we don't want to copy from the source to the destination state... + // -> if we encounter an upvalue equal to the global table in the source, bind it to the destination's global table + lua_pushglobaltable( L); // ... _G +#endif // LUA_VERSION_NUM + for( n = 0; (upname = lua_getupvalue( L, i, 1 + n)) != NULL; ++ n) + { // ... _G up[n] + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "UPNAME[%d]: %s -> " INDENT_END, n, upname)); +#if LUA_VERSION_NUM >= 502 + if( lua_rawequal( L, -1, -2)) // is the upvalue equal to the global table? + { + DEBUGSPEW_CODE( fprintf( stderr, "pushing destination global scope\n")); + lua_pushglobaltable( L2); // ... {cache} ... function + } + else +#endif // LUA_VERSION_NUM + { + DEBUGSPEW_CODE( fprintf( stderr, "copying value\n")); + if( !inter_copy_one( U, L2, L2_cache_i, L, lua_gettop( L), VT_NORMAL, mode_, upname)) // ... {cache} ... function + { + luaL_error( L, "Cannot copy upvalue type '%s'", luaL_typename( L, -1)); + } + } + lua_pop( L, 1); // ... _G + } +#if LUA_VERSION_NUM >= 502 + lua_pop( L, 1); // ... +#endif // LUA_VERSION_NUM + } + // L2: function + 'n' upvalues (>=0) + + STACK_MID( L, 0); + + // Set upvalues (originally set to 'nil' by 'lua_load') + { + int func_index = lua_gettop( L2) - n; + for( ; n > 0; -- n) + { + char const* rc = lua_setupvalue( L2, func_index, n); // ... {cache} ... function + // + // "assigns the value at the top of the stack to the upvalue and returns its name. + // It also pops the value from the stack." + + ASSERT_L( rc); // not having enough slots? + } + // once all upvalues have been set we are left + // with the function at the top of the stack // ... {cache} ... function + } + } + STACK_END( L, 0); +} + +/* + * Check if we've already copied the same function from 'L', and reuse the old + * copy. + * + * Always pushes a function to 'L2'. + */ +static void copy_cached_func( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, LookupMode mode_, char const* upName_) +{ + FuncSubType funcSubType; + /*lua_CFunction cfunc =*/ luaG_tocfunction( L, i, &funcSubType); // NULL for LuaJIT-fast && bytecode functions + if( funcSubType == FST_Bytecode) + { + void* const aspointer = (void*)lua_topointer( L, i); + // TBD: Merge this and same code for tables + ASSERT_L( L2_cache_i != 0); + + STACK_GROW( L2, 2); + + // L2_cache[id_str]= function + // + STACK_CHECK( L2, 0); + + // We don't need to use the from state ('L') in ID since the life span + // is only for the duration of a copy (both states are locked). + // + + // push a light userdata uniquely representing the function + lua_pushlightuserdata( L2, aspointer); // ... {cache} ... p + + //fprintf( stderr, "<< ID: %s >>\n", lua_tostring( L2, -1)); + + lua_pushvalue( L2, -1); // ... {cache} ... p p + lua_rawget( L2, L2_cache_i); // ... {cache} ... 
p function|nil|true + + if( lua_isnil( L2, -1)) // function is unknown + { + lua_pop( L2, 1); // ... {cache} ... p + + // Set to 'true' for the duration of creation; need to find self-references + // via upvalues + // + // pushes a copy of the func, stores a reference in the cache + copy_func( U, L2, L2_cache_i, L, i, mode_, upName_); // ... {cache} ... function + } + else // found function in the cache + { + lua_remove( L2, -2); // ... {cache} ... function + } + STACK_END( L2, 1); + ASSERT_L( lua_isfunction( L2, -1)); + } + else // function is native/LuaJIT: no need to cache + { + lookup_native_func( L2, L, i, mode_, upName_); // ... {cache} ... function + // if the function was in fact a lookup sentinel, we can either get a function or a table here + ASSERT_L( lua_isfunction( L2, -1) || lua_istable( L2, -1)); + } +} + +static bool_t push_cached_metatable( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, enum eLookupMode mode_, char const* upName_) +{ + STACK_CHECK( L, 0); + if( lua_getmetatable( L, i)) // ... mt + { + lua_Integer const mt_id = get_mt_id( U, L, -1); // Unique id for the metatable + + STACK_CHECK( L2, 0); + STACK_GROW( L2, 4); + // do we already know this metatable? + push_registry_subtable( L2, REG_MTID); // _R[REG_MTID] + lua_pushinteger( L2, mt_id); // _R[REG_MTID] id + lua_rawget( L2, -2); // _R[REG_MTID] mt? + + STACK_MID( L2, 2); + + if( lua_isnil( L2, -1)) + { // L2 did not know the metatable + lua_pop( L2, 1); // _R[REG_MTID] + if( inter_copy_one( U, L2, L2_cache_i, L, lua_gettop( L), VT_METATABLE, mode_, upName_)) // _R[REG_MTID] mt + { + STACK_MID( L2, 2); + // mt_id -> metatable + lua_pushinteger( L2, mt_id); // _R[REG_MTID] mt id + lua_pushvalue( L2, -2); // _R[REG_MTID] mt id mt + lua_rawset( L2, -4); // _R[REG_MTID] mt + + // metatable -> mt_id + lua_pushvalue( L2, -1); // _R[REG_MTID] mt mt + lua_pushinteger( L2, mt_id); // _R[REG_MTID] mt mt id + lua_rawset( L2, -4); // _R[REG_MTID] mt + } + else + { + (void) luaL_error( L, "Error copying a metatable"); + } + STACK_MID( L2, 2); + } + lua_remove( L2, -2); // mt + + lua_pop( L, 1); // ... 
+ STACK_END( L2, 1); + STACK_MID( L, 0); + return TRUE; + } + STACK_END( L, 0); + return FALSE; +} + +static void inter_copy_keyvaluepair( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, enum e_vt vt, LookupMode mode_, char const* upName_) +{ + uint_t val_i = lua_gettop( L); + uint_t key_i = val_i - 1; + + // Only basic key types are copied over; others ignored + if( inter_copy_one( U, L2, 0 /*key*/, L, key_i, VT_KEY, mode_, upName_)) + { + char* valPath = (char*) upName_; + if( U->verboseErrors) + { + // for debug purposes, let's try to build a useful name + if( lua_type( L, key_i) == LUA_TSTRING) + { + char const* key = lua_tostring( L, key_i); + size_t const keyRawLen = lua_rawlen( L, key_i); + size_t const bufLen = strlen( upName_) + keyRawLen + 2; + valPath = (char*) alloca( bufLen); + sprintf( valPath, "%s.%*s", upName_, (int) keyRawLen, key); + key = NULL; + } +#if defined LUA_LNUM || LUA_VERSION_NUM >= 503 + else if( lua_isinteger( L, key_i)) + { + lua_Integer key = lua_tointeger( L, key_i); + valPath = (char*) alloca( strlen( upName_) + 32 + 3); + sprintf( valPath, "%s[" LUA_INTEGER_FMT "]", upName_, key); + } +#endif // defined LUA_LNUM || LUA_VERSION_NUM >= 503 + else if( lua_type( L, key_i) == LUA_TNUMBER) + { + lua_Number key = lua_tonumber( L, key_i); + valPath = (char*) alloca( strlen( upName_) + 32 + 3); + sprintf( valPath, "%s[" LUA_NUMBER_FMT "]", upName_, key); + } + else if( lua_type( L, key_i) == LUA_TLIGHTUSERDATA) + { + void* key = lua_touserdata( L, key_i); + valPath = (char*) alloca( strlen( upName_) + 16 + 5); + sprintf( valPath, "%s[U:%p]", upName_, key); + } + else if( lua_type( L, key_i) == LUA_TBOOLEAN) + { + int key = lua_toboolean( L, key_i); + valPath = (char*) alloca( strlen( upName_) + 8); + sprintf( valPath, "%s[%s]", upName_, key ? "true" : "false"); + } + } + /* + * Contents of metatables are copied with cache checking; + * important to detect loops. + */ + if( inter_copy_one( U, L2, L2_cache_i, L, val_i, VT_NORMAL, mode_, valPath)) + { + ASSERT_L( lua_istable( L2, -3)); + lua_rawset( L2, -3); // add to table (pops key & val) + } + else + { + luaL_error( L, "Unable to copy %s entry '%s' because of value is of type '%s'", (vt == VT_NORMAL) ? "table" : "metatable", valPath, luaL_typename( L, val_i)); + } + } +} + +/* +* The clone cache is a weak valued table listing all clones, indexed by their userdatapointer +* fnv164 of string "CLONABLES_CACHE_KEY" generated at https://www.pelock.com/products/hash-calculator +*/ +static DECLARE_CONST_UNIQUE_KEY( CLONABLES_CACHE_KEY, 0xD04EE018B3DEE8F5); + +static bool_t copyclone( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t source_i_, LookupMode mode_, char const* upName_) +{ + void* const source = lua_touserdata( L, source_i_); + source_i_ = lua_absindex( L, source_i_); + + STACK_CHECK( L, 0); // L (source) // L2 (destination) + STACK_CHECK( L2, 0); + + // Check if the source was already cloned during this copy + lua_pushlightuserdata( L2, source); // ... source + lua_rawget( L2, L2_cache_i); // ... clone? + if ( !lua_isnil( L2, -1)) + { + STACK_MID( L2, 1); + return TRUE; + } + else + { + lua_pop( L2, 1); // ... + } + STACK_MID( L2, 0); + + // no metatable? -> not clonable + if( !lua_getmetatable( L, source_i_)) // ... mt? + { + STACK_MID( L, 0); + return FALSE; + } + + // no __lanesclone? -> not clonable + lua_getfield( L, -1, "__lanesclone"); // ... mt __lanesclone? + if( lua_isnil( L, -1)) + { + lua_pop( L, 2); // ... 
+ STACK_MID( L, 0); + return FALSE; + } + + // we need to copy over the uservalues of the userdata as well + { + int const mt = lua_absindex( L, -2); // ... mt __lanesclone + size_t const userdata_size = (size_t) lua_rawlen( L, source_i_); + void* clone = NULL; + // extract all the uservalues, but don't transfer them yet + int uvi = 0; + while( lua_getiuservalue( L, source_i_, ++ uvi) != LUA_TNONE) {} // ... mt __lanesclone [uv]+ nil + // when lua_getiuservalue() returned LUA_TNONE, it pushed a nil. pop it now + lua_pop( L, 1); // ... mt __lanesclone [uv]+ + -- uvi; + // create the clone userdata with the required number of uservalue slots + clone = lua_newuserdatauv( L2, userdata_size, uvi); // ... u + // copy the metatable in the target state, and give it to the clone we put there + if( inter_copy_one( U, L2, L2_cache_i, L, mt, VT_NORMAL, mode_, upName_)) // ... u mt|sentinel + { + if( eLM_ToKeeper == mode_) // ... u sentinel + { + ASSERT_L( lua_tocfunction( L2, -1) == table_lookup_sentinel); + // we want to create a new closure with a 'clone sentinel' function, where the upvalues are the userdata and the metatable fqn + lua_getupvalue( L2, -1, 1); // ... u sentinel fqn + lua_remove( L2, -2); // ... u fqn + lua_insert( L2, -2); // ... fqn u + lua_pushcclosure( L2, userdata_clone_sentinel, 2); // ... userdata_clone_sentinel + } + else // from keeper or direct // ... u mt + { + ASSERT_L( lua_istable( L2, -1)); + lua_setmetatable( L2, -2); // ... u + } + STACK_MID( L2, 1); + } + else + { + (void) luaL_error( L, "Error copying a metatable"); + } + // first, add the entry in the cache (at this point it is either the actual userdata or the keeper sentinel + lua_pushlightuserdata( L2, source); // ... u source + lua_pushvalue( L2, -2); // ... u source u + lua_rawset( L2, L2_cache_i); // ... u + // make sure we have the userdata now + if( eLM_ToKeeper == mode_) // ... userdata_clone_sentinel + { + lua_getupvalue( L2, -1, 2); // ... userdata_clone_sentinel u + } + // assign uservalues + while( uvi > 0) + { + inter_copy_one( U, L2, L2_cache_i, L, lua_absindex( L, -1), VT_NORMAL, mode_, upName_); // ... u uv + lua_pop( L, 1); // ... mt __lanesclone [uv]* + // this pops the value from the stack + lua_setiuservalue( L2, -2, uvi); // ... u + -- uvi; + } + // when we are done, all uservalues are popped from the source stack, and we want only the single transferred value in the destination + if( eLM_ToKeeper == mode_) // ... userdata_clone_sentinel u + { + lua_pop( L2, 1); // ... userdata_clone_sentinel + } + STACK_MID( L2, 1); + STACK_MID( L, 2); + // call cloning function in source state to perform the actual memory cloning + lua_pushlightuserdata( L, clone); // ... mt __lanesclone clone + lua_pushlightuserdata( L, source); // ... mt __lanesclone clone source + lua_pushinteger( L, userdata_size); // ... mt __lanesclone clone source size + lua_call( L, 3, 0); // ... mt + STACK_MID( L, 1); + } + + STACK_END( L2, 1); + lua_pop( L, 1); // ... 
+ STACK_END( L, 0); + return TRUE; +} + +static bool_t inter_copy_userdata( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, enum e_vt vt, LookupMode mode_, char const* upName_) +{ + STACK_CHECK( L, 0); + STACK_CHECK( L2, 0); + if( vt == VT_KEY) + { + return FALSE; + } + + // try clonable userdata first + if( copyclone( U, L2, L2_cache_i, L, i, mode_, upName_)) + { + STACK_MID( L, 0); + STACK_MID( L2, 1); + return TRUE; + } + + STACK_MID( L, 0); + STACK_MID( L2, 0); + + // Allow only deep userdata entities to be copied across + DEBUGSPEW_CODE( fprintf( stderr, "USERDATA\n")); + if( copydeep( U, L2, L2_cache_i, L, i, mode_, upName_)) + { + STACK_MID( L, 0); + STACK_MID( L2, 1); + return TRUE; + } + + STACK_MID( L, 0); + STACK_MID( L2, 0); + + // Not a deep or clonable full userdata + if( U->demoteFullUserdata) // attempt demotion to light userdata + { + void* lud = lua_touserdata( L, i); + lua_pushlightuserdata( L2, lud); + } + else // raise an error + { + (void) luaL_error( L, "can't copy non-deep full userdata across lanes"); + } + + STACK_END( L2, 1); + STACK_END( L, 0); + return TRUE; +} + +static bool_t inter_copy_function( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t source_i_, enum e_vt vt, LookupMode mode_, char const* upName_) +{ + if( vt == VT_KEY) + { + return FALSE; + } + + STACK_CHECK( L, 0); // L (source) // L2 (destination) + STACK_CHECK( L2, 0); + DEBUGSPEW_CODE( fprintf( stderr, "FUNCTION %s\n", upName_)); + + if( lua_tocfunction( L, source_i_) == userdata_clone_sentinel) // we are actually copying a clonable full userdata from a keeper + { + // clone the full userdata again + size_t userdata_size = 0; + void* source; + void* clone; + + // let's see if we already restored this userdata + lua_getupvalue( L, source_i_, 2); // ... u + source = lua_touserdata( L, -1); + lua_pushlightuserdata( L2, source); // ... source + lua_rawget( L2, L2_cache_i); // ... u? + if( !lua_isnil( L2, -1)) + { + lua_pop( L, 1); // ... + STACK_MID( L, 0); + STACK_MID( L2, 1); + return TRUE; + } + lua_pop( L2, 1); // ... + + // this function has 2 upvalues: the fqn of its metatable, and the userdata itself + lookup_table( L2, L, source_i_, mode_, upName_); // ... mt + // originally 'source_i_' slot was the proxy closure, but from now on it indexes the actual userdata we extracted from it + source_i_ = lua_gettop( L); + source = lua_touserdata( L, -1); + // get the number of bytes to allocate for the clone + userdata_size = (size_t) lua_rawlen( L, -1); + { + // extract uservalues (don't transfer them yet) + int uvi = 0; + while( lua_getiuservalue( L, source_i_, ++ uvi) != LUA_TNONE) {} // ... u uv + // when lua_getiuservalue() returned LUA_TNONE, it pushed a nil. pop it now + lua_pop( L, 1); // ... u [uv]* + -- uvi; + STACK_MID( L, uvi + 1); + // create the clone userdata with the required number of uservalue slots + clone = lua_newuserdatauv( L2, userdata_size, uvi); // ... mt u + // add it in the cache + lua_pushlightuserdata( L2, source); // ... mt u source + lua_pushvalue( L2, -2); // ... mt u source u + lua_rawset( L2, L2_cache_i); // ... mt u + // set metatable + lua_pushvalue( L2, -2); // ... mt u mt + lua_setmetatable( L2, -2); // ... mt u + // transfer and assign uservalues + while( uvi > 0) + { + inter_copy_one( U, L2, L2_cache_i, L, lua_absindex( L, -1), vt, mode_, upName_); // ... mt u uv + lua_pop( L, 1); // ... u [uv]* + // this pops the value from the stack + lua_setiuservalue( L2, -2, uvi); // ... 
mt u + -- uvi; + } + // when we are done, all uservalues are popped from the stack, we can pop the source as well + lua_pop( L, 1); // ... + STACK_MID( L, 0); + STACK_MID( L2, 2); // ... mt u + } + // perform the custom cloning part + lua_insert( L2, -2); // ... u mt + // __lanesclone should always exist because we wouldn't be restoring data from a userdata_clone_sentinel closure to begin with + lua_getfield(L2, -1, "__lanesclone"); // ... u mt __lanesclone + lua_remove( L2, -2); // ... u __lanesclone + lua_pushlightuserdata( L2, clone); // ... u __lanesclone clone + lua_pushlightuserdata( L2, source); // ... u __lanesclone clone source + lua_pushinteger( L2, userdata_size); // ... u __lanesclone clone source size + // clone:__lanesclone(dest, source, size) + lua_call( L2, 3, 0); // ... u + } + else // regular function + { + DEBUGSPEW_CODE( fprintf( stderr, "FUNCTION %s\n", upName_)); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + copy_cached_func( U, L2, L2_cache_i, L, source_i_, mode_, upName_); // ... f + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + } + STACK_END( L2, 1); + STACK_END( L, 0); + return TRUE; +} + +static bool_t inter_copy_table( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, enum e_vt vt, LookupMode mode_, char const* upName_) +{ + if( vt == VT_KEY) + { + return FALSE; + } + + STACK_CHECK( L, 0); + STACK_CHECK( L2, 0); + DEBUGSPEW_CODE( fprintf( stderr, "TABLE %s\n", upName_)); + + /* + * First, let's try to see if this table is special (aka is it some table that we registered in our lookup databases during module registration?) + * Note that this table CAN be a module table, but we just didn't register it, in which case we'll send it through the table cloning mechanism + */ + if( lookup_table( L2, L, i, mode_, upName_)) + { + ASSERT_L( lua_istable( L2, -1) || (lua_tocfunction( L2, -1) == table_lookup_sentinel)); // from lookup datables // can also be table_lookup_sentinel if this is a table we know + return TRUE; + } + + /* Check if we've already copied the same table from 'L' (during this transmission), and + * reuse the old copy. This allows table upvalues shared by multiple + * local functions to point to the same table, also in the target. + * Also, this takes care of cyclic tables and multiple references + * to the same subtable. + * + * Note: Even metatables need to go through this test; to detect + * loops such as those in required module tables (getmetatable(lanes).lanes == lanes) + */ + if( push_cached_table( L2, L2_cache_i, L, i)) + { + ASSERT_L( lua_istable( L2, -1)); // from cache + return TRUE; + } + ASSERT_L( lua_istable( L2, -1)); + + STACK_GROW( L, 2); + STACK_GROW( L2, 2); + + lua_pushnil( L); // start iteration + while( lua_next( L, i)) + { + // need a function to prevent overflowing the stack with verboseErrors-induced alloca() + inter_copy_keyvaluepair( U, L2, L2_cache_i, L, vt, mode_, upName_); + lua_pop( L, 1); // pop value (next round) + } + STACK_MID( L, 0); + STACK_MID( L2, 1); + + // Metatables are expected to be immutable, and copied only once. + if( push_cached_metatable( U, L2, L2_cache_i, L, i, mode_, upName_)) // ... t mt? + { + lua_setmetatable( L2, -2); // ... t + } + STACK_END( L2, 1); + STACK_END( L, 0); + return TRUE; +} + +/* +* Copies a value from 'L' state (at index 'i') to 'L2' state. Does not remove +* the original value. +* +* NOTE: Both the states must be solely in the current OS thread's possession. +* +* 'i' is an absolute index (no -1, ...) 
+* +* Returns TRUE if value was pushed, FALSE if its type is non-supported. +*/ +bool_t inter_copy_one( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, enum e_vt vt, LookupMode mode_, char const* upName_) +{ + bool_t ret = TRUE; + int val_type = lua_type( L, i); + static int const pod_mask = (1 << LUA_TNIL) | (1 << LUA_TBOOLEAN) | (1 << LUA_TLIGHTUSERDATA) | (1 << LUA_TNUMBER) | (1 << LUA_TSTRING); + STACK_GROW( L2, 1); + STACK_CHECK( L, 0); // L // L2 + STACK_CHECK( L2, 0); // L // L2 + + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "inter_copy_one()\n" INDENT_END)); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%s %s: " INDENT_END, lua_type_names[val_type], vt_names[vt])); + + // Non-POD can be skipped if its metatable contains { __lanesignore = true } + if( ((1 << val_type) & pod_mask) == 0) + { + if( lua_getmetatable( L, i)) // ... mt + { + lua_getfield( L, -1, "__lanesignore"); // ... mt ignore? + if( lua_isboolean( L, -1) && lua_toboolean( L, -1)) + { + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "__lanesignore -> LUA_TNIL\n" INDENT_END)); + val_type = LUA_TNIL; + } + lua_pop( L, 2); // ... + } + } + STACK_MID( L, 0); + + /* Lets push nil to L2 if the object should be ignored */ + switch( val_type) + { + /* Basic types allowed both as values, and as table keys */ + + case LUA_TBOOLEAN: + { + bool_t v = lua_toboolean( L, i); + DEBUGSPEW_CODE( fprintf( stderr, "%s\n", v ? "true" : "false")); + lua_pushboolean( L2, v); + } + break; + + case LUA_TNUMBER: + /* LNUM patch support (keeping integer accuracy) */ +#if defined LUA_LNUM || LUA_VERSION_NUM >= 503 + if( lua_isinteger( L, i)) + { + lua_Integer v = lua_tointeger( L, i); + DEBUGSPEW_CODE( fprintf( stderr, LUA_INTEGER_FMT "\n", v)); + lua_pushinteger( L2, v); + break; + } + else +#endif // defined LUA_LNUM || LUA_VERSION_NUM >= 503 + { + lua_Number v = lua_tonumber( L, i); + DEBUGSPEW_CODE( fprintf( stderr, LUA_NUMBER_FMT "\n", v)); + lua_pushnumber( L2, v); + } + break; + + case LUA_TSTRING: + { + size_t len; + char const* s = lua_tolstring( L, i, &len); + DEBUGSPEW_CODE( fprintf( stderr, "'%s'\n", s)); + lua_pushlstring( L2, s, len); + } + break; + + case LUA_TLIGHTUSERDATA: + { + void* p = lua_touserdata( L, i); + DEBUGSPEW_CODE( fprintf( stderr, "%p\n", p)); + lua_pushlightuserdata( L2, p); + } + break; + + /* The following types are not allowed as table keys */ + + case LUA_TUSERDATA: + ret = inter_copy_userdata( U, L2, L2_cache_i, L, i, vt, mode_, upName_); + break; + + case LUA_TNIL: + if( vt == VT_KEY) + { + ret = FALSE; + break; + } + lua_pushnil( L2); + break; + + case LUA_TFUNCTION: + ret = inter_copy_function( U, L2, L2_cache_i, L, i, vt, mode_, upName_); + break; + + case LUA_TTABLE: + ret = inter_copy_table( U, L2, L2_cache_i, L, i, vt, mode_, upName_); + break; + + /* The following types cannot be copied */ + + case 10: // LuaJIT CDATA + case LUA_TTHREAD: + ret = FALSE; + break; + } + + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + + STACK_END( L2, ret ? 1 : 0); + STACK_END( L, 0); + return ret; +} + +/* +* Akin to 'lua_xmove' but copies values between _any_ Lua states. +* +* NOTE: Both the states must be solely in the current OS thread's posession. +* +* Note: Parameters are in this order ('L' = from first) to be same as 'lua_xmove'. +*/ +int luaG_inter_copy( Universe* U, lua_State* L, lua_State* L2, uint_t n, LookupMode mode_) +{ + uint_t top_L = lua_gettop( L); // ... {}n + uint_t top_L2 = lua_gettop( L2); // ... 
+ uint_t i, j; + char tmpBuf[16]; + char const* pBuf = U->verboseErrors ? tmpBuf : "?"; + bool_t copyok = TRUE; + + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "luaG_inter_copy()\n" INDENT_END)); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + + if( n > top_L) + { + // requesting to copy more than is available? + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "nothing to copy()\n" INDENT_END)); + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + return -1; + } + + STACK_CHECK( L2, 0); + STACK_GROW( L2, n + 1); + + /* + * Make a cache table for the duration of this copy. Collects tables and + * function entries, avoiding the same entries to be passed on as multiple + * copies. ESSENTIAL i.e. for handling upvalue tables in the right manner! + */ + lua_newtable( L2); // ... cache + + STACK_CHECK( L, 0); + for( i = top_L - n + 1, j = 1; i <= top_L; ++ i, ++ j) + { + if( U->verboseErrors) + { + sprintf( tmpBuf, "arg_%d", j); + } + copyok = inter_copy_one( U, L2, top_L2 + 1, L, i, VT_NORMAL, mode_, pBuf); // ... cache {}n + if( !copyok) + { + break; + } + } + STACK_END( L, 0); + + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + + if( copyok) + { + STACK_MID( L2, n + 1); + // Remove the cache table. Persistent caching would cause i.e. multiple + // messages passed in the same table to use the same table also in receiving end. + lua_remove( L2, top_L2 + 1); + return 0; + } + + // error -> pop everything from the target state stack + lua_settop( L2, top_L2); + STACK_END( L2, 0); + return -2; +} + + +int luaG_inter_move( Universe* U, lua_State* L, lua_State* L2, uint_t n, LookupMode mode_) +{ + int ret = luaG_inter_copy( U, L, L2, n, mode_); + lua_pop( L, (int) n); + return ret; +} + +int luaG_inter_copy_package( Universe* U, lua_State* L, lua_State* L2, int package_idx_, LookupMode mode_) +{ + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "luaG_inter_copy_package()\n" INDENT_END)); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + // package + STACK_CHECK( L, 0); + STACK_CHECK( L2, 0); + package_idx_ = lua_absindex( L, package_idx_); + if( lua_type( L, package_idx_) != LUA_TTABLE) + { + lua_pushfstring( L, "expected package as table, got %s", luaL_typename( L, package_idx_)); + STACK_MID( L, 1); + // raise the error when copying from lane to lane, else just leave it on the stack to be raised later + return ( mode_ == eLM_LaneBody) ? lua_error( L) : 1; + } + lua_getglobal( L2, "package"); + if( !lua_isnil( L2, -1)) // package library not loaded: do nothing + { + int i; + // package.loaders is renamed package.searchers in Lua 5.2 + // but don't copy it anyway, as the function names change depending on the slot index! + // users should provide an on_state_create function to setup custom loaders instead + // don't copy package.preload in keeper states (they don't know how to translate functions) + char const* entries[] = { "path", "cpath", (mode_ == eLM_LaneBody) ? "preload" : NULL/*, (LUA_VERSION_NUM == 501) ? 
"loaders" : "searchers"*/, NULL}; + for( i = 0; entries[i]; ++ i) + { + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "package.%s\n" INDENT_END, entries[i])); + lua_getfield( L, package_idx_, entries[i]); + if( lua_isnil( L, -1)) + { + lua_pop( L, 1); + } + else + { + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + luaG_inter_move( U, L, L2, 1, mode_); // moves the entry to L2 + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + lua_setfield( L2, -2, entries[i]); // set package[entries[i]] + } + } + } + else + { + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "'package' not loaded, nothing to do\n" INDENT_END)); + } + lua_pop( L2, 1); + STACK_END( L2, 0); + STACK_END( L, 0); + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + return 0; +} diff --git a/src/universe.c b/src/universe.c deleted file mode 100644 index 0a014f7..0000000 --- a/src/universe.c +++ /dev/null @@ -1,75 +0,0 @@ -/* - * UNIVERSE.C Copyright (c) 2017, Benoit Germain - */ - -/* -=============================================================================== - -Copyright (C) 2017 Benoit Germain - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - -=============================================================================== -*/ - -#include -#include - -#include "universe.h" -#include "compat.h" -#include "macros_and_utils.h" -#include "uniquekey.h" - -// crc64/we of string "UNIVERSE_REGKEY" generated at http://www.nitrxgen.net/hashgen/ -static DECLARE_CONST_UNIQUE_KEY( UNIVERSE_REGKEY, 0x9f877b2cf078f17f); - -// ################################################################################################ - -Universe* universe_create( lua_State* L) -{ - Universe* U = (Universe*) lua_newuserdatauv( L, sizeof(Universe), 0); // universe - memset( U, 0, sizeof( Universe)); - STACK_CHECK( L, 1); - REGISTRY_SET( L, UNIVERSE_REGKEY, lua_pushvalue(L, -2)); // universe - STACK_END( L, 1); - return U; -} - -// ################################################################################################ - -void universe_store( lua_State* L, Universe* U) -{ - STACK_CHECK( L, 0); - REGISTRY_SET( L, UNIVERSE_REGKEY, (NULL != U) ? 
lua_pushlightuserdata( L, U) : lua_pushnil( L)); - STACK_END( L, 0); -} - -// ################################################################################################ - -Universe* universe_get( lua_State* L) -{ - Universe* universe; - STACK_GROW( L, 2); - STACK_CHECK( L, 0); - REGISTRY_GET( L, UNIVERSE_REGKEY); - universe = (Universe*) lua_touserdata( L, -1); // NULL if nil - lua_pop( L, 1); - STACK_END( L, 0); - return universe; -} diff --git a/src/universe.cpp b/src/universe.cpp new file mode 100644 index 0000000..0a014f7 --- /dev/null +++ b/src/universe.cpp @@ -0,0 +1,75 @@ +/* + * UNIVERSE.C Copyright (c) 2017, Benoit Germain + */ + +/* +=============================================================================== + +Copyright (C) 2017 Benoit Germain + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +=============================================================================== +*/ + +#include +#include + +#include "universe.h" +#include "compat.h" +#include "macros_and_utils.h" +#include "uniquekey.h" + +// crc64/we of string "UNIVERSE_REGKEY" generated at http://www.nitrxgen.net/hashgen/ +static DECLARE_CONST_UNIQUE_KEY( UNIVERSE_REGKEY, 0x9f877b2cf078f17f); + +// ################################################################################################ + +Universe* universe_create( lua_State* L) +{ + Universe* U = (Universe*) lua_newuserdatauv( L, sizeof(Universe), 0); // universe + memset( U, 0, sizeof( Universe)); + STACK_CHECK( L, 1); + REGISTRY_SET( L, UNIVERSE_REGKEY, lua_pushvalue(L, -2)); // universe + STACK_END( L, 1); + return U; +} + +// ################################################################################################ + +void universe_store( lua_State* L, Universe* U) +{ + STACK_CHECK( L, 0); + REGISTRY_SET( L, UNIVERSE_REGKEY, (NULL != U) ? lua_pushlightuserdata( L, U) : lua_pushnil( L)); + STACK_END( L, 0); +} + +// ################################################################################################ + +Universe* universe_get( lua_State* L) +{ + Universe* universe; + STACK_GROW( L, 2); + STACK_CHECK( L, 0); + REGISTRY_GET( L, UNIVERSE_REGKEY); + universe = (Universe*) lua_touserdata( L, -1); // NULL if nil + lua_pop( L, 1); + STACK_END( L, 0); + return universe; +} -- cgit v1.2.3-55-g6feb
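
A note on the inter_copy_one() hunk carried over into tools.cpp above: any non-POD value (table, userdata, function) is transferred as nil when its metatable carries __lanesignore = true. The fragment below is a hypothetical illustration, not part of the patch, showing how a caller could tag a table (or full userdata) that way through the plain Lua C API; the helper name mark_ignored is made up for the example, and lua.hpp is assumed as the C++-friendly Lua header.

#include "lua.hpp"

// Hypothetical helper: expects the value to tag at the top of the stack.
static void mark_ignored( lua_State* L)
{
    lua_newtable( L);                          // v mt
    lua_pushboolean( L, 1);                    // v mt true
    lua_setfield( L, -2, "__lanesignore");     // v mt            (mt.__lanesignore = true)
    lua_setmetatable( L, -2);                  // v               (value now copied as nil by inter_copy_one)
}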
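
For readers unfamiliar with the copy API moved into tools.cpp, the sketch below shows the intended calling pattern of luaG_inter_copy(): push values on the source state, copy the topmost n of them into the destination state, and check the 0 / -1 / -2 result codes documented in the hunk. This is a minimal sketch under assumptions, not code from the patch: it assumes the declarations live in tools.h and universe.h as elsewhere in the tree, that both states were set up by Lanes so that universe_get() returns the shared Universe, and that eLM_LaneBody is the appropriate lookup mode for an ordinary lane-to-lane transfer.

#include "lua.hpp"
#include "universe.h"
#include "tools.h"

// Hypothetical helper: copy an integer and a string from L to L2.
static int copy_two_values( lua_State* L, lua_State* L2)
{
    Universe* U = universe_get( L);            // assumption: Lanes already stored a Universe in L

    lua_pushinteger( L, 42);                   // L: ... 42
    lua_pushliteral( L, "hello");              // L: ... 42 "hello"

    // copies (does not move) the 2 topmost values of L onto the stack of L2
    int const rc = luaG_inter_copy( U, L, L2, 2, eLM_LaneBody);
    lua_pop( L, 2);                            // the originals remain on L; drop them ourselves
    // rc == 0: success, -1: fewer than n values available, -2: an uncopyable value was encountered
    return rc;
}

luaG_inter_move() wraps the same call and pops the n source values for you, which is why the error path above only has to clean up its own pushes.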
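
Finally, the universe.c to universe.cpp move re-adds the three registry helpers verbatim. Their intended life cycle, reconstructed from the code rather than stated anywhere in the patch, looks roughly like the following; the function names open_universe and use_universe are invented for the illustration.

#include "lua.hpp"
#include "universe.h"

// Hypothetical open/use sequence for the universe registry helpers.
static Universe* open_universe( lua_State* L)
{
    Universe* U = universe_create( L);   // pushes the zero-filled userdata and anchors it under UNIVERSE_REGKEY
    // ... the caller would initialise U's members here ...
    lua_pop( L, 1);                      // safe to drop: the registry anchor keeps it alive
    return U;
}

static void use_universe( lua_State* L)
{
    Universe* U = universe_get( L);      // NULL when universe_create() was never called on this state
    if( U == NULL)
    {
        luaL_error( L, "state was not initialised by Lanes");
    }
    // ... use U ...
}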