From 621fb024b5f887ef9e81e2f28bf087386f5300e1 Mon Sep 17 00:00:00 2001 From: Benoit Germain Date: Mon, 7 Feb 2022 08:56:39 +0100 Subject: Changed all indentations to all whitespaces Tabs mess up alignment of stack contents comments, so I'm done with them. --- src/cancel.c | 334 ++--- src/cancel.h | 32 +- src/compat.c | 102 +- src/deep.c | 654 +++++----- src/deep.h | 24 +- src/keeper.c | 1208 +++++++++--------- src/keeper.h | 10 +- src/lanes.c | 2886 +++++++++++++++++++++--------------------- src/lanes.lua | 1380 ++++++++++---------- src/lanes_private.h | 110 +- src/linda.c | 1380 ++++++++++---------- src/macros_and_utils.h | 66 +- src/state.c | 554 ++++---- src/threading.c | 910 ++++++------- src/threading.h | 67 +- src/tools.c | 3308 ++++++++++++++++++++++++------------------------ src/tools.h | 6 +- src/uniquekey.h | 2 +- src/universe.c | 34 +- src/universe.h | 64 +- 20 files changed, 6564 insertions(+), 6567 deletions(-) diff --git a/src/cancel.c b/src/cancel.c index cd930b5..0a5adb6 100644 --- a/src/cancel.c +++ b/src/cancel.c @@ -55,9 +55,9 @@ THE SOFTWARE. */ static inline enum e_cancel_request cancel_test( lua_State* L) { - Lane* const s = get_lane_from_registry( L); - // 's' is NULL for the original main state (and no-one can cancel that) - return s ? s->cancel_request : CANCEL_NONE; + Lane* const s = get_lane_from_registry( L); + // 's' is NULL for the original main state (and no-one can cancel that) + return s ? s->cancel_request : CANCEL_NONE; } // ################################################################################################ @@ -70,9 +70,9 @@ static inline enum e_cancel_request cancel_test( lua_State* L) // LUAG_FUNC( cancel_test) { - enum e_cancel_request test = cancel_test( L); - lua_pushboolean( L, test != CANCEL_NONE); - return 1; + enum e_cancel_request test = cancel_test( L); + lua_pushboolean( L, test != CANCEL_NONE); + return 1; } // ################################################################################################ @@ -80,13 +80,13 @@ LUAG_FUNC( cancel_test) static void cancel_hook( lua_State* L, lua_Debug* ar) { - (void)ar; - DEBUGSPEW_CODE( fprintf( stderr, "cancel_hook\n")); - if( cancel_test( L) != CANCEL_NONE) - { - lua_sethook( L, NULL, 0, 0); - cancel_error( L); - } + (void)ar; + DEBUGSPEW_CODE( fprintf( stderr, "cancel_hook\n")); + if( cancel_test( L) != CANCEL_NONE) + { + lua_sethook( L, NULL, 0, 0); + cancel_error( L); + } } // ################################################################################################ @@ -114,90 +114,90 @@ static void cancel_hook( lua_State* L, lua_Debug* ar) static cancel_result thread_cancel_soft( Lane* s, double secs_, bool_t wake_lindas_) { - s->cancel_request = CANCEL_SOFT; // it's now signaled to stop - // negative timeout: we don't want to truly abort the lane, we just want it to react to cancel_test() on its own - if( wake_lindas_) // wake the thread so that execution returns from any pending linda operation if desired - { - SIGNAL_T *waiting_on = s->waiting_on; - if( s->status == WAITING && waiting_on != NULL) - { - SIGNAL_ALL( waiting_on); - } - } - - return THREAD_WAIT( &s->thread, secs_, &s->done_signal, &s->done_lock, &s->status) ? 
CR_Cancelled : CR_Timeout; + s->cancel_request = CANCEL_SOFT; // it's now signaled to stop + // negative timeout: we don't want to truly abort the lane, we just want it to react to cancel_test() on its own + if( wake_lindas_) // wake the thread so that execution returns from any pending linda operation if desired + { + SIGNAL_T *waiting_on = s->waiting_on; + if( s->status == WAITING && waiting_on != NULL) + { + SIGNAL_ALL( waiting_on); + } + } + + return THREAD_WAIT( &s->thread, secs_, &s->done_signal, &s->done_lock, &s->status) ? CR_Cancelled : CR_Timeout; } // ################################################################################################ static cancel_result thread_cancel_hard( lua_State* L, Lane* s, double secs_, bool_t force_, double waitkill_timeout_) { - cancel_result result; - - s->cancel_request = CANCEL_HARD; // it's now signaled to stop - { - SIGNAL_T *waiting_on = s->waiting_on; - if( s->status == WAITING && waiting_on != NULL) - { - SIGNAL_ALL( waiting_on); - } - } - - result = THREAD_WAIT( &s->thread, secs_, &s->done_signal, &s->done_lock, &s->status) ? CR_Cancelled : CR_Timeout; - - if( (result == CR_Timeout) && force_) - { - // Killing is asynchronous; we _will_ wait for it to be done at - // GC, to make sure the data structure can be released (alternative - // would be use of "cancellation cleanup handlers" that at least - // PThread seems to have). - // - THREAD_KILL( &s->thread); + cancel_result result; + + s->cancel_request = CANCEL_HARD; // it's now signaled to stop + { + SIGNAL_T *waiting_on = s->waiting_on; + if( s->status == WAITING && waiting_on != NULL) + { + SIGNAL_ALL( waiting_on); + } + } + + result = THREAD_WAIT( &s->thread, secs_, &s->done_signal, &s->done_lock, &s->status) ? CR_Cancelled : CR_Timeout; + + if( (result == CR_Timeout) && force_) + { + // Killing is asynchronous; we _will_ wait for it to be done at + // GC, to make sure the data structure can be released (alternative + // would be use of "cancellation cleanup handlers" that at least + // PThread seems to have). + // + THREAD_KILL( &s->thread); #if THREADAPI == THREADAPI_PTHREAD - // pthread: make sure the thread is really stopped! - // note that this may block forever if the lane doesn't call a cancellation point and pthread doesn't honor PTHREAD_CANCEL_ASYNCHRONOUS - result = THREAD_WAIT( &s->thread, waitkill_timeout_, &s->done_signal, &s->done_lock, &s->status); - if( result == CR_Timeout) - { - return luaL_error( L, "force-killed lane failed to terminate within %f second%s", waitkill_timeout_, waitkill_timeout_ > 1 ? "s" : ""); - } + // pthread: make sure the thread is really stopped! + // note that this may block forever if the lane doesn't call a cancellation point and pthread doesn't honor PTHREAD_CANCEL_ASYNCHRONOUS + result = THREAD_WAIT( &s->thread, waitkill_timeout_, &s->done_signal, &s->done_lock, &s->status); + if( result == CR_Timeout) + { + return luaL_error( L, "force-killed lane failed to terminate within %f second%s", waitkill_timeout_, waitkill_timeout_ > 1 ? "s" : ""); + } #else - (void) waitkill_timeout_; // unused - (void) L; // unused + (void) waitkill_timeout_; // unused + (void) L; // unused #endif // THREADAPI == THREADAPI_PTHREAD - s->mstatus = KILLED; // mark 'gc' to wait for it - // note that s->status value must remain to whatever it was at the time of the kill - // because we need to know if we can lua_close() the Lua State or not. 
- result = CR_Killed; - } - return result; + s->mstatus = KILLED; // mark 'gc' to wait for it + // note that s->status value must remain to whatever it was at the time of the kill + // because we need to know if we can lua_close() the Lua State or not. + result = CR_Killed; + } + return result; } // ################################################################################################ cancel_result thread_cancel( lua_State* L, Lane* s, CancelOp op_, double secs_, bool_t force_, double waitkill_timeout_) { - // remember that lanes are not transferable: only one thread can cancel a lane, so no multithreading issue here - // We can read 's->status' without locks, but not wait for it (if Posix no PTHREAD_TIMEDJOIN) - if( s->mstatus == KILLED) - { - return CR_Killed; - } - - if( s->status >= DONE) - { - // say "ok" by default, including when lane is already done - return CR_Cancelled; - } - - // signal the linda the wake up the thread so that it can react to the cancel query - // let us hope we never land here with a pointer on a linda that has been destroyed... - if( op_ == CO_Soft) - { - return thread_cancel_soft( s, secs_, force_); - } - - return thread_cancel_hard( L, s, secs_, force_, waitkill_timeout_); + // remember that lanes are not transferable: only one thread can cancel a lane, so no multithreading issue here + // We can read 's->status' without locks, but not wait for it (if Posix no PTHREAD_TIMEDJOIN) + if( s->mstatus == KILLED) + { + return CR_Killed; + } + + if( s->status >= DONE) + { + // say "ok" by default, including when lane is already done + return CR_Cancelled; + } + + // signal the linda the wake up the thread so that it can react to the cancel query + // let us hope we never land here with a pointer on a linda that has been destroyed... 
+ if( op_ == CO_Soft) + { + return thread_cancel_soft( s, secs_, force_); + } + + return thread_cancel_hard( L, s, secs_, force_, waitkill_timeout_); } // ################################################################################################ @@ -208,95 +208,95 @@ cancel_result thread_cancel( lua_State* L, Lane* s, CancelOp op_, double secs_, // < 0: hard static CancelOp which_op( lua_State* L, int idx_) { - if( lua_type( L, idx_) == LUA_TSTRING) - { - CancelOp op = CO_Invalid; - char const* str = lua_tostring( L, idx_); - if( strcmp( str, "soft") == 0) - { - op = CO_Soft; - } - else if( strcmp( str, "count") == 0) - { - op = CO_Count; - } - else if( strcmp( str, "line") == 0) - { - op = CO_Line; - } - else if( strcmp( str, "call") == 0) - { - op = CO_Call; - } - else if( strcmp( str, "ret") == 0) - { - op = CO_Ret; - } - else if( strcmp( str, "hard") == 0) - { - op = CO_Hard; - } - lua_remove( L, idx_); // argument is processed, remove it - if( op == CO_Invalid) - { - luaL_error( L, "invalid hook option %s", str); - } - return op; - } - return CO_Hard; + if( lua_type( L, idx_) == LUA_TSTRING) + { + CancelOp op = CO_Invalid; + char const* str = lua_tostring( L, idx_); + if( strcmp( str, "soft") == 0) + { + op = CO_Soft; + } + else if( strcmp( str, "count") == 0) + { + op = CO_Count; + } + else if( strcmp( str, "line") == 0) + { + op = CO_Line; + } + else if( strcmp( str, "call") == 0) + { + op = CO_Call; + } + else if( strcmp( str, "ret") == 0) + { + op = CO_Ret; + } + else if( strcmp( str, "hard") == 0) + { + op = CO_Hard; + } + lua_remove( L, idx_); // argument is processed, remove it + if( op == CO_Invalid) + { + luaL_error( L, "invalid hook option %s", str); + } + return op; + } + return CO_Hard; } // ################################################################################################ // bool[,reason] = lane_h:cancel( [mode, hookcount] [, timeout] [, force [, forcekill_timeout]]) LUAG_FUNC( thread_cancel) { - Lane* s = lua_toLane( L, 1); - double secs = 0.0; - CancelOp op = which_op( L, 2); // this removes the op string from the stack - - if( op > 0) // hook is requested - { - int hook_count = (int) lua_tointeger( L, 2); - lua_remove( L, 2); // argument is processed, remove it - if( hook_count < 1) - { - return luaL_error( L, "hook count cannot be < 1"); - } - lua_sethook( s->L, cancel_hook, op, hook_count); - } - - if( lua_type( L, 2) == LUA_TNUMBER) - { - secs = lua_tonumber( L, 2); - lua_remove( L, 2); // argument is processed, remove it - if( secs < 0.0) - { - return luaL_error( L, "cancel timeout cannot be < 0"); - } - } - - { - bool_t force = lua_toboolean( L, 2); // FALSE if nothing there - double forcekill_timeout = luaL_optnumber( L, 3, 0.0); - - switch( thread_cancel( L, s, op, secs, force, forcekill_timeout)) - { - case CR_Timeout: - lua_pushboolean( L, 0); - lua_pushstring( L, "timeout"); - return 2; - - case CR_Cancelled: - lua_pushboolean( L, 1); - push_thread_status( L, s); - return 2; - - case CR_Killed: - lua_pushboolean( L, 1); - push_thread_status( L, s); - return 2; - } - } - // should never happen, only here to prevent the compiler from complaining of "not all control paths returning a value" - return 0; + Lane* s = lua_toLane( L, 1); + double secs = 0.0; + CancelOp op = which_op( L, 2); // this removes the op string from the stack + + if( op > 0) // hook is requested + { + int hook_count = (int) lua_tointeger( L, 2); + lua_remove( L, 2); // argument is processed, remove it + if( hook_count < 1) + { + return luaL_error( L, "hook count cannot be 
< 1"); + } + lua_sethook( s->L, cancel_hook, op, hook_count); + } + + if( lua_type( L, 2) == LUA_TNUMBER) + { + secs = lua_tonumber( L, 2); + lua_remove( L, 2); // argument is processed, remove it + if( secs < 0.0) + { + return luaL_error( L, "cancel timeout cannot be < 0"); + } + } + + { + bool_t force = lua_toboolean( L, 2); // FALSE if nothing there + double forcekill_timeout = luaL_optnumber( L, 3, 0.0); + + switch( thread_cancel( L, s, op, secs, force, forcekill_timeout)) + { + case CR_Timeout: + lua_pushboolean( L, 0); + lua_pushstring( L, "timeout"); + return 2; + + case CR_Cancelled: + lua_pushboolean( L, 1); + push_thread_status( L, s); + return 2; + + case CR_Killed: + lua_pushboolean( L, 1); + push_thread_status( L, s); + return 2; + } + } + // should never happen, only here to prevent the compiler from complaining of "not all control paths returning a value" + return 0; } diff --git a/src/cancel.h b/src/cancel.h index e7656ac..c7c5433 100644 --- a/src/cancel.h +++ b/src/cancel.h @@ -17,27 +17,27 @@ typedef struct s_Lane Lane; // forward */ enum e_cancel_request { - CANCEL_NONE, // no pending cancel request - CANCEL_SOFT, // user wants the lane to cancel itself manually on cancel_test() - CANCEL_HARD // user wants the lane to be interrupted (meaning code won't return from those functions) from inside linda:send/receive calls + CANCEL_NONE, // no pending cancel request + CANCEL_SOFT, // user wants the lane to cancel itself manually on cancel_test() + CANCEL_HARD // user wants the lane to be interrupted (meaning code won't return from those functions) from inside linda:send/receive calls }; typedef enum { - CR_Timeout, - CR_Cancelled, - CR_Killed + CR_Timeout, + CR_Cancelled, + CR_Killed } cancel_result; typedef enum { - CO_Invalid = -2, - CO_Hard = -1, - CO_Soft = 0, - CO_Count = LUA_MASKCOUNT, - CO_Line = LUA_MASKLINE, - CO_Call = LUA_MASKCALL, - CO_Ret = LUA_MASKRET, + CO_Invalid = -2, + CO_Hard = -1, + CO_Soft = 0, + CO_Count = LUA_MASKCOUNT, + CO_Line = LUA_MASKLINE, + CO_Call = LUA_MASKCALL, + CO_Ret = LUA_MASKRET, } CancelOp; // crc64/we of string "CANCEL_ERROR" generated at http://www.nitrxgen.net/hashgen/ @@ -50,9 +50,9 @@ cancel_result thread_cancel( lua_State* L, Lane* s, CancelOp op_, double secs_, static inline int cancel_error( lua_State* L) { - STACK_GROW( L, 1); - push_unique_key( L, CANCEL_ERROR); // special error value - return lua_error( L); // doesn't return + STACK_GROW( L, 1); + push_unique_key( L, CANCEL_ERROR); // special error value + return lua_error( L); // doesn't return } // ################################################################################################ diff --git a/src/compat.c b/src/compat.c index d9bc3dd..19159a9 100644 --- a/src/compat.c +++ b/src/compat.c @@ -12,34 +12,34 @@ #if LUA_VERSION_NUM == 501 static int luaL_getsubtable (lua_State *L, int idx, const char *fname) { - lua_getfield(L, idx, fname); - if (lua_istable(L, -1)) - return 1; /* table already there */ - else - { - lua_pop(L, 1); /* remove previous result */ - idx = lua_absindex(L, idx); - lua_newtable(L); - lua_pushvalue(L, -1); /* copy to be left at top */ - lua_setfield(L, idx, fname); /* assign new table to field */ - return 0; /* false, because did not find table there */ - } + lua_getfield(L, idx, fname); + if (lua_istable(L, -1)) + return 1; /* table already there */ + else + { + lua_pop(L, 1); /* remove previous result */ + idx = lua_absindex(L, idx); + lua_newtable(L); + lua_pushvalue(L, -1); /* copy to be left at top */ + lua_setfield(L, idx, fname); /* 
assign new table to field */ + return 0; /* false, because did not find table there */ + } } void luaL_requiref (lua_State *L, const char *modname, lua_CFunction openf, int glb) { - lua_pushcfunction(L, openf); - lua_pushstring(L, modname); /* argument to open function */ - lua_call(L, 1, 1); /* open module */ - luaL_getsubtable(L, LUA_REGISTRYINDEX, LUA_LOADED_TABLE); - lua_pushvalue(L, -2); /* make copy of module (call result) */ - lua_setfield(L, -2, modname); /* _LOADED[modname] = module */ - lua_pop(L, 1); /* remove _LOADED table */ - if (glb) - { - lua_pushvalue(L, -1); /* copy of 'mod' */ - lua_setglobal(L, modname); /* _G[modname] = module */ - } + lua_pushcfunction(L, openf); + lua_pushstring(L, modname); /* argument to open function */ + lua_call(L, 1, 1); /* open module */ + luaL_getsubtable(L, LUA_REGISTRYINDEX, LUA_LOADED_TABLE); + lua_pushvalue(L, -2); /* make copy of module (call result) */ + lua_setfield(L, -2, modname); /* _LOADED[modname] = module */ + lua_pop(L, 1); /* remove _LOADED table */ + if (glb) + { + lua_pushvalue(L, -1); /* copy of 'mod' */ + lua_setglobal(L, modname); /* _G[modname] = module */ + } } #endif // LUA_VERSION_NUM @@ -47,49 +47,49 @@ void luaL_requiref (lua_State *L, const char *modname, lua_CFunction openf, int void* lua_newuserdatauv( lua_State* L, size_t sz, int nuvalue) { - ASSERT_L( nuvalue <= 1); - return lua_newuserdata( L, sz); + ASSERT_L( nuvalue <= 1); + return lua_newuserdata( L, sz); } int lua_getiuservalue( lua_State* L, int idx, int n) { - if( n > 1) - { - lua_pushnil( L); - return LUA_TNONE; - } - lua_getuservalue( L, idx); + if( n > 1) + { + lua_pushnil( L); + return LUA_TNONE; + } + lua_getuservalue( L, idx); #if LUA_VERSION_NUM == 501 - /* default environment is not a nil (see lua_getfenv) */ - lua_getglobal(L, "package"); - if (lua_rawequal(L, -2, -1) || lua_rawequal(L, -2, LUA_GLOBALSINDEX)) - { - lua_pop(L, 2); - lua_pushnil( L); + /* default environment is not a nil (see lua_getfenv) */ + lua_getglobal(L, "package"); + if (lua_rawequal(L, -2, -1) || lua_rawequal(L, -2, LUA_GLOBALSINDEX)) + { + lua_pop(L, 2); + lua_pushnil( L); - return LUA_TNONE; - } - lua_pop(L, 1); /* remove package */ + return LUA_TNONE; + } + lua_pop(L, 1); /* remove package */ #endif - return lua_type( L, -1); + return lua_type( L, -1); } int lua_setiuservalue( lua_State* L, int idx, int n) { - if( n > 1 + if( n > 1 #if LUA_VERSION_NUM == 501 - || lua_type( L, -1) != LUA_TTABLE + || lua_type( L, -1) != LUA_TTABLE #endif - ) - { - lua_pop( L, 1); - return 0; - } + ) + { + lua_pop( L, 1); + return 0; + } - (void) lua_setuservalue( L, idx); - return 1; // I guess anything non-0 is ok + (void) lua_setuservalue( L, idx); + return 1; // I guess anything non-0 is ok } #endif // LUA_VERSION_NUM diff --git a/src/deep.c b/src/deep.c index 3c7680d..c475dc5 100644 --- a/src/deep.c +++ b/src/deep.c @@ -73,17 +73,17 @@ static DECLARE_CONST_UNIQUE_KEY( DEEP_PROXY_CACHE_KEY, 0x05773d6fc26be106); */ static void set_deep_lookup( lua_State* L) { - STACK_GROW( L, 3); - STACK_CHECK( L, 2); // a b - push_registry_subtable( L, DEEP_LOOKUP_KEY); // a b {} - STACK_MID( L, 3); - lua_insert( L, -3); // {} a b - lua_pushvalue( L, -1); // {} a b b - lua_pushvalue( L,-3); // {} a b b a - lua_rawset( L, -5); // {} a b - lua_rawset( L, -3); // {} - lua_pop( L, 1); // - STACK_END( L, 0); + STACK_GROW( L, 3); + STACK_CHECK( L, 2); // a b + push_registry_subtable( L, DEEP_LOOKUP_KEY); // a b {} + STACK_MID( L, 3); + lua_insert( L, -3); // {} a b + lua_pushvalue( L, -1); // {} a b b + 
lua_pushvalue( L,-3); // {} a b b a + lua_rawset( L, -5); // {} a b + lua_rawset( L, -3); // {} + lua_pop( L, 1); // + STACK_END( L, 0); } /* @@ -92,16 +92,16 @@ static void set_deep_lookup( lua_State* L) */ static void get_deep_lookup( lua_State* L) { - STACK_GROW( L, 1); - STACK_CHECK( L, 1); // a - REGISTRY_GET( L, DEEP_LOOKUP_KEY); // a {} - if( !lua_isnil( L, -1)) - { - lua_insert( L, -2); // {} a - lua_rawget( L, -2); // {} b - } - lua_remove( L, -2); // a|b - STACK_END( L, 1); + STACK_GROW( L, 1); + STACK_CHECK( L, 1); // a + REGISTRY_GET( L, DEEP_LOOKUP_KEY); // a {} + if( !lua_isnil( L, -1)) + { + lua_insert( L, -2); // {} a + lua_rawget( L, -2); // {} b + } + lua_remove( L, -2); // a|b + STACK_END( L, 1); } /* @@ -110,44 +110,44 @@ static void get_deep_lookup( lua_State* L) */ static inline luaG_IdFunction get_idfunc( lua_State* L, int index, LookupMode mode_) { - // when looking inside a keeper, we are 100% sure the object is a deep userdata - if( mode_ == eLM_FromKeeper) - { - DeepPrelude** proxy = (DeepPrelude**) lua_touserdata( L, index); - // we can (and must) cast and fetch the internally stored idfunc - return (*proxy)->idfunc; - } - else - { - // essentially we are making sure that the metatable of the object we want to copy is stored in our metatable/idfunc database - // it is the only way to ensure that the userdata is indeed a deep userdata! - // of course, we could just trust the caller, but we won't - luaG_IdFunction ret; - STACK_GROW( L, 1); - STACK_CHECK( L, 0); - - if( !lua_getmetatable( L, index)) // deep ... metatable? - { - return NULL; // no metatable: can't be a deep userdata object! - } - - // replace metatable with the idfunc pointer, if it is actually a deep userdata - get_deep_lookup( L); // deep ... idfunc|nil - - ret = (luaG_IdFunction) lua_touserdata( L, -1); // NULL if not a userdata - lua_pop( L, 1); - STACK_END( L, 0); - return ret; - } + // when looking inside a keeper, we are 100% sure the object is a deep userdata + if( mode_ == eLM_FromKeeper) + { + DeepPrelude** proxy = (DeepPrelude**) lua_touserdata( L, index); + // we can (and must) cast and fetch the internally stored idfunc + return (*proxy)->idfunc; + } + else + { + // essentially we are making sure that the metatable of the object we want to copy is stored in our metatable/idfunc database + // it is the only way to ensure that the userdata is indeed a deep userdata! + // of course, we could just trust the caller, but we won't + luaG_IdFunction ret; + STACK_GROW( L, 1); + STACK_CHECK( L, 0); + + if( !lua_getmetatable( L, index)) // deep ... metatable? + { + return NULL; // no metatable: can't be a deep userdata object! + } + + // replace metatable with the idfunc pointer, if it is actually a deep userdata + get_deep_lookup( L); // deep ... 
idfunc|nil + + ret = (luaG_IdFunction) lua_touserdata( L, -1); // NULL if not a userdata + lua_pop( L, 1); + STACK_END( L, 0); + return ret; + } } void free_deep_prelude( lua_State* L, DeepPrelude* prelude_) { - // Call 'idfunc( "delete", deep_ptr )' to make deep cleanup - lua_pushlightuserdata( L, prelude_); - ASSERT_L( prelude_->idfunc); - prelude_->idfunc( L, eDO_delete); + // Call 'idfunc( "delete", deep_ptr )' to make deep cleanup + lua_pushlightuserdata( L, prelude_); + ASSERT_L( prelude_->idfunc); + prelude_->idfunc( L, eDO_delete); } @@ -159,38 +159,38 @@ void free_deep_prelude( lua_State* L, DeepPrelude* prelude_) */ static int deep_userdata_gc( lua_State* L) { - DeepPrelude** proxy = (DeepPrelude**) lua_touserdata( L, 1); - DeepPrelude* p = *proxy; - Universe* U = universe_get( L); - int v; - - // can work without a universe if creating a deep userdata from some external C module when Lanes isn't loaded - // in that case, we are not multithreaded and locking isn't necessary anyway - if( U) MUTEX_LOCK( &U->deep_lock); - v = -- (p->refcount); - if (U) MUTEX_UNLOCK( &U->deep_lock); - - if( v == 0) - { - // retrieve wrapped __gc - lua_pushvalue( L, lua_upvalueindex( 1)); // self __gc? - if( !lua_isnil( L, -1)) - { - lua_insert( L, -2); // __gc self - lua_call( L, 1, 0); // - } - // 'idfunc' expects a clean stack to work on - lua_settop( L, 0); - free_deep_prelude( L, p); - - // top was set to 0, then userdata was pushed. "delete" might want to pop the userdata (we don't care), but should not push anything! - if ( lua_gettop( L) > 1) - { - luaL_error( L, "Bad idfunc(eDO_delete): should not push anything"); - } - } - *proxy = NULL; // make sure we don't use it any more, just in case - return 0; + DeepPrelude** proxy = (DeepPrelude**) lua_touserdata( L, 1); + DeepPrelude* p = *proxy; + Universe* U = universe_get( L); + int v; + + // can work without a universe if creating a deep userdata from some external C module when Lanes isn't loaded + // in that case, we are not multithreaded and locking isn't necessary anyway + if( U) MUTEX_LOCK( &U->deep_lock); + v = -- (p->refcount); + if (U) MUTEX_UNLOCK( &U->deep_lock); + + if( v == 0) + { + // retrieve wrapped __gc + lua_pushvalue( L, lua_upvalueindex( 1)); // self __gc? + if( !lua_isnil( L, -1)) + { + lua_insert( L, -2); // __gc self + lua_call( L, 1, 0); // + } + // 'idfunc' expects a clean stack to work on + lua_settop( L, 0); + free_deep_prelude( L, p); + + // top was set to 0, then userdata was pushed. "delete" might want to pop the userdata (we don't care), but should not push anything! 
+ if ( lua_gettop( L) > 1) + { + luaL_error( L, "Bad idfunc(eDO_delete): should not push anything"); + } + } + *proxy = NULL; // make sure we don't use it any more, just in case + return 0; } @@ -205,155 +205,155 @@ static int deep_userdata_gc( lua_State* L) */ char const* push_deep_proxy( Universe* U, lua_State* L, DeepPrelude* prelude, int nuv_, LookupMode mode_) { - DeepPrelude** proxy; - - // Check if a proxy already exists - push_registry_subtable_mode( L, DEEP_PROXY_CACHE_KEY, "v"); // DPC - lua_pushlightuserdata( L, prelude); // DPC deep - lua_rawget( L, -2); // DPC proxy - if ( !lua_isnil( L, -1)) - { - lua_remove( L, -2); // proxy - return NULL; - } - else - { - lua_pop( L, 1); // DPC - } - - // can work without a universe if creating a deep userdata from some external C module when Lanes isn't loaded - // in that case, we are not multithreaded and locking isn't necessary anyway - if( U) MUTEX_LOCK( &U->deep_lock); - ++ (prelude->refcount); // one more proxy pointing to this deep data - if( U) MUTEX_UNLOCK( &U->deep_lock); - - STACK_GROW( L, 7); - STACK_CHECK( L, 0); - - // a new full userdata, fitted with the specified number of uservalue slots (always 1 for Lua < 5.4) - proxy = lua_newuserdatauv( L, sizeof(DeepPrelude*), nuv_); // DPC proxy - ASSERT_L( proxy); - *proxy = prelude; - - // Get/create metatable for 'idfunc' (in this state) - lua_pushlightuserdata( L, (void*)(ptrdiff_t)(prelude->idfunc)); // DPC proxy idfunc - get_deep_lookup( L); // DPC proxy metatable? - - if( lua_isnil( L, -1)) // // No metatable yet. - { - char const* modname; - int oldtop = lua_gettop( L); // DPC proxy nil - lua_pop( L, 1); // DPC proxy - // 1 - make one and register it - if( mode_ != eLM_ToKeeper) - { - (void) prelude->idfunc( L, eDO_metatable); // DPC proxy metatable - if( lua_gettop( L) - oldtop != 0 || !lua_istable( L, -1)) - { - lua_settop( L, oldtop); // DPC proxy X - lua_pop( L, 3); // - return "Bad idfunc(eOP_metatable): unexpected pushed value"; - } - // if the metatable contains a __gc, we will call it from our own - lua_getfield( L, -1, "__gc"); // DPC proxy metatable __gc - } - else - { - // keepers need a minimal metatable that only contains our own __gc - lua_newtable( L); // DPC proxy metatable - lua_pushnil( L); // DPC proxy metatable nil - } - if( lua_isnil( L, -1)) - { - // Add our own '__gc' method - lua_pop( L, 1); // DPC proxy metatable - lua_pushcfunction( L, deep_userdata_gc); // DPC proxy metatable deep_userdata_gc - } - else - { - // Add our own '__gc' method wrapping the original - lua_pushcclosure( L, deep_userdata_gc, 1); // DPC proxy metatable deep_userdata_gc - } - lua_setfield( L, -2, "__gc"); // DPC proxy metatable - - // Memorize for later rounds - lua_pushvalue( L, -1); // DPC proxy metatable metatable - lua_pushlightuserdata( L, (void*)(ptrdiff_t)(prelude->idfunc)); // DPC proxy metatable metatable idfunc - set_deep_lookup( L); // DPC proxy metatable - - // 2 - cause the target state to require the module that exported the idfunc - // this is needed because we must make sure the shared library is still loaded as long as we hold a pointer on the idfunc - { - int oldtop_module = lua_gettop( L); - modname = (char const*) prelude->idfunc( L, eDO_module); // DPC proxy metatable - // make sure the function pushed nothing on the stack! 
- if( lua_gettop( L) - oldtop_module != 0) - { - lua_pop( L, 3); // - return "Bad idfunc(eOP_module): should not push anything"; - } - } - if( NULL != modname) // we actually got a module name - { - // L.registry._LOADED exists without having registered the 'package' library. - lua_getglobal( L, "require"); // DPC proxy metatable require() - // check that the module is already loaded (or being loaded, we are happy either way) - if( lua_isfunction( L, -1)) - { - lua_pushstring( L, modname); // DPC proxy metatable require() "module" - lua_getfield( L, LUA_REGISTRYINDEX, LUA_LOADED_TABLE); // DPC proxy metatable require() "module" _R._LOADED - if( lua_istable( L, -1)) - { - bool_t alreadyloaded; - lua_pushvalue( L, -2); // DPC proxy metatable require() "module" _R._LOADED "module" - lua_rawget( L, -2); // DPC proxy metatable require() "module" _R._LOADED module - alreadyloaded = lua_toboolean( L, -1); - if( !alreadyloaded) // not loaded - { - int require_result; - lua_pop( L, 2); // DPC proxy metatable require() "module" - // require "modname" - require_result = lua_pcall( L, 1, 0, 0); // DPC proxy metatable error? - if( require_result != LUA_OK) - { - // failed, return the error message - lua_pushfstring( L, "error while requiring '%s' identified by idfunc(eOP_module): ", modname); - lua_insert( L, -2); // DPC proxy metatable prefix error - lua_concat( L, 2); // DPC proxy metatable error - return lua_tostring( L, -1); - } - } - else // already loaded, we are happy - { - lua_pop( L, 4); // DPC proxy metatable - } - } - else // no L.registry._LOADED; can this ever happen? - { - lua_pop( L, 6); // - return "unexpected error while requiring a module identified by idfunc(eOP_module)"; - } - } - else // a module name, but no require() function :-( - { - lua_pop( L, 4); // - return "lanes receiving deep userdata should register the 'package' library"; - } - } - } - STACK_MID( L, 2); // DPC proxy metatable - ASSERT_L( lua_isuserdata( L, -2)); - ASSERT_L( lua_istable( L, -1)); - lua_setmetatable( L, -2); // DPC proxy - - // If we're here, we obviously had to create a new proxy, so cache it. - lua_pushlightuserdata( L, prelude); // DPC proxy deep - lua_pushvalue( L, -2); // DPC proxy deep proxy - lua_rawset( L, -4); // DPC proxy - lua_remove( L, -2); // proxy - ASSERT_L( lua_isuserdata( L, -1)); - STACK_END( L, 0); - return NULL; + DeepPrelude** proxy; + + // Check if a proxy already exists + push_registry_subtable_mode( L, DEEP_PROXY_CACHE_KEY, "v"); // DPC + lua_pushlightuserdata( L, prelude); // DPC deep + lua_rawget( L, -2); // DPC proxy + if ( !lua_isnil( L, -1)) + { + lua_remove( L, -2); // proxy + return NULL; + } + else + { + lua_pop( L, 1); // DPC + } + + // can work without a universe if creating a deep userdata from some external C module when Lanes isn't loaded + // in that case, we are not multithreaded and locking isn't necessary anyway + if( U) MUTEX_LOCK( &U->deep_lock); + ++ (prelude->refcount); // one more proxy pointing to this deep data + if( U) MUTEX_UNLOCK( &U->deep_lock); + + STACK_GROW( L, 7); + STACK_CHECK( L, 0); + + // a new full userdata, fitted with the specified number of uservalue slots (always 1 for Lua < 5.4) + proxy = lua_newuserdatauv( L, sizeof(DeepPrelude*), nuv_); // DPC proxy + ASSERT_L( proxy); + *proxy = prelude; + + // Get/create metatable for 'idfunc' (in this state) + lua_pushlightuserdata( L, (void*)(ptrdiff_t)(prelude->idfunc)); // DPC proxy idfunc + get_deep_lookup( L); // DPC proxy metatable? + + if( lua_isnil( L, -1)) // // No metatable yet. 
+ { + char const* modname; + int oldtop = lua_gettop( L); // DPC proxy nil + lua_pop( L, 1); // DPC proxy + // 1 - make one and register it + if( mode_ != eLM_ToKeeper) + { + (void) prelude->idfunc( L, eDO_metatable); // DPC proxy metatable + if( lua_gettop( L) - oldtop != 0 || !lua_istable( L, -1)) + { + lua_settop( L, oldtop); // DPC proxy X + lua_pop( L, 3); // + return "Bad idfunc(eOP_metatable): unexpected pushed value"; + } + // if the metatable contains a __gc, we will call it from our own + lua_getfield( L, -1, "__gc"); // DPC proxy metatable __gc + } + else + { + // keepers need a minimal metatable that only contains our own __gc + lua_newtable( L); // DPC proxy metatable + lua_pushnil( L); // DPC proxy metatable nil + } + if( lua_isnil( L, -1)) + { + // Add our own '__gc' method + lua_pop( L, 1); // DPC proxy metatable + lua_pushcfunction( L, deep_userdata_gc); // DPC proxy metatable deep_userdata_gc + } + else + { + // Add our own '__gc' method wrapping the original + lua_pushcclosure( L, deep_userdata_gc, 1); // DPC proxy metatable deep_userdata_gc + } + lua_setfield( L, -2, "__gc"); // DPC proxy metatable + + // Memorize for later rounds + lua_pushvalue( L, -1); // DPC proxy metatable metatable + lua_pushlightuserdata( L, (void*)(ptrdiff_t)(prelude->idfunc)); // DPC proxy metatable metatable idfunc + set_deep_lookup( L); // DPC proxy metatable + + // 2 - cause the target state to require the module that exported the idfunc + // this is needed because we must make sure the shared library is still loaded as long as we hold a pointer on the idfunc + { + int oldtop_module = lua_gettop( L); + modname = (char const*) prelude->idfunc( L, eDO_module); // DPC proxy metatable + // make sure the function pushed nothing on the stack! + if( lua_gettop( L) - oldtop_module != 0) + { + lua_pop( L, 3); // + return "Bad idfunc(eOP_module): should not push anything"; + } + } + if( NULL != modname) // we actually got a module name + { + // L.registry._LOADED exists without having registered the 'package' library. + lua_getglobal( L, "require"); // DPC proxy metatable require() + // check that the module is already loaded (or being loaded, we are happy either way) + if( lua_isfunction( L, -1)) + { + lua_pushstring( L, modname); // DPC proxy metatable require() "module" + lua_getfield( L, LUA_REGISTRYINDEX, LUA_LOADED_TABLE); // DPC proxy metatable require() "module" _R._LOADED + if( lua_istable( L, -1)) + { + bool_t alreadyloaded; + lua_pushvalue( L, -2); // DPC proxy metatable require() "module" _R._LOADED "module" + lua_rawget( L, -2); // DPC proxy metatable require() "module" _R._LOADED module + alreadyloaded = lua_toboolean( L, -1); + if( !alreadyloaded) // not loaded + { + int require_result; + lua_pop( L, 2); // DPC proxy metatable require() "module" + // require "modname" + require_result = lua_pcall( L, 1, 0, 0); // DPC proxy metatable error? + if( require_result != LUA_OK) + { + // failed, return the error message + lua_pushfstring( L, "error while requiring '%s' identified by idfunc(eOP_module): ", modname); + lua_insert( L, -2); // DPC proxy metatable prefix error + lua_concat( L, 2); // DPC proxy metatable error + return lua_tostring( L, -1); + } + } + else // already loaded, we are happy + { + lua_pop( L, 4); // DPC proxy metatable + } + } + else // no L.registry._LOADED; can this ever happen? 
+ { + lua_pop( L, 6); // + return "unexpected error while requiring a module identified by idfunc(eOP_module)"; + } + } + else // a module name, but no require() function :-( + { + lua_pop( L, 4); // + return "lanes receiving deep userdata should register the 'package' library"; + } + } + } + STACK_MID( L, 2); // DPC proxy metatable + ASSERT_L( lua_isuserdata( L, -2)); + ASSERT_L( lua_istable( L, -1)); + lua_setmetatable( L, -2); // DPC proxy + + // If we're here, we obviously had to create a new proxy, so cache it. + lua_pushlightuserdata( L, prelude); // DPC proxy deep + lua_pushvalue( L, -2); // DPC proxy deep proxy + lua_rawset( L, -4); // DPC proxy + lua_remove( L, -2); // proxy + ASSERT_L( lua_isuserdata( L, -1)); + STACK_END( L, 0); + return NULL; } /* @@ -380,42 +380,42 @@ char const* push_deep_proxy( Universe* U, lua_State* L, DeepPrelude* prelude, in */ int luaG_newdeepuserdata( lua_State* L, luaG_IdFunction idfunc, int nuv_) { - char const* errmsg; - - STACK_GROW( L, 1); - STACK_CHECK( L, 0); - { - int const oldtop = lua_gettop( L); - DeepPrelude* prelude = idfunc( L, eDO_new); - if( prelude == NULL) - { - luaL_error( L, "idfunc(eDO_new) failed to create deep userdata (out of memory)"); - } - if( prelude->magic.value != DEEP_VERSION.value) - { - // just in case, don't leak the newly allocated deep userdata object - lua_pushlightuserdata( L, prelude); - idfunc( L, eDO_delete); - return luaL_error( L, "Bad idfunc(eDO_new): DEEP_VERSION is incorrect, rebuild your implementation with the latest deep implementation"); - } - prelude->refcount = 0; // 'push_deep_proxy' will lift it to 1 - prelude->idfunc = idfunc; - - if( lua_gettop( L) - oldtop != 0) - { - // just in case, don't leak the newly allocated deep userdata object - lua_pushlightuserdata( L, prelude); - idfunc( L, eDO_delete); - return luaL_error( L, "Bad idfunc(eDO_new): should not push anything on the stack"); - } - errmsg = push_deep_proxy( universe_get( L), L, prelude, nuv_, eLM_LaneBody); // proxy - if( errmsg != NULL) - { - return luaL_error( L, errmsg); - } - } - STACK_END( L, 1); - return 1; + char const* errmsg; + + STACK_GROW( L, 1); + STACK_CHECK( L, 0); + { + int const oldtop = lua_gettop( L); + DeepPrelude* prelude = idfunc( L, eDO_new); + if( prelude == NULL) + { + luaL_error( L, "idfunc(eDO_new) failed to create deep userdata (out of memory)"); + } + if( prelude->magic.value != DEEP_VERSION.value) + { + // just in case, don't leak the newly allocated deep userdata object + lua_pushlightuserdata( L, prelude); + idfunc( L, eDO_delete); + return luaL_error( L, "Bad idfunc(eDO_new): DEEP_VERSION is incorrect, rebuild your implementation with the latest deep implementation"); + } + prelude->refcount = 0; // 'push_deep_proxy' will lift it to 1 + prelude->idfunc = idfunc; + + if( lua_gettop( L) - oldtop != 0) + { + // just in case, don't leak the newly allocated deep userdata object + lua_pushlightuserdata( L, prelude); + idfunc( L, eDO_delete); + return luaL_error( L, "Bad idfunc(eDO_new): should not push anything on the stack"); + } + errmsg = push_deep_proxy( universe_get( L), L, prelude, nuv_, eLM_LaneBody); // proxy + if( errmsg != NULL) + { + return luaL_error( L, errmsg); + } + } + STACK_END( L, 1); + return 1; } @@ -427,19 +427,19 @@ int luaG_newdeepuserdata( lua_State* L, luaG_IdFunction idfunc, int nuv_) */ void* luaG_todeep( lua_State* L, luaG_IdFunction idfunc, int index) { - DeepPrelude** proxy; + DeepPrelude** proxy; - STACK_CHECK( L, 0); - // ensure it is actually a deep userdata - if( get_idfunc( L, 
index, eLM_LaneBody) != idfunc) - { - return NULL; // no metatable, or wrong kind - } + STACK_CHECK( L, 0); + // ensure it is actually a deep userdata + if( get_idfunc( L, index, eLM_LaneBody) != idfunc) + { + return NULL; // no metatable, or wrong kind + } - proxy = (DeepPrelude**) lua_touserdata( L, index); - STACK_END( L, 0); + proxy = (DeepPrelude**) lua_touserdata( L, index); + STACK_END( L, 0); - return *proxy; + return *proxy; } @@ -452,50 +452,50 @@ void* luaG_todeep( lua_State* L, luaG_IdFunction idfunc, int index) */ bool_t copydeep( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, LookupMode mode_, char const* upName_) { - char const* errmsg; - luaG_IdFunction idfunc = get_idfunc( L, i, mode_); - int nuv = 0; - - if( idfunc == NULL) - { - return FALSE; // not a deep userdata - } - - STACK_CHECK( L, 0); - STACK_CHECK( L2, 0); - - // extract all uservalues of the source - while( lua_getiuservalue( L, i, nuv + 1) != LUA_TNONE) // ... u [uv]* nil - { - ++ nuv; - } - // last call returned TNONE and pushed nil, that we don't need - lua_pop( L, 1); // ... u [uv]* - STACK_MID( L, nuv); - - errmsg = push_deep_proxy( U, L2, *(DeepPrelude**) lua_touserdata( L, i), nuv, mode_); // u - - // transfer all uservalues of the source in the destination - { - int const clone_i = lua_gettop( L2); - while( nuv) - { - inter_copy_one( U, L2, L2_cache_i, L, lua_absindex( L, -1), VT_NORMAL, mode_, upName_); // u uv - lua_pop( L, 1); // ... u [uv]* - // this pops the value from the stack - lua_setiuservalue( L2, clone_i, nuv); // u - -- nuv; - } - } - - STACK_END( L2, 1); - STACK_END( L, 0); - - if( errmsg != NULL) - { - // raise the error in the proper state (not the keeper) - lua_State* errL = (mode_ == eLM_FromKeeper) ? L2 : L; - luaL_error( errL, errmsg); - } - return TRUE; + char const* errmsg; + luaG_IdFunction idfunc = get_idfunc( L, i, mode_); + int nuv = 0; + + if( idfunc == NULL) + { + return FALSE; // not a deep userdata + } + + STACK_CHECK( L, 0); + STACK_CHECK( L2, 0); + + // extract all uservalues of the source + while( lua_getiuservalue( L, i, nuv + 1) != LUA_TNONE) // ... u [uv]* nil + { + ++ nuv; + } + // last call returned TNONE and pushed nil, that we don't need + lua_pop( L, 1); // ... u [uv]* + STACK_MID( L, nuv); + + errmsg = push_deep_proxy( U, L2, *(DeepPrelude**) lua_touserdata( L, i), nuv, mode_); // u + + // transfer all uservalues of the source in the destination + { + int const clone_i = lua_gettop( L2); + while( nuv) + { + inter_copy_one( U, L2, L2_cache_i, L, lua_absindex( L, -1), VT_NORMAL, mode_, upName_); // u uv + lua_pop( L, 1); // ... u [uv]* + // this pops the value from the stack + lua_setiuservalue( L2, clone_i, nuv); // u + -- nuv; + } + } + + STACK_END( L2, 1); + STACK_END( L, 0); + + if( errmsg != NULL) + { + // raise the error in the proper state (not the keeper) + lua_State* errL = (mode_ == eLM_FromKeeper) ? 
L2 : L; + luaL_error( errL, errmsg); + } + return TRUE; } \ No newline at end of file diff --git a/src/deep.h b/src/deep.h index 35c8bd4..9b00b70 100644 --- a/src/deep.h +++ b/src/deep.h @@ -24,18 +24,18 @@ typedef struct s_Universe Universe; enum eLookupMode { - eLM_LaneBody, // send the lane body directly from the source to the destination lane - eLM_ToKeeper, // send a function from a lane to a keeper state - eLM_FromKeeper // send a function from a keeper state to a lane + eLM_LaneBody, // send the lane body directly from the source to the destination lane + eLM_ToKeeper, // send a function from a lane to a keeper state + eLM_FromKeeper // send a function from a keeper state to a lane }; typedef enum eLookupMode LookupMode; enum eDeepOp { - eDO_new, - eDO_delete, - eDO_metatable, - eDO_module, + eDO_new, + eDO_delete, + eDO_metatable, + eDO_module, }; typedef enum eDeepOp DeepOp; @@ -49,11 +49,11 @@ static DECLARE_CONST_UNIQUE_KEY( DEEP_VERSION, 0xB4B0119C10642B29); // should be used as header for full userdata struct s_DeepPrelude { - DECLARE_UNIQUE_KEY( magic); // must be filled by the Deep userdata idfunc that allocates it on eDO_new operation - // when stored in a keeper state, the full userdata doesn't have a metatable, so we need direct access to the idfunc - luaG_IdFunction idfunc; - // data is destroyed when refcount is 0 - volatile int refcount; + DECLARE_UNIQUE_KEY( magic); // must be filled by the Deep userdata idfunc that allocates it on eDO_new operation + // when stored in a keeper state, the full userdata doesn't have a metatable, so we need direct access to the idfunc + luaG_IdFunction idfunc; + // data is destroyed when refcount is 0 + volatile int refcount; }; typedef struct s_DeepPrelude DeepPrelude; diff --git a/src/keeper.c b/src/keeper.c index c777866..eea017f 100644 --- a/src/keeper.c +++ b/src/keeper.c @@ -61,9 +61,9 @@ typedef struct { - lua_Integer first; - lua_Integer count; - lua_Integer limit; + lua_Integer first; + lua_Integer count; + lua_Integer limit; } keeper_fifo; static int const CONTENTS_TABLE = 1; @@ -71,47 +71,47 @@ static int const CONTENTS_TABLE = 1; // replaces the fifo ud by its uservalue on the stack static keeper_fifo* prepare_fifo_access( lua_State* L, int idx_) { - keeper_fifo* fifo = (keeper_fifo*) lua_touserdata( L, idx_); - if( fifo != NULL) - { - idx_ = lua_absindex( L, idx_); - STACK_GROW( L, 1); - // we can replace the fifo userdata in the stack without fear of it being GCed, there are other references around - lua_getiuservalue( L, idx_, CONTENTS_TABLE); - lua_replace( L, idx_); - } - return fifo; + keeper_fifo* fifo = (keeper_fifo*) lua_touserdata( L, idx_); + if( fifo != NULL) + { + idx_ = lua_absindex( L, idx_); + STACK_GROW( L, 1); + // we can replace the fifo userdata in the stack without fear of it being GCed, there are other references around + lua_getiuservalue( L, idx_, CONTENTS_TABLE); + lua_replace( L, idx_); + } + return fifo; } // in: nothing // out: { first = 1, count = 0, limit = -1} static void fifo_new( lua_State* L) { - keeper_fifo* fifo; - STACK_GROW( L, 2); - // a fifo full userdata has one uservalue, the table that holds the actual fifo contents - fifo = (keeper_fifo*)lua_newuserdatauv( L, sizeof( keeper_fifo), 1); - fifo->first = 1; - fifo->count = 0; - fifo->limit = -1; - lua_newtable( L); - lua_setiuservalue( L, -2, CONTENTS_TABLE); + keeper_fifo* fifo; + STACK_GROW( L, 2); + // a fifo full userdata has one uservalue, the table that holds the actual fifo contents + fifo = (keeper_fifo*)lua_newuserdatauv( L, 
sizeof( keeper_fifo), 1); + fifo->first = 1; + fifo->count = 0; + fifo->limit = -1; + lua_newtable( L); + lua_setiuservalue( L, -2, CONTENTS_TABLE); } // in: expect fifo ... on top of the stack // out: nothing, removes all pushed values from the stack static void fifo_push( lua_State* L, keeper_fifo* fifo_, lua_Integer count_) { - int const idx = lua_gettop( L) - (int) count_; - lua_Integer start = fifo_->first + fifo_->count - 1; - lua_Integer i; - // pop all additional arguments, storing them in the fifo - for( i = count_; i >= 1; -- i) - { - // store in the fifo the value at the top of the stack at the specified index, popping it from the stack - lua_rawseti( L, idx, (int)(start + i)); - } - fifo_->count += count_; + int const idx = lua_gettop( L) - (int) count_; + lua_Integer start = fifo_->first + fifo_->count - 1; + lua_Integer i; + // pop all additional arguments, storing them in the fifo + for( i = count_; i >= 1; -- i) + { + // store in the fifo the value at the top of the stack at the specified index, popping it from the stack + lua_rawseti( L, idx, (int)(start + i)); + } + fifo_->count += count_; } // in: fifo @@ -121,46 +121,46 @@ static void fifo_push( lua_State* L, keeper_fifo* fifo_, lua_Integer count_) // function assumes that there is enough data in the fifo to satisfy the request static void fifo_peek( lua_State* L, keeper_fifo* fifo_, lua_Integer count_) { - lua_Integer i; - STACK_GROW( L, count_); - for( i = 0; i < count_; ++ i) - { - lua_rawgeti( L, 1, (int)( fifo_->first + i)); - } + lua_Integer i; + STACK_GROW( L, count_); + for( i = 0; i < count_; ++ i) + { + lua_rawgeti( L, 1, (int)( fifo_->first + i)); + } } // in: fifo // out: remove the fifo from the stack, push as many items as required on the stack (function assumes they exist in sufficient number) static void fifo_pop( lua_State* L, keeper_fifo* fifo_, lua_Integer count_) { - int const fifo_idx = lua_gettop( L); // ... fifo - int i; - // each iteration pushes a value on the stack! - STACK_GROW( L, count_ + 2); - // skip first item, we will push it last - for( i = 1; i < count_; ++ i) - { - int const at = (int)( fifo_->first + i); - // push item on the stack - lua_rawgeti( L, fifo_idx, at); // ... fifo val - // remove item from the fifo - lua_pushnil( L); // ... fifo val nil - lua_rawseti( L, fifo_idx, at); // ... fifo val - } - // now process first item - { - int const at = (int)( fifo_->first); - lua_rawgeti( L, fifo_idx, at); // ... fifo vals val - lua_pushnil( L); // ... fifo vals val nil - lua_rawseti( L, fifo_idx, at); // ... fifo vals val - lua_replace( L, fifo_idx); // ... vals - } - { - // avoid ever-growing indexes by resetting each time we detect the fifo is empty - lua_Integer const new_count = fifo_->count - count_; - fifo_->first = (new_count == 0) ? 1 : (fifo_->first + count_); - fifo_->count = new_count; - } + int const fifo_idx = lua_gettop( L); // ... fifo + int i; + // each iteration pushes a value on the stack! + STACK_GROW( L, count_ + 2); + // skip first item, we will push it last + for( i = 1; i < count_; ++ i) + { + int const at = (int)( fifo_->first + i); + // push item on the stack + lua_rawgeti( L, fifo_idx, at); // ... fifo val + // remove item from the fifo + lua_pushnil( L); // ... fifo val nil + lua_rawseti( L, fifo_idx, at); // ... fifo val + } + // now process first item + { + int const at = (int)( fifo_->first); + lua_rawgeti( L, fifo_idx, at); // ... fifo vals val + lua_pushnil( L); // ... fifo vals val nil + lua_rawseti( L, fifo_idx, at); // ... 
fifo vals val + lua_replace( L, fifo_idx); // ... vals + } + { + // avoid ever-growing indexes by resetting each time we detect the fifo is empty + lua_Integer const new_count = fifo_->count - count_; + fifo_->first = (new_count == 0) ? 1 : (fifo_->first + count_); + fifo_->count = new_count; + } } // in: linda_ud expected at *absolute* stack slot idx @@ -169,87 +169,87 @@ static void fifo_pop( lua_State* L, keeper_fifo* fifo_, lua_Integer count_) static DECLARE_CONST_UNIQUE_KEY( FIFOS_KEY, 0xdce50bbc351cd465); static void push_table( lua_State* L, int idx_) { - STACK_GROW( L, 4); - STACK_CHECK( L, 0); - idx_ = lua_absindex( L, idx_); - REGISTRY_GET( L, FIFOS_KEY); // ud fifos - lua_pushvalue( L, idx_); // ud fifos ud - lua_rawget( L, -2); // ud fifos fifos[ud] - STACK_MID( L, 2); - if( lua_isnil( L, -1)) - { - lua_pop( L, 1); // ud fifos - // add a new fifos table for this linda - lua_newtable( L); // ud fifos fifos[ud] - lua_pushvalue( L, idx_); // ud fifos fifos[ud] ud - lua_pushvalue( L, -2); // ud fifos fifos[ud] ud fifos[ud] - lua_rawset( L, -4); // ud fifos fifos[ud] - } - lua_remove( L, -2); // ud fifos[ud] - STACK_END( L, 1); + STACK_GROW( L, 4); + STACK_CHECK( L, 0); + idx_ = lua_absindex( L, idx_); + REGISTRY_GET( L, FIFOS_KEY); // ud fifos + lua_pushvalue( L, idx_); // ud fifos ud + lua_rawget( L, -2); // ud fifos fifos[ud] + STACK_MID( L, 2); + if( lua_isnil( L, -1)) + { + lua_pop( L, 1); // ud fifos + // add a new fifos table for this linda + lua_newtable( L); // ud fifos fifos[ud] + lua_pushvalue( L, idx_); // ud fifos fifos[ud] ud + lua_pushvalue( L, -2); // ud fifos fifos[ud] ud fifos[ud] + lua_rawset( L, -4); // ud fifos fifos[ud] + } + lua_remove( L, -2); // ud fifos[ud] + STACK_END( L, 1); } int keeper_push_linda_storage( Universe* U, lua_State* L, void* ptr_, ptrdiff_t magic_) { - Keeper* const K = which_keeper( U->keepers, magic_); - lua_State* const KL = K ? K->L : NULL; - if( KL == NULL) return 0; - STACK_GROW( KL, 4); - STACK_CHECK( KL, 0); - REGISTRY_GET( KL, FIFOS_KEY); // fifos - lua_pushlightuserdata( KL, ptr_); // fifos ud - lua_rawget( KL, -2); // fifos storage - lua_remove( KL, -2); // storage - if( !lua_istable( KL, -1)) - { - lua_pop( KL, 1); // - STACK_MID( KL, 0); - return 0; - } - // move data from keeper to destination state KEEPER MAIN - lua_pushnil( KL); // storage nil - STACK_GROW( L, 5); - STACK_CHECK( L, 0); - lua_newtable( L); // out - while( lua_next( KL, -2)) // storage key fifo - { - keeper_fifo* fifo = prepare_fifo_access( KL, -1); // storage key fifo - lua_pushvalue( KL, -2); // storage key fifo key - luaG_inter_move( U, KL, L, 1, eLM_FromKeeper); // storage key fifo // out key - STACK_MID( L, 2); - lua_newtable( L); // out key keyout - luaG_inter_move( U, KL, L, 1, eLM_FromKeeper); // storage key // out key keyout fifo - lua_pushinteger( L, fifo->first); // out key keyout fifo first - STACK_MID( L, 5); - lua_setfield( L, -3, "first"); // out key keyout fifo - lua_pushinteger( L, fifo->count); // out key keyout fifo count - STACK_MID( L, 5); - lua_setfield( L, -3, "count"); // out key keyout fifo - lua_pushinteger( L, fifo->limit); // out key keyout fifo limit - STACK_MID( L, 5); - lua_setfield( L, -3, "limit"); // out key keyout fifo - lua_setfield( L, -2, "fifo"); // out key keyout - lua_rawset( L, -3); // out - STACK_MID( L, 1); - } - STACK_END( L, 1); - lua_pop( KL, 1); // - STACK_END( KL, 0); - return 1; + Keeper* const K = which_keeper( U->keepers, magic_); + lua_State* const KL = K ? 
K->L : NULL; + if( KL == NULL) return 0; + STACK_GROW( KL, 4); + STACK_CHECK( KL, 0); + REGISTRY_GET( KL, FIFOS_KEY); // fifos + lua_pushlightuserdata( KL, ptr_); // fifos ud + lua_rawget( KL, -2); // fifos storage + lua_remove( KL, -2); // storage + if( !lua_istable( KL, -1)) + { + lua_pop( KL, 1); // + STACK_MID( KL, 0); + return 0; + } + // move data from keeper to destination state KEEPER MAIN + lua_pushnil( KL); // storage nil + STACK_GROW( L, 5); + STACK_CHECK( L, 0); + lua_newtable( L); // out + while( lua_next( KL, -2)) // storage key fifo + { + keeper_fifo* fifo = prepare_fifo_access( KL, -1); // storage key fifo + lua_pushvalue( KL, -2); // storage key fifo key + luaG_inter_move( U, KL, L, 1, eLM_FromKeeper); // storage key fifo // out key + STACK_MID( L, 2); + lua_newtable( L); // out key keyout + luaG_inter_move( U, KL, L, 1, eLM_FromKeeper); // storage key // out key keyout fifo + lua_pushinteger( L, fifo->first); // out key keyout fifo first + STACK_MID( L, 5); + lua_setfield( L, -3, "first"); // out key keyout fifo + lua_pushinteger( L, fifo->count); // out key keyout fifo count + STACK_MID( L, 5); + lua_setfield( L, -3, "count"); // out key keyout fifo + lua_pushinteger( L, fifo->limit); // out key keyout fifo limit + STACK_MID( L, 5); + lua_setfield( L, -3, "limit"); // out key keyout fifo + lua_setfield( L, -2, "fifo"); // out key keyout + lua_rawset( L, -3); // out + STACK_MID( L, 1); + } + STACK_END( L, 1); + lua_pop( KL, 1); // + STACK_END( KL, 0); + return 1; } // in: linda_ud int keepercall_clear( lua_State* L) { - STACK_GROW( L, 3); - STACK_CHECK( L, 0); - REGISTRY_GET( L, FIFOS_KEY); // ud fifos - lua_pushvalue( L, 1); // ud fifos ud - lua_pushnil( L); // ud fifos ud nil - lua_rawset( L, -3); // ud fifos - lua_pop( L, 1); // ud - STACK_END( L, 0); - return 0; + STACK_GROW( L, 3); + STACK_CHECK( L, 0); + REGISTRY_GET( L, FIFOS_KEY); // ud fifos + lua_pushvalue( L, 1); // ud fifos ud + lua_pushnil( L); // ud fifos ud nil + lua_rawset( L, -3); // ud fifos + lua_pop( L, 1); // ud + STACK_END( L, 0); + return 0; } @@ -257,311 +257,311 @@ int keepercall_clear( lua_State* L) // out: true|false int keepercall_send( lua_State* L) { - keeper_fifo* fifo; - int n = lua_gettop( L) - 2; - push_table( L, 1); // ud key ... fifos - // get the fifo associated to this key in this linda, create it if it doesn't exist - lua_pushvalue( L, 2); // ud key ... fifos key - lua_rawget( L, -2); // ud key ... fifos fifo - if( lua_isnil( L, -1)) - { - lua_pop( L, 1); // ud key ... fifos - fifo_new( L); // ud key ... fifos fifo - lua_pushvalue( L, 2); // ud key ... fifos fifo key - lua_pushvalue( L, -2); // ud key ... fifos fifo key fifo - lua_rawset( L, -4); // ud key ... fifos fifo - } - lua_remove( L, -2); // ud key ... fifo - fifo = (keeper_fifo*) lua_touserdata( L, -1); - if( fifo->limit >= 0 && fifo->count + n > fifo->limit) - { - lua_settop( L, 0); // - lua_pushboolean( L, 0); // false - } - else - { - fifo = prepare_fifo_access( L, -1); - lua_replace( L, 2); // ud fifo ... - fifo_push( L, fifo, n); // ud fifo - lua_settop( L, 0); // - lua_pushboolean( L, 1); // true - } - return 1; + keeper_fifo* fifo; + int n = lua_gettop( L) - 2; + push_table( L, 1); // ud key ... fifos + // get the fifo associated to this key in this linda, create it if it doesn't exist + lua_pushvalue( L, 2); // ud key ... fifos key + lua_rawget( L, -2); // ud key ... fifos fifo + if( lua_isnil( L, -1)) + { + lua_pop( L, 1); // ud key ... fifos + fifo_new( L); // ud key ... 
fifos fifo + lua_pushvalue( L, 2); // ud key ... fifos fifo key + lua_pushvalue( L, -2); // ud key ... fifos fifo key fifo + lua_rawset( L, -4); // ud key ... fifos fifo + } + lua_remove( L, -2); // ud key ... fifo + fifo = (keeper_fifo*) lua_touserdata( L, -1); + if( fifo->limit >= 0 && fifo->count + n > fifo->limit) + { + lua_settop( L, 0); // + lua_pushboolean( L, 0); // false + } + else + { + fifo = prepare_fifo_access( L, -1); + lua_replace( L, 2); // ud fifo ... + fifo_push( L, fifo, n); // ud fifo + lua_settop( L, 0); // + lua_pushboolean( L, 1); // true + } + return 1; } // in: linda_ud, key [, key]? // out: (key, val) or nothing int keepercall_receive( lua_State* L) { - int top = lua_gettop( L); - int i; - push_table( L, 1); // ud keys fifos - lua_replace( L, 1); // fifos keys - for( i = 2; i <= top; ++ i) - { - keeper_fifo* fifo; - lua_pushvalue( L, i); // fifos keys key[i] - lua_rawget( L, 1); // fifos keys fifo - fifo = prepare_fifo_access( L, -1); // fifos keys fifo - if( fifo != NULL && fifo->count > 0) - { - fifo_pop( L, fifo, 1); // fifos keys val - if( !lua_isnil( L, -1)) - { - lua_replace( L, 1); // val keys - lua_settop( L, i); // val keys key[i] - if( i != 2) - { - lua_replace( L, 2); // val key keys - lua_settop( L, 2); // val key - } - lua_insert( L, 1); // key, val - return 2; - } - } - lua_settop( L, top); // data keys - } - // nothing to receive - return 0; + int top = lua_gettop( L); + int i; + push_table( L, 1); // ud keys fifos + lua_replace( L, 1); // fifos keys + for( i = 2; i <= top; ++ i) + { + keeper_fifo* fifo; + lua_pushvalue( L, i); // fifos keys key[i] + lua_rawget( L, 1); // fifos keys fifo + fifo = prepare_fifo_access( L, -1); // fifos keys fifo + if( fifo != NULL && fifo->count > 0) + { + fifo_pop( L, fifo, 1); // fifos keys val + if( !lua_isnil( L, -1)) + { + lua_replace( L, 1); // val keys + lua_settop( L, i); // val keys key[i] + if( i != 2) + { + lua_replace( L, 2); // val key keys + lua_settop( L, 2); // val key + } + lua_insert( L, 1); // key, val + return 2; + } + } + lua_settop( L, top); // data keys + } + // nothing to receive + return 0; } //in: linda_ud key mincount [maxcount] int keepercall_receive_batched( lua_State* L) { - lua_Integer const min_count = lua_tointeger( L, 3); - if( min_count > 0) - { - keeper_fifo* fifo; - lua_Integer const max_count = luaL_optinteger( L, 4, min_count); - lua_settop( L, 2); // ud key - lua_insert( L, 1); // key ud - push_table( L, 2); // key ud fifos - lua_remove( L, 2); // key fifos - lua_pushvalue( L, 1); // key fifos key - lua_rawget( L, 2); // key fifos fifo - lua_remove( L, 2); // key fifo - fifo = prepare_fifo_access( L, 2); // key fifo - if( fifo != NULL && fifo->count >= min_count) - { - fifo_pop( L, fifo, __min( max_count, fifo->count)); // key ... - } - else - { - lua_settop( L, 0); - } - return lua_gettop( L); - } - else - { - return 0; - } + lua_Integer const min_count = lua_tointeger( L, 3); + if( min_count > 0) + { + keeper_fifo* fifo; + lua_Integer const max_count = luaL_optinteger( L, 4, min_count); + lua_settop( L, 2); // ud key + lua_insert( L, 1); // key ud + push_table( L, 2); // key ud fifos + lua_remove( L, 2); // key fifos + lua_pushvalue( L, 1); // key fifos key + lua_rawget( L, 2); // key fifos fifo + lua_remove( L, 2); // key fifo + fifo = prepare_fifo_access( L, 2); // key fifo + if( fifo != NULL && fifo->count >= min_count) + { + fifo_pop( L, fifo, __min( max_count, fifo->count)); // key ... 
+ } + else + { + lua_settop( L, 0); + } + return lua_gettop( L); + } + else + { + return 0; + } } // in: linda_ud key n // out: true or nil int keepercall_limit( lua_State* L) { - keeper_fifo* fifo; - lua_Integer limit = lua_tointeger( L, 3); - push_table( L, 1); // ud key n fifos - lua_replace( L, 1); // fifos key n - lua_pop( L, 1); // fifos key - lua_pushvalue( L, -1); // fifos key key - lua_rawget( L, -3); // fifos key fifo|nil - fifo = (keeper_fifo*) lua_touserdata( L, -1); - if( fifo == NULL) - { // fifos key nil - lua_pop( L, 1); // fifos key - fifo_new( L); // fifos key fifo - fifo = (keeper_fifo*) lua_touserdata( L, -1); - lua_rawset( L, -3); // fifos - } - // remove any clutter on the stack - lua_settop( L, 0); - // return true if we decide that blocked threads waiting to write on that key should be awakened - // this is the case if we detect the key was full but it is no longer the case - if( - ((fifo->limit >= 0) && (fifo->count >= fifo->limit)) // the key was full if limited and count exceeded the previous limit - && ((limit < 0) || (fifo->count < limit)) // the key is not full if unlimited or count is lower than the new limit - ) - { - lua_pushboolean( L, 1); - } - // set the new limit - fifo->limit = limit; - // return 0 or 1 value - return lua_gettop( L); + keeper_fifo* fifo; + lua_Integer limit = lua_tointeger( L, 3); + push_table( L, 1); // ud key n fifos + lua_replace( L, 1); // fifos key n + lua_pop( L, 1); // fifos key + lua_pushvalue( L, -1); // fifos key key + lua_rawget( L, -3); // fifos key fifo|nil + fifo = (keeper_fifo*) lua_touserdata( L, -1); + if( fifo == NULL) + { // fifos key nil + lua_pop( L, 1); // fifos key + fifo_new( L); // fifos key fifo + fifo = (keeper_fifo*) lua_touserdata( L, -1); + lua_rawset( L, -3); // fifos + } + // remove any clutter on the stack + lua_settop( L, 0); + // return true if we decide that blocked threads waiting to write on that key should be awakened + // this is the case if we detect the key was full but it is no longer the case + if( + ((fifo->limit >= 0) && (fifo->count >= fifo->limit)) // the key was full if limited and count exceeded the previous limit + && ((limit < 0) || (fifo->count < limit)) // the key is not full if unlimited or count is lower than the new limit + ) + { + lua_pushboolean( L, 1); + } + // set the new limit + fifo->limit = limit; + // return 0 or 1 value + return lua_gettop( L); } //in: linda_ud key [[val] ...] //out: true or nil int keepercall_set( lua_State* L) { - bool_t should_wake_writers = FALSE; - STACK_GROW( L, 6); - - // retrieve fifos associated with the linda - push_table( L, 1); // ud key [val [, ...]] fifos - lua_replace( L, 1); // fifos key [val [, ...]] - - // make sure we have a value on the stack - if( lua_gettop( L) == 2) // fifos key - { - keeper_fifo* fifo; - lua_pushvalue( L, -1); // fifos key key - lua_rawget( L, 1); // fifos key fifo|nil - // empty the fifo for the specified key: replace uservalue with a virgin table, reset counters, but leave limit unchanged! 
- fifo = (keeper_fifo*) lua_touserdata( L, -1); - if( fifo != NULL) // might be NULL if we set a nonexistent key to nil - { // fifos key fifo - if( fifo->limit < 0) // fifo limit value is the default (unlimited): we can totally remove it - { - lua_pop( L, 1); // fifos key - lua_pushnil( L); // fifos key nil - lua_rawset( L, -3); // fifos - } - else - { - // we create room if the fifo was full but it is no longer the case - should_wake_writers = (fifo->limit > 0) && (fifo->count >= fifo->limit); - lua_remove( L, -2); // fifos fifo - lua_newtable( L); // fifos fifo {} - lua_setiuservalue( L, -2, CONTENTS_TABLE); // fifos fifo - fifo->first = 1; - fifo->count = 0; - } - } - } - else // set/replace contents stored at the specified key? - { - lua_Integer count = lua_gettop( L) - 2; // number of items we want to store - keeper_fifo* fifo; // fifos key [val [, ...]] - lua_pushvalue( L, 2); // fifos key [val [, ...]] key - lua_rawget( L, 1); // fifos key [val [, ...]] fifo|nil - fifo = (keeper_fifo*) lua_touserdata( L, -1); - if( fifo == NULL) // can be NULL if we store a value at a new key - { // fifos key [val [, ...]] nil - // no need to wake writers in that case, because a writer can't wait on an inexistent key - lua_pop( L, 1); // fifos key [val [, ...]] - fifo_new( L); // fifos key [val [, ...]] fifo - lua_pushvalue( L, 2); // fifos key [val [, ...]] fifo key - lua_pushvalue( L, -2); // fifos key [val [, ...]] fifo key fifo - lua_rawset( L, 1); // fifos key [val [, ...]] fifo - } - else // the fifo exists, we just want to update its contents - { // fifos key [val [, ...]] fifo - // we create room if the fifo was full but it is no longer the case - should_wake_writers = (fifo->limit > 0) && (fifo->count >= fifo->limit) && (count < fifo->limit); - // empty the fifo for the specified key: replace uservalue with a virgin table, reset counters, but leave limit unchanged! - lua_newtable( L); // fifos key [val [, ...]] fifo {} - lua_setiuservalue( L, -2, CONTENTS_TABLE); // fifos key [val [, ...]] fifo - fifo->first = 1; - fifo->count = 0; - } - fifo = prepare_fifo_access( L, -1); - // move the fifo below the values we want to store - lua_insert( L, 3); // fifos key fifo [val [, ...]] - fifo_push( L, fifo, count); // fifos key fifo - } - return should_wake_writers ? (lua_pushboolean( L, 1), 1) : 0; + bool_t should_wake_writers = FALSE; + STACK_GROW( L, 6); + + // retrieve fifos associated with the linda + push_table( L, 1); // ud key [val [, ...]] fifos + lua_replace( L, 1); // fifos key [val [, ...]] + + // make sure we have a value on the stack + if( lua_gettop( L) == 2) // fifos key + { + keeper_fifo* fifo; + lua_pushvalue( L, -1); // fifos key key + lua_rawget( L, 1); // fifos key fifo|nil + // empty the fifo for the specified key: replace uservalue with a virgin table, reset counters, but leave limit unchanged! 
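The keepercall_limit() and keepercall_set() code in this hunk both encode the same wake-up rule for writers blocked on a full key. As a reading aid only (not part of this patch), the condition tested by keepercall_limit() can be isolated into a helper; the function name is invented, and keeper_fifo is the struct defined earlier in keeper.c:

    // Hypothetical helper, equivalent to the test in keepercall_limit():
    // blocked writers need a wake-up only when the key was full under the old
    // limit and is no longer full under the new one.
    static int limit_change_wakes_writers( keeper_fifo const* fifo_, lua_Integer new_limit_)
    {
        int const was_full = (fifo_->limit >= 0) && (fifo_->count >= fifo_->limit);
        int const still_full = (new_limit_ >= 0) && (fifo_->count >= new_limit_);
        return was_full && !still_full;
    }

keepercall_set() applies the same idea through its should_wake_writers flag, additionally comparing the number of values about to be stored against the (unchanged) limit.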
+ fifo = (keeper_fifo*) lua_touserdata( L, -1); + if( fifo != NULL) // might be NULL if we set a nonexistent key to nil + { // fifos key fifo + if( fifo->limit < 0) // fifo limit value is the default (unlimited): we can totally remove it + { + lua_pop( L, 1); // fifos key + lua_pushnil( L); // fifos key nil + lua_rawset( L, -3); // fifos + } + else + { + // we create room if the fifo was full but it is no longer the case + should_wake_writers = (fifo->limit > 0) && (fifo->count >= fifo->limit); + lua_remove( L, -2); // fifos fifo + lua_newtable( L); // fifos fifo {} + lua_setiuservalue( L, -2, CONTENTS_TABLE); // fifos fifo + fifo->first = 1; + fifo->count = 0; + } + } + } + else // set/replace contents stored at the specified key? + { + lua_Integer count = lua_gettop( L) - 2; // number of items we want to store + keeper_fifo* fifo; // fifos key [val [, ...]] + lua_pushvalue( L, 2); // fifos key [val [, ...]] key + lua_rawget( L, 1); // fifos key [val [, ...]] fifo|nil + fifo = (keeper_fifo*) lua_touserdata( L, -1); + if( fifo == NULL) // can be NULL if we store a value at a new key + { // fifos key [val [, ...]] nil + // no need to wake writers in that case, because a writer can't wait on an inexistent key + lua_pop( L, 1); // fifos key [val [, ...]] + fifo_new( L); // fifos key [val [, ...]] fifo + lua_pushvalue( L, 2); // fifos key [val [, ...]] fifo key + lua_pushvalue( L, -2); // fifos key [val [, ...]] fifo key fifo + lua_rawset( L, 1); // fifos key [val [, ...]] fifo + } + else // the fifo exists, we just want to update its contents + { // fifos key [val [, ...]] fifo + // we create room if the fifo was full but it is no longer the case + should_wake_writers = (fifo->limit > 0) && (fifo->count >= fifo->limit) && (count < fifo->limit); + // empty the fifo for the specified key: replace uservalue with a virgin table, reset counters, but leave limit unchanged! + lua_newtable( L); // fifos key [val [, ...]] fifo {} + lua_setiuservalue( L, -2, CONTENTS_TABLE); // fifos key [val [, ...]] fifo + fifo->first = 1; + fifo->count = 0; + } + fifo = prepare_fifo_access( L, -1); + // move the fifo below the values we want to store + lua_insert( L, 3); // fifos key fifo [val [, ...]] + fifo_push( L, fifo, count); // fifos key fifo + } + return should_wake_writers ? (lua_pushboolean( L, 1), 1) : 0; } // in: linda_ud key [count] // out: at most values int keepercall_get( lua_State* L) { - keeper_fifo* fifo; - lua_Integer count = 1; - if( lua_gettop( L) == 3) // ud key count - { - count = lua_tointeger( L, 3); - lua_pop( L, 1); // ud key - } - push_table( L, 1); // ud key fifos - lua_replace( L, 1); // fifos key - lua_rawget( L, 1); // fifos fifo - fifo = prepare_fifo_access( L, -1); // fifos fifo - if( fifo != NULL && fifo->count > 0) - { - lua_remove( L, 1); // fifo - count = __min( count, fifo->count); - // read value off the fifo - fifo_peek( L, fifo, count); // fifo ... - return (int) count; - } - // no fifo was ever registered for this key, or it is empty - return 0; + keeper_fifo* fifo; + lua_Integer count = 1; + if( lua_gettop( L) == 3) // ud key count + { + count = lua_tointeger( L, 3); + lua_pop( L, 1); // ud key + } + push_table( L, 1); // ud key fifos + lua_replace( L, 1); // fifos key + lua_rawget( L, 1); // fifos fifo + fifo = prepare_fifo_access( L, -1); // fifos fifo + if( fifo != NULL && fifo->count > 0) + { + lua_remove( L, 1); // fifo + count = __min( count, fifo->count); + // read value off the fifo + fifo_peek( L, fifo, count); // fifo ... 
+ return (int) count; + } + // no fifo was ever registered for this key, or it is empty + return 0; } // in: linda_ud [, key [, ...]] int keepercall_count( lua_State* L) { - push_table( L, 1); // ud keys fifos - switch( lua_gettop( L)) - { - // no key is specified: return a table giving the count of all known keys - case 2: // ud fifos - lua_newtable( L); // ud fifos out - lua_replace( L, 1); // out fifos - lua_pushnil( L); // out fifos nil - while( lua_next( L, 2)) // out fifos key fifo - { - keeper_fifo* fifo = prepare_fifo_access( L, -1); // out fifos key fifo - lua_pop( L, 1); // out fifos key - lua_pushvalue( L, -1); // out fifos key key - lua_pushinteger( L, fifo->count); // out fifos key key count - lua_rawset( L, -5); // out fifos key - } - lua_pop( L, 1); // out - break; - - // 1 key is specified: return its count - case 3: // ud key fifos - { - keeper_fifo* fifo; - lua_replace( L, 1); // fifos key - lua_rawget( L, -2); // fifos fifo|nil - if( lua_isnil( L, -1)) // the key is unknown - { // fifos nil - lua_remove( L, -2); // nil - } - else // the key is known - { // fifos fifo - fifo = prepare_fifo_access( L, -1); // fifos fifo - lua_pushinteger( L, fifo->count); // fifos fifo count - lua_replace( L, -3); // count fifo - lua_pop( L, 1); // count - } - } - break; - - // a variable number of keys is specified: return a table of their counts - default: // ud keys fifos - lua_newtable( L); // ud keys fifos out - lua_replace( L, 1); // out keys fifos - // shifts all keys up in the stack. potentially slow if there are a lot of them, but then it should be bearable - lua_insert( L, 2); // out fifos keys - while( lua_gettop( L) > 2) - { - keeper_fifo* fifo; - lua_pushvalue( L, -1); // out fifos keys key - lua_rawget( L, 2); // out fifos keys fifo|nil - fifo = prepare_fifo_access( L, -1); // out fifos keys fifo|nil - lua_pop( L, 1); // out fifos keys - if( fifo != NULL) // the key is known - { - lua_pushinteger( L, fifo->count); // out fifos keys count - lua_rawset( L, 1); // out fifos keys - } - else // the key is unknown - { - lua_pop( L, 1); // out fifos keys - } - } - lua_pop( L, 1); // out - } - ASSERT_L( lua_gettop( L) == 1); - return 1; + push_table( L, 1); // ud keys fifos + switch( lua_gettop( L)) + { + // no key is specified: return a table giving the count of all known keys + case 2: // ud fifos + lua_newtable( L); // ud fifos out + lua_replace( L, 1); // out fifos + lua_pushnil( L); // out fifos nil + while( lua_next( L, 2)) // out fifos key fifo + { + keeper_fifo* fifo = prepare_fifo_access( L, -1); // out fifos key fifo + lua_pop( L, 1); // out fifos key + lua_pushvalue( L, -1); // out fifos key key + lua_pushinteger( L, fifo->count); // out fifos key key count + lua_rawset( L, -5); // out fifos key + } + lua_pop( L, 1); // out + break; + + // 1 key is specified: return its count + case 3: // ud key fifos + { + keeper_fifo* fifo; + lua_replace( L, 1); // fifos key + lua_rawget( L, -2); // fifos fifo|nil + if( lua_isnil( L, -1)) // the key is unknown + { // fifos nil + lua_remove( L, -2); // nil + } + else // the key is known + { // fifos fifo + fifo = prepare_fifo_access( L, -1); // fifos fifo + lua_pushinteger( L, fifo->count); // fifos fifo count + lua_replace( L, -3); // count fifo + lua_pop( L, 1); // count + } + } + break; + + // a variable number of keys is specified: return a table of their counts + default: // ud keys fifos + lua_newtable( L); // ud keys fifos out + lua_replace( L, 1); // out keys fifos + // shifts all keys up in the stack. 
potentially slow if there are a lot of them, but then it should be bearable + lua_insert( L, 2); // out fifos keys + while( lua_gettop( L) > 2) + { + keeper_fifo* fifo; + lua_pushvalue( L, -1); // out fifos keys key + lua_rawget( L, 2); // out fifos keys fifo|nil + fifo = prepare_fifo_access( L, -1); // out fifos keys fifo|nil + lua_pop( L, 1); // out fifos keys + if( fifo != NULL) // the key is known + { + lua_pushinteger( L, fifo->count); // out fifos keys count + lua_rawset( L, 1); // out fifos keys + } + else // the key is unknown + { + lua_pop( L, 1); // out fifos keys + } + } + lua_pop( L, 1); // out + } + ASSERT_L( lua_gettop( L) == 1); + return 1; } //################################################################################### @@ -582,41 +582,41 @@ int keepercall_count( lua_State* L) // called as __gc for the keepers array userdata void close_keepers( Universe* U, lua_State* L) { - if( U->keepers != NULL) - { - int i; - int nbKeepers = U->keepers->nb_keepers; - // NOTE: imagine some keeper state N+1 currently holds a linda that uses another keeper N, and a _gc that will make use of it - // when keeper N+1 is closed, object is GCed, linda operation is called, which attempts to acquire keeper N, whose Lua state no longer exists - // in that case, the linda operation should do nothing. which means that these operations must check for keeper acquisition success - // which is early-outed with a U->keepers->nbKeepers null-check - U->keepers->nb_keepers = 0; - for( i = 0; i < nbKeepers; ++ i) - { - lua_State* K = U->keepers->keeper_array[i].L; - U->keepers->keeper_array[i].L = NULL; - if( K != NULL) - { - lua_close( K); - } - else - { - // detected partial init: destroy only the mutexes that got initialized properly - nbKeepers = i; - } - } - for( i = 0; i < nbKeepers; ++ i) - { - MUTEX_FREE( &U->keepers->keeper_array[i].keeper_cs); - } - // free the keeper bookkeeping structure - { - void* allocUD; - lua_Alloc allocF = lua_getallocf( L, &allocUD); - allocF( allocUD, U->keepers, sizeof( Keepers) + (nbKeepers - 1) * sizeof( Keeper), 0); - U->keepers = NULL; - } - } + if( U->keepers != NULL) + { + int i; + int nbKeepers = U->keepers->nb_keepers; + // NOTE: imagine some keeper state N+1 currently holds a linda that uses another keeper N, and a _gc that will make use of it + // when keeper N+1 is closed, object is GCed, linda operation is called, which attempts to acquire keeper N, whose Lua state no longer exists + // in that case, the linda operation should do nothing. 
which means that these operations must check for keeper acquisition success + // which is early-outed with a U->keepers->nbKeepers null-check + U->keepers->nb_keepers = 0; + for( i = 0; i < nbKeepers; ++ i) + { + lua_State* K = U->keepers->keeper_array[i].L; + U->keepers->keeper_array[i].L = NULL; + if( K != NULL) + { + lua_close( K); + } + else + { + // detected partial init: destroy only the mutexes that got initialized properly + nbKeepers = i; + } + } + for( i = 0; i < nbKeepers; ++ i) + { + MUTEX_FREE( &U->keepers->keeper_array[i].keeper_cs); + } + // free the keeper bookkeeping structure + { + void* allocUD; + lua_Alloc allocF = lua_getallocf( L, &allocUD); + allocF( allocUD, U->keepers, sizeof( Keepers) + (nbKeepers - 1) * sizeof( Keeper), 0); + U->keepers = NULL; + } + } } /* @@ -632,156 +632,156 @@ void close_keepers( Universe* U, lua_State* L) */ void init_keepers( Universe* U, lua_State* L) { - int i; - int nb_keepers; - void* allocUD; - lua_Alloc allocF = lua_getallocf( L, &allocUD); - - STACK_CHECK( L, 0); // L K - lua_getfield( L, 1, "nb_keepers"); // nb_keepers - nb_keepers = (int) lua_tointeger( L, -1); - lua_pop( L, 1); // - if( nb_keepers < 1) - { - (void) luaL_error( L, "Bad number of keepers (%d)", nb_keepers); - } - - // Keepers contains an array of 1 s_Keeper, adjust for the actual number of keeper states - { - size_t const bytes = sizeof( Keepers) + (nb_keepers - 1) * sizeof( Keeper); - U->keepers = (Keepers*) allocF( allocUD, NULL, 0, bytes); - if( U->keepers == NULL) - { - (void) luaL_error( L, "init_keepers() failed while creating keeper array; out of memory"); - return; - } - memset( U->keepers, 0, bytes); - U->keepers->nb_keepers = nb_keepers; - } - for( i = 0; i < nb_keepers; ++ i) // keepersUD - { - // note that we will leak K if we raise an error later - lua_State* K = create_state( U, L); - if( K == NULL) - { - (void) luaL_error( L, "init_keepers() failed while creating keeper states; out of memory"); - return; - } - - U->keepers->keeper_array[i].L = K; - // we can trigger a GC from inside keeper_call(), where a keeper is acquired - // from there, GC can collect a linda, which would acquire the keeper again, and deadlock the thread. - // therefore, we need a recursive mutex. - MUTEX_RECURSIVE_INIT( &U->keepers->keeper_array[i].keeper_cs); - - STACK_CHECK( K, 0); - - // copy the universe pointer in the keeper itself - universe_store( K, U); - STACK_MID( K, 0); - - // make sure 'package' is initialized in keeper states, so that we have require() - // this because this is needed when transferring deep userdata object - luaL_requiref( K, "package", luaopen_package, 1); // package - lua_pop( K, 1); // - STACK_MID( K, 0); - serialize_require( DEBUGSPEW_PARAM_COMMA( U) K); - STACK_MID( K, 0); - - // copy package.path and package.cpath from the source state - lua_getglobal( L, "package"); // "..." 
keepersUD package - if( !lua_isnil( L, -1)) - { - // when copying with mode eLM_ToKeeper, error message is pushed at the top of the stack, not raised immediately - if( luaG_inter_copy_package( U, L, K, -1, eLM_ToKeeper)) - { - // if something went wrong, the error message is at the top of the stack - lua_remove( L, -2); // error_msg - (void) lua_error( L); - return; - } - } - lua_pop( L, 1); // - STACK_MID( L, 0); - - // attempt to call on_state_create(), if we have one and it is a C function - // (only support a C function because we can't transfer executable Lua code in keepers) - // will raise an error in L in case of problem - call_on_state_create( U, K, L, eLM_ToKeeper); - - // to see VM name in Decoda debugger - lua_pushfstring( K, "Keeper #%d", i + 1); // "Keeper #n" - lua_setglobal( K, "decoda_name"); // - - // create the fifos table in the keeper state - REGISTRY_SET( K, FIFOS_KEY, lua_newtable( K)); - STACK_END( K, 0); - } - STACK_END( L, 0); + int i; + int nb_keepers; + void* allocUD; + lua_Alloc allocF = lua_getallocf( L, &allocUD); + + STACK_CHECK( L, 0); // L K + lua_getfield( L, 1, "nb_keepers"); // nb_keepers + nb_keepers = (int) lua_tointeger( L, -1); + lua_pop( L, 1); // + if( nb_keepers < 1) + { + (void) luaL_error( L, "Bad number of keepers (%d)", nb_keepers); + } + + // Keepers contains an array of 1 s_Keeper, adjust for the actual number of keeper states + { + size_t const bytes = sizeof( Keepers) + (nb_keepers - 1) * sizeof( Keeper); + U->keepers = (Keepers*) allocF( allocUD, NULL, 0, bytes); + if( U->keepers == NULL) + { + (void) luaL_error( L, "init_keepers() failed while creating keeper array; out of memory"); + return; + } + memset( U->keepers, 0, bytes); + U->keepers->nb_keepers = nb_keepers; + } + for( i = 0; i < nb_keepers; ++ i) // keepersUD + { + // note that we will leak K if we raise an error later + lua_State* K = create_state( U, L); + if( K == NULL) + { + (void) luaL_error( L, "init_keepers() failed while creating keeper states; out of memory"); + return; + } + + U->keepers->keeper_array[i].L = K; + // we can trigger a GC from inside keeper_call(), where a keeper is acquired + // from there, GC can collect a linda, which would acquire the keeper again, and deadlock the thread. + // therefore, we need a recursive mutex. + MUTEX_RECURSIVE_INIT( &U->keepers->keeper_array[i].keeper_cs); + + STACK_CHECK( K, 0); + + // copy the universe pointer in the keeper itself + universe_store( K, U); + STACK_MID( K, 0); + + // make sure 'package' is initialized in keeper states, so that we have require() + // this because this is needed when transferring deep userdata object + luaL_requiref( K, "package", luaopen_package, 1); // package + lua_pop( K, 1); // + STACK_MID( K, 0); + serialize_require( DEBUGSPEW_PARAM_COMMA( U) K); + STACK_MID( K, 0); + + // copy package.path and package.cpath from the source state + lua_getglobal( L, "package"); // "..." 
keepersUD package + if( !lua_isnil( L, -1)) + { + // when copying with mode eLM_ToKeeper, error message is pushed at the top of the stack, not raised immediately + if( luaG_inter_copy_package( U, L, K, -1, eLM_ToKeeper)) + { + // if something went wrong, the error message is at the top of the stack + lua_remove( L, -2); // error_msg + (void) lua_error( L); + return; + } + } + lua_pop( L, 1); // + STACK_MID( L, 0); + + // attempt to call on_state_create(), if we have one and it is a C function + // (only support a C function because we can't transfer executable Lua code in keepers) + // will raise an error in L in case of problem + call_on_state_create( U, K, L, eLM_ToKeeper); + + // to see VM name in Decoda debugger + lua_pushfstring( K, "Keeper #%d", i + 1); // "Keeper #n" + lua_setglobal( K, "decoda_name"); // + + // create the fifos table in the keeper state + REGISTRY_SET( K, FIFOS_KEY, lua_newtable( K)); + STACK_END( K, 0); + } + STACK_END( L, 0); } // should be called only when inside a keeper_acquire/keeper_release pair (see linda_protected_call) Keeper* which_keeper(Keepers* keepers_, ptrdiff_t magic_) { - int const nbKeepers = keepers_->nb_keepers; - unsigned int i = (unsigned int)((magic_ >> KEEPER_MAGIC_SHIFT) % nbKeepers); - return &keepers_->keeper_array[i]; + int const nbKeepers = keepers_->nb_keepers; + unsigned int i = (unsigned int)((magic_ >> KEEPER_MAGIC_SHIFT) % nbKeepers); + return &keepers_->keeper_array[i]; } Keeper* keeper_acquire( Keepers* keepers_, ptrdiff_t magic_) { - int const nbKeepers = keepers_->nb_keepers; - // can be 0 if this happens during main state shutdown (lanes is being GC'ed -> no keepers) - if( nbKeepers == 0) - { - return NULL; - } - else - { - /* - * Any hashing will do that maps pointers to 0..GNbKeepers-1 - * consistently. - * - * Pointers are often aligned by 8 or so - ignore the low order bits - * have to cast to unsigned long to avoid compilation warnings about loss of data when converting pointer-to-integer - */ - unsigned int i = (unsigned int)((magic_ >> KEEPER_MAGIC_SHIFT) % nbKeepers); - Keeper* K = &keepers_->keeper_array[i]; - - MUTEX_LOCK( &K->keeper_cs); - //++ K->count; - return K; - } + int const nbKeepers = keepers_->nb_keepers; + // can be 0 if this happens during main state shutdown (lanes is being GC'ed -> no keepers) + if( nbKeepers == 0) + { + return NULL; + } + else + { + /* + * Any hashing will do that maps pointers to 0..GNbKeepers-1 + * consistently. 
+ * + * Pointers are often aligned by 8 or so - ignore the low order bits + * have to cast to unsigned long to avoid compilation warnings about loss of data when converting pointer-to-integer + */ + unsigned int i = (unsigned int)((magic_ >> KEEPER_MAGIC_SHIFT) % nbKeepers); + Keeper* K = &keepers_->keeper_array[i]; + + MUTEX_LOCK( &K->keeper_cs); + //++ K->count; + return K; + } } void keeper_release( Keeper* K) { - //-- K->count; - if( K) MUTEX_UNLOCK( &K->keeper_cs); + //-- K->count; + if( K) MUTEX_UNLOCK( &K->keeper_cs); } void keeper_toggle_nil_sentinels( lua_State* L, int val_i_, LookupMode const mode_) { - int i, n = lua_gettop( L); - for( i = val_i_; i <= n; ++ i) - { - if( mode_ == eLM_ToKeeper) - { - if( lua_isnil( L, i)) - { - push_unique_key( L, NIL_SENTINEL); - lua_replace( L, i); - } - } - else - { - if( equal_unique_key( L, i, NIL_SENTINEL)) - { - lua_pushnil( L); - lua_replace( L, i); - } - } - } + int i, n = lua_gettop( L); + for( i = val_i_; i <= n; ++ i) + { + if( mode_ == eLM_ToKeeper) + { + if( lua_isnil( L, i)) + { + push_unique_key( L, NIL_SENTINEL); + lua_replace( L, i); + } + } + else + { + if( equal_unique_key( L, i, NIL_SENTINEL)) + { + lua_pushnil( L); + lua_replace( L, i); + } + } + } } /* @@ -795,31 +795,31 @@ void keeper_toggle_nil_sentinels( lua_State* L, int val_i_, LookupMode const mod */ int keeper_call( Universe* U, lua_State* K, keeper_api_t func_, lua_State* L, void* linda, uint_t starting_index) { - int const args = starting_index ? (lua_gettop( L) - starting_index + 1) : 0; - int const Ktos = lua_gettop( K); - int retvals = -1; - - STACK_GROW( K, 2); - - PUSH_KEEPER_FUNC( K, func_); - - lua_pushlightuserdata( K, linda); - - if( (args == 0) || luaG_inter_copy( U, L, K, args, eLM_ToKeeper) == 0) // L->K - { - lua_call( K, 1 + args, LUA_MULTRET); - - retvals = lua_gettop( K) - Ktos; - // note that this can raise a luaL_error while the keeper state (and its mutex) is acquired - // this may interrupt a lane, causing the destruction of the underlying OS thread - // after this, another lane making use of this keeper can get an error code from the mutex-locking function - // when attempting to grab the mutex again (WINVER <= 0x400 does this, but locks just fine, I don't know about pthread) - if( (retvals > 0) && luaG_inter_move( U, K, L, retvals, eLM_FromKeeper) != 0) // K->L - { - retvals = -1; - } - } - // whatever happens, restore the stack to where it was at the origin - lua_settop( K, Ktos); - return retvals; + int const args = starting_index ? 
(lua_gettop( L) - starting_index + 1) : 0; + int const Ktos = lua_gettop( K); + int retvals = -1; + + STACK_GROW( K, 2); + + PUSH_KEEPER_FUNC( K, func_); + + lua_pushlightuserdata( K, linda); + + if( (args == 0) || luaG_inter_copy( U, L, K, args, eLM_ToKeeper) == 0) // L->K + { + lua_call( K, 1 + args, LUA_MULTRET); + + retvals = lua_gettop( K) - Ktos; + // note that this can raise a luaL_error while the keeper state (and its mutex) is acquired + // this may interrupt a lane, causing the destruction of the underlying OS thread + // after this, another lane making use of this keeper can get an error code from the mutex-locking function + // when attempting to grab the mutex again (WINVER <= 0x400 does this, but locks just fine, I don't know about pthread) + if( (retvals > 0) && luaG_inter_move( U, K, L, retvals, eLM_FromKeeper) != 0) // K->L + { + retvals = -1; + } + } + // whatever happens, restore the stack to where it was at the origin + lua_settop( K, Ktos); + return retvals; } diff --git a/src/keeper.h b/src/keeper.h index 60410da..8c09322 100644 --- a/src/keeper.h +++ b/src/keeper.h @@ -13,16 +13,16 @@ typedef enum eLookupMode LookupMode; struct s_Keeper { - MUTEX_T keeper_cs; - lua_State* L; - //int count; + MUTEX_T keeper_cs; + lua_State* L; + //int count; }; typedef struct s_Keeper Keeper; struct s_Keepers { - int nb_keepers; - Keeper keeper_array[1]; + int nb_keepers; + Keeper keeper_array[1]; }; typedef struct s_Keepers Keepers; diff --git a/src/lanes.c b/src/lanes.c index e697bf5..c5b6c4f 100644 --- a/src/lanes.c +++ b/src/lanes.c @@ -113,16 +113,16 @@ THE SOFTWARE. // intern the debug name in the specified lua state so that the pointer remains valid when the lane's state is closed static void securize_debug_threadname( lua_State* L, Lane* s) { - STACK_CHECK( L, 0); - STACK_GROW( L, 3); - lua_getiuservalue( L, 1, 1); - lua_newtable( L); - // Lua 5.1 can't do 's->debug_name = lua_pushstring( L, s->debug_name);' - lua_pushstring( L, s->debug_name); - s->debug_name = lua_tostring( L, -1); - lua_rawset( L, -3); - lua_pop( L, 1); - STACK_END( L, 0); + STACK_CHECK( L, 0); + STACK_GROW( L, 3); + lua_getiuservalue( L, 1, 1); + lua_newtable( L); + // Lua 5.1 can't do 's->debug_name = lua_pushstring( L, s->debug_name);' + lua_pushstring( L, s->debug_name); + s->debug_name = lua_tostring( L, -1); + lua_rawset( L, -3); + lua_pop( L, 1); + STACK_END( L, 0); } #if ERROR_FULL_STACK @@ -154,24 +154,24 @@ struct s_Linda; */ static bool_t push_registry_table( lua_State* L, UniqueKey key, bool_t create) { - STACK_GROW( L, 3); - STACK_CHECK( L, 0); - - REGISTRY_GET( L, key); // ? - if( lua_isnil( L, -1)) // nil? - { - lua_pop( L, 1); // - - if( !create) - { - return FALSE; - } - - lua_newtable( L); // t - REGISTRY_SET( L, key, lua_pushvalue( L, -2)); - } - STACK_END( L, 1); - return TRUE; // table pushed + STACK_GROW( L, 3); + STACK_CHECK( L, 0); + + REGISTRY_GET( L, key); // ? + if( lua_isnil( L, -1)) // nil? 
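keeper_call() in the keeper.c hunks above expects its caller to have pinned a keeper state first; keeper_acquire()/keeper_release() do that, and close_keepers() zeroes nb_keepers precisely so that a late acquisition fails cleanly instead of touching a dead Lua state. A condensed caller-side sketch, not part of this patch (the function is invented, the real call sites live in linda.c, and the keeper_api_t plumbing is simplified):

    // Hypothetical linda operation showing the acquire / call / release shape
    // that the comments in close_keepers() and keeper_acquire() describe.
    static int example_linda_clear( Universe* U, lua_State* L, void* linda, ptrdiff_t magic)
    {
        int pushed;
        Keeper* K = keeper_acquire( U->keepers, magic);
        if( K == NULL) // keepers already torn down (main state shutdown): do nothing
        {
            return 0;
        }
        // starting_index 0: keepercall_clear() only needs the linda lightuserdata
        pushed = keeper_call( U, K->L, keepercall_clear, L, linda, 0);
        keeper_release( K);
        return (pushed < 0) ? 0 : pushed;
    }

The NULL early-out is exactly the check the close_keepers() comment asks linda operations to perform during shutdown.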
+ { + lua_pop( L, 1); // + + if( !create) + { + return FALSE; + } + + lua_newtable( L); // t + REGISTRY_SET( L, key, lua_pushvalue( L, -2)); + } + STACK_END( L, 1); + return TRUE; // table pushed } #if HAVE_LANE_TRACKING @@ -187,14 +187,14 @@ static bool_t push_registry_table( lua_State* L, UniqueKey key, bool_t create) static void tracking_add( Lane* s) { - MUTEX_LOCK( &s->U->tracking_cs); - { - assert( s->tracking_next == NULL); + MUTEX_LOCK( &s->U->tracking_cs); + { + assert( s->tracking_next == NULL); - s->tracking_next = s->U->tracking_first; - s->U->tracking_first = s; - } - MUTEX_UNLOCK( &s->U->tracking_cs); + s->tracking_next = s->U->tracking_first; + s->U->tracking_first = s; + } + MUTEX_UNLOCK( &s->U->tracking_cs); } /* @@ -202,33 +202,33 @@ static void tracking_add( Lane* s) */ static bool_t tracking_remove( Lane* s) { - bool_t found = FALSE; - MUTEX_LOCK( &s->U->tracking_cs); - { - // Make sure (within the MUTEX) that we actually are in the chain - // still (at process exit they will remove us from chain and then - // cancel/kill). - // - if( s->tracking_next != NULL) - { - Lane** ref = (Lane**) &s->U->tracking_first; - - while( *ref != TRACKING_END) - { - if( *ref == s) - { - *ref = s->tracking_next; - s->tracking_next = NULL; - found = TRUE; - break; - } - ref = (Lane**) &((*ref)->tracking_next); - } - assert( found); - } - } - MUTEX_UNLOCK( &s->U->tracking_cs); - return found; + bool_t found = FALSE; + MUTEX_LOCK( &s->U->tracking_cs); + { + // Make sure (within the MUTEX) that we actually are in the chain + // still (at process exit they will remove us from chain and then + // cancel/kill). + // + if( s->tracking_next != NULL) + { + Lane** ref = (Lane**) &s->U->tracking_first; + + while( *ref != TRACKING_END) + { + if( *ref == s) + { + *ref = s->tracking_next; + s->tracking_next = NULL; + found = TRUE; + break; + } + ref = (Lane**) &((*ref)->tracking_next); + } + assert( found); + } + } + MUTEX_UNLOCK( &s->U->tracking_cs); + return found; } #endif // HAVE_LANE_TRACKING @@ -238,22 +238,22 @@ static bool_t tracking_remove( Lane* s) static void lane_cleanup( Lane* s) { - // Clean up after a (finished) thread - // + // Clean up after a (finished) thread + // #if THREADWAIT_METHOD == THREADWAIT_CONDVAR - SIGNAL_FREE( &s->done_signal); - MUTEX_FREE( &s->done_lock); + SIGNAL_FREE( &s->done_signal); + MUTEX_FREE( &s->done_lock); #endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR #if HAVE_LANE_TRACKING - if( s->U->tracking_first != NULL) - { - // Lane was cleaned up, no need to handle at process termination - tracking_remove( s); - } + if( s->U->tracking_first != NULL) + { + // Lane was cleaned up, no need to handle at process termination + tracking_remove( s); + } #endif // HAVE_LANE_TRACKING - free( s); + free( s); } /* @@ -272,16 +272,16 @@ static void lane_cleanup( Lane* s) // LUAG_FUNC( set_finalizer) { - luaL_argcheck( L, lua_isfunction( L, 1), 1, "finalizer should be a function"); - luaL_argcheck( L, lua_gettop( L) == 1, 1, "too many arguments"); - // Get the current finalizer table (if any) - push_registry_table( L, FINALIZER_REGKEY, TRUE /*do create if none*/); // finalizer {finalisers} - STACK_GROW( L, 2); - lua_pushinteger( L, lua_rawlen( L, -1) + 1); // finalizer {finalisers} idx - lua_pushvalue( L, 1); // finalizer {finalisers} idx finalizer - lua_rawset( L, -3); // finalizer {finalisers} - lua_pop( L, 2); // - return 0; + luaL_argcheck( L, lua_isfunction( L, 1), 1, "finalizer should be a function"); + luaL_argcheck( L, lua_gettop( L) == 1, 1, "too many arguments"); + // 
Get the current finalizer table (if any) + push_registry_table( L, FINALIZER_REGKEY, TRUE /*do create if none*/); // finalizer {finalisers} + STACK_GROW( L, 2); + lua_pushinteger( L, lua_rawlen( L, -1) + 1); // finalizer {finalisers} idx + lua_pushvalue( L, 1); // finalizer {finalisers} idx finalizer + lua_rawset( L, -3); // finalizer {finalisers} + lua_pop( L, 2); // + return 0; } @@ -302,74 +302,74 @@ static void push_stack_trace( lua_State* L, int rc_, int stk_base_); static int run_finalizers( lua_State* L, int lua_rc) { - int finalizers_index; - int n; - int err_handler_index = 0; - int rc = LUA_OK; // ... - if( !push_registry_table( L, FINALIZER_REGKEY, FALSE)) // ... finalizers? - { - return 0; // no finalizers - } + int finalizers_index; + int n; + int err_handler_index = 0; + int rc = LUA_OK; // ... + if( !push_registry_table( L, FINALIZER_REGKEY, FALSE)) // ... finalizers? + { + return 0; // no finalizers + } - STACK_GROW( L, 5); + STACK_GROW( L, 5); - finalizers_index = lua_gettop( L); + finalizers_index = lua_gettop( L); #if ERROR_FULL_STACK - lua_pushcfunction( L, lane_error); // ... finalizers lane_error - err_handler_index = lua_gettop( L); + lua_pushcfunction( L, lane_error); // ... finalizers lane_error + err_handler_index = lua_gettop( L); #endif // ERROR_FULL_STACK - for( n = (int) lua_rawlen( L, finalizers_index); n > 0; -- n) - { - int args = 0; - lua_pushinteger( L, n); // ... finalizers lane_error n - lua_rawget( L, finalizers_index); // ... finalizers lane_error finalizer - ASSERT_L( lua_isfunction( L, -1)); - if( lua_rc != LUA_OK) // we have an error message and an optional stack trace at the bottom of the stack - { - ASSERT_L( finalizers_index == 2 || finalizers_index == 3); - //char const* err_msg = lua_tostring( L, 1); - lua_pushvalue( L, 1); // ... finalizers lane_error finalizer err_msg - // note we don't always have a stack trace for example when CANCEL_ERROR, or when we got an error that doesn't call our handler, such as LUA_ERRMEM - if( finalizers_index == 3) - { - lua_pushvalue( L, 2); // ... finalizers lane_error finalizer err_msg stack_trace - } - args = finalizers_index - 1; - } - - // if no error from the main body, finalizer doesn't receive any argument, else it gets the error message and optional stack trace - rc = lua_pcall( L, args, 0, err_handler_index); // ... finalizers lane_error err_msg2? - if( rc != LUA_OK) - { - push_stack_trace( L, rc, lua_gettop( L)); - // If one finalizer fails, don't run the others. Return this - // as the 'real' error, replacing what we could have had (or not) - // from the actual code. - break; - } - // no error, proceed to next finalizer // ... finalizers lane_error - } - - if( rc != LUA_OK) - { - // ERROR_FULL_STACK accounts for the presence of lane_error on the stack - int nb_err_slots = lua_gettop( L) - finalizers_index - ERROR_FULL_STACK; - // a finalizer generated an error, this is what we leave of the stack - for( n = nb_err_slots; n > 0; -- n) - { - lua_replace( L, n); - } - // leave on the stack only the error and optional stack trace produced by the error in the finalizer - lua_settop( L, nb_err_slots); - } - else // no error from the finalizers, make sure only the original return values from the lane body remain on the stack - { - lua_settop( L, finalizers_index - 1); - } - - return rc; + for( n = (int) lua_rawlen( L, finalizers_index); n > 0; -- n) + { + int args = 0; + lua_pushinteger( L, n); // ... finalizers lane_error n + lua_rawget( L, finalizers_index); // ... 
finalizers lane_error finalizer + ASSERT_L( lua_isfunction( L, -1)); + if( lua_rc != LUA_OK) // we have an error message and an optional stack trace at the bottom of the stack + { + ASSERT_L( finalizers_index == 2 || finalizers_index == 3); + //char const* err_msg = lua_tostring( L, 1); + lua_pushvalue( L, 1); // ... finalizers lane_error finalizer err_msg + // note we don't always have a stack trace for example when CANCEL_ERROR, or when we got an error that doesn't call our handler, such as LUA_ERRMEM + if( finalizers_index == 3) + { + lua_pushvalue( L, 2); // ... finalizers lane_error finalizer err_msg stack_trace + } + args = finalizers_index - 1; + } + + // if no error from the main body, finalizer doesn't receive any argument, else it gets the error message and optional stack trace + rc = lua_pcall( L, args, 0, err_handler_index); // ... finalizers lane_error err_msg2? + if( rc != LUA_OK) + { + push_stack_trace( L, rc, lua_gettop( L)); + // If one finalizer fails, don't run the others. Return this + // as the 'real' error, replacing what we could have had (or not) + // from the actual code. + break; + } + // no error, proceed to next finalizer // ... finalizers lane_error + } + + if( rc != LUA_OK) + { + // ERROR_FULL_STACK accounts for the presence of lane_error on the stack + int nb_err_slots = lua_gettop( L) - finalizers_index - ERROR_FULL_STACK; + // a finalizer generated an error, this is what we leave of the stack + for( n = nb_err_slots; n > 0; -- n) + { + lua_replace( L, n); + } + // leave on the stack only the error and optional stack trace produced by the error in the finalizer + lua_settop( L, nb_err_slots); + } + else // no error from the finalizers, make sure only the original return values from the lane body remain on the stack + { + lua_settop( L, finalizers_index - 1); + } + + return rc; } /* @@ -392,12 +392,12 @@ static int run_finalizers( lua_State* L, int lua_rc) */ static void selfdestruct_add( Lane* s) { - MUTEX_LOCK( &s->U->selfdestruct_cs); - assert( s->selfdestruct_next == NULL); + MUTEX_LOCK( &s->U->selfdestruct_cs); + assert( s->selfdestruct_next == NULL); - s->selfdestruct_next = s->U->selfdestruct_first; - s->U->selfdestruct_first= s; - MUTEX_UNLOCK( &s->U->selfdestruct_cs); + s->selfdestruct_next = s->U->selfdestruct_first; + s->U->selfdestruct_first= s; + MUTEX_UNLOCK( &s->U->selfdestruct_cs); } /* @@ -405,35 +405,35 @@ static void selfdestruct_add( Lane* s) */ static bool_t selfdestruct_remove( Lane* s) { - bool_t found = FALSE; - MUTEX_LOCK( &s->U->selfdestruct_cs); - { - // Make sure (within the MUTEX) that we actually are in the chain - // still (at process exit they will remove us from chain and then - // cancel/kill). - // - if( s->selfdestruct_next != NULL) - { - Lane** ref = (Lane**) &s->U->selfdestruct_first; - - while( *ref != SELFDESTRUCT_END ) - { - if( *ref == s) - { - *ref = s->selfdestruct_next; - s->selfdestruct_next = NULL; - // the terminal shutdown should wait until the lane is done with its lua_close() - ++ s->U->selfdestructing_count; - found = TRUE; - break; - } - ref = (Lane**) &((*ref)->selfdestruct_next); - } - assert( found); - } - } - MUTEX_UNLOCK( &s->U->selfdestruct_cs); - return found; + bool_t found = FALSE; + MUTEX_LOCK( &s->U->selfdestruct_cs); + { + // Make sure (within the MUTEX) that we actually are in the chain + // still (at process exit they will remove us from chain and then + // cancel/kill). 
+ // + if( s->selfdestruct_next != NULL) + { + Lane** ref = (Lane**) &s->U->selfdestruct_first; + + while( *ref != SELFDESTRUCT_END ) + { + if( *ref == s) + { + *ref = s->selfdestruct_next; + s->selfdestruct_next = NULL; + // the terminal shutdown should wait until the lane is done with its lua_close() + ++ s->U->selfdestructing_count; + found = TRUE; + break; + } + ref = (Lane**) &((*ref)->selfdestruct_next); + } + assert( found); + } + } + MUTEX_UNLOCK( &s->U->selfdestruct_cs); + return found; } /* @@ -441,160 +441,160 @@ static bool_t selfdestruct_remove( Lane* s) */ static int selfdestruct_gc( lua_State* L) { - Universe* U = (Universe*) lua_touserdata( L, 1); - - while( U->selfdestruct_first != SELFDESTRUCT_END) // true at most once! - { - // Signal _all_ still running threads to exit (including the timer thread) - // - MUTEX_LOCK( &U->selfdestruct_cs); - { - Lane* s = U->selfdestruct_first; - while( s != SELFDESTRUCT_END) - { - // attempt a regular unforced hard cancel with a small timeout - bool_t cancelled = THREAD_ISNULL( s->thread) || thread_cancel( L, s, CO_Hard, 0.0001, FALSE, 0.0); - // if we failed, and we know the thread is waiting on a linda - if( cancelled == FALSE && s->status == WAITING && s->waiting_on != NULL) - { - // signal the linda the wake up the thread so that it can react to the cancel query - // let us hope we never land here with a pointer on a linda that has been destroyed... - SIGNAL_T *waiting_on = s->waiting_on; - //s->waiting_on = NULL; // useful, or not? - SIGNAL_ALL( waiting_on); - } - s = s->selfdestruct_next; - } - } - MUTEX_UNLOCK( &U->selfdestruct_cs); - - // When noticing their cancel, the lanes will remove themselves from - // the selfdestruct chain. - - // TBD: Not sure if Windows (multi core) will require the timed approach, - // or single Yield. I don't have machine to test that (so leaving - // for timed approach). -- AKa 25-Oct-2008 - - // OS X 10.5 (Intel) needs more to avoid segfaults. - // - // "make test" is okay. 100's of "make require" are okay. - // - // Tested on MacBook Core Duo 2GHz and 10.5.5: - // -- AKa 25-Oct-2008 - // - { - lua_Number const shutdown_timeout = lua_tonumber( L, lua_upvalueindex( 1)); - double const t_until = now_secs() + shutdown_timeout; - - while( U->selfdestruct_first != SELFDESTRUCT_END) - { - YIELD(); // give threads time to act on their cancel - { - // count the number of cancelled thread that didn't have the time to act yet - int n = 0; - double t_now = 0.0; - MUTEX_LOCK( &U->selfdestruct_cs); - { - Lane* s = U->selfdestruct_first; - while( s != SELFDESTRUCT_END) - { - if( s->cancel_request == CANCEL_HARD) - ++ n; - s = s->selfdestruct_next; - } - } - MUTEX_UNLOCK( &U->selfdestruct_cs); - // if timeout elapsed, or we know all threads have acted, stop waiting - t_now = now_secs(); - if( n == 0 || (t_now >= t_until)) - { - DEBUGSPEW_CODE( fprintf( stderr, "%d uncancelled lane(s) remain after waiting %fs at process end.\n", n, shutdown_timeout - (t_until - t_now))); - break; - } - } - } - } - - // If some lanes are currently cleaning after themselves, wait until they are done. - // They are no longer listed in the selfdestruct chain, but they still have to lua_close(). 
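tracking_remove() and selfdestruct_remove() above walk their intrusive chains through a Lane** so that removing the head needs no special case. The loop, distilled into an invented helper (the tracking variant follows tracking_next instead, and both real functions run under the corresponding universe mutex):

    // Hypothetical distillation of the unlink loop; end_marker_ stands for
    // TRACKING_END or SELFDESTRUCT_END.
    static bool_t chain_unlink( Lane** head_, Lane* s_, Lane* end_marker_)
    {
        Lane** ref = head_;
        while( *ref != end_marker_)
        {
            if( *ref == s_)
            {
                *ref = s_->selfdestruct_next;  // same assignment updates the head or an interior link
                s_->selfdestruct_next = NULL;
                return TRUE;
            }
            ref = (Lane**) &((*ref)->selfdestruct_next);
        }
        return FALSE; // not in the chain (already removed at process exit)
    }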
- while( U->selfdestructing_count > 0) - { - YIELD(); - } - - //--- - // Kill the still free running threads - // - if( U->selfdestruct_first != SELFDESTRUCT_END) - { - unsigned int n = 0; - // first thing we did was to raise the linda signals the threads were waiting on (if any) - // therefore, any well-behaved thread should be in CANCELLED state - // these are not running, and the state can be closed - MUTEX_LOCK( &U->selfdestruct_cs); - { - Lane* s = U->selfdestruct_first; - while( s != SELFDESTRUCT_END) - { - Lane* next_s = s->selfdestruct_next; - s->selfdestruct_next = NULL; // detach from selfdestruct chain - if( !THREAD_ISNULL( s->thread)) // can be NULL if previous 'soft' termination succeeded - { - THREAD_KILL( &s->thread); + Universe* U = (Universe*) lua_touserdata( L, 1); + + while( U->selfdestruct_first != SELFDESTRUCT_END) // true at most once! + { + // Signal _all_ still running threads to exit (including the timer thread) + // + MUTEX_LOCK( &U->selfdestruct_cs); + { + Lane* s = U->selfdestruct_first; + while( s != SELFDESTRUCT_END) + { + // attempt a regular unforced hard cancel with a small timeout + bool_t cancelled = THREAD_ISNULL( s->thread) || thread_cancel( L, s, CO_Hard, 0.0001, FALSE, 0.0); + // if we failed, and we know the thread is waiting on a linda + if( cancelled == FALSE && s->status == WAITING && s->waiting_on != NULL) + { + // signal the linda the wake up the thread so that it can react to the cancel query + // let us hope we never land here with a pointer on a linda that has been destroyed... + SIGNAL_T *waiting_on = s->waiting_on; + //s->waiting_on = NULL; // useful, or not? + SIGNAL_ALL( waiting_on); + } + s = s->selfdestruct_next; + } + } + MUTEX_UNLOCK( &U->selfdestruct_cs); + + // When noticing their cancel, the lanes will remove themselves from + // the selfdestruct chain. + + // TBD: Not sure if Windows (multi core) will require the timed approach, + // or single Yield. I don't have machine to test that (so leaving + // for timed approach). -- AKa 25-Oct-2008 + + // OS X 10.5 (Intel) needs more to avoid segfaults. + // + // "make test" is okay. 100's of "make require" are okay. + // + // Tested on MacBook Core Duo 2GHz and 10.5.5: + // -- AKa 25-Oct-2008 + // + { + lua_Number const shutdown_timeout = lua_tonumber( L, lua_upvalueindex( 1)); + double const t_until = now_secs() + shutdown_timeout; + + while( U->selfdestruct_first != SELFDESTRUCT_END) + { + YIELD(); // give threads time to act on their cancel + { + // count the number of cancelled thread that didn't have the time to act yet + int n = 0; + double t_now = 0.0; + MUTEX_LOCK( &U->selfdestruct_cs); + { + Lane* s = U->selfdestruct_first; + while( s != SELFDESTRUCT_END) + { + if( s->cancel_request == CANCEL_HARD) + ++ n; + s = s->selfdestruct_next; + } + } + MUTEX_UNLOCK( &U->selfdestruct_cs); + // if timeout elapsed, or we know all threads have acted, stop waiting + t_now = now_secs(); + if( n == 0 || (t_now >= t_until)) + { + DEBUGSPEW_CODE( fprintf( stderr, "%d uncancelled lane(s) remain after waiting %fs at process end.\n", n, shutdown_timeout - (t_until - t_now))); + break; + } + } + } + } + + // If some lanes are currently cleaning after themselves, wait until they are done. + // They are no longer listed in the selfdestruct chain, but they still have to lua_close(). 
+ while( U->selfdestructing_count > 0) + { + YIELD(); + } + + //--- + // Kill the still free running threads + // + if( U->selfdestruct_first != SELFDESTRUCT_END) + { + unsigned int n = 0; + // first thing we did was to raise the linda signals the threads were waiting on (if any) + // therefore, any well-behaved thread should be in CANCELLED state + // these are not running, and the state can be closed + MUTEX_LOCK( &U->selfdestruct_cs); + { + Lane* s = U->selfdestruct_first; + while( s != SELFDESTRUCT_END) + { + Lane* next_s = s->selfdestruct_next; + s->selfdestruct_next = NULL; // detach from selfdestruct chain + if( !THREAD_ISNULL( s->thread)) // can be NULL if previous 'soft' termination succeeded + { + THREAD_KILL( &s->thread); #if THREADAPI == THREADAPI_PTHREAD - // pthread: make sure the thread is really stopped! - THREAD_WAIT( &s->thread, -1, &s->done_signal, &s->done_lock, &s->status); + // pthread: make sure the thread is really stopped! + THREAD_WAIT( &s->thread, -1, &s->done_signal, &s->done_lock, &s->status); #endif // THREADAPI == THREADAPI_PTHREAD - } - // NO lua_close() in this case because we don't know where execution of the state was interrupted - lane_cleanup( s); - s = next_s; - ++ n; - } - U->selfdestruct_first = SELFDESTRUCT_END; - } - MUTEX_UNLOCK( &U->selfdestruct_cs); - - DEBUGSPEW_CODE( fprintf( stderr, "Killed %d lane(s) at process end.\n", n)); - } - } - - // If some lanes are currently cleaning after themselves, wait until they are done. - // They are no longer listed in the selfdestruct chain, but they still have to lua_close(). - while( U->selfdestructing_count > 0) - { - YIELD(); - } - - // necessary so that calling free_deep_prelude doesn't crash because linda_id expects a linda lightuserdata at absolute slot 1 - lua_settop( L, 0); - // no need to mutex-protect this as all threads in the universe are gone at that point - if( U->timer_deep != NULL) // test ins case some early internal error prevented Lanes from creating the deep timer - { - -- U->timer_deep->refcount; // should be 0 now - } - free_deep_prelude( L, (DeepPrelude*) U->timer_deep); - U->timer_deep = NULL; - - close_keepers( U, L); - - // remove the protected allocator, if any - cleanup_allocator_function( U, L); + } + // NO lua_close() in this case because we don't know where execution of the state was interrupted + lane_cleanup( s); + s = next_s; + ++ n; + } + U->selfdestruct_first = SELFDESTRUCT_END; + } + MUTEX_UNLOCK( &U->selfdestruct_cs); + + DEBUGSPEW_CODE( fprintf( stderr, "Killed %d lane(s) at process end.\n", n)); + } + } + + // If some lanes are currently cleaning after themselves, wait until they are done. + // They are no longer listed in the selfdestruct chain, but they still have to lua_close(). 
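The selfdestruct_gc() hunks above amount to a bounded wait: cancelled lanes get until the configured shutdown_timeout to notice their cancel_request, after which the survivors are THREAD_KILLed. Roughly, with the chain walking and DEBUGSPEW reporting stripped out and a made-up counting helper standing in for the inline loop:

    // Sketch only; count_pending_hard_cancels() is hypothetical and would count
    // lanes whose cancel_request is still CANCEL_HARD.
    static int count_pending_hard_cancels( Universe* U);

    static void wait_for_lanes_to_cancel( Universe* U, lua_Number shutdown_timeout)
    {
        double const t_until = now_secs() + shutdown_timeout;
        while( U->selfdestruct_first != SELFDESTRUCT_END)
        {
            YIELD(); // give the lanes a chance to run their cancellation checks
            if( count_pending_hard_cancels( U) == 0 || now_secs() >= t_until)
            {
                break; // everyone reacted, or the timeout expired and the caller kills the rest
            }
        }
    }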
+ while( U->selfdestructing_count > 0) + { + YIELD(); + } + + // necessary so that calling free_deep_prelude doesn't crash because linda_id expects a linda lightuserdata at absolute slot 1 + lua_settop( L, 0); + // no need to mutex-protect this as all threads in the universe are gone at that point + if( U->timer_deep != NULL) // test ins case some early internal error prevented Lanes from creating the deep timer + { + -- U->timer_deep->refcount; // should be 0 now + free_deep_prelude( L, (DeepPrelude*) U->timer_deep); + U->timer_deep = NULL; + } + + close_keepers( U, L); + + // remove the protected allocator, if any + cleanup_allocator_function( U, L); #if HAVE_LANE_TRACKING - MUTEX_FREE( &U->tracking_cs); + MUTEX_FREE( &U->tracking_cs); #endif // HAVE_LANE_TRACKING - // Linked chains handling - MUTEX_FREE( &U->selfdestruct_cs); - MUTEX_FREE( &U->require_cs); - // Locks for 'tools.c' inc/dec counters - MUTEX_FREE( &U->deep_lock); - MUTEX_FREE( &U->mtid_lock); - // universe is no longer available (nor necessary) - // we need to do this in case some deep userdata objects were created before Lanes was initialized, - // as potentially they will be garbage collected after Lanes at application shutdown - universe_store( L, NULL); - return 0; + // Linked chains handling + MUTEX_FREE( &U->selfdestruct_cs); + MUTEX_FREE( &U->require_cs); + // Locks for 'tools.c' inc/dec counters + MUTEX_FREE( &U->deep_lock); + MUTEX_FREE( &U->mtid_lock); + // universe is no longer available (nor necessary) + // we need to do this in case some deep userdata objects were created before Lanes was initialized, + // as potentially they will be garbage collected after Lanes at application shutdown + universe_store( L, NULL); + return 0; } @@ -606,23 +606,23 @@ static int selfdestruct_gc( lua_State* L) // LUAG_FUNC( set_singlethreaded) { - uint_t cores = luaG_optunsigned( L, 1, 1); - (void) cores; // prevent "unused" warning + uint_t cores = luaG_optunsigned( L, 1, 1); + (void) cores; // prevent "unused" warning #ifdef PLATFORM_OSX #ifdef _UTILBINDTHREADTOCPU - if( cores > 1) - { - return luaL_error( L, "Limiting to N>1 cores not possible"); - } - // requires 'chudInitialize()' - utilBindThreadToCPU(0); // # of CPU to run on (we cannot limit to 2..N CPUs?) - return 0; + if( cores > 1) + { + return luaL_error( L, "Limiting to N>1 cores not possible"); + } + // requires 'chudInitialize()' + utilBindThreadToCPU(0); // # of CPU to run on (we cannot limit to 2..N CPUs?) 
+ return 0; #else - return luaL_error( L, "Not available: compile with _UTILBINDTHREADTOCPU"); + return luaL_error( L, "Not available: compile with _UTILBINDTHREADTOCPU"); #endif #else - return luaL_error( L, "not implemented"); + return luaL_error( L, "not implemented"); #endif } @@ -650,190 +650,190 @@ static DECLARE_CONST_UNIQUE_KEY( EXTENDED_STACKTRACE_REGKEY, 0x2357c69a7c92c936) LUAG_FUNC( set_error_reporting) { - bool_t equal; - luaL_checktype( L, 1, LUA_TSTRING); - lua_pushliteral( L, "extended"); - equal = lua_rawequal( L, -1, 1); - lua_pop( L, 1); - if( equal) - { - goto done; - } - lua_pushliteral( L, "basic"); - equal = !lua_rawequal( L, -1, 1); - lua_pop( L, 1); - if( equal) - { - return luaL_error( L, "unsupported error reporting model"); - } + bool_t equal; + luaL_checktype( L, 1, LUA_TSTRING); + lua_pushliteral( L, "extended"); + equal = lua_rawequal( L, -1, 1); + lua_pop( L, 1); + if( equal) + { + goto done; + } + lua_pushliteral( L, "basic"); + equal = !lua_rawequal( L, -1, 1); + lua_pop( L, 1); + if( equal) + { + return luaL_error( L, "unsupported error reporting model"); + } done: - REGISTRY_SET( L, EXTENDED_STACKTRACE_REGKEY, lua_pushboolean( L, equal)); - return 0; + REGISTRY_SET( L, EXTENDED_STACKTRACE_REGKEY, lua_pushboolean( L, equal)); + return 0; } static int lane_error( lua_State* L) { - lua_Debug ar; - int n; - bool_t extended; - - // error message (any type) - STACK_CHECK_ABS( L, 1); // some_error - - // Don't do stack survey for cancelled lanes. - // - if( equal_unique_key( L, 1, CANCEL_ERROR)) - { - return 1; // just pass on - } - - STACK_GROW( L, 3); - REGISTRY_GET( L, EXTENDED_STACKTRACE_REGKEY); // some_error basic|extended - extended = lua_toboolean( L, -1); - lua_pop( L, 1); // some_error - - // Place stack trace at 'registry[lane_error]' for the 'lua_pcall()' - // caller to fetch. This bypasses the Lua 5.1 limitation of only one - // return value from error handler to 'lua_pcall()' caller. - - // It's adequate to push stack trace as a table. This gives the receiver - // of the stack best means to format it to their liking. Also, it allows - // us to add more stack info later, if needed. - // - // table of { "sourcefile.lua:", ... } - // - lua_newtable( L); // some_error {} - - // Best to start from level 1, but in some cases it might be a C function - // and we don't get '.currentline' for that. It's okay - just keep level - // and table index growing separate. --AKa 22-Jan-2009 - // - for( n = 1; lua_getstack( L, n, &ar); ++ n) - { - lua_getinfo( L, extended ? 
"Sln" : "Sl", &ar); - if( extended) - { - lua_newtable( L); // some_error {} {} - - lua_pushstring( L, ar.source); // some_error {} {} source - lua_setfield( L, -2, "source"); // some_error {} {} - - lua_pushinteger( L, ar.currentline); // some_error {} {} currentline - lua_setfield( L, -2, "currentline"); // some_error {} {} - - lua_pushstring( L, ar.name); // some_error {} {} name - lua_setfield( L, -2, "name"); // some_error {} {} - - lua_pushstring( L, ar.namewhat); // some_error {} {} namewhat - lua_setfield( L, -2, "namewhat"); // some_error {} {} - - lua_pushstring( L, ar.what); // some_error {} {} what - lua_setfield( L, -2, "what"); // some_error {} {} - } - else if( ar.currentline > 0) - { - lua_pushfstring( L, "%s:%d", ar.short_src, ar.currentline); // some_error {} "blah:blah" - } - else - { - lua_pushfstring( L, "%s:?", ar.short_src); // some_error {} "blah" - } - lua_rawseti( L, -2, (lua_Integer) n); // some_error {} - } - - REGISTRY_SET( L, STACKTRACE_REGKEY, lua_insert( L, -2)); // some_error - - STACK_END( L, 1); - return 1; // the untouched error value + lua_Debug ar; + int n; + bool_t extended; + + // error message (any type) + STACK_CHECK_ABS( L, 1); // some_error + + // Don't do stack survey for cancelled lanes. + // + if( equal_unique_key( L, 1, CANCEL_ERROR)) + { + return 1; // just pass on + } + + STACK_GROW( L, 3); + REGISTRY_GET( L, EXTENDED_STACKTRACE_REGKEY); // some_error basic|extended + extended = lua_toboolean( L, -1); + lua_pop( L, 1); // some_error + + // Place stack trace at 'registry[lane_error]' for the 'lua_pcall()' + // caller to fetch. This bypasses the Lua 5.1 limitation of only one + // return value from error handler to 'lua_pcall()' caller. + + // It's adequate to push stack trace as a table. This gives the receiver + // of the stack best means to format it to their liking. Also, it allows + // us to add more stack info later, if needed. + // + // table of { "sourcefile.lua:", ... } + // + lua_newtable( L); // some_error {} + + // Best to start from level 1, but in some cases it might be a C function + // and we don't get '.currentline' for that. It's okay - just keep level + // and table index growing separate. --AKa 22-Jan-2009 + // + for( n = 1; lua_getstack( L, n, &ar); ++ n) + { + lua_getinfo( L, extended ? 
"Sln" : "Sl", &ar); + if( extended) + { + lua_newtable( L); // some_error {} {} + + lua_pushstring( L, ar.source); // some_error {} {} source + lua_setfield( L, -2, "source"); // some_error {} {} + + lua_pushinteger( L, ar.currentline); // some_error {} {} currentline + lua_setfield( L, -2, "currentline"); // some_error {} {} + + lua_pushstring( L, ar.name); // some_error {} {} name + lua_setfield( L, -2, "name"); // some_error {} {} + + lua_pushstring( L, ar.namewhat); // some_error {} {} namewhat + lua_setfield( L, -2, "namewhat"); // some_error {} {} + + lua_pushstring( L, ar.what); // some_error {} {} what + lua_setfield( L, -2, "what"); // some_error {} {} + } + else if( ar.currentline > 0) + { + lua_pushfstring( L, "%s:%d", ar.short_src, ar.currentline); // some_error {} "blah:blah" + } + else + { + lua_pushfstring( L, "%s:?", ar.short_src); // some_error {} "blah" + } + lua_rawseti( L, -2, (lua_Integer) n); // some_error {} + } + + REGISTRY_SET( L, STACKTRACE_REGKEY, lua_insert( L, -2)); // some_error + + STACK_END( L, 1); + return 1; // the untouched error value } #endif // ERROR_FULL_STACK static void push_stack_trace( lua_State* L, int rc_, int stk_base_) { - // Lua 5.1 error handler is limited to one return value; it stored the stack trace in the registry - switch( rc_) - { - case LUA_OK: // no error, body return values are on the stack - break; + // Lua 5.1 error handler is limited to one return value; it stored the stack trace in the registry + switch( rc_) + { + case LUA_OK: // no error, body return values are on the stack + break; - case LUA_ERRRUN: // cancellation or a runtime error + case LUA_ERRRUN: // cancellation or a runtime error #if ERROR_FULL_STACK // when ERROR_FULL_STACK, we installed a handler - { - STACK_CHECK( L, 0); - // fetch the call stack table from the registry where the handler stored it - STACK_GROW( L, 1); - // yields nil if no stack was generated (in case of cancellation for example) - REGISTRY_GET( L, STACKTRACE_REGKEY); // err trace|nil - STACK_END( L, 1); - - // For cancellation the error message is CANCEL_ERROR, and a stack trace isn't placed - // For other errors, the message can be whatever was thrown, and we should have a stack trace table - ASSERT_L( lua_type( L, 1 + stk_base_) == (equal_unique_key( L, stk_base_, CANCEL_ERROR) ? LUA_TNIL : LUA_TTABLE)); - // Just leaving the stack trace table on the stack is enough to get it through to the master. - break; - } + { + STACK_CHECK( L, 0); + // fetch the call stack table from the registry where the handler stored it + STACK_GROW( L, 1); + // yields nil if no stack was generated (in case of cancellation for example) + REGISTRY_GET( L, STACKTRACE_REGKEY); // err trace|nil + STACK_END( L, 1); + + // For cancellation the error message is CANCEL_ERROR, and a stack trace isn't placed + // For other errors, the message can be whatever was thrown, and we should have a stack trace table + ASSERT_L( lua_type( L, 1 + stk_base_) == (equal_unique_key( L, stk_base_, CANCEL_ERROR) ? LUA_TNIL : LUA_TTABLE)); + // Just leaving the stack trace table on the stack is enough to get it through to the master. 
+ break; + } #endif // fall through if not ERROR_FULL_STACK - case LUA_ERRMEM: // memory allocation error (handler not called) - case LUA_ERRERR: // error while running the error handler (if any, for example an out-of-memory condition) - default: - // we should have a single value which is either a string (the error message) or CANCEL_ERROR - ASSERT_L( (lua_gettop( L) == stk_base_) && ((lua_type( L, stk_base_) == LUA_TSTRING) || equal_unique_key( L, stk_base_, CANCEL_ERROR))); - break; - } + case LUA_ERRMEM: // memory allocation error (handler not called) + case LUA_ERRERR: // error while running the error handler (if any, for example an out-of-memory condition) + default: + // we should have a single value which is either a string (the error message) or CANCEL_ERROR + ASSERT_L( (lua_gettop( L) == stk_base_) && ((lua_type( L, stk_base_) == LUA_TSTRING) || equal_unique_key( L, stk_base_, CANCEL_ERROR))); + break; + } } LUAG_FUNC( set_debug_threadname) { - DECLARE_CONST_UNIQUE_KEY( hidden_regkey, LG_set_debug_threadname); - // C s_lane structure is a light userdata upvalue - Lane* s = lua_touserdata( L, lua_upvalueindex( 1)); - luaL_checktype( L, -1, LUA_TSTRING); // "name" - lua_settop( L, 1); - STACK_CHECK_ABS( L, 1); - // store a hidden reference in the registry to make sure the string is kept around even if a lane decides to manually change the "decoda_name" global... - REGISTRY_SET( L, hidden_regkey, lua_pushvalue( L, -2)); - STACK_MID( L, 1); - s->debug_name = lua_tostring( L, -1); - // keep a direct pointer on the string - THREAD_SETNAME( s->debug_name); - // to see VM name in Decoda debugger Virtual Machine window - lua_setglobal( L, "decoda_name"); // - STACK_END( L, 0); - return 0; + DECLARE_CONST_UNIQUE_KEY( hidden_regkey, LG_set_debug_threadname); + // C s_lane structure is a light userdata upvalue + Lane* s = lua_touserdata( L, lua_upvalueindex( 1)); + luaL_checktype( L, -1, LUA_TSTRING); // "name" + lua_settop( L, 1); + STACK_CHECK_ABS( L, 1); + // store a hidden reference in the registry to make sure the string is kept around even if a lane decides to manually change the "decoda_name" global... 
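Pinning the name in the registry matters because the const char* returned by lua_tostring() is only guaranteed to stay valid while the string remains reachable from Lua. A generic sketch of the same idiom with luaL_ref(), instead of the hidden unique key used here:

    int ref;
    lua_pushvalue( L, -1);                        // name name
    ref = luaL_ref( L, LUA_REGISTRYINDEX);        // name            (registry[ref] = name)
    {
        char const* name = lua_tostring( L, -1);  // pointer stays valid while the ref is held
        /* ... use name from C ... */
    }
    // when the string may be released: luaL_unref( L, LUA_REGISTRYINDEX, ref);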
+ REGISTRY_SET( L, hidden_regkey, lua_pushvalue( L, -2)); + STACK_MID( L, 1); + s->debug_name = lua_tostring( L, -1); + // keep a direct pointer on the string + THREAD_SETNAME( s->debug_name); + // to see VM name in Decoda debugger Virtual Machine window + lua_setglobal( L, "decoda_name"); // + STACK_END( L, 0); + return 0; } LUAG_FUNC( get_debug_threadname) { - Lane* const s = lua_toLane( L, 1); - luaL_argcheck( L, lua_gettop( L) == 1, 2, "too many arguments"); - lua_pushstring( L, s->debug_name); - return 1; + Lane* const s = lua_toLane( L, 1); + luaL_argcheck( L, lua_gettop( L) == 1, 2, "too many arguments"); + lua_pushstring( L, s->debug_name); + return 1; } LUAG_FUNC( set_thread_priority) { - int const prio = (int) luaL_checkinteger( L, 1); - // public Lanes API accepts a generic range -3/+3 - // that will be remapped into the platform-specific scheduler priority scheme - // On some platforms, -3 is equivalent to -2 and +3 to +2 - if( prio < THREAD_PRIO_MIN || prio > THREAD_PRIO_MAX) - { - return luaL_error( L, "priority out of range: %d..+%d (%d)", THREAD_PRIO_MIN, THREAD_PRIO_MAX, prio); - } - THREAD_SET_PRIORITY( prio); - return 0; + int const prio = (int) luaL_checkinteger( L, 1); + // public Lanes API accepts a generic range -3/+3 + // that will be remapped into the platform-specific scheduler priority scheme + // On some platforms, -3 is equivalent to -2 and +3 to +2 + if( prio < THREAD_PRIO_MIN || prio > THREAD_PRIO_MAX) + { + return luaL_error( L, "priority out of range: %d..+%d (%d)", THREAD_PRIO_MIN, THREAD_PRIO_MAX, prio); + } + THREAD_SET_PRIORITY( prio); + return 0; } LUAG_FUNC( set_thread_affinity) { - lua_Integer affinity = luaL_checkinteger( L, 1); - if( affinity <= 0) - { - return luaL_error( L, "invalid affinity (%d)", affinity); - } - THREAD_SET_AFFINITY( (unsigned int) affinity); - return 0; + lua_Integer affinity = luaL_checkinteger( L, 1); + if( affinity <= 0) + { + return luaL_error( L, "invalid affinity (%d)", affinity); + } + THREAD_SET_AFFINITY( (unsigned int) affinity); + return 0; } #if USE_DEBUG_SPEW @@ -841,141 +841,141 @@ LUAG_FUNC( set_thread_affinity) // LUA_ERRERR doesn't have the same value struct errcode_name { - int code; - char const* name; + int code; + char const* name; }; static struct errcode_name s_errcodes[] = { - { LUA_OK, "LUA_OK"}, - { LUA_YIELD, "LUA_YIELD"}, - { LUA_ERRRUN, "LUA_ERRRUN"}, - { LUA_ERRSYNTAX, "LUA_ERRSYNTAX"}, - { LUA_ERRMEM, "LUA_ERRMEM"}, - { LUA_ERRGCMM, "LUA_ERRGCMM"}, - { LUA_ERRERR, "LUA_ERRERR"}, + { LUA_OK, "LUA_OK"}, + { LUA_YIELD, "LUA_YIELD"}, + { LUA_ERRRUN, "LUA_ERRRUN"}, + { LUA_ERRSYNTAX, "LUA_ERRSYNTAX"}, + { LUA_ERRMEM, "LUA_ERRMEM"}, + { LUA_ERRGCMM, "LUA_ERRGCMM"}, + { LUA_ERRERR, "LUA_ERRERR"}, }; static char const* get_errcode_name( int _code) { - int i; - for( i = 0; i < 7; ++ i) - { - if( s_errcodes[i].code == _code) - { - return s_errcodes[i].name; - } - } - return ""; + int i; + for( i = 0; i < 7; ++ i) + { + if( s_errcodes[i].code == _code) + { + return s_errcodes[i].name; + } + } + return ""; } #endif // USE_DEBUG_SPEW #if THREADWAIT_METHOD == THREADWAIT_CONDVAR // implies THREADAPI == THREADAPI_PTHREAD static void thread_cleanup_handler( void* opaque) { - Lane* s= (Lane*) opaque; - MUTEX_LOCK( &s->done_lock); - s->status = CANCELLED; - SIGNAL_ONE( &s->done_signal); // wake up master (while 's->done_lock' is on) - MUTEX_UNLOCK( &s->done_lock); + Lane* s= (Lane*) opaque; + MUTEX_LOCK( &s->done_lock); + s->status = CANCELLED; + SIGNAL_ONE( &s->done_signal); // wake up master (while 's->done_lock' 
is on) + MUTEX_UNLOCK( &s->done_lock); } #endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR static THREAD_RETURN_T THREAD_CALLCONV lane_main( void* vs) { - Lane* s = (Lane*) vs; - int rc, rc2; - lua_State* L = s->L; - // Called with the lane function and arguments on the stack - int const nargs = lua_gettop( L) - 1; - DEBUGSPEW_CODE( Universe* U = universe_get( L)); - THREAD_MAKE_ASYNCH_CANCELLABLE(); - THREAD_CLEANUP_PUSH( thread_cleanup_handler, s); - s->status = RUNNING; // PENDING -> RUNNING - - // Tie "set_finalizer()" to the state - lua_pushcfunction( L, LG_set_finalizer); - populate_func_lookup_table( L, -1, "set_finalizer"); - lua_setglobal( L, "set_finalizer"); - - // Tie "set_debug_threadname()" to the state - // But don't register it in the lookup database because of the s_lane pointer upvalue - lua_pushlightuserdata( L, s); - lua_pushcclosure( L, LG_set_debug_threadname, 1); - lua_setglobal( L, "set_debug_threadname"); - - // Tie "cancel_test()" to the state - lua_pushcfunction( L, LG_cancel_test); - populate_func_lookup_table( L, -1, "cancel_test"); - lua_setglobal( L, "cancel_test"); - - // this could be done in lane_new before the lane body function is pushed on the stack to avoid unnecessary stack slot shifting around + Lane* s = (Lane*) vs; + int rc, rc2; + lua_State* L = s->L; + // Called with the lane function and arguments on the stack + int const nargs = lua_gettop( L) - 1; + DEBUGSPEW_CODE( Universe* U = universe_get( L)); + THREAD_MAKE_ASYNCH_CANCELLABLE(); + THREAD_CLEANUP_PUSH( thread_cleanup_handler, s); + s->status = RUNNING; // PENDING -> RUNNING + + // Tie "set_finalizer()" to the state + lua_pushcfunction( L, LG_set_finalizer); + populate_func_lookup_table( L, -1, "set_finalizer"); + lua_setglobal( L, "set_finalizer"); + + // Tie "set_debug_threadname()" to the state + // But don't register it in the lookup database because of the s_lane pointer upvalue + lua_pushlightuserdata( L, s); + lua_pushcclosure( L, LG_set_debug_threadname, 1); + lua_setglobal( L, "set_debug_threadname"); + + // Tie "cancel_test()" to the state + lua_pushcfunction( L, LG_cancel_test); + populate_func_lookup_table( L, -1, "cancel_test"); + lua_setglobal( L, "cancel_test"); + + // this could be done in lane_new before the lane body function is pushed on the stack to avoid unnecessary stack slot shifting around #if ERROR_FULL_STACK - // Tie "set_error_reporting()" to the state - lua_pushcfunction( L, LG_set_error_reporting); - populate_func_lookup_table( L, -1, "set_error_reporting"); - lua_setglobal( L, "set_error_reporting"); - - STACK_GROW( L, 1); - lua_pushcfunction( L, lane_error); // func args handler - lua_insert( L, 1); // handler func args + // Tie "set_error_reporting()" to the state + lua_pushcfunction( L, LG_set_error_reporting); + populate_func_lookup_table( L, -1, "set_error_reporting"); + lua_setglobal( L, "set_error_reporting"); + + STACK_GROW( L, 1); + lua_pushcfunction( L, lane_error); // func args handler + lua_insert( L, 1); // handler func args #endif // ERROR_FULL_STACK - rc = lua_pcall( L, nargs, LUA_MULTRET, ERROR_FULL_STACK); // retvals|err + rc = lua_pcall( L, nargs, LUA_MULTRET, ERROR_FULL_STACK); // retvals|err #if ERROR_FULL_STACK - lua_remove( L, 1); // retvals|error + lua_remove( L, 1); // retvals|error # endif // ERROR_FULL_STACK - // in case of error and if it exists, fetch stack trace from registry and push it - push_stack_trace( L, rc, 1); // retvals|error [trace] - - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "Lane %p body: %s (%s)\n" INDENT_END, L, 
get_errcode_name( rc), equal_unique_key( L, 1, CANCEL_ERROR) ? "cancelled" : lua_typename( L, lua_type( L, 1)))); - //STACK_DUMP(L); - // Call finalizers, if the script has set them up. - // - rc2 = run_finalizers( L, rc); - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "Lane %p finalizer: %s\n" INDENT_END, L, get_errcode_name( rc2))); - if( rc2 != LUA_OK) // Error within a finalizer! - { - // the finalizer generated an error, and left its own error message [and stack trace] on the stack - rc = rc2; // we're overruling the earlier script error or normal return - } - s->waiting_on = NULL; // just in case - if( selfdestruct_remove( s)) // check and remove (under lock!) - { - // We're a free-running thread and no-one's there to clean us up. - // - lua_close( s->L); - - MUTEX_LOCK( &s->U->selfdestruct_cs); - // done with lua_close(), terminal shutdown sequence may proceed - -- s->U->selfdestructing_count; - MUTEX_UNLOCK( &s->U->selfdestruct_cs); - - lane_cleanup( s); // s is freed at this point - } - else - { - // leave results (1..top) or error message + stack trace (1..2) on the stack - master will copy them - - enum e_status st = (rc == 0) ? DONE : equal_unique_key( L, 1, CANCEL_ERROR) ? CANCELLED : ERROR_ST; - - // Posix no PTHREAD_TIMEDJOIN: - // 'done_lock' protects the -> DONE|ERROR_ST|CANCELLED state change - // + // in case of error and if it exists, fetch stack trace from registry and push it + push_stack_trace( L, rc, 1); // retvals|error [trace] + + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "Lane %p body: %s (%s)\n" INDENT_END, L, get_errcode_name( rc), equal_unique_key( L, 1, CANCEL_ERROR) ? "cancelled" : lua_typename( L, lua_type( L, 1)))); + //STACK_DUMP(L); + // Call finalizers, if the script has set them up. + // + rc2 = run_finalizers( L, rc); + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "Lane %p finalizer: %s\n" INDENT_END, L, get_errcode_name( rc2))); + if( rc2 != LUA_OK) // Error within a finalizer! + { + // the finalizer generated an error, and left its own error message [and stack trace] on the stack + rc = rc2; // we're overruling the earlier script error or normal return + } + s->waiting_on = NULL; // just in case + if( selfdestruct_remove( s)) // check and remove (under lock!) + { + // We're a free-running thread and no-one's there to clean us up. + // + lua_close( s->L); + + MUTEX_LOCK( &s->U->selfdestruct_cs); + // done with lua_close(), terminal shutdown sequence may proceed + -- s->U->selfdestructing_count; + MUTEX_UNLOCK( &s->U->selfdestruct_cs); + + lane_cleanup( s); // s is freed at this point + } + else + { + // leave results (1..top) or error message + stack trace (1..2) on the stack - master will copy them + + enum e_status st = (rc == 0) ? DONE : equal_unique_key( L, 1, CANCEL_ERROR) ? 
CANCELLED : ERROR_ST; + + // Posix no PTHREAD_TIMEDJOIN: + // 'done_lock' protects the -> DONE|ERROR_ST|CANCELLED state change + // #if THREADWAIT_METHOD == THREADWAIT_CONDVAR - MUTEX_LOCK( &s->done_lock); - { + MUTEX_LOCK( &s->done_lock); + { #endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR - s->status = st; + s->status = st; #if THREADWAIT_METHOD == THREADWAIT_CONDVAR - SIGNAL_ONE( &s->done_signal); // wake up master (while 's->done_lock' is on) - } - MUTEX_UNLOCK( &s->done_lock); + SIGNAL_ONE( &s->done_signal); // wake up master (while 's->done_lock' is on) + } + MUTEX_UNLOCK( &s->done_lock); #endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR - } - THREAD_CLEANUP_POP( FALSE); - return 0; // ignored + } + THREAD_CLEANUP_POP( FALSE); + return 0; // ignored } // --- If a client wants to transfer stuff of a given module from the current state to another Lane, the module must be required @@ -984,20 +984,20 @@ static THREAD_RETURN_T THREAD_CALLCONV lane_main( void* vs) // upvalue[1]: _G.require LUAG_FUNC( require) { - char const* name = lua_tostring( L, 1); - int const nargs = lua_gettop( L); - DEBUGSPEW_CODE( Universe* U = universe_get( L)); - STACK_CHECK( L, 0); - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lanes.require %s BEGIN\n" INDENT_END, name)); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - lua_pushvalue( L, lua_upvalueindex(1)); // "name" require - lua_insert( L, 1); // require "name" - lua_call( L, nargs, 1); // module - populate_func_lookup_table( L, -1, name); - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lanes.require %s END\n" INDENT_END, name)); - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - STACK_END( L, 0); - return 1; + char const* name = lua_tostring( L, 1); + int const nargs = lua_gettop( L); + DEBUGSPEW_CODE( Universe* U = universe_get( L)); + STACK_CHECK( L, 0); + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lanes.require %s BEGIN\n" INDENT_END, name)); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + lua_pushvalue( L, lua_upvalueindex(1)); // "name" require + lua_insert( L, 1); // require "name" + lua_call( L, nargs, 1); // module + populate_func_lookup_table( L, -1, name); + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lanes.require %s END\n" INDENT_END, name)); + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + STACK_END( L, 0); + return 1; } @@ -1006,20 +1006,20 @@ LUAG_FUNC( require) // lanes.register( "modname", module) LUAG_FUNC( register) { - char const* name = luaL_checkstring( L, 1); - int const mod_type = lua_type( L, 2); - // ignore extra parameters, just in case - lua_settop( L, 2); - luaL_argcheck( L, (mod_type == LUA_TTABLE) || (mod_type == LUA_TFUNCTION), 2, "unexpected module type"); - DEBUGSPEW_CODE( Universe* U = universe_get( L)); - STACK_CHECK( L, 0); // "name" mod_table - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lanes.register %s BEGIN\n" INDENT_END, name)); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - populate_func_lookup_table( L, -1, name); - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lanes.register %s END\n" INDENT_END, name)); - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - STACK_END( L, 0); - return 0; + char const* name = luaL_checkstring( L, 1); + int const mod_type = lua_type( L, 2); + // ignore extra parameters, just in case + lua_settop( L, 2); + luaL_argcheck( L, (mod_type == LUA_TTABLE) || (mod_type == LUA_TFUNCTION), 2, "unexpected module type"); + DEBUGSPEW_CODE( Universe* U = universe_get( L)); + STACK_CHECK( L, 0); // "name" mod_table + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN 
"lanes.register %s BEGIN\n" INDENT_END, name)); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + populate_func_lookup_table( L, -1, name); + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lanes.register %s END\n" INDENT_END, name)); + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + STACK_END( L, 0); + return 0; } // crc64/we of string "GCCB_KEY" generated at http://www.nitrxgen.net/hashgen/ @@ -1039,247 +1039,247 @@ static DECLARE_CONST_UNIQUE_KEY( GCCB_KEY, 0xcfb1f046ef074e88); // LUAG_FUNC( lane_new) { - lua_State* L2; - Lane* s; - Lane** ud; + lua_State* L2; + Lane* s; + Lane** ud; - char const* libs_str = lua_tostring( L, 2); - int const priority = (int) luaL_optinteger( L, 3, 0); - uint_t globals_idx = lua_isnoneornil( L, 4) ? 0 : 4; - uint_t package_idx = lua_isnoneornil( L, 5) ? 0 : 5; - uint_t required_idx = lua_isnoneornil( L, 6) ? 0 : 6; - uint_t gc_cb_idx = lua_isnoneornil( L, 7) ? 0 : 7; + char const* libs_str = lua_tostring( L, 2); + int const priority = (int) luaL_optinteger( L, 3, 0); + uint_t globals_idx = lua_isnoneornil( L, 4) ? 0 : 4; + uint_t package_idx = lua_isnoneornil( L, 5) ? 0 : 5; + uint_t required_idx = lua_isnoneornil( L, 6) ? 0 : 6; + uint_t gc_cb_idx = lua_isnoneornil( L, 7) ? 0 : 7; #define FIXED_ARGS 7 - int const nargs = lua_gettop(L) - FIXED_ARGS; - Universe* U = universe_get( L); - ASSERT_L( nargs >= 0); - - // public Lanes API accepts a generic range -3/+3 - // that will be remapped into the platform-specific scheduler priority scheme - // On some platforms, -3 is equivalent to -2 and +3 to +2 - if( priority < THREAD_PRIO_MIN || priority > THREAD_PRIO_MAX) - { - return luaL_error( L, "Priority out of range: %d..+%d (%d)", THREAD_PRIO_MIN, THREAD_PRIO_MAX, priority); - } - - /* --- Create and prepare the sub state --- */ - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: setup\n" INDENT_END)); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - - // populate with selected libraries at the same time - L2 = luaG_newstate( U, L, libs_str); // L // L2 - - STACK_GROW( L2, nargs + 3); // - STACK_CHECK( L2, 0); - - STACK_GROW( L, 3); // func libs priority globals package required gc_cb [... args ...] - STACK_CHECK( L, 0); - - // give a default "Lua" name to the thread to see VM name in Decoda debugger - lua_pushfstring( L2, "Lane #%p", L2); // "..." - lua_setglobal( L2, "decoda_name"); // - ASSERT_L( lua_gettop( L2) == 0); - - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: update 'package'\n" INDENT_END)); - // package - if( package_idx != 0) - { - // when copying with mode eLM_LaneBody, should raise an error in case of problem, not leave it one the stack - (void) luaG_inter_copy_package( U, L, L2, package_idx, eLM_LaneBody); - } - - // modules to require in the target lane *before* the function is transfered! - - if( required_idx != 0) - { - int nbRequired = 1; - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: require 'required' list\n" INDENT_END)); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - // should not happen, was checked in lanes.lua before calling lane_new() - if( lua_type( L, required_idx) != LUA_TTABLE) - { - return luaL_error( L, "expected required module list as a table, got %s", luaL_typename( L, required_idx)); - } - - lua_pushnil( L); // func libs priority globals package required gc_cb [... args ...] nil - while( lua_next( L, required_idx) != 0) // func libs priority globals package required gc_cb [... args ...] 
n "modname" - { - if( lua_type( L, -1) != LUA_TSTRING || lua_type( L, -2) != LUA_TNUMBER || lua_tonumber( L, -2) != nbRequired) - { - return luaL_error( L, "required module list should be a list of strings"); - } - else - { - // require the module in the target state, and populate the lookup table there too - size_t len; - char const* name = lua_tolstring( L, -1, &len); - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: require '%s'\n" INDENT_END, name)); - - // require the module in the target lane - lua_getglobal( L2, "require"); // require()? - if( lua_isnil( L2, -1)) - { - lua_pop( L2, 1); // - luaL_error( L, "cannot pre-require modules without loading 'package' library first"); - } - else - { - lua_pushlstring( L2, name, len); // require() name - if( lua_pcall( L2, 1, 1, 0) != LUA_OK) // ret/errcode - { - // propagate error to main state if any - luaG_inter_move( U, L2, L, 1, eLM_LaneBody); // func libs priority globals package required gc_cb [... args ...] n "modname" error - return lua_error( L); - } - // after requiring the module, register the functions it exported in our name<->function database - populate_func_lookup_table( L2, -1, name); - lua_pop( L2, 1); // - } - } - lua_pop( L, 1); // func libs priority globals package required gc_cb [... args ...] n - ++ nbRequired; - } // func libs priority globals package required gc_cb [... args ...] - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - } - STACK_MID( L, 0); - STACK_MID( L2, 0); // - - // Appending the specified globals to the global environment - // *after* stdlibs have been loaded and modules required, in case we transfer references to native functions they exposed... - // - if( globals_idx != 0) - { - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: transfer globals\n" INDENT_END)); - if( !lua_istable( L, globals_idx)) - { - return luaL_error( L, "Expected table, got %s", luaL_typename( L, globals_idx)); - } - - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - lua_pushnil( L); // func libs priority globals package required gc_cb [... args ...] nil - // Lua 5.2 wants us to push the globals table on the stack - lua_pushglobaltable( L2); // _G - while( lua_next( L, globals_idx)) // func libs priority globals package required gc_cb [... args ...] k v - { - luaG_inter_copy( U, L, L2, 2, eLM_LaneBody); // _G k v - // assign it in L2's globals table - lua_rawset( L2, -3); // _G - lua_pop( L, 1); // func libs priority globals package required gc_cb [... args ...] k - } // func libs priority globals package required gc_cb [... args ...] - lua_pop( L2, 1); // - - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - } - STACK_MID( L, 0); - STACK_MID( L2, 0); - - // Lane main function - if( lua_type( L, 1) == LUA_TFUNCTION) - { - int res; - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: transfer lane body\n" INDENT_END)); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - lua_pushvalue( L, 1); // func libs priority globals package required gc_cb [... args ...] func - res = luaG_inter_move( U, L, L2, 1, eLM_LaneBody); // func libs priority globals package required gc_cb [... args ...] 
// func - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - if( res != 0) - { - return luaL_error( L, "tried to copy unsupported types"); - } - } - else if( lua_type( L, 1) == LUA_TSTRING) - { - // compile the string - if( luaL_loadstring( L2, lua_tostring( L, 1)) != 0) // func - { - return luaL_error( L, "error when parsing lane function code"); - } - } - STACK_MID( L, 0); - STACK_MID( L2, 1); - ASSERT_L( lua_isfunction( L2, 1)); - - // revive arguments - if( nargs > 0) - { - int res; - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: transfer lane arguments\n" INDENT_END)); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - res = luaG_inter_move( U, L, L2, nargs, eLM_LaneBody); // func libs priority globals package required gc_cb // func [... args ...] - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - if( res != 0) - { - return luaL_error( L, "tried to copy unsupported types"); - } - } - STACK_END( L, -nargs); - ASSERT_L( lua_gettop( L) == FIXED_ARGS); - STACK_CHECK( L, 0); - STACK_MID( L2, 1 + nargs); - - // 's' is allocated from heap, not Lua, since its life span may surpass the handle's (if free running thread) - // - // a Lane full userdata needs a single uservalue - ud = lua_newuserdatauv( L, sizeof( Lane*), 1); // func libs priority globals package required gc_cb lane - s = *ud = (Lane*) malloc( sizeof( Lane)); - if( s == NULL) - { - return luaL_error( L, "could not create lane: out of memory"); - } - - s->L = L2; - s->U = U; - s->status = PENDING; - s->waiting_on = NULL; - s->debug_name = ""; - s->cancel_request = CANCEL_NONE; + int const nargs = lua_gettop(L) - FIXED_ARGS; + Universe* U = universe_get( L); + ASSERT_L( nargs >= 0); + + // public Lanes API accepts a generic range -3/+3 + // that will be remapped into the platform-specific scheduler priority scheme + // On some platforms, -3 is equivalent to -2 and +3 to +2 + if( priority < THREAD_PRIO_MIN || priority > THREAD_PRIO_MAX) + { + return luaL_error( L, "Priority out of range: %d..+%d (%d)", THREAD_PRIO_MIN, THREAD_PRIO_MAX, priority); + } + + /* --- Create and prepare the sub state --- */ + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: setup\n" INDENT_END)); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + + // populate with selected libraries at the same time + L2 = luaG_newstate( U, L, libs_str); // L // L2 + + STACK_GROW( L2, nargs + 3); // + STACK_CHECK( L2, 0); + + STACK_GROW( L, 3); // func libs priority globals package required gc_cb [... args ...] + STACK_CHECK( L, 0); + + // give a default "Lua" name to the thread to see VM name in Decoda debugger + lua_pushfstring( L2, "Lane #%p", L2); // "..." + lua_setglobal( L2, "decoda_name"); // + ASSERT_L( lua_gettop( L2) == 0); + + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: update 'package'\n" INDENT_END)); + // package + if( package_idx != 0) + { + // when copying with mode eLM_LaneBody, should raise an error in case of problem, not leave it one the stack + (void) luaG_inter_copy_package( U, L, L2, package_idx, eLM_LaneBody); + } + + // modules to require in the target lane *before* the function is transfered! 
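For orientation, the bare-bones version of "create an independent state and run a body in it", without Lanes' library selection, lookup databases or inter-state copying, is quite small (a sketch assuming the usual lua.h, lualib.h, lauxlib.h and stdio.h headers):

    static void run_body_in_new_state( char const* body)
    {
        lua_State* L2 = luaL_newstate();                      // fresh, unrelated Lua state
        luaL_openlibs( L2);                                   // or open only selected libraries
        if( luaL_loadstring( L2, body) != LUA_OK              // compile the string body
         || lua_pcall( L2, 0, LUA_MULTRET, 0) != LUA_OK)      // run it
        {
            fprintf( stderr, "%s\n", lua_tostring( L2, -1));  // error message is left on the stack
        }
        lua_close( L2);
    }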
+ + if( required_idx != 0) + { + int nbRequired = 1; + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: require 'required' list\n" INDENT_END)); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + // should not happen, was checked in lanes.lua before calling lane_new() + if( lua_type( L, required_idx) != LUA_TTABLE) + { + return luaL_error( L, "expected required module list as a table, got %s", luaL_typename( L, required_idx)); + } + + lua_pushnil( L); // func libs priority globals package required gc_cb [... args ...] nil + while( lua_next( L, required_idx) != 0) // func libs priority globals package required gc_cb [... args ...] n "modname" + { + if( lua_type( L, -1) != LUA_TSTRING || lua_type( L, -2) != LUA_TNUMBER || lua_tonumber( L, -2) != nbRequired) + { + return luaL_error( L, "required module list should be a list of strings"); + } + else + { + // require the module in the target state, and populate the lookup table there too + size_t len; + char const* name = lua_tolstring( L, -1, &len); + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: require '%s'\n" INDENT_END, name)); + + // require the module in the target lane + lua_getglobal( L2, "require"); // require()? + if( lua_isnil( L2, -1)) + { + lua_pop( L2, 1); // + luaL_error( L, "cannot pre-require modules without loading 'package' library first"); + } + else + { + lua_pushlstring( L2, name, len); // require() name + if( lua_pcall( L2, 1, 1, 0) != LUA_OK) // ret/errcode + { + // propagate error to main state if any + luaG_inter_move( U, L2, L, 1, eLM_LaneBody); // func libs priority globals package required gc_cb [... args ...] n "modname" error + return lua_error( L); + } + // after requiring the module, register the functions it exported in our name<->function database + populate_func_lookup_table( L2, -1, name); + lua_pop( L2, 1); // + } + } + lua_pop( L, 1); // func libs priority globals package required gc_cb [... args ...] n + ++ nbRequired; + } // func libs priority globals package required gc_cb [... args ...] + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + } + STACK_MID( L, 0); + STACK_MID( L2, 0); // + + // Appending the specified globals to the global environment + // *after* stdlibs have been loaded and modules required, in case we transfer references to native functions they exposed... + // + if( globals_idx != 0) + { + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: transfer globals\n" INDENT_END)); + if( !lua_istable( L, globals_idx)) + { + return luaL_error( L, "Expected table, got %s", luaL_typename( L, globals_idx)); + } + + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + lua_pushnil( L); // func libs priority globals package required gc_cb [... args ...] nil + // Lua 5.2 wants us to push the globals table on the stack + lua_pushglobaltable( L2); // _G + while( lua_next( L, globals_idx)) // func libs priority globals package required gc_cb [... args ...] k v + { + luaG_inter_copy( U, L, L2, 2, eLM_LaneBody); // _G k v + // assign it in L2's globals table + lua_rawset( L2, -3); // _G + lua_pop( L, 1); // func libs priority globals package required gc_cb [... args ...] k + } // func libs priority globals package required gc_cb [... args ...] 
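The loop above follows the canonical lua_next() traversal contract: the key stays on the stack between iterations and only the value is popped. In its generic form (t being some table index, hypothetical here):

    lua_pushnil( L);                  // first "previous key"
    while( lua_next( L, t) != 0)      // pops the key, pushes the next key and its value
    {
        // key is at -2, value at -1; use them, but leave the key in place
        lua_pop( L, 1);               // pop the value, keep the key for the next lua_next()
    }
    // when lua_next() returns 0 it has already popped the last key, leaving the stack balanced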
+ lua_pop( L2, 1); // + + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + } + STACK_MID( L, 0); + STACK_MID( L2, 0); + + // Lane main function + if( lua_type( L, 1) == LUA_TFUNCTION) + { + int res; + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: transfer lane body\n" INDENT_END)); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + lua_pushvalue( L, 1); // func libs priority globals package required gc_cb [... args ...] func + res = luaG_inter_move( U, L, L2, 1, eLM_LaneBody); // func libs priority globals package required gc_cb [... args ...] // func + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + if( res != 0) + { + return luaL_error( L, "tried to copy unsupported types"); + } + } + else if( lua_type( L, 1) == LUA_TSTRING) + { + // compile the string + if( luaL_loadstring( L2, lua_tostring( L, 1)) != 0) // func + { + return luaL_error( L, "error when parsing lane function code"); + } + } + STACK_MID( L, 0); + STACK_MID( L2, 1); + ASSERT_L( lua_isfunction( L2, 1)); + + // revive arguments + if( nargs > 0) + { + int res; + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: transfer lane arguments\n" INDENT_END)); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + res = luaG_inter_move( U, L, L2, nargs, eLM_LaneBody); // func libs priority globals package required gc_cb // func [... args ...] + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + if( res != 0) + { + return luaL_error( L, "tried to copy unsupported types"); + } + } + STACK_END( L, -nargs); + ASSERT_L( lua_gettop( L) == FIXED_ARGS); + STACK_CHECK( L, 0); + STACK_MID( L2, 1 + nargs); + + // 's' is allocated from heap, not Lua, since its life span may surpass the handle's (if free running thread) + // + // a Lane full userdata needs a single uservalue + ud = lua_newuserdatauv( L, sizeof( Lane*), 1); // func libs priority globals package required gc_cb lane + s = *ud = (Lane*) malloc( sizeof( Lane)); + if( s == NULL) + { + return luaL_error( L, "could not create lane: out of memory"); + } + + s->L = L2; + s->U = U; + s->status = PENDING; + s->waiting_on = NULL; + s->debug_name = ""; + s->cancel_request = CANCEL_NONE; #if THREADWAIT_METHOD == THREADWAIT_CONDVAR - MUTEX_INIT( &s->done_lock); - SIGNAL_INIT( &s->done_signal); + MUTEX_INIT( &s->done_lock); + SIGNAL_INIT( &s->done_signal); #endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR - s->mstatus = NORMAL; - s->selfdestruct_next = NULL; + s->mstatus = NORMAL; + s->selfdestruct_next = NULL; #if HAVE_LANE_TRACKING - s->tracking_next = NULL; - if( s->U->tracking_first) - { - tracking_add( s); - } + s->tracking_next = NULL; + if( s->U->tracking_first) + { + tracking_add( s); + } #endif // HAVE_LANE_TRACKING - // Set metatable for the userdata - // - lua_pushvalue( L, lua_upvalueindex( 1)); // func libs priority globals package required gc_cb lane mt - lua_setmetatable( L, -2); // func libs priority globals package required gc_cb lane - STACK_MID( L, 1); + // Set metatable for the userdata + // + lua_pushvalue( L, lua_upvalueindex( 1)); // func libs priority globals package required gc_cb lane mt + lua_setmetatable( L, -2); // func libs priority globals package required gc_cb lane + STACK_MID( L, 1); - // Create uservalue for the userdata - // (this is where lane body return values will be stored when the handle is indexed by a numeric key) - lua_newtable( L); // func libs cancelstep priority globals package required gc_cb lane uv + // Create uservalue for the userdata + // (this is where lane body return values will be stored when the handle is indexed by a numeric 
key) + lua_newtable( L); // func libs cancelstep priority globals package required gc_cb lane uv - // Store the gc_cb callback in the uservalue - if( gc_cb_idx > 0) - { - push_unique_key( L, GCCB_KEY); // func libs priority globals package required gc_cb lane uv k - lua_pushvalue( L, gc_cb_idx); // func libs priority globals package required gc_cb lane uv k gc_cb - lua_rawset( L, -3); // func libs priority globals package required gc_cb lane uv - } + // Store the gc_cb callback in the uservalue + if( gc_cb_idx > 0) + { + push_unique_key( L, GCCB_KEY); // func libs priority globals package required gc_cb lane uv k + lua_pushvalue( L, gc_cb_idx); // func libs priority globals package required gc_cb lane uv k gc_cb + lua_rawset( L, -3); // func libs priority globals package required gc_cb lane uv + } - lua_setiuservalue( L, -2, 1); // func libs priority globals package required gc_cb lane + lua_setiuservalue( L, -2, 1); // func libs priority globals package required gc_cb lane - // Store 's' in the lane's registry, for 'cancel_test()' (we do cancel tests at pending send/receive). - REGISTRY_SET( L2, CANCEL_TEST_KEY, lua_pushlightuserdata( L2, s)); // func [... args ...] + // Store 's' in the lane's registry, for 'cancel_test()' (we do cancel tests at pending send/receive). + REGISTRY_SET( L2, CANCEL_TEST_KEY, lua_pushlightuserdata( L2, s)); // func [... args ...] - STACK_END( L, 1); - STACK_END( L2, 1 + nargs); + STACK_END( L, 1); + STACK_END( L2, 1 + nargs); - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: launching thread\n" INDENT_END)); - THREAD_CREATE( &s->thread, lane_main, s, priority); + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: launching thread\n" INDENT_END)); + THREAD_CREATE( &s->thread, lane_main, s, priority); - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - return 1; + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + return 1; } @@ -1297,79 +1297,79 @@ LUAG_FUNC( lane_new) // LUAG_FUNC( thread_gc) { - bool_t have_gc_cb = FALSE; - Lane* s = lua_toLane( L, 1); // ud - - // if there a gc callback? - lua_getiuservalue( L, 1, 1); // ud uservalue - push_unique_key( L, GCCB_KEY); // ud uservalue __gc - lua_rawget( L, -2); // ud uservalue gc_cb|nil - if( !lua_isnil( L, -1)) - { - lua_remove( L, -2); // ud gc_cb|nil - lua_pushstring( L, s->debug_name); // ud gc_cb name - have_gc_cb = TRUE; - } - else - { - lua_pop( L, 2); // ud - } - - // We can read 's->status' without locks, but not wait for it - // test KILLED state first, as it doesn't need to enter the selfdestruct chain - if( s->mstatus == KILLED) - { - // Make sure a kill has proceeded, before cleaning up the data structure. - // - // NO lua_close() in this case because we don't know where execution of the state was interrupted - DEBUGSPEW_CODE( fprintf( stderr, "** Joining with a killed thread (needs testing) **")); - // make sure the thread is no longer running, just like thread_join() - if(! THREAD_ISNULL( s->thread)) - { - THREAD_WAIT( &s->thread, -1, &s->done_signal, &s->done_lock, &s->status); - } - if( s->status >= DONE && s->L) - { - // we know the thread was killed while the Lua VM was not doing anything: we should be able to close it without crashing - // now, thread_cancel() will not forcefully kill a lane with s->status >= DONE, so I am not sure it can ever happen - lua_close( s->L); - s->L = 0; - // just in case, but s will be freed soon so... 
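The uservalue plumbing set up by lane_new above, and read back below through lua_getiuservalue(), is the Lua 5.4 way of attaching Lua values to a full userdata. Reduced to its essentials, with a hypothetical stack index cb_idx for the value to keep:

    void** ud = (void**) lua_newuserdatauv( L, sizeof( void*), 1); // userdata with one uservalue slot
    lua_newtable( L);                                              // table to hang off the userdata
    lua_pushvalue( L, cb_idx);                                     // copy the value to keep
    lua_setfield( L, -2, "gc_cb");                                 // table.gc_cb = value
    lua_setiuservalue( L, -2, 1);                                  // userdata uservalue #1 = table
    // later (for instance in __gc):
    // lua_getiuservalue( L, 1, 1);                                // pushes the attached table again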
- s->debug_name = ""; - } - DEBUGSPEW_CODE( fprintf( stderr, "** Joined ok **")); - } - else if( s->status < DONE) - { - // still running: will have to be cleaned up later - selfdestruct_add( s); - assert( s->selfdestruct_next); - if( have_gc_cb) - { - lua_pushliteral( L, "selfdestruct"); // ud gc_cb name status - lua_call( L, 2, 0); // ud - } - return 0; - } - else if( s->L) - { - // no longer accessing the Lua VM: we can close right now - lua_close( s->L); - s->L = 0; - // just in case, but s will be freed soon so... - s->debug_name = ""; - } - - // Clean up after a (finished) thread - lane_cleanup( s); - - // do this after lane cleanup in case the callback triggers an error - if( have_gc_cb) - { - lua_pushliteral( L, "closed"); // ud gc_cb name status - lua_call( L, 2, 0); // ud - } - return 0; + bool_t have_gc_cb = FALSE; + Lane* s = lua_toLane( L, 1); // ud + + // if there a gc callback? + lua_getiuservalue( L, 1, 1); // ud uservalue + push_unique_key( L, GCCB_KEY); // ud uservalue __gc + lua_rawget( L, -2); // ud uservalue gc_cb|nil + if( !lua_isnil( L, -1)) + { + lua_remove( L, -2); // ud gc_cb|nil + lua_pushstring( L, s->debug_name); // ud gc_cb name + have_gc_cb = TRUE; + } + else + { + lua_pop( L, 2); // ud + } + + // We can read 's->status' without locks, but not wait for it + // test KILLED state first, as it doesn't need to enter the selfdestruct chain + if( s->mstatus == KILLED) + { + // Make sure a kill has proceeded, before cleaning up the data structure. + // + // NO lua_close() in this case because we don't know where execution of the state was interrupted + DEBUGSPEW_CODE( fprintf( stderr, "** Joining with a killed thread (needs testing) **")); + // make sure the thread is no longer running, just like thread_join() + if(! THREAD_ISNULL( s->thread)) + { + THREAD_WAIT( &s->thread, -1, &s->done_signal, &s->done_lock, &s->status); + } + if( s->status >= DONE && s->L) + { + // we know the thread was killed while the Lua VM was not doing anything: we should be able to close it without crashing + // now, thread_cancel() will not forcefully kill a lane with s->status >= DONE, so I am not sure it can ever happen + lua_close( s->L); + s->L = 0; + // just in case, but s will be freed soon so... + s->debug_name = ""; + } + DEBUGSPEW_CODE( fprintf( stderr, "** Joined ok **")); + } + else if( s->status < DONE) + { + // still running: will have to be cleaned up later + selfdestruct_add( s); + assert( s->selfdestruct_next); + if( have_gc_cb) + { + lua_pushliteral( L, "selfdestruct"); // ud gc_cb name status + lua_call( L, 2, 0); // ud + } + return 0; + } + else if( s->L) + { + // no longer accessing the Lua VM: we can close right now + lua_close( s->L); + s->L = 0; + // just in case, but s will be freed soon so... + s->debug_name = ""; + } + + // Clean up after a (finished) thread + lane_cleanup( s); + + // do this after lane cleanup in case the callback triggers an error + if( have_gc_cb) + { + lua_pushliteral( L, "closed"); // ud gc_cb name status + lua_call( L, 2, 0); // ud + } + return 0; } //--- @@ -1384,25 +1384,25 @@ LUAG_FUNC( thread_gc) // static char const * thread_status_string( Lane* s) { - enum e_status st = s->status; // read just once (volatile) - char const* str = - (s->mstatus == KILLED) ? "killed" : // new to v3.3.0! - (st == PENDING) ? "pending" : - (st == RUNNING) ? "running" : // like in 'co.status()' - (st == WAITING) ? "waiting" : - (st == DONE) ? "done" : - (st == ERROR_ST) ? "error" : - (st == CANCELLED) ? 
"cancelled" : NULL; - return str; + enum e_status st = s->status; // read just once (volatile) + char const* str = + (s->mstatus == KILLED) ? "killed" : // new to v3.3.0! + (st == PENDING) ? "pending" : + (st == RUNNING) ? "running" : // like in 'co.status()' + (st == WAITING) ? "waiting" : + (st == DONE) ? "done" : + (st == ERROR_ST) ? "error" : + (st == CANCELLED) ? "cancelled" : NULL; + return str; } int push_thread_status( lua_State* L, Lane* s) { - char const* const str = thread_status_string( s); - ASSERT_L( str); + char const* const str = thread_status_string( s); + ASSERT_L( str); - lua_pushstring( L, str); - return 1; + lua_pushstring( L, str); + return 1; } @@ -1416,77 +1416,77 @@ int push_thread_status( lua_State* L, Lane* s) // LUAG_FUNC( thread_join) { - Lane* const s = lua_toLane( L, 1); - double wait_secs = luaL_optnumber( L, 2, -1.0); - lua_State* L2 = s->L; - int ret; - bool_t done = THREAD_ISNULL( s->thread) || THREAD_WAIT( &s->thread, wait_secs, &s->done_signal, &s->done_lock, &s->status); - if( !done || !L2) - { - STACK_GROW( L, 2); - lua_pushnil( L); - lua_pushliteral( L, "timeout"); - return 2; - } - - STACK_CHECK( L, 0); - // Thread is DONE/ERROR_ST/CANCELLED; all ours now - - if( s->mstatus == KILLED) // OS thread was killed if thread_cancel was forced - { - // in that case, even if the thread was killed while DONE/ERROR_ST/CANCELLED, ignore regular return values - STACK_GROW( L, 2); - lua_pushnil( L); - lua_pushliteral( L, "killed"); - ret = 2; - } - else - { - Universe* U = universe_get( L); - // debug_name is a pointer to string possibly interned in the lane's state, that no longer exists when the state is closed - // so store it in the userdata uservalue at a key that can't possibly collide - securize_debug_threadname( L, s); - switch( s->status) - { - case DONE: - { - uint_t n = lua_gettop( L2); // whole L2 stack - if( (n > 0) && (luaG_inter_move( U, L2, L, n, eLM_LaneBody) != 0)) - { - return luaL_error( L, "tried to copy unsupported types"); - } - ret = n; - } - break; - - case ERROR_ST: - { - int const n = lua_gettop( L2); - STACK_GROW( L, 3); - lua_pushnil( L); - // even when ERROR_FULL_STACK, if the error is not LUA_ERRRUN, the handler wasn't called, and we only have 1 error message on the stack ... 
- if( luaG_inter_move( U, L2, L, n, eLM_LaneBody) != 0) // nil "err" [trace] - { - return luaL_error( L, "tried to copy unsupported types: %s", lua_tostring( L, -n)); - } - ret = 1 + n; - } - break; - - case CANCELLED: - ret = 0; - break; - - default: - DEBUGSPEW_CODE( fprintf( stderr, "Status: %d\n", s->status)); - ASSERT_L( FALSE); - ret = 0; - } - lua_close( L2); - } - s->L = 0; - STACK_END( L, ret); - return ret; + Lane* const s = lua_toLane( L, 1); + double wait_secs = luaL_optnumber( L, 2, -1.0); + lua_State* L2 = s->L; + int ret; + bool_t done = THREAD_ISNULL( s->thread) || THREAD_WAIT( &s->thread, wait_secs, &s->done_signal, &s->done_lock, &s->status); + if( !done || !L2) + { + STACK_GROW( L, 2); + lua_pushnil( L); + lua_pushliteral( L, "timeout"); + return 2; + } + + STACK_CHECK( L, 0); + // Thread is DONE/ERROR_ST/CANCELLED; all ours now + + if( s->mstatus == KILLED) // OS thread was killed if thread_cancel was forced + { + // in that case, even if the thread was killed while DONE/ERROR_ST/CANCELLED, ignore regular return values + STACK_GROW( L, 2); + lua_pushnil( L); + lua_pushliteral( L, "killed"); + ret = 2; + } + else + { + Universe* U = universe_get( L); + // debug_name is a pointer to string possibly interned in the lane's state, that no longer exists when the state is closed + // so store it in the userdata uservalue at a key that can't possibly collide + securize_debug_threadname( L, s); + switch( s->status) + { + case DONE: + { + uint_t n = lua_gettop( L2); // whole L2 stack + if( (n > 0) && (luaG_inter_move( U, L2, L, n, eLM_LaneBody) != 0)) + { + return luaL_error( L, "tried to copy unsupported types"); + } + ret = n; + } + break; + + case ERROR_ST: + { + int const n = lua_gettop( L2); + STACK_GROW( L, 3); + lua_pushnil( L); + // even when ERROR_FULL_STACK, if the error is not LUA_ERRRUN, the handler wasn't called, and we only have 1 error message on the stack ... 
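The nil, "timeout" and nil, "killed" returns above follow the common Lua convention for reporting soft failures from a C function; the general shape, with a hypothetical my_op and failure predicate, is:

    static int my_op( lua_State* L)
    {
        if( !operation_succeeded())          // hypothetical failure check
        {
            lua_pushnil( L);
            lua_pushliteral( L, "timeout");  // caller sees: nil, "timeout"
            return 2;
        }
        lua_pushboolean( L, 1);              // caller sees: true
        return 1;
    }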
+ if( luaG_inter_move( U, L2, L, n, eLM_LaneBody) != 0) // nil "err" [trace] + { + return luaL_error( L, "tried to copy unsupported types: %s", lua_tostring( L, -n)); + } + ret = 1 + n; + } + break; + + case CANCELLED: + ret = 0; + break; + + default: + DEBUGSPEW_CODE( fprintf( stderr, "Status: %d\n", s->status)); + ASSERT_L( FALSE); + ret = 0; + } + lua_close( L2); + } + s->L = 0; + STACK_END( L, ret); + return ret; } @@ -1500,150 +1500,150 @@ LUAG_FUNC( thread_join) // Else raise an error LUAG_FUNC( thread_index) { - int const UD = 1; - int const KEY = 2; - int const USR = 3; - Lane* const s = lua_toLane( L, UD); - ASSERT_L( lua_gettop( L) == 2); - - STACK_GROW( L, 8); // up to 8 positions are needed in case of error propagation - - // If key is numeric, wait until the thread returns and populate the environment with the return values - if( lua_type( L, KEY) == LUA_TNUMBER) - { - // first, check that we don't already have an environment that holds the requested value - { - // If key is found in the uservalue, return it - lua_getiuservalue( L, UD, 1); - lua_pushvalue( L, KEY); - lua_rawget( L, USR); - if( !lua_isnil( L, -1)) - { - return 1; - } - lua_pop( L, 1); - } - { - // check if we already fetched the values from the thread or not - bool_t fetched; - lua_Integer key = lua_tointeger( L, KEY); - lua_pushinteger( L, 0); - lua_rawget( L, USR); - fetched = !lua_isnil( L, -1); - lua_pop( L, 1); // back to our 2 args + uservalue on the stack - if( !fetched) - { - lua_pushinteger( L, 0); - lua_pushboolean( L, 1); - lua_rawset( L, USR); - // wait until thread has completed - lua_pushcfunction( L, LG_thread_join); - lua_pushvalue( L, UD); - lua_call( L, 1, LUA_MULTRET); // all return values are on the stack, at slots 4+ - switch( s->status) - { - default: - if( s->mstatus != KILLED) - { - // this is an internal error, we probably never get here - lua_settop( L, 0); - lua_pushliteral( L, "Unexpected status: "); - lua_pushstring( L, thread_status_string( s)); - lua_concat( L, 2); - lua_error( L); - break; - } - // fall through if we are killed, as we got nil, "killed" on the stack - - case DONE: // got regular return values - { - int i, nvalues = lua_gettop( L) - 3; - for( i = nvalues; i > 0; -- i) - { - // pop the last element of the stack, to store it in the uservalue at its proper index - lua_rawseti( L, USR, i); - } - } - break; - - case ERROR_ST: // got 3 values: nil, errstring, callstack table - // me[-2] could carry the stack table, but even - // me[-1] is rather unnecessary (and undocumented); - // use ':join()' instead. --AKa 22-Jan-2009 - ASSERT_L( lua_isnil( L, 4) && !lua_isnil( L, 5) && lua_istable( L, 6)); - // store errstring at key -1 - lua_pushnumber( L, -1); - lua_pushvalue( L, 5); - lua_rawset( L, USR); - break; - - case CANCELLED: - // do nothing - break; - } - } - lua_settop( L, 3); // UD KEY ENV - if( key != -1) - { - lua_pushnumber( L, -1); // UD KEY ENV -1 - lua_rawget( L, USR); // UD KEY ENV "error" - if( !lua_isnil( L, -1)) // an error was stored - { - // Note: Lua 5.1 interpreter is not prepared to show - // non-string errors, so we use 'tostring()' here - // to get meaningful output. --AKa 22-Jan-2009 - // - // Also, the stack dump we get is no good; it only - // lists our internal Lanes functions. There seems - // to be no way to switch it off, though. - // - // Level 3 should show the line where 'h[x]' was read - // but this only seems to work for string messages - // (Lua 5.1.4). No idea, why. 
--AKa 22-Jan-2009 - lua_getmetatable( L, UD); // UD KEY ENV "error" mt - lua_getfield( L, -1, "cached_error"); // UD KEY ENV "error" mt error() - lua_getfield( L, -2, "cached_tostring"); // UD KEY ENV "error" mt error() tostring() - lua_pushvalue( L, 4); // UD KEY ENV "error" mt error() tostring() "error" - lua_call( L, 1, 1); // tostring( errstring) -- just in case // UD KEY ENV "error" mt error() "error" - lua_pushinteger( L, 3); // UD KEY ENV "error" mt error() "error" 3 - lua_call( L, 2, 0); // error( tostring( errstring), 3) // UD KEY ENV "error" mt - } - else - { - lua_pop( L, 1); // back to our 3 arguments on the stack - } - } - lua_rawgeti( L, USR, (int)key); - } - return 1; - } - if( lua_type( L, KEY) == LUA_TSTRING) - { - char const * const keystr = lua_tostring( L, KEY); - lua_settop( L, 2); // keep only our original arguments on the stack - if( strcmp( keystr, "status") == 0) - { - return push_thread_status( L, s); // push the string representing the status - } - // return UD.metatable[key] - lua_getmetatable( L, UD); // UD KEY mt - lua_replace( L, -3); // mt KEY - lua_rawget( L, -2); // mt value - // only "cancel" and "join" are registered as functions, any other string will raise an error - if( lua_iscfunction( L, -1)) - { - return 1; - } - return luaL_error( L, "can't index a lane with '%s'", keystr); - } - // unknown key - lua_getmetatable( L, UD); - lua_getfield( L, -1, "cached_error"); - lua_pushliteral( L, "Unknown key: "); - lua_pushvalue( L, KEY); - lua_concat( L, 2); - lua_call( L, 1, 0); // error( "Unknown key: " .. key) -> doesn't return - return 0; + int const UD = 1; + int const KEY = 2; + int const USR = 3; + Lane* const s = lua_toLane( L, UD); + ASSERT_L( lua_gettop( L) == 2); + + STACK_GROW( L, 8); // up to 8 positions are needed in case of error propagation + + // If key is numeric, wait until the thread returns and populate the environment with the return values + if( lua_type( L, KEY) == LUA_TNUMBER) + { + // first, check that we don't already have an environment that holds the requested value + { + // If key is found in the uservalue, return it + lua_getiuservalue( L, UD, 1); + lua_pushvalue( L, KEY); + lua_rawget( L, USR); + if( !lua_isnil( L, -1)) + { + return 1; + } + lua_pop( L, 1); + } + { + // check if we already fetched the values from the thread or not + bool_t fetched; + lua_Integer key = lua_tointeger( L, KEY); + lua_pushinteger( L, 0); + lua_rawget( L, USR); + fetched = !lua_isnil( L, -1); + lua_pop( L, 1); // back to our 2 args + uservalue on the stack + if( !fetched) + { + lua_pushinteger( L, 0); + lua_pushboolean( L, 1); + lua_rawset( L, USR); + // wait until thread has completed + lua_pushcfunction( L, LG_thread_join); + lua_pushvalue( L, UD); + lua_call( L, 1, LUA_MULTRET); // all return values are on the stack, at slots 4+ + switch( s->status) + { + default: + if( s->mstatus != KILLED) + { + // this is an internal error, we probably never get here + lua_settop( L, 0); + lua_pushliteral( L, "Unexpected status: "); + lua_pushstring( L, thread_status_string( s)); + lua_concat( L, 2); + lua_error( L); + break; + } + // fall through if we are killed, as we got nil, "killed" on the stack + + case DONE: // got regular return values + { + int i, nvalues = lua_gettop( L) - 3; + for( i = nvalues; i > 0; -- i) + { + // pop the last element of the stack, to store it in the uservalue at its proper index + lua_rawseti( L, USR, i); + } + } + break; + + case ERROR_ST: // got 3 values: nil, errstring, callstack table + // me[-2] could carry the stack table, 
but even + // me[-1] is rather unnecessary (and undocumented); + // use ':join()' instead. --AKa 22-Jan-2009 + ASSERT_L( lua_isnil( L, 4) && !lua_isnil( L, 5) && lua_istable( L, 6)); + // store errstring at key -1 + lua_pushnumber( L, -1); + lua_pushvalue( L, 5); + lua_rawset( L, USR); + break; + + case CANCELLED: + // do nothing + break; + } + } + lua_settop( L, 3); // UD KEY ENV + if( key != -1) + { + lua_pushnumber( L, -1); // UD KEY ENV -1 + lua_rawget( L, USR); // UD KEY ENV "error" + if( !lua_isnil( L, -1)) // an error was stored + { + // Note: Lua 5.1 interpreter is not prepared to show + // non-string errors, so we use 'tostring()' here + // to get meaningful output. --AKa 22-Jan-2009 + // + // Also, the stack dump we get is no good; it only + // lists our internal Lanes functions. There seems + // to be no way to switch it off, though. + // + // Level 3 should show the line where 'h[x]' was read + // but this only seems to work for string messages + // (Lua 5.1.4). No idea, why. --AKa 22-Jan-2009 + lua_getmetatable( L, UD); // UD KEY ENV "error" mt + lua_getfield( L, -1, "cached_error"); // UD KEY ENV "error" mt error() + lua_getfield( L, -2, "cached_tostring"); // UD KEY ENV "error" mt error() tostring() + lua_pushvalue( L, 4); // UD KEY ENV "error" mt error() tostring() "error" + lua_call( L, 1, 1); // tostring( errstring) -- just in case // UD KEY ENV "error" mt error() "error" + lua_pushinteger( L, 3); // UD KEY ENV "error" mt error() "error" 3 + lua_call( L, 2, 0); // error( tostring( errstring), 3) // UD KEY ENV "error" mt + } + else + { + lua_pop( L, 1); // back to our 3 arguments on the stack + } + } + lua_rawgeti( L, USR, (int)key); + } + return 1; + } + if( lua_type( L, KEY) == LUA_TSTRING) + { + char const * const keystr = lua_tostring( L, KEY); + lua_settop( L, 2); // keep only our original arguments on the stack + if( strcmp( keystr, "status") == 0) + { + return push_thread_status( L, s); // push the string representing the status + } + // return UD.metatable[key] + lua_getmetatable( L, UD); // UD KEY mt + lua_replace( L, -3); // mt KEY + lua_rawget( L, -2); // mt value + // only "cancel" and "join" are registered as functions, any other string will raise an error + if( lua_iscfunction( L, -1)) + { + return 1; + } + return luaL_error( L, "can't index a lane with '%s'", keystr); + } + // unknown key + lua_getmetatable( L, UD); + lua_getfield( L, -1, "cached_error"); + lua_pushliteral( L, "Unknown key: "); + lua_pushvalue( L, KEY); + lua_concat( L, 2); + lua_call( L, 1, 0); // error( "Unknown key: " .. key) -> doesn't return + return 0; } #if HAVE_LANE_TRACKING @@ -1771,36 +1771,36 @@ static const struct luaL_Reg lanes_functions [] = { static void init_once_LOCKED( void) { #if (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC) - now_secs(); // initialize 'now_secs()' internal offset + now_secs(); // initialize 'now_secs()' internal offset #endif #if (defined PLATFORM_OSX) && (defined _UTILBINDTHREADTOCPU) - chudInitialize(); + chudInitialize(); #endif - //--- - // Linux needs SCHED_RR to change thread priorities, and that is only - // allowed for sudo'ers. SCHED_OTHER (default) has no priorities. - // SCHED_OTHER threads are always lower priority than SCHED_RR. - // - // ^-- those apply to 2.6 kernel. IF **wishful thinking** these - // constraints will change in the future, non-sudo priorities can - // be enabled also for Linux. - // + //--- + // Linux needs SCHED_RR to change thread priorities, and that is only + // allowed for sudo'ers. 
SCHED_OTHER (default) has no priorities. + // SCHED_OTHER threads are always lower priority than SCHED_RR. + // + // ^-- those apply to 2.6 kernel. IF **wishful thinking** these + // constraints will change in the future, non-sudo priorities can + // be enabled also for Linux. + // #ifdef PLATFORM_LINUX - sudo = (geteuid() == 0); // we are root? + sudo = (geteuid() == 0); // we are root? - // If lower priorities (-2..-1) are wanted, we need to lift the main - // thread to SCHED_RR and 50 (medium) level. Otherwise, we're always below - // the launched threads (even -2). - // + // If lower priorities (-2..-1) are wanted, we need to lift the main + // thread to SCHED_RR and 50 (medium) level. Otherwise, we're always below + // the launched threads (even -2). + // #ifdef LINUX_SCHED_RR - if( sudo) - { - struct sched_param sp; - sp.sched_priority = _PRIO_0; - PT_CALL( pthread_setschedparam( pthread_self(), SCHED_RR, &sp)); - } + if( sudo) + { + struct sched_param sp; + sp.sched_priority = _PRIO_0; + PT_CALL( pthread_setschedparam( pthread_self(), SCHED_RR, &sp)); + } #endif // LINUX_SCHED_RR #endif // PLATFORM_LINUX } @@ -1812,210 +1812,210 @@ static volatile long s_initCount = 0; // param 1: settings table LUAG_FUNC( configure) { - Universe* U = universe_get( L); - bool_t const from_master_state = (U == NULL); - char const* name = luaL_checkstring( L, lua_upvalueindex( 1)); - _ASSERT_L( L, lua_type( L, 1) == LUA_TTABLE); - - /* - ** Making one-time initializations. - ** - ** When the host application is single-threaded (and all threading happens via Lanes) - ** there is no problem. But if the host is multithreaded, we need to lock around the - ** initializations. - */ + Universe* U = universe_get( L); + bool_t const from_master_state = (U == NULL); + char const* name = luaL_checkstring( L, lua_upvalueindex( 1)); + _ASSERT_L( L, lua_type( L, 1) == LUA_TTABLE); + + /* + ** Making one-time initializations. + ** + ** When the host application is single-threaded (and all threading happens via Lanes) + ** there is no problem. But if the host is multithreaded, we need to lock around the + ** initializations. 
+ */ #if THREADAPI == THREADAPI_WINDOWS - { - static volatile int /*bool*/ go_ahead; // = 0 - if( InterlockedCompareExchange( &s_initCount, 1, 0) == 0) - { - init_once_LOCKED(); - go_ahead = 1; // let others pass - } - else - { - while( !go_ahead) { Sleep(1); } // changes threads - } - } + { + static volatile int /*bool*/ go_ahead; // = 0 + if( InterlockedCompareExchange( &s_initCount, 1, 0) == 0) + { + init_once_LOCKED(); + go_ahead = 1; // let others pass + } + else + { + while( !go_ahead) { Sleep(1); } // changes threads + } + } #else // THREADAPI == THREADAPI_PTHREAD - if( s_initCount == 0) - { - static pthread_mutex_t my_lock = PTHREAD_MUTEX_INITIALIZER; - pthread_mutex_lock( &my_lock); - { - // Recheck now that we're within the lock - // - if( s_initCount == 0) - { - init_once_LOCKED(); - s_initCount = 1; - } - } - pthread_mutex_unlock( &my_lock); - } + if( s_initCount == 0) + { + static pthread_mutex_t my_lock = PTHREAD_MUTEX_INITIALIZER; + pthread_mutex_lock( &my_lock); + { + // Recheck now that we're within the lock + // + if( s_initCount == 0) + { + init_once_LOCKED(); + s_initCount = 1; + } + } + pthread_mutex_unlock( &my_lock); + } #endif // THREADAPI == THREADAPI_PTHREAD - STACK_GROW( L, 4); - STACK_CHECK_ABS( L, 1); // settings - - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: lanes.configure() BEGIN\n" INDENT_END, L)); - DEBUGSPEW_CODE( if( U) ++ U->debugspew_indent_depth); - - if( U == NULL) - { - U = universe_create( L); // settings universe - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - lua_newtable( L); // settings universe mt - lua_getfield( L, 1, "shutdown_timeout"); // settings universe mt shutdown_timeout - lua_pushcclosure( L, selfdestruct_gc, 1); // settings universe mt selfdestruct_gc - lua_setfield( L, -2, "__gc"); // settings universe mt - lua_setmetatable( L, -2); // settings universe - lua_pop( L, 1); // settings - lua_getfield( L, 1, "verbose_errors"); // settings verbose_errors - U->verboseErrors = lua_toboolean( L, -1); - lua_pop( L, 1); // settings - lua_getfield( L, 1, "demote_full_userdata"); // settings demote_full_userdata - U->demoteFullUserdata = lua_toboolean( L, -1); - lua_pop( L, 1); // settings + STACK_GROW( L, 4); + STACK_CHECK_ABS( L, 1); // settings + + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: lanes.configure() BEGIN\n" INDENT_END, L)); + DEBUGSPEW_CODE( if( U) ++ U->debugspew_indent_depth); + + if( U == NULL) + { + U = universe_create( L); // settings universe + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + lua_newtable( L); // settings universe mt + lua_getfield( L, 1, "shutdown_timeout"); // settings universe mt shutdown_timeout + lua_pushcclosure( L, selfdestruct_gc, 1); // settings universe mt selfdestruct_gc + lua_setfield( L, -2, "__gc"); // settings universe mt + lua_setmetatable( L, -2); // settings universe + lua_pop( L, 1); // settings + lua_getfield( L, 1, "verbose_errors"); // settings verbose_errors + U->verboseErrors = lua_toboolean( L, -1); + lua_pop( L, 1); // settings + lua_getfield( L, 1, "demote_full_userdata"); // settings demote_full_userdata + U->demoteFullUserdata = lua_toboolean( L, -1); + lua_pop( L, 1); // settings #if HAVE_LANE_TRACKING - MUTEX_INIT( &U->tracking_cs); - lua_getfield( L, 1, "track_lanes"); // settings track_lanes - U->tracking_first = lua_toboolean( L, -1) ? TRACKING_END : NULL; - lua_pop( L, 1); // settings + MUTEX_INIT( &U->tracking_cs); + lua_getfield( L, 1, "track_lanes"); // settings track_lanes + U->tracking_first = lua_toboolean( L, -1) ? 
TRACKING_END : NULL; + lua_pop( L, 1); // settings #endif // HAVE_LANE_TRACKING - // Linked chains handling - MUTEX_INIT( &U->selfdestruct_cs); - MUTEX_RECURSIVE_INIT( &U->require_cs); - // Locks for 'tools.c' inc/dec counters - MUTEX_INIT( &U->deep_lock); - MUTEX_INIT( &U->mtid_lock); - U->selfdestruct_first = SELFDESTRUCT_END; - initialize_allocator_function( U, L); - initialize_on_state_create( U, L); - init_keepers( U, L); - STACK_MID( L, 1); - - // Initialize 'timer_deep'; a common Linda object shared by all states - lua_pushcfunction( L, LG_linda); // settings lanes.linda - lua_pushliteral( L, "lanes-timer"); // settings lanes.linda "lanes-timer" - lua_call( L, 1, 1); // settings linda - STACK_MID( L, 2); - - // Proxy userdata contents is only a 'DEEP_PRELUDE*' pointer - U->timer_deep = *(DeepPrelude**) lua_touserdata( L, -1); - // increment refcount so that this linda remains alive as long as the universe exists. - ++ U->timer_deep->refcount; - lua_pop( L, 1); // settings - } - STACK_MID( L, 1); - - // Serialize calls to 'require' from now on, also in the primary state - serialize_require( DEBUGSPEW_PARAM_COMMA( U) L); - - // Retrieve main module interface table - lua_pushvalue( L, lua_upvalueindex( 2)); // settings M - // remove configure() (this function) from the module interface - lua_pushnil( L); // settings M nil - lua_setfield( L, -2, "configure"); // settings M - // add functions to the module's table - luaG_registerlibfuncs( L, lanes_functions); + // Linked chains handling + MUTEX_INIT( &U->selfdestruct_cs); + MUTEX_RECURSIVE_INIT( &U->require_cs); + // Locks for 'tools.c' inc/dec counters + MUTEX_INIT( &U->deep_lock); + MUTEX_INIT( &U->mtid_lock); + U->selfdestruct_first = SELFDESTRUCT_END; + initialize_allocator_function( U, L); + initialize_on_state_create( U, L); + init_keepers( U, L); + STACK_MID( L, 1); + + // Initialize 'timer_deep'; a common Linda object shared by all states + lua_pushcfunction( L, LG_linda); // settings lanes.linda + lua_pushliteral( L, "lanes-timer"); // settings lanes.linda "lanes-timer" + lua_call( L, 1, 1); // settings linda + STACK_MID( L, 2); + + // Proxy userdata contents is only a 'DEEP_PRELUDE*' pointer + U->timer_deep = *(DeepPrelude**) lua_touserdata( L, -1); + // increment refcount so that this linda remains alive as long as the universe exists. 
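These universe fields are fed by the settings table that lanes.configure() receives; a usage sketch under the field names read above (the return format of lanes.threads() is not spelled out in this file, so it is treated as opaque here):

local lanes = require("lanes").configure{
    track_lanes      = true,  -- registers core.threads(), surfaced as lanes.threads()
    shutdown_timeout = 1.0,   -- read by the universe __gc (selfdestruct_gc) above
    verbose_errors   = true,
}

local h = lanes.gen("", function() return 1 end)()
local tracked = lanes.threads() -- would error with "lane tracking is not available" without track_lanes
print(type(tracked))            -- opaque here; just confirms the call is registered
h:join()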
+ ++ U->timer_deep->refcount; + lua_pop( L, 1); // settings + } + STACK_MID( L, 1); + + // Serialize calls to 'require' from now on, also in the primary state + serialize_require( DEBUGSPEW_PARAM_COMMA( U) L); + + // Retrieve main module interface table + lua_pushvalue( L, lua_upvalueindex( 2)); // settings M + // remove configure() (this function) from the module interface + lua_pushnil( L); // settings M nil + lua_setfield( L, -2, "configure"); // settings M + // add functions to the module's table + luaG_registerlibfuncs( L, lanes_functions); #if HAVE_LANE_TRACKING - // register core.threads() only if settings say it should be available - if( U->tracking_first != NULL) - { - lua_pushcfunction( L, LG_threads); // settings M LG_threads() - lua_setfield( L, -2, "threads"); // settings M - } + // register core.threads() only if settings say it should be available + if( U->tracking_first != NULL) + { + lua_pushcfunction( L, LG_threads); // settings M LG_threads() + lua_setfield( L, -2, "threads"); // settings M + } #endif // HAVE_LANE_TRACKING - STACK_MID( L, 2); - - { - char const* errmsg; - errmsg = push_deep_proxy( U, L, (DeepPrelude*) U->timer_deep, 0, eLM_LaneBody); // settings M timer_deep - if( errmsg != NULL) - { - return luaL_error( L, errmsg); - } - lua_setfield( L, -2, "timer_gateway"); // settings M - } - STACK_MID( L, 2); - - // prepare the metatable for threads - // contains keys: { __gc, __index, cached_error, cached_tostring, cancel, join, get_debug_threadname } - // - if( luaL_newmetatable( L, "Lane")) // settings M mt - { - lua_pushcfunction( L, LG_thread_gc); // settings M mt LG_thread_gc - lua_setfield( L, -2, "__gc"); // settings M mt - lua_pushcfunction( L, LG_thread_index); // settings M mt LG_thread_index - lua_setfield( L, -2, "__index"); // settings M mt - lua_getglobal( L, "error"); // settings M mt error - ASSERT_L( lua_isfunction( L, -1)); - lua_setfield( L, -2, "cached_error"); // settings M mt - lua_getglobal( L, "tostring"); // settings M mt tostring - ASSERT_L( lua_isfunction( L, -1)); - lua_setfield( L, -2, "cached_tostring"); // settings M mt - lua_pushcfunction( L, LG_thread_join); // settings M mt LG_thread_join - lua_setfield( L, -2, "join"); // settings M mt - lua_pushcfunction( L, LG_get_debug_threadname); // settings M mt LG_get_debug_threadname - lua_setfield( L, -2, "get_debug_threadname"); // settings M mt - lua_pushcfunction( L, LG_thread_cancel); // settings M mt LG_thread_cancel - lua_setfield( L, -2, "cancel"); // settings M mt - lua_pushliteral( L, "Lane"); // settings M mt "Lane" - lua_setfield( L, -2, "__metatable"); // settings M mt - } - - lua_pushcclosure( L, LG_lane_new, 1); // settings M lane_new - lua_setfield( L, -2, "lane_new"); // settings M - - // we can't register 'lanes.require' normally because we want to create an upvalued closure - lua_getglobal( L, "require"); // settings M require - lua_pushcclosure( L, LG_require, 1); // settings M lanes.require - lua_setfield( L, -2, "require"); // settings M - - lua_pushfstring( - L, "%d.%d.%d" - , LANES_VERSION_MAJOR, LANES_VERSION_MINOR, LANES_VERSION_PATCH - ); // settings M VERSION - lua_setfield( L, -2, "version"); // settings M - - lua_pushinteger(L, THREAD_PRIO_MAX); // settings M THREAD_PRIO_MAX - lua_setfield( L, -2, "max_prio"); // settings M - - push_unique_key( L, CANCEL_ERROR); // settings M CANCEL_ERROR - lua_setfield( L, -2, "cancel_error"); // settings M - - STACK_MID( L, 2); // reference stack contains only the function argument 'settings' - // we'll need this every time 
we transfer some C function from/to this state - REGISTRY_SET( L, LOOKUP_REGKEY, lua_newtable( L)); - STACK_MID( L, 2); - - // register all native functions found in that module in the transferable functions database - // we process it before _G because we don't want to find the module when scanning _G (this would generate longer names) - // for example in package.loaded["lanes.core"].* - populate_func_lookup_table( L, -1, name); - STACK_MID( L, 2); - - // record all existing C/JIT-fast functions - // Lua 5.2 no longer has LUA_GLOBALSINDEX: we must push globals table on the stack - if( from_master_state) - { - // don't do this when called during the initialization of a new lane, - // because we will do it after on_state_create() is called, - // and we don't want to skip _G because of caching in case globals are created then - lua_pushglobaltable( L); // settings M _G - populate_func_lookup_table( L, -1, NULL); - lua_pop( L, 1); // settings M - } - lua_pop( L, 1); // settings - - // set _R[CONFIG_REGKEY] = settings - REGISTRY_SET( L, CONFIG_REGKEY, lua_pushvalue( L, -2)); // -2 because CONFIG_REGKEY is pushed before the value itself - STACK_END( L, 1); - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: lanes.configure() END\n" INDENT_END, L)); - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - // Return the settings table - return 1; + STACK_MID( L, 2); + + { + char const* errmsg; + errmsg = push_deep_proxy( U, L, (DeepPrelude*) U->timer_deep, 0, eLM_LaneBody); // settings M timer_deep + if( errmsg != NULL) + { + return luaL_error( L, errmsg); + } + lua_setfield( L, -2, "timer_gateway"); // settings M + } + STACK_MID( L, 2); + + // prepare the metatable for threads + // contains keys: { __gc, __index, cached_error, cached_tostring, cancel, join, get_debug_threadname } + // + if( luaL_newmetatable( L, "Lane")) // settings M mt + { + lua_pushcfunction( L, LG_thread_gc); // settings M mt LG_thread_gc + lua_setfield( L, -2, "__gc"); // settings M mt + lua_pushcfunction( L, LG_thread_index); // settings M mt LG_thread_index + lua_setfield( L, -2, "__index"); // settings M mt + lua_getglobal( L, "error"); // settings M mt error + ASSERT_L( lua_isfunction( L, -1)); + lua_setfield( L, -2, "cached_error"); // settings M mt + lua_getglobal( L, "tostring"); // settings M mt tostring + ASSERT_L( lua_isfunction( L, -1)); + lua_setfield( L, -2, "cached_tostring"); // settings M mt + lua_pushcfunction( L, LG_thread_join); // settings M mt LG_thread_join + lua_setfield( L, -2, "join"); // settings M mt + lua_pushcfunction( L, LG_get_debug_threadname); // settings M mt LG_get_debug_threadname + lua_setfield( L, -2, "get_debug_threadname"); // settings M mt + lua_pushcfunction( L, LG_thread_cancel); // settings M mt LG_thread_cancel + lua_setfield( L, -2, "cancel"); // settings M mt + lua_pushliteral( L, "Lane"); // settings M mt "Lane" + lua_setfield( L, -2, "__metatable"); // settings M mt + } + + lua_pushcclosure( L, LG_lane_new, 1); // settings M lane_new + lua_setfield( L, -2, "lane_new"); // settings M + + // we can't register 'lanes.require' normally because we want to create an upvalued closure + lua_getglobal( L, "require"); // settings M require + lua_pushcclosure( L, LG_require, 1); // settings M lanes.require + lua_setfield( L, -2, "require"); // settings M + + lua_pushfstring( + L, "%d.%d.%d" + , LANES_VERSION_MAJOR, LANES_VERSION_MINOR, LANES_VERSION_PATCH + ); // settings M VERSION + lua_setfield( L, -2, "version"); // settings M + + lua_pushinteger(L, THREAD_PRIO_MAX); // settings M 
THREAD_PRIO_MAX + lua_setfield( L, -2, "max_prio"); // settings M + + push_unique_key( L, CANCEL_ERROR); // settings M CANCEL_ERROR + lua_setfield( L, -2, "cancel_error"); // settings M + + STACK_MID( L, 2); // reference stack contains only the function argument 'settings' + // we'll need this every time we transfer some C function from/to this state + REGISTRY_SET( L, LOOKUP_REGKEY, lua_newtable( L)); + STACK_MID( L, 2); + + // register all native functions found in that module in the transferable functions database + // we process it before _G because we don't want to find the module when scanning _G (this would generate longer names) + // for example in package.loaded["lanes.core"].* + populate_func_lookup_table( L, -1, name); + STACK_MID( L, 2); + + // record all existing C/JIT-fast functions + // Lua 5.2 no longer has LUA_GLOBALSINDEX: we must push globals table on the stack + if( from_master_state) + { + // don't do this when called during the initialization of a new lane, + // because we will do it after on_state_create() is called, + // and we don't want to skip _G because of caching in case globals are created then + lua_pushglobaltable( L); // settings M _G + populate_func_lookup_table( L, -1, NULL); + lua_pop( L, 1); // settings M + } + lua_pop( L, 1); // settings + + // set _R[CONFIG_REGKEY] = settings + REGISTRY_SET( L, CONFIG_REGKEY, lua_pushvalue( L, -2)); // -2 because CONFIG_REGKEY is pushed before the value itself + STACK_END( L, 1); + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: lanes.configure() END\n" INDENT_END, L)); + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + // Return the settings table + return 1; } #if defined PLATFORM_WIN32 && !defined NDEBUG @@ -2024,11 +2024,11 @@ LUAG_FUNC( configure) void signal_handler( int signal) { - if( signal == SIGABRT) - { - _cprintf( "caught abnormal termination!"); - abort(); - } + if( signal == SIGABRT) + { + _cprintf( "caught abnormal termination!"); + abort(); + } } // helper to have correct callstacks when crashing a Win32 running on 64 bits Windows @@ -2037,88 +2037,88 @@ static volatile long s_ecoc_initCount = 0; static volatile int s_ecoc_go_ahead = 0; static void EnableCrashingOnCrashes( void) { - if( InterlockedCompareExchange( &s_ecoc_initCount, 1, 0) == 0) - { - typedef BOOL (WINAPI* tGetPolicy)( LPDWORD lpFlags); - typedef BOOL (WINAPI* tSetPolicy)( DWORD dwFlags); - const DWORD EXCEPTION_SWALLOWING = 0x1; - - HMODULE kernel32 = LoadLibraryA("kernel32.dll"); - tGetPolicy pGetPolicy = (tGetPolicy)GetProcAddress(kernel32, "GetProcessUserModeExceptionPolicy"); - tSetPolicy pSetPolicy = (tSetPolicy)GetProcAddress(kernel32, "SetProcessUserModeExceptionPolicy"); - if( pGetPolicy && pSetPolicy) - { - DWORD dwFlags; - if( pGetPolicy( &dwFlags)) - { - // Turn off the filter - pSetPolicy( dwFlags & ~EXCEPTION_SWALLOWING); - } - } - //typedef void (* SignalHandlerPointer)( int); - /*SignalHandlerPointer previousHandler =*/ signal( SIGABRT, signal_handler); - - s_ecoc_go_ahead = 1; // let others pass - } - else - { - while( !s_ecoc_go_ahead) { Sleep(1); } // changes threads - } + if( InterlockedCompareExchange( &s_ecoc_initCount, 1, 0) == 0) + { + typedef BOOL (WINAPI* tGetPolicy)( LPDWORD lpFlags); + typedef BOOL (WINAPI* tSetPolicy)( DWORD dwFlags); + const DWORD EXCEPTION_SWALLOWING = 0x1; + + HMODULE kernel32 = LoadLibraryA("kernel32.dll"); + tGetPolicy pGetPolicy = (tGetPolicy)GetProcAddress(kernel32, "GetProcessUserModeExceptionPolicy"); + tSetPolicy pSetPolicy = (tSetPolicy)GetProcAddress(kernel32, 
"SetProcessUserModeExceptionPolicy"); + if( pGetPolicy && pSetPolicy) + { + DWORD dwFlags; + if( pGetPolicy( &dwFlags)) + { + // Turn off the filter + pSetPolicy( dwFlags & ~EXCEPTION_SWALLOWING); + } + } + //typedef void (* SignalHandlerPointer)( int); + /*SignalHandlerPointer previousHandler =*/ signal( SIGABRT, signal_handler); + + s_ecoc_go_ahead = 1; // let others pass + } + else + { + while( !s_ecoc_go_ahead) { Sleep(1); } // changes threads + } } #endif // PLATFORM_WIN32 int LANES_API luaopen_lanes_core( lua_State* L) { #if defined PLATFORM_WIN32 && !defined NDEBUG - EnableCrashingOnCrashes(); + EnableCrashingOnCrashes(); #endif // defined PLATFORM_WIN32 && !defined NDEBUG - STACK_GROW( L, 4); - STACK_CHECK( L, 0); - - // Create main module interface table - // we only have 1 closure, which must be called to configure Lanes - lua_newtable( L); // M - lua_pushvalue( L, 1); // M "lanes.core" - lua_pushvalue( L, -2); // M "lanes.core" M - lua_pushcclosure( L, LG_configure, 2); // M LG_configure() - REGISTRY_GET( L, CONFIG_REGKEY); // M LG_configure() settings - if( !lua_isnil( L, -1)) // this is not the first require "lanes.core": call configure() immediately - { - lua_pushvalue( L, -1); // M LG_configure() settings settings - lua_setfield( L, -4, "settings"); // M LG_configure() settings - lua_call( L, 1, 0); // M - } - else - { - // will do nothing on first invocation, as we haven't stored settings in the registry yet - lua_setfield( L, -3, "settings"); // M LG_configure() - lua_setfield( L, -2, "configure"); // M - } - - STACK_END( L, 1); - return 1; + STACK_GROW( L, 4); + STACK_CHECK( L, 0); + + // Create main module interface table + // we only have 1 closure, which must be called to configure Lanes + lua_newtable( L); // M + lua_pushvalue( L, 1); // M "lanes.core" + lua_pushvalue( L, -2); // M "lanes.core" M + lua_pushcclosure( L, LG_configure, 2); // M LG_configure() + REGISTRY_GET( L, CONFIG_REGKEY); // M LG_configure() settings + if( !lua_isnil( L, -1)) // this is not the first require "lanes.core": call configure() immediately + { + lua_pushvalue( L, -1); // M LG_configure() settings settings + lua_setfield( L, -4, "settings"); // M LG_configure() settings + lua_call( L, 1, 0); // M + } + else + { + // will do nothing on first invocation, as we haven't stored settings in the registry yet + lua_setfield( L, -3, "settings"); // M LG_configure() + lua_setfield( L, -2, "configure"); // M + } + + STACK_END( L, 1); + return 1; } static int default_luaopen_lanes( lua_State* L) { - int rc = luaL_loadfile( L, "lanes.lua") || lua_pcall( L, 0, 1, 0); - if( rc != LUA_OK) - { - return luaL_error( L, "failed to initialize embedded Lanes"); - } - return 1; + int rc = luaL_loadfile( L, "lanes.lua") || lua_pcall( L, 0, 1, 0); + if( rc != LUA_OK) + { + return luaL_error( L, "failed to initialize embedded Lanes"); + } + return 1; } // call this instead of luaopen_lanes_core() when embedding Lua and Lanes in a custom application void LANES_API luaopen_lanes_embedded( lua_State* L, lua_CFunction _luaopen_lanes) { - STACK_CHECK( L, 0); - // pre-require lanes.core so that when lanes.lua calls require "lanes.core" it finds it is already loaded - luaL_requiref( L, "lanes.core", luaopen_lanes_core, 0); // ... lanes.core - lua_pop( L, 1); // ... - STACK_MID( L, 0); - // call user-provided function that runs the chunk "lanes.lua" from wherever they stored it - luaL_requiref( L, "lanes", _luaopen_lanes ? _luaopen_lanes : default_luaopen_lanes, 0); // ... 
lanes - STACK_END( L, 1); + STACK_CHECK( L, 0); + // pre-require lanes.core so that when lanes.lua calls require "lanes.core" it finds it is already loaded + luaL_requiref( L, "lanes.core", luaopen_lanes_core, 0); // ... lanes.core + lua_pop( L, 1); // ... + STACK_MID( L, 0); + // call user-provided function that runs the chunk "lanes.lua" from wherever they stored it + luaL_requiref( L, "lanes", _luaopen_lanes ? _luaopen_lanes : default_luaopen_lanes, 0); // ... lanes + STACK_END( L, 1); } diff --git a/src/lanes.lua b/src/lanes.lua index 4d6deac..2f06137 100644 --- a/src/lanes.lua +++ b/src/lanes.lua @@ -46,704 +46,704 @@ local lanes = setmetatable( {}, lanesMeta) -- this function is available in the public interface until it is called, after which it disappears lanes.configure = function( settings_) - -- This check is for sublanes requiring Lanes - -- - -- TBD: We could also have the C level expose 'string.gmatch' for us. But this is simpler. - -- - if not string then - error( "To use 'lanes', you will also need to have 'string' available.", 2) - end - -- Configure called so remove metatable from lanes - setmetatable( lanes, nil) - -- - -- Cache globals for code that might run under sandboxing - -- - local assert = assert( assert) - local string_gmatch = assert( string.gmatch) - local string_format = assert( string.format) - local select = assert( select) - local type = assert( type) - local pairs = assert( pairs) - local tostring = assert( tostring) - local error = assert( error) - - local default_params = - { - nb_keepers = 1, - on_state_create = nil, - shutdown_timeout = 0.25, - with_timers = true, - track_lanes = false, - demote_full_userdata = nil, - verbose_errors = false, - allocator = nil - } - local boolean_param_checker = function( val_) - -- non-'boolean-false' should be 'boolean-true' or nil - return val_ and (val_ == true) or true - end - local param_checkers = - { - nb_keepers = function( val_) - -- nb_keepers should be a number > 0 - return type( val_) == "number" and val_ > 0 - end, - with_timers = boolean_param_checker, - allocator = function( val_) - -- can be nil, "protected", or a function - return val_ and (type( val_) == "function" or val_ == "protected") or true - end, - on_state_create = function( val_) - -- on_state_create may be nil or a function - return val_ and type( val_) == "function" or true - end, - shutdown_timeout = function( val_) - -- shutdown_timeout should be a number >= 0 - return type( val_) == "number" and val_ >= 0 - end, - track_lanes = boolean_param_checker, - demote_full_userdata = boolean_param_checker, - verbose_errors = boolean_param_checker - } - - local params_checker = function( settings_) - if not settings_ then - return default_params - end - -- make a copy of the table to leave the provided one unchanged, *and* to help ensure it won't change behind our back - local settings = {} - if type( settings_) ~= "table" then - error "Bad parameter #1 to lanes.configure(), should be a table" - end - -- any setting unknown to Lanes raises an error - for setting, _ in pairs( settings_) do - if not param_checkers[setting] then - error( "Unknown parameter '" .. setting .. "' in configure options") - end - end - -- any setting not present in the provided parameters takes the default value - for key, checker in pairs( param_checkers) do - local my_param = settings_[key] - local param - if my_param ~= nil then - param = my_param - else - param = default_params[key] - end - if not checker( param) then - error( "Bad " .. key .. ": " .. 
tostring( param), 2) - end - settings[key] = param - end - return settings - end - local settings = core.configure and core.configure( params_checker( settings_)) or core.settings - local core_lane_new = assert( core.lane_new) - local max_prio = assert( core.max_prio) - - lanes.ABOUT = - { - author= "Asko Kauppi , Benoit Germain ", - description= "Running multiple Lua states in parallel", - license= "MIT/X11", - copyright= "Copyright (c) 2007-10, Asko Kauppi; (c) 2011-19, Benoit Germain", - version = assert( core.version) - } - - - -- Making copies of necessary system libs will pass them on as upvalues; - -- only the first state doing "require 'lanes'" will need to have 'string' - -- and 'table' visible. - -- - local function WR(str) - io.stderr:write( str.."\n" ) - end - - local function DUMP( tbl ) - if not tbl then return end - local str="" - for k,v in pairs(tbl) do - str= str..k.."="..tostring(v).."\n" - end - WR(str) - end - - - ---=== Laning ===--- - - -- lane_h[1..n]: lane results, same as via 'lane_h:join()' - -- lane_h[0]: can be read to make sure a thread has finished (always gives 'true') - -- lane_h[-1]: error message, without propagating the error - -- - -- Reading a Lane result (or [0]) propagates a possible error in the lane - -- (and execution does not return). Cancelled lanes give 'nil' values. - -- - -- lane_h.state: "pending"/"running"/"waiting"/"done"/"error"/"cancelled" - -- - -- Note: Would be great to be able to have '__ipairs' metamethod, that gets - -- called by 'ipairs()' function to custom iterate objects. We'd use it - -- for making sure a lane has ended (results are available); not requiring - -- the user to precede a loop by explicit 'h[0]' or 'h:join()'. - -- - -- Or, even better, 'ipairs()' should start valuing '__index' instead - -- of using raw reads that bypass it. - -- - ----- - -- lanes.gen( [libs_str|opt_tbl [, ...],] lane_func ) ( [...] ) -> h - -- - -- 'libs': nil: no libraries available (default) - -- "": only base library ('assert', 'print', 'unpack' etc.) - -- "math,os": math + os + base libraries (named ones + base) - -- "*": all standard libraries available - -- - -- 'opt': .priority: int (-3..+3) smaller is lower priority (0 = default) - -- - -- .globals: table of globals to set for a new thread (passed by value) - -- - -- .required: table of packages to require - -- - -- .gc_cb: function called when the lane handle is collected - -- - -- ... (more options may be introduced later) ... - -- - -- Calling with a function parameter ('lane_func') ends the string/table - -- modifiers, and prepares a lane generator. - - local valid_libs = - { - ["package"] = true, - ["table"] = true, - ["io"] = true, - ["os"] = true, - ["string"] = true, - ["math"] = true, - ["debug"] = true, - ["bit32"] = true, -- Lua 5.2 only, ignored silently under 5.1 - ["utf8"] = true, -- Lua 5.3 only, ignored silently under 5.1 and 5.2 - ["bit"] = true, -- LuaJIT only, ignored silently under PUC-Lua - ["jit"] = true, -- LuaJIT only, ignored silently under PUC-Lua - ["ffi"] = true, -- LuaJIT only, ignored silently under PUC-Lua - -- - ["base"] = true, - ["coroutine"] = true, -- part of "base" in Lua 5.1 - ["lanes.core"] = true - } - - local raise_option_error = function( name_, tv_, v_) - error( "Bad '" .. name_ .. "' option: " .. tv_ .. " " .. 
string_format( "%q", tostring( v_)), 4) - end - - local opt_validators = - { - priority = function( v_) - local tv = type( v_) - return (tv == "number") and v_ or raise_option_error( "priority", tv, v_) - end, - globals = function( v_) - local tv = type( v_) - return (tv == "table") and v_ or raise_option_error( "globals", tv, v_) - end, - package = function( v_) - local tv = type( v_) - return (tv == "table") and v_ or raise_option_error( "package", tv, v_) - end, - required = function( v_) - local tv = type( v_) - return (tv == "table") and v_ or raise_option_error( "required", tv, v_) - end, - gc_cb = function( v_) - local tv = type( v_) - return (tv == "function") and v_ or raise_option_error( "gc_cb", tv, v_) - end - } - - -- PUBLIC LANES API - -- receives a sequence of strings and tables, plus a function - local gen = function( ...) - -- aggregrate all strings together, separated by "," as well as tables - -- the strings are a list of libraries to open - -- the tables contain the lane options - local opt = {} - local libs = nil - - local n = select( '#', ...) - - -- we need at least a function - if n == 0 then - error( "No parameters!", 2) - end - - -- all arguments but the last must be nil, strings, or tables - for i = 1, n - 1 do - local v = select( i, ...) - local tv = type( v) - if tv == "string" then - libs = libs and libs .. "," .. v or v - elseif tv == "table" then - for k, vv in pairs( v) do - opt[k]= vv - end - elseif v == nil then - -- skip - else - error( "Bad parameter " .. i .. ": " .. tv .. " " .. string_format( "%q", tostring( v)), 2) - end - end - - -- the last argument should be a function or a string - local func = select( n, ...) - local functype = type( func) - if functype ~= "function" and functype ~= "string" then - error( "Last parameter not function or string: " .. functype .. " " .. string_format( "%q", tostring( func)), 2) - end - - -- check that the caller only provides reserved library names, and those only once - -- "*" is a special case that doesn't require individual checking - if libs and libs ~= "*" then - local found = {} - for s in string_gmatch(libs, "[%a%d.]+") do - if not valid_libs[s] then - error( "Bad library name: " .. s, 2) - else - found[s] = (found[s] or 0) + 1 - if found[s] > 1 then - error( "libs specification contains '" .. s .. "' more than once", 2) - end - end - end - end - - -- validate that each option is known and properly valued - for k, v in pairs( opt) do - local validator = opt_validators[k] - if not validator then - error( (type( k) == "number" and "Unkeyed option: " .. type( v) .. " " .. string_format( "%q", tostring( v)) or "Bad '" .. tostring( k) .. "' option"), 2) - else - opt[k] = validator( v) - end - end - - local priority, globals, package, required, gc_cb = opt.priority, opt.globals, opt.package or package, opt.required, opt.gc_cb - return function( ...) - -- must pass functions args last else they will be truncated to the first one - return core_lane_new( func, libs, priority, globals, package, required, gc_cb, ...) 
- end - end -- gen() - - ---=== Timers ===--- - - -- PUBLIC LANES API - local timer = function() error "timers are not active" end - local timers = timer - local timer_lane = nil - - -- timer_gateway should always exist, even when the settings disable the timers - local timer_gateway = assert( core.timer_gateway) - - ----- - -- = sleep( [seconds_]) - -- - -- PUBLIC LANES API - local sleep = function( seconds_) - seconds_ = seconds_ or 0.0 -- this causes false and nil to be a valid input, equivalent to 0.0, but that's ok - if type( seconds_) ~= "number" then - error( "invalid duration " .. string_format( "%q", tostring(seconds_))) - end - -- receive data on a channel no-one ever sends anything, thus blocking for the specified duration - return timer_gateway:receive( seconds_, "ac100de1-a696-4619-b2f0-a26de9d58ab8") - end - - - if settings.with_timers ~= false then - - -- - -- On first 'require "lanes"', a timer lane is spawned that will maintain - -- timer tables and sleep in between the timer events. All interaction with - -- the timer lane happens via a 'timer_gateway' Linda, which is common to - -- all that 'require "lanes"'. - -- - -- Linda protocol to timer lane: - -- - -- TGW_KEY: linda_h, key, [wakeup_at_secs], [repeat_secs] - -- - local TGW_KEY= "(timer control)" -- the key does not matter, a 'weird' key may help debugging - local TGW_QUERY, TGW_REPLY = "(timer query)", "(timer reply)" - local first_time_key= "first time" - - local first_time = timer_gateway:get( first_time_key) == nil - timer_gateway:set( first_time_key, true) - - -- - -- Timer lane; initialize only on the first 'require "lanes"' instance (which naturally - -- has 'table' always declared) - -- - if first_time then - - local now_secs = core.now_secs - assert( type( now_secs) == "function") - ----- - -- Snore loop (run as a lane on the background) - -- - -- High priority, to get trustworthy timings. - -- - -- We let the timer lane be a "free running" thread; no handle to it - -- remains. - -- - local timer_body = function() - set_debug_threadname( "LanesTimer") - -- - -- { [deep_linda_lightuserdata]= { [deep_linda_lightuserdata]=linda_h, - -- [key]= { wakeup_secs [,period_secs] } [, ...] }, - -- } - -- - -- Collection of all running timers, indexed with linda's & key. - -- - -- Note that we need to use the deep lightuserdata identifiers, instead - -- of 'linda_h' themselves as table indices. Otherwise, we'd get multiple - -- entries for the same timer. - -- - -- The 'hidden' reference to Linda proxy is used in 'check_timers()' but - -- also important to keep the Linda alive, even if all outside world threw - -- away pointers to it (which would ruin uniqueness of the deep pointer). - -- Now we're safe. 
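The sleep helper defined above is the simplest client of the timer gateway: it receives on a key nobody ever sends to. A usage sketch:

local lanes = require("lanes").configure()

lanes.sleep(0.1)   -- blocks the calling lane for roughly 100 ms
lanes.sleep()      -- nil/false are accepted and behave like a 0-second wait
-- lanes.sleep("soon") would raise "invalid duration", per the type check above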
- -- - local collection = {} - local table_insert = assert( table.insert) - - local get_timers = function() - local r = {} - for deep, t in pairs( collection) do - -- WR( tostring( deep)) - local l = t[deep] - for key, timer_data in pairs( t) do - if key ~= deep then - table_insert( r, {l, key, timer_data}) - end - end - end - return r - end -- get_timers() - - -- - -- set_timer( linda_h, key [,wakeup_at_secs [,period_secs]] ) - -- - local set_timer = function( linda, key, wakeup_at, period) - assert( wakeup_at == nil or wakeup_at > 0.0) - assert( period == nil or period > 0.0) - - local linda_deep = linda:deep() - assert( linda_deep) - - -- Find or make a lookup for this timer - -- - local t1 = collection[linda_deep] - if not t1 then - t1 = { [linda_deep] = linda} -- proxy to use the Linda - collection[linda_deep] = t1 - end - - if wakeup_at == nil then - -- Clear the timer - -- - t1[key]= nil - - -- Remove empty tables from collection; speeds timer checks and - -- lets our 'safety reference' proxy be gc:ed as well. - -- - local empty = true - for k, _ in pairs( t1) do - if k ~= linda_deep then - empty = false - break - end - end - if empty then - collection[linda_deep] = nil - end - - -- Note: any unread timer value is left at 'linda[key]' intensionally; - -- clearing a timer just stops it. - else - -- New timer or changing the timings - -- - local t2 = t1[key] - if not t2 then - t2= {} - t1[key]= t2 - end - - t2[1] = wakeup_at - t2[2] = period -- can be 'nil' - end - end -- set_timer() - - ----- - -- [next_wakeup_at]= check_timers() - -- Check timers, and wake up the ones expired (if any) - -- Returns the closest upcoming (remaining) wakeup time (or 'nil' if none). - local check_timers = function() - local now = now_secs() - local next_wakeup - - for linda_deep,t1 in pairs(collection) do - for key,t2 in pairs(t1) do - -- - if key==linda_deep then - -- no 'continue' in Lua :/ - else - -- 't2': { wakeup_at_secs [,period_secs] } - -- - local wakeup_at= t2[1] - local period= t2[2] -- may be 'nil' - - if wakeup_at <= now then - local linda= t1[linda_deep] - assert(linda) - - linda:set( key, now ) - - -- 'pairs()' allows the values to be modified (and even - -- removed) as far as keys are not touched - - if not period then - -- one-time timer; gone - -- - t1[key]= nil - wakeup_at= nil -- no 'continue' in Lua :/ - else - -- repeating timer; find next wakeup (may jump multiple repeats) - -- - repeat - wakeup_at= wakeup_at+period - until wakeup_at > now - - t2[1]= wakeup_at - end - end - - if wakeup_at and ((not next_wakeup) or (wakeup_at < next_wakeup)) then - next_wakeup= wakeup_at - end - end - end -- t2 loop - end -- t1 loop - - return next_wakeup -- may be 'nil' - end -- check_timers() - - local timer_gateway_batched = timer_gateway.batched - set_finalizer( function( err, stk) - if err and type( err) ~= "userdata" then - WR( "LanesTimer error: "..tostring(err)) - --elseif type( err) == "userdata" then - -- WR( "LanesTimer after cancel" ) - --else - -- WR("LanesTimer finalized") - end - end) - while true do - local next_wakeup = check_timers() - - -- Sleep until next timer to wake up, or a set/clear command - -- - local secs - if next_wakeup then - secs = next_wakeup - now_secs() - if secs < 0 then secs = 0 end - end - local key, what = timer_gateway:receive( secs, TGW_KEY, TGW_QUERY) - - if key == TGW_KEY then - assert( getmetatable( what) == "Linda") -- 'what' should be a linda on which the client sets a timer - local _, key, wakeup_at, period = timer_gateway:receive( 0, 
timer_gateway_batched, TGW_KEY, 3) - assert( key) - set_timer( what, key, wakeup_at, period and period > 0 and period or nil) - elseif key == TGW_QUERY then - if what == "get_timers" then - timer_gateway:send( TGW_REPLY, get_timers()) - else - timer_gateway:send( TGW_REPLY, "unknown query " .. what) - end - --elseif secs == nil then -- got no value while block-waiting? - -- WR( "timer lane: no linda, aborted?") - end - end - end -- timer_body() - timer_lane = gen( "*", { package= {}, priority = max_prio}, timer_body)() -- "*" instead of "io,package" for LuaJIT compatibility... - end -- first_time - - ----- - -- = timer( linda_h, key_val, date_tbl|first_secs [,period_secs] ) - -- - -- PUBLIC LANES API - timer = function( linda, key, a, period ) - if getmetatable( linda) ~= "Linda" then - error "expecting a Linda" - end - if a == 0.0 then - -- Caller expects to get current time stamp in Linda, on return - -- (like the timer had expired instantly); it would be good to set this - -- as late as possible (to give most current time) but also we want it - -- to precede any possible timers that might start striking. - -- - linda:set( key, core.now_secs()) - - if not period or period==0.0 then - timer_gateway:send( TGW_KEY, linda, key, nil, nil ) -- clear the timer - return -- nothing more to do - end - a= period - end - - local wakeup_at= type(a)=="table" and core.wakeup_conv(a) -- given point of time - or (a and core.now_secs()+a or nil) - -- queue to timer - -- - timer_gateway:send( TGW_KEY, linda, key, wakeup_at, period ) - end - - ----- - -- {[{linda, slot, when, period}[,...]]} = timers() - -- - -- PUBLIC LANES API - timers = function() - timer_gateway:send( TGW_QUERY, "get_timers") - local _, r = timer_gateway:receive( TGW_REPLY) - return r - end - - end -- settings.with_timers - - -- avoid pulling the whole core module as upvalue when cancel_error is enough - local cancel_error = assert( core.cancel_error) - - ---=== Lock & atomic generators ===--- - - -- These functions are just surface sugar, but make solutions easier to read. - -- Not many applications should even need explicit locks or atomic counters. - - -- - -- [true [, ...]= trues(uint) - -- - local function trues( n) - if n > 0 then - return true, trues( n - 1) - end - end - - -- - -- lock_f = lanes.genlock( linda_h, key [,N_uint=1] ) - -- - -- = lock_f( +M ) -- acquire M - -- ...locked... - -- = lock_f( -M ) -- release M - -- - -- Returns an access function that allows 'N' simultaneous entries between - -- acquire (+M) and release (-M). For binary locks, use M==1. 
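A usage sketch for the lock generator documented just above; the linda and key name are arbitrary, and the "try" mode maps to the 0 timeout visible in the implementation:

local lanes = require("lanes").configure()
local linda = lanes.linda()

local lock = lanes.genlock(linda, "io.lock", 1)  -- N == 1: a plain binary lock

lock(1)              -- acquire; blocks until the slot is free
-- ...critical section...
lock(-1)             -- release

local ok = lock(1, "try")  -- non-blocking attempt; falsy if the lock is already held
if ok then lock(-1) end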
- -- - -- PUBLIC LANES API - local genlock = function( linda, key, N) - -- clear existing data and set the limit - N = N or 1 - if linda:set( key) == cancel_error or linda:limit( key, N) == cancel_error then - return cancel_error - end - - -- use an optimized version for case N == 1 - return (N == 1) and - function( M, mode_) - local timeout = (mode_ == "try") and 0 or nil - if M > 0 then - -- 'nil' timeout allows 'key' to be numeric - return linda:send( timeout, key, true) -- suspends until been able to push them - else - local k = linda:receive( nil, key) - -- propagate cancel_error if we got it, else return true or false - return k and ((k ~= cancel_error) and true or k) or false - end - end - or - function( M, mode_) - local timeout = (mode_ == "try") and 0 or nil - if M > 0 then - -- 'nil' timeout allows 'key' to be numeric - return linda:send( timeout, key, trues(M)) -- suspends until been able to push them - else - local k = linda:receive( nil, linda.batched, key, -M) - -- propagate cancel_error if we got it, else return true or false - return k and ((k ~= cancel_error) and true or k) or false - end - end - end - - - -- - -- atomic_f = lanes.genatomic( linda_h, key [,initial_num=0.0]) - -- - -- int|cancel_error = atomic_f( [diff_num = 1.0]) - -- - -- Returns an access function that allows atomic increment/decrement of the - -- number in 'key'. - -- - -- PUBLIC LANES API - local genatomic = function( linda, key, initial_val) - -- clears existing data (also queue). the slot may contain the stored value, and an additional boolean value - if linda:limit( key, 2) == cancel_error or linda:set( key, initial_val or 0.0) == cancel_error then - return cancel_error - end - - return function( diff) - -- 'nil' allows 'key' to be numeric - -- suspends until our 'true' is in - if linda:send( nil, key, true) == cancel_error then - return cancel_error - end - local val = linda:get( key) - if val ~= cancel_error then - val = val + (diff or 1.0) - -- set() releases the lock by emptying queue - if linda:set( key, val) == cancel_error then - val = cancel_error - end - end - return val - end - end - - -- activate full interface - lanes.require = core.require - lanes.register = core.register - lanes.gen = gen - lanes.linda = core.linda - lanes.cancel_error = core.cancel_error - lanes.nameof = core.nameof - lanes.set_singlethreaded = core.set_singlethreaded - lanes.threads = core.threads or function() error "lane tracking is not available" end -- core.threads isn't registered if settings.track_lanes is false - lanes.set_thread_priority = core.set_thread_priority - lanes.set_thread_affinity = core.set_thread_affinity - lanes.timer = timer - lanes.timer_lane = timer_lane - lanes.timers = timers - lanes.sleep = sleep - lanes.genlock = genlock - lanes.now_secs = core.now_secs - lanes.genatomic = genatomic - lanes.configure = nil -- no need to call configure() ever again - return lanes + -- This check is for sublanes requiring Lanes + -- + -- TBD: We could also have the C level expose 'string.gmatch' for us. But this is simpler. 
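And a matching sketch for the atomic counter generator above (key name and initial value are illustrative):

local lanes = require("lanes").configure()
local linda = lanes.linda()

local counter = lanes.genatomic(linda, "hits", 0.0)

print(counter())     -- 1 (default step is 1.0)
print(counter(10))   -- 11
print(counter(-11))  -- 0; lanes.cancel_error comes back instead if the linda was cancelled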
+ -- + if not string then + error( "To use 'lanes', you will also need to have 'string' available.", 2) + end + -- Configure called so remove metatable from lanes + setmetatable( lanes, nil) + -- + -- Cache globals for code that might run under sandboxing + -- + local assert = assert( assert) + local string_gmatch = assert( string.gmatch) + local string_format = assert( string.format) + local select = assert( select) + local type = assert( type) + local pairs = assert( pairs) + local tostring = assert( tostring) + local error = assert( error) + + local default_params = + { + nb_keepers = 1, + on_state_create = nil, + shutdown_timeout = 0.25, + with_timers = true, + track_lanes = false, + demote_full_userdata = nil, + verbose_errors = false, + allocator = nil + } + local boolean_param_checker = function( val_) + -- non-'boolean-false' should be 'boolean-true' or nil + return val_ and (val_ == true) or true + end + local param_checkers = + { + nb_keepers = function( val_) + -- nb_keepers should be a number > 0 + return type( val_) == "number" and val_ > 0 + end, + with_timers = boolean_param_checker, + allocator = function( val_) + -- can be nil, "protected", or a function + return val_ and (type( val_) == "function" or val_ == "protected") or true + end, + on_state_create = function( val_) + -- on_state_create may be nil or a function + return val_ and type( val_) == "function" or true + end, + shutdown_timeout = function( val_) + -- shutdown_timeout should be a number >= 0 + return type( val_) == "number" and val_ >= 0 + end, + track_lanes = boolean_param_checker, + demote_full_userdata = boolean_param_checker, + verbose_errors = boolean_param_checker + } + + local params_checker = function( settings_) + if not settings_ then + return default_params + end + -- make a copy of the table to leave the provided one unchanged, *and* to help ensure it won't change behind our back + local settings = {} + if type( settings_) ~= "table" then + error "Bad parameter #1 to lanes.configure(), should be a table" + end + -- any setting unknown to Lanes raises an error + for setting, _ in pairs( settings_) do + if not param_checkers[setting] then + error( "Unknown parameter '" .. setting .. "' in configure options") + end + end + -- any setting not present in the provided parameters takes the default value + for key, checker in pairs( param_checkers) do + local my_param = settings_[key] + local param + if my_param ~= nil then + param = my_param + else + param = default_params[key] + end + if not checker( param) then + error( "Bad " .. key .. ": " .. tostring( param), 2) + end + settings[key] = param + end + return settings + end + local settings = core.configure and core.configure( params_checker( settings_)) or core.settings + local core_lane_new = assert( core.lane_new) + local max_prio = assert( core.max_prio) + + lanes.ABOUT = + { + author= "Asko Kauppi , Benoit Germain ", + description= "Running multiple Lua states in parallel", + license= "MIT/X11", + copyright= "Copyright (c) 2007-10, Asko Kauppi; (c) 2011-19, Benoit Germain", + version = assert( core.version) + } + + + -- Making copies of necessary system libs will pass them on as upvalues; + -- only the first state doing "require 'lanes'" will need to have 'string' + -- and 'table' visible. 
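The checkers above make configure() strict about its settings table; a sketch of what is accepted and what is rejected (values are illustrative, error texts follow the messages in the code above):

local lanes = require "lanes"

-- accepted: every field has a checker, unset fields fall back to default_params
lanes.configure{
    nb_keepers  = 4,            -- number > 0
    with_timers = false,        -- boolean (or nil)
    allocator   = "protected",  -- nil, "protected", or an allocator function
}

-- rejected at call time:
--   lanes.configure{ nb_keepers = 0 }      --> "Bad nb_keepers: 0"
--   lanes.configure{ unknown_opt = true }  --> "Unknown parameter 'unknown_opt' in configure options"
--   lanes.configure( 42)                   --> "Bad parameter #1 to lanes.configure(), should be a table"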
+ -- + local function WR(str) + io.stderr:write( str.."\n" ) + end + + local function DUMP( tbl ) + if not tbl then return end + local str="" + for k,v in pairs(tbl) do + str= str..k.."="..tostring(v).."\n" + end + WR(str) + end + + + ---=== Laning ===--- + + -- lane_h[1..n]: lane results, same as via 'lane_h:join()' + -- lane_h[0]: can be read to make sure a thread has finished (always gives 'true') + -- lane_h[-1]: error message, without propagating the error + -- + -- Reading a Lane result (or [0]) propagates a possible error in the lane + -- (and execution does not return). Cancelled lanes give 'nil' values. + -- + -- lane_h.state: "pending"/"running"/"waiting"/"done"/"error"/"cancelled" + -- + -- Note: Would be great to be able to have '__ipairs' metamethod, that gets + -- called by 'ipairs()' function to custom iterate objects. We'd use it + -- for making sure a lane has ended (results are available); not requiring + -- the user to precede a loop by explicit 'h[0]' or 'h:join()'. + -- + -- Or, even better, 'ipairs()' should start valuing '__index' instead + -- of using raw reads that bypass it. + -- + ----- + -- lanes.gen( [libs_str|opt_tbl [, ...],] lane_func ) ( [...] ) -> h + -- + -- 'libs': nil: no libraries available (default) + -- "": only base library ('assert', 'print', 'unpack' etc.) + -- "math,os": math + os + base libraries (named ones + base) + -- "*": all standard libraries available + -- + -- 'opt': .priority: int (-3..+3) smaller is lower priority (0 = default) + -- + -- .globals: table of globals to set for a new thread (passed by value) + -- + -- .required: table of packages to require + -- + -- .gc_cb: function called when the lane handle is collected + -- + -- ... (more options may be introduced later) ... + -- + -- Calling with a function parameter ('lane_func') ends the string/table + -- modifiers, and prepares a lane generator. + + local valid_libs = + { + ["package"] = true, + ["table"] = true, + ["io"] = true, + ["os"] = true, + ["string"] = true, + ["math"] = true, + ["debug"] = true, + ["bit32"] = true, -- Lua 5.2 only, ignored silently under 5.1 + ["utf8"] = true, -- Lua 5.3 only, ignored silently under 5.1 and 5.2 + ["bit"] = true, -- LuaJIT only, ignored silently under PUC-Lua + ["jit"] = true, -- LuaJIT only, ignored silently under PUC-Lua + ["ffi"] = true, -- LuaJIT only, ignored silently under PUC-Lua + -- + ["base"] = true, + ["coroutine"] = true, -- part of "base" in Lua 5.1 + ["lanes.core"] = true + } + + local raise_option_error = function( name_, tv_, v_) + error( "Bad '" .. name_ .. "' option: " .. tv_ .. " " .. string_format( "%q", tostring( v_)), 4) + end + + local opt_validators = + { + priority = function( v_) + local tv = type( v_) + return (tv == "number") and v_ or raise_option_error( "priority", tv, v_) + end, + globals = function( v_) + local tv = type( v_) + return (tv == "table") and v_ or raise_option_error( "globals", tv, v_) + end, + package = function( v_) + local tv = type( v_) + return (tv == "table") and v_ or raise_option_error( "package", tv, v_) + end, + required = function( v_) + local tv = type( v_) + return (tv == "table") and v_ or raise_option_error( "required", tv, v_) + end, + gc_cb = function( v_) + local tv = type( v_) + return (tv == "function") and v_ or raise_option_error( "gc_cb", tv, v_) + end + } + + -- PUBLIC LANES API + -- receives a sequence of strings and tables, plus a function + local gen = function( ...) 
+ -- aggregrate all strings together, separated by "," as well as tables + -- the strings are a list of libraries to open + -- the tables contain the lane options + local opt = {} + local libs = nil + + local n = select( '#', ...) + + -- we need at least a function + if n == 0 then + error( "No parameters!", 2) + end + + -- all arguments but the last must be nil, strings, or tables + for i = 1, n - 1 do + local v = select( i, ...) + local tv = type( v) + if tv == "string" then + libs = libs and libs .. "," .. v or v + elseif tv == "table" then + for k, vv in pairs( v) do + opt[k]= vv + end + elseif v == nil then + -- skip + else + error( "Bad parameter " .. i .. ": " .. tv .. " " .. string_format( "%q", tostring( v)), 2) + end + end + + -- the last argument should be a function or a string + local func = select( n, ...) + local functype = type( func) + if functype ~= "function" and functype ~= "string" then + error( "Last parameter not function or string: " .. functype .. " " .. string_format( "%q", tostring( func)), 2) + end + + -- check that the caller only provides reserved library names, and those only once + -- "*" is a special case that doesn't require individual checking + if libs and libs ~= "*" then + local found = {} + for s in string_gmatch(libs, "[%a%d.]+") do + if not valid_libs[s] then + error( "Bad library name: " .. s, 2) + else + found[s] = (found[s] or 0) + 1 + if found[s] > 1 then + error( "libs specification contains '" .. s .. "' more than once", 2) + end + end + end + end + + -- validate that each option is known and properly valued + for k, v in pairs( opt) do + local validator = opt_validators[k] + if not validator then + error( (type( k) == "number" and "Unkeyed option: " .. type( v) .. " " .. string_format( "%q", tostring( v)) or "Bad '" .. tostring( k) .. "' option"), 2) + else + opt[k] = validator( v) + end + end + + local priority, globals, package, required, gc_cb = opt.priority, opt.globals, opt.package or package, opt.required, opt.gc_cb + return function( ...) + -- must pass functions args last else they will be truncated to the first one + return core_lane_new( func, libs, priority, globals, package, required, gc_cb, ...) + end + end -- gen() + + ---=== Timers ===--- + + -- PUBLIC LANES API + local timer = function() error "timers are not active" end + local timers = timer + local timer_lane = nil + + -- timer_gateway should always exist, even when the settings disable the timers + local timer_gateway = assert( core.timer_gateway) + + ----- + -- = sleep( [seconds_]) + -- + -- PUBLIC LANES API + local sleep = function( seconds_) + seconds_ = seconds_ or 0.0 -- this causes false and nil to be a valid input, equivalent to 0.0, but that's ok + if type( seconds_) ~= "number" then + error( "invalid duration " .. string_format( "%q", tostring(seconds_))) + end + -- receive data on a channel no-one ever sends anything, thus blocking for the specified duration + return timer_gateway:receive( seconds_, "ac100de1-a696-4619-b2f0-a26de9d58ab8") + end + + + if settings.with_timers ~= false then + + -- + -- On first 'require "lanes"', a timer lane is spawned that will maintain + -- timer tables and sleep in between the timer events. All interaction with + -- the timer lane happens via a 'timer_gateway' Linda, which is common to + -- all that 'require "lanes"'. 
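The gateway protocol described above is what the public timer API rides on; a usage sketch, with timers left at their default-enabled setting (reading the tick via get() here, the exact consumption pattern is up to the application):

local lanes = require("lanes").configure()   -- with_timers defaults to true
local linda = lanes.linda()

lanes.timer(linda, "tick", 1.0, 0.5)   -- first strike after 1 s, then every 0.5 s

lanes.sleep(1.2)
print(linda:get("tick"))               -- the timestamp the timer lane set on expiry

for _, t in ipairs(lanes.timers()) do
    -- each entry is {linda, key, {wakeup_at[, period]}} as built by get_timers()
    print(t[1], t[2], t[3][1], t[3][2])
end

lanes.timer(linda, "tick", 0)          -- first_secs == 0 with no period clears the timer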
+ -- + -- Linda protocol to timer lane: + -- + -- TGW_KEY: linda_h, key, [wakeup_at_secs], [repeat_secs] + -- + local TGW_KEY= "(timer control)" -- the key does not matter, a 'weird' key may help debugging + local TGW_QUERY, TGW_REPLY = "(timer query)", "(timer reply)" + local first_time_key= "first time" + + local first_time = timer_gateway:get( first_time_key) == nil + timer_gateway:set( first_time_key, true) + + -- + -- Timer lane; initialize only on the first 'require "lanes"' instance (which naturally + -- has 'table' always declared) + -- + if first_time then + + local now_secs = core.now_secs + assert( type( now_secs) == "function") + ----- + -- Snore loop (run as a lane on the background) + -- + -- High priority, to get trustworthy timings. + -- + -- We let the timer lane be a "free running" thread; no handle to it + -- remains. + -- + local timer_body = function() + set_debug_threadname( "LanesTimer") + -- + -- { [deep_linda_lightuserdata]= { [deep_linda_lightuserdata]=linda_h, + -- [key]= { wakeup_secs [,period_secs] } [, ...] }, + -- } + -- + -- Collection of all running timers, indexed with linda's & key. + -- + -- Note that we need to use the deep lightuserdata identifiers, instead + -- of 'linda_h' themselves as table indices. Otherwise, we'd get multiple + -- entries for the same timer. + -- + -- The 'hidden' reference to Linda proxy is used in 'check_timers()' but + -- also important to keep the Linda alive, even if all outside world threw + -- away pointers to it (which would ruin uniqueness of the deep pointer). + -- Now we're safe. + -- + local collection = {} + local table_insert = assert( table.insert) + + local get_timers = function() + local r = {} + for deep, t in pairs( collection) do + -- WR( tostring( deep)) + local l = t[deep] + for key, timer_data in pairs( t) do + if key ~= deep then + table_insert( r, {l, key, timer_data}) + end + end + end + return r + end -- get_timers() + + -- + -- set_timer( linda_h, key [,wakeup_at_secs [,period_secs]] ) + -- + local set_timer = function( linda, key, wakeup_at, period) + assert( wakeup_at == nil or wakeup_at > 0.0) + assert( period == nil or period > 0.0) + + local linda_deep = linda:deep() + assert( linda_deep) + + -- Find or make a lookup for this timer + -- + local t1 = collection[linda_deep] + if not t1 then + t1 = { [linda_deep] = linda} -- proxy to use the Linda + collection[linda_deep] = t1 + end + + if wakeup_at == nil then + -- Clear the timer + -- + t1[key]= nil + + -- Remove empty tables from collection; speeds timer checks and + -- lets our 'safety reference' proxy be gc:ed as well. + -- + local empty = true + for k, _ in pairs( t1) do + if k ~= linda_deep then + empty = false + break + end + end + if empty then + collection[linda_deep] = nil + end + + -- Note: any unread timer value is left at 'linda[key]' intensionally; + -- clearing a timer just stops it. + else + -- New timer or changing the timings + -- + local t2 = t1[key] + if not t2 then + t2= {} + t1[key]= t2 + end + + t2[1] = wakeup_at + t2[2] = period -- can be 'nil' + end + end -- set_timer() + + ----- + -- [next_wakeup_at]= check_timers() + -- Check timers, and wake up the ones expired (if any) + -- Returns the closest upcoming (remaining) wakeup time (or 'nil' if none). 
+ local check_timers = function() + local now = now_secs() + local next_wakeup + + for linda_deep,t1 in pairs(collection) do + for key,t2 in pairs(t1) do + -- + if key==linda_deep then + -- no 'continue' in Lua :/ + else + -- 't2': { wakeup_at_secs [,period_secs] } + -- + local wakeup_at= t2[1] + local period= t2[2] -- may be 'nil' + + if wakeup_at <= now then + local linda= t1[linda_deep] + assert(linda) + + linda:set( key, now ) + + -- 'pairs()' allows the values to be modified (and even + -- removed) as far as keys are not touched + + if not period then + -- one-time timer; gone + -- + t1[key]= nil + wakeup_at= nil -- no 'continue' in Lua :/ + else + -- repeating timer; find next wakeup (may jump multiple repeats) + -- + repeat + wakeup_at= wakeup_at+period + until wakeup_at > now + + t2[1]= wakeup_at + end + end + + if wakeup_at and ((not next_wakeup) or (wakeup_at < next_wakeup)) then + next_wakeup= wakeup_at + end + end + end -- t2 loop + end -- t1 loop + + return next_wakeup -- may be 'nil' + end -- check_timers() + + local timer_gateway_batched = timer_gateway.batched + set_finalizer( function( err, stk) + if err and type( err) ~= "userdata" then + WR( "LanesTimer error: "..tostring(err)) + --elseif type( err) == "userdata" then + -- WR( "LanesTimer after cancel" ) + --else + -- WR("LanesTimer finalized") + end + end) + while true do + local next_wakeup = check_timers() + + -- Sleep until next timer to wake up, or a set/clear command + -- + local secs + if next_wakeup then + secs = next_wakeup - now_secs() + if secs < 0 then secs = 0 end + end + local key, what = timer_gateway:receive( secs, TGW_KEY, TGW_QUERY) + + if key == TGW_KEY then + assert( getmetatable( what) == "Linda") -- 'what' should be a linda on which the client sets a timer + local _, key, wakeup_at, period = timer_gateway:receive( 0, timer_gateway_batched, TGW_KEY, 3) + assert( key) + set_timer( what, key, wakeup_at, period and period > 0 and period or nil) + elseif key == TGW_QUERY then + if what == "get_timers" then + timer_gateway:send( TGW_REPLY, get_timers()) + else + timer_gateway:send( TGW_REPLY, "unknown query " .. what) + end + --elseif secs == nil then -- got no value while block-waiting? + -- WR( "timer lane: no linda, aborted?") + end + end + end -- timer_body() + timer_lane = gen( "*", { package= {}, priority = max_prio}, timer_body)() -- "*" instead of "io,package" for LuaJIT compatibility... + end -- first_time + + ----- + -- = timer( linda_h, key_val, date_tbl|first_secs [,period_secs] ) + -- + -- PUBLIC LANES API + timer = function( linda, key, a, period ) + if getmetatable( linda) ~= "Linda" then + error "expecting a Linda" + end + if a == 0.0 then + -- Caller expects to get current time stamp in Linda, on return + -- (like the timer had expired instantly); it would be good to set this + -- as late as possible (to give most current time) but also we want it + -- to precede any possible timers that might start striking. 
+ -- + linda:set( key, core.now_secs()) + + if not period or period==0.0 then + timer_gateway:send( TGW_KEY, linda, key, nil, nil ) -- clear the timer + return -- nothing more to do + end + a= period + end + + local wakeup_at= type(a)=="table" and core.wakeup_conv(a) -- given point of time + or (a and core.now_secs()+a or nil) + -- queue to timer + -- + timer_gateway:send( TGW_KEY, linda, key, wakeup_at, period ) + end + + ----- + -- {[{linda, slot, when, period}[,...]]} = timers() + -- + -- PUBLIC LANES API + timers = function() + timer_gateway:send( TGW_QUERY, "get_timers") + local _, r = timer_gateway:receive( TGW_REPLY) + return r + end + + end -- settings.with_timers + + -- avoid pulling the whole core module as upvalue when cancel_error is enough + local cancel_error = assert( core.cancel_error) + + ---=== Lock & atomic generators ===--- + + -- These functions are just surface sugar, but make solutions easier to read. + -- Not many applications should even need explicit locks or atomic counters. + + -- + -- [true [, ...]= trues(uint) + -- + local function trues( n) + if n > 0 then + return true, trues( n - 1) + end + end + + -- + -- lock_f = lanes.genlock( linda_h, key [,N_uint=1] ) + -- + -- = lock_f( +M ) -- acquire M + -- ...locked... + -- = lock_f( -M ) -- release M + -- + -- Returns an access function that allows 'N' simultaneous entries between + -- acquire (+M) and release (-M). For binary locks, use M==1. + -- + -- PUBLIC LANES API + local genlock = function( linda, key, N) + -- clear existing data and set the limit + N = N or 1 + if linda:set( key) == cancel_error or linda:limit( key, N) == cancel_error then + return cancel_error + end + + -- use an optimized version for case N == 1 + return (N == 1) and + function( M, mode_) + local timeout = (mode_ == "try") and 0 or nil + if M > 0 then + -- 'nil' timeout allows 'key' to be numeric + return linda:send( timeout, key, true) -- suspends until been able to push them + else + local k = linda:receive( nil, key) + -- propagate cancel_error if we got it, else return true or false + return k and ((k ~= cancel_error) and true or k) or false + end + end + or + function( M, mode_) + local timeout = (mode_ == "try") and 0 or nil + if M > 0 then + -- 'nil' timeout allows 'key' to be numeric + return linda:send( timeout, key, trues(M)) -- suspends until been able to push them + else + local k = linda:receive( nil, linda.batched, key, -M) + -- propagate cancel_error if we got it, else return true or false + return k and ((k ~= cancel_error) and true or k) or false + end + end + end + + + -- + -- atomic_f = lanes.genatomic( linda_h, key [,initial_num=0.0]) + -- + -- int|cancel_error = atomic_f( [diff_num = 1.0]) + -- + -- Returns an access function that allows atomic increment/decrement of the + -- number in 'key'. + -- + -- PUBLIC LANES API + local genatomic = function( linda, key, initial_val) + -- clears existing data (also queue). 
the slot may contain the stored value, and an additional boolean value + if linda:limit( key, 2) == cancel_error or linda:set( key, initial_val or 0.0) == cancel_error then + return cancel_error + end + + return function( diff) + -- 'nil' allows 'key' to be numeric + -- suspends until our 'true' is in + if linda:send( nil, key, true) == cancel_error then + return cancel_error + end + local val = linda:get( key) + if val ~= cancel_error then + val = val + (diff or 1.0) + -- set() releases the lock by emptying queue + if linda:set( key, val) == cancel_error then + val = cancel_error + end + end + return val + end + end + + -- activate full interface + lanes.require = core.require + lanes.register = core.register + lanes.gen = gen + lanes.linda = core.linda + lanes.cancel_error = core.cancel_error + lanes.nameof = core.nameof + lanes.set_singlethreaded = core.set_singlethreaded + lanes.threads = core.threads or function() error "lane tracking is not available" end -- core.threads isn't registered if settings.track_lanes is false + lanes.set_thread_priority = core.set_thread_priority + lanes.set_thread_affinity = core.set_thread_affinity + lanes.timer = timer + lanes.timer_lane = timer_lane + lanes.timers = timers + lanes.sleep = sleep + lanes.genlock = genlock + lanes.now_secs = core.now_secs + lanes.genatomic = genatomic + lanes.configure = nil -- no need to call configure() ever again + return lanes end -- lanes.configure lanesMeta.__index = function( t, k) - -- This is called when some functionality is accessed without calling configure() - lanes.configure() -- initialize with default settings - -- Access the required key - return lanes[k] + -- This is called when some functionality is accessed without calling configure() + lanes.configure() -- initialize with default settings + -- Access the required key + return lanes[k] end -- no need to force calling configure() manually excepted the first time (other times will reuse the internally stored settings of the first call) if core.settings then - return lanes.configure() + return lanes.configure() else - return lanes + return lanes end --the end diff --git a/src/lanes_private.h b/src/lanes_private.h index 1a15969..7da3286 100644 --- a/src/lanes_private.h +++ b/src/lanes_private.h @@ -9,65 +9,65 @@ // struct s_Lane { - THREAD_T thread; - // - // M: sub-thread OS thread - // S: not used + THREAD_T thread; + // + // M: sub-thread OS thread + // S: not used - char const* debug_name; + char const* debug_name; - lua_State* L; - Universe* U; - // - // M: prepares the state, and reads results - // S: while S is running, M must keep out of modifying the state + lua_State* L; + Universe* U; + // + // M: prepares the state, and reads results + // S: while S is running, M must keep out of modifying the state - volatile enum e_status status; - // - // M: sets to PENDING (before launching) - // S: updates -> RUNNING/WAITING -> DONE/ERROR_ST/CANCELLED + volatile enum e_status status; + // + // M: sets to PENDING (before launching) + // S: updates -> RUNNING/WAITING -> DONE/ERROR_ST/CANCELLED - SIGNAL_T* volatile waiting_on; - // - // When status is WAITING, points on the linda's signal the thread waits on, else NULL + SIGNAL_T* volatile waiting_on; + // + // When status is WAITING, points on the linda's signal the thread waits on, else NULL - volatile enum e_cancel_request cancel_request; - // - // M: sets to FALSE, flags TRUE for cancel request - // S: reads to see if cancel is requested + volatile enum e_cancel_request cancel_request; + // + // M: sets 
to FALSE, flags TRUE for cancel request + // S: reads to see if cancel is requested #if THREADWAIT_METHOD == THREADWAIT_CONDVAR - SIGNAL_T done_signal; - // - // M: Waited upon at lane ending (if Posix with no PTHREAD_TIMEDJOIN) - // S: sets the signal once cancellation is noticed (avoids a kill) - - MUTEX_T done_lock; - // - // Lock required by 'done_signal' condition variable, protecting - // lane status changes to DONE/ERROR_ST/CANCELLED. + SIGNAL_T done_signal; + // + // M: Waited upon at lane ending (if Posix with no PTHREAD_TIMEDJOIN) + // S: sets the signal once cancellation is noticed (avoids a kill) + + MUTEX_T done_lock; + // + // Lock required by 'done_signal' condition variable, protecting + // lane status changes to DONE/ERROR_ST/CANCELLED. #endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR - volatile enum - { - NORMAL, // normal master side state - KILLED // issued an OS kill - } mstatus; - // - // M: sets to NORMAL, if issued a kill changes to KILLED - // S: not used - - struct s_Lane* volatile selfdestruct_next; - // - // M: sets to non-NULL if facing lane handle '__gc' cycle but the lane - // is still running - // S: cleans up after itself if non-NULL at lane exit + volatile enum + { + NORMAL, // normal master side state + KILLED // issued an OS kill + } mstatus; + // + // M: sets to NORMAL, if issued a kill changes to KILLED + // S: not used + + struct s_Lane* volatile selfdestruct_next; + // + // M: sets to non-NULL if facing lane handle '__gc' cycle but the lane + // is still running + // S: cleans up after itself if non-NULL at lane exit #if HAVE_LANE_TRACKING - struct s_Lane* volatile tracking_next; + struct s_Lane* volatile tracking_next; #endif // HAVE_LANE_TRACKING - // - // For tracking only + // + // For tracking only }; typedef struct s_Lane Lane; @@ -79,14 +79,14 @@ typedef struct s_Lane Lane; static inline Lane* get_lane_from_registry( lua_State* L) { - Lane* s; - STACK_GROW( L, 1); - STACK_CHECK( L, 0); - REGISTRY_GET( L, CANCEL_TEST_KEY); - s = lua_touserdata( L, -1); // lightuserdata (true 's_lane' pointer) / nil - lua_pop( L, 1); - STACK_END( L, 0); - return s; + Lane* s; + STACK_GROW( L, 1); + STACK_CHECK( L, 0); + REGISTRY_GET( L, CANCEL_TEST_KEY); + s = lua_touserdata( L, -1); // lightuserdata (true 's_lane' pointer) / nil + lua_pop( L, 1); + STACK_END( L, 0); + return s; } int push_thread_status( lua_State* L, Lane* s); diff --git a/src/linda.c b/src/linda.c index d3ed8a0..a9c9710 100644 --- a/src/linda.c +++ b/src/linda.c @@ -48,13 +48,13 @@ THE SOFTWARE. */ struct s_Linda { - DeepPrelude prelude; // Deep userdata MUST start with this header - SIGNAL_T read_happened; - SIGNAL_T write_happened; - Universe* U; // the universe this linda belongs to - ptrdiff_t group; // a group to control keeper allocation between lindas - enum e_cancel_request simulate_cancel; - char name[1]; + DeepPrelude prelude; // Deep userdata MUST start with this header + SIGNAL_T read_happened; + SIGNAL_T write_happened; + Universe* U; // the universe this linda belongs to + ptrdiff_t group; // a group to control keeper allocation between lindas + enum e_cancel_request simulate_cancel; + char name[1]; }; #define LINDA_KEEPER_HASHSEED( linda) (linda->group ? 
linda->group : (ptrdiff_t)linda) @@ -62,51 +62,51 @@ static void* linda_id( lua_State*, DeepOp); static inline struct s_Linda* lua_toLinda( lua_State* L, int idx_) { - struct s_Linda* linda = (struct s_Linda*) luaG_todeep( L, linda_id, idx_); - luaL_argcheck( L, linda != NULL, idx_, "expecting a linda object"); - return linda; + struct s_Linda* linda = (struct s_Linda*) luaG_todeep( L, linda_id, idx_); + luaL_argcheck( L, linda != NULL, idx_, "expecting a linda object"); + return linda; } static void check_key_types( lua_State* L, int start_, int end_) { - int i; - for( i = start_; i <= end_; ++ i) - { - int t = lua_type( L, i); - if( t == LUA_TBOOLEAN || t == LUA_TNUMBER || t == LUA_TSTRING || t == LUA_TLIGHTUSERDATA) - { - continue; - } - (void) luaL_error( L, "argument #%d: invalid key type (not a boolean, string, number or light userdata)", i); - } + int i; + for( i = start_; i <= end_; ++ i) + { + int t = lua_type( L, i); + if( t == LUA_TBOOLEAN || t == LUA_TNUMBER || t == LUA_TSTRING || t == LUA_TLIGHTUSERDATA) + { + continue; + } + (void) luaL_error( L, "argument #%d: invalid key type (not a boolean, string, number or light userdata)", i); + } } LUAG_FUNC( linda_protected_call) { - int rc = LUA_OK; - struct s_Linda* linda = lua_toLinda( L, 1); - - // acquire the keeper - Keeper* K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED(linda)); - lua_State* KL = K ? K->L : NULL; // need to do this for 'STACK_CHECK' - if( KL == NULL) return 0; - - // retrieve the actual function to be called and move it before the arguments - lua_pushvalue( L, lua_upvalueindex( 1)); - lua_insert( L, 1); - // do a protected call - rc = lua_pcall( L, lua_gettop( L) - 1, LUA_MULTRET, 0); - - // release the keeper - keeper_release( K); - - // if there was an error, forward it - if( rc != LUA_OK) - { - return lua_error( L); - } - // return whatever the actual operation provided - return lua_gettop( L); + int rc = LUA_OK; + struct s_Linda* linda = lua_toLinda( L, 1); + + // acquire the keeper + Keeper* K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED(linda)); + lua_State* KL = K ? 
K->L : NULL; // need to do this for 'STACK_CHECK' + if( KL == NULL) return 0; + + // retrieve the actual function to be called and move it before the arguments + lua_pushvalue( L, lua_upvalueindex( 1)); + lua_insert( L, 1); + // do a protected call + rc = lua_pcall( L, lua_gettop( L) - 1, LUA_MULTRET, 0); + + // release the keeper + keeper_release( K); + + // if there was an error, forward it + if( rc != LUA_OK) + { + return lua_error( L); + } + // return whatever the actual operation provided + return lua_gettop( L); } /* @@ -120,142 +120,142 @@ LUAG_FUNC( linda_protected_call) */ LUAG_FUNC( linda_send) { - struct s_Linda* linda = lua_toLinda( L, 1); - bool_t ret = FALSE; - enum e_cancel_request cancel = CANCEL_NONE; - int pushed; - time_d timeout = -1.0; - uint_t key_i = 2; // index of first key, if timeout not there - bool_t as_nil_sentinel; // if not NULL, send() will silently send a single nil if nothing is provided - - if( lua_type( L, 2) == LUA_TNUMBER) // we don't want to use lua_isnumber() because of autocoercion - { - timeout = SIGNAL_TIMEOUT_PREPARE( lua_tonumber( L, 2)); - ++ key_i; - } - else if( lua_isnil( L, 2)) // alternate explicit "no timeout" by passing nil before the key - { - ++ key_i; - } - - as_nil_sentinel = equal_unique_key( L, key_i, NIL_SENTINEL); - if( as_nil_sentinel) - { - // the real key to send data to is after the NIL_SENTINEL marker - ++ key_i; - } - - // make sure the key is of a valid type - check_key_types( L, key_i, key_i); - - STACK_GROW( L, 1); - - // make sure there is something to send - if( (uint_t)lua_gettop( L) == key_i) - { - if( as_nil_sentinel) - { - // send a single nil if nothing is provided - push_unique_key( L, NIL_SENTINEL); - } - else - { - return luaL_error( L, "no data to send"); - } - } - - // convert nils to some special non-nil sentinel in sent values - keeper_toggle_nil_sentinels( L, key_i + 1, eLM_ToKeeper); - - { - bool_t try_again = TRUE; - Lane* const s = get_lane_from_registry( L); - Keeper* K = which_keeper( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); - lua_State* KL = K ? K->L : NULL; // need to do this for 'STACK_CHECK' - if( KL == NULL) return 0; - STACK_CHECK( KL, 0); - for( ;;) - { - if( s != NULL) - { - cancel = s->cancel_request; - } - cancel = (cancel != CANCEL_NONE) ? 
cancel : linda->simulate_cancel; - // if user wants to cancel, or looped because of a timeout, the call returns without sending anything - if( !try_again || cancel != CANCEL_NONE) - { - pushed = 0; - break; - } - - STACK_MID( KL, 0); - pushed = keeper_call( linda->U, KL, KEEPER_API( send), L, linda, key_i); - if( pushed < 0) - { - break; - } - ASSERT_L( pushed == 1); - - ret = lua_toboolean( L, -1); - lua_pop( L, 1); - - if( ret) - { - // Wake up ALL waiting threads - SIGNAL_ALL( &linda->write_happened); - break; - } - - // instant timout to bypass the wait syscall - if( timeout == 0.0) - { - break; /* no wait; instant timeout */ - } - - // storage limit hit, wait until timeout or signalled that we should try again - { - enum e_status prev_status = ERROR_ST; // prevent 'might be used uninitialized' warnings - if( s != NULL) - { - // change status of lane to "waiting" - prev_status = s->status; // RUNNING, most likely - ASSERT_L( prev_status == RUNNING); // but check, just in case - s->status = WAITING; - ASSERT_L( s->waiting_on == NULL); - s->waiting_on = &linda->read_happened; - } - // could not send because no room: wait until some data was read before trying again, or until timeout is reached - try_again = SIGNAL_WAIT( &linda->read_happened, &K->keeper_cs, timeout); - if( s != NULL) - { - s->waiting_on = NULL; - s->status = prev_status; - } - } - } - STACK_END( KL, 0); - } - - if( pushed < 0) - { - return luaL_error( L, "tried to copy unsupported types"); - } - - switch( cancel) - { - case CANCEL_SOFT: - // if user wants to soft-cancel, the call returns lanes.cancel_error - push_unique_key( L, CANCEL_ERROR); - return 1; - - case CANCEL_HARD: - // raise an error interrupting execution only in case of hard cancel - return cancel_error( L); // raises an error and doesn't return - - default: - lua_pushboolean( L, ret); // true (success) or false (timeout) - return 1; - } + struct s_Linda* linda = lua_toLinda( L, 1); + bool_t ret = FALSE; + enum e_cancel_request cancel = CANCEL_NONE; + int pushed; + time_d timeout = -1.0; + uint_t key_i = 2; // index of first key, if timeout not there + bool_t as_nil_sentinel; // if not NULL, send() will silently send a single nil if nothing is provided + + if( lua_type( L, 2) == LUA_TNUMBER) // we don't want to use lua_isnumber() because of autocoercion + { + timeout = SIGNAL_TIMEOUT_PREPARE( lua_tonumber( L, 2)); + ++ key_i; + } + else if( lua_isnil( L, 2)) // alternate explicit "no timeout" by passing nil before the key + { + ++ key_i; + } + + as_nil_sentinel = equal_unique_key( L, key_i, NIL_SENTINEL); + if( as_nil_sentinel) + { + // the real key to send data to is after the NIL_SENTINEL marker + ++ key_i; + } + + // make sure the key is of a valid type + check_key_types( L, key_i, key_i); + + STACK_GROW( L, 1); + + // make sure there is something to send + if( (uint_t)lua_gettop( L) == key_i) + { + if( as_nil_sentinel) + { + // send a single nil if nothing is provided + push_unique_key( L, NIL_SENTINEL); + } + else + { + return luaL_error( L, "no data to send"); + } + } + + // convert nils to some special non-nil sentinel in sent values + keeper_toggle_nil_sentinels( L, key_i + 1, eLM_ToKeeper); + + { + bool_t try_again = TRUE; + Lane* const s = get_lane_from_registry( L); + Keeper* K = which_keeper( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); + lua_State* KL = K ? 
K->L : NULL; // need to do this for 'STACK_CHECK' + if( KL == NULL) return 0; + STACK_CHECK( KL, 0); + for( ;;) + { + if( s != NULL) + { + cancel = s->cancel_request; + } + cancel = (cancel != CANCEL_NONE) ? cancel : linda->simulate_cancel; + // if user wants to cancel, or looped because of a timeout, the call returns without sending anything + if( !try_again || cancel != CANCEL_NONE) + { + pushed = 0; + break; + } + + STACK_MID( KL, 0); + pushed = keeper_call( linda->U, KL, KEEPER_API( send), L, linda, key_i); + if( pushed < 0) + { + break; + } + ASSERT_L( pushed == 1); + + ret = lua_toboolean( L, -1); + lua_pop( L, 1); + + if( ret) + { + // Wake up ALL waiting threads + SIGNAL_ALL( &linda->write_happened); + break; + } + + // instant timout to bypass the wait syscall + if( timeout == 0.0) + { + break; /* no wait; instant timeout */ + } + + // storage limit hit, wait until timeout or signalled that we should try again + { + enum e_status prev_status = ERROR_ST; // prevent 'might be used uninitialized' warnings + if( s != NULL) + { + // change status of lane to "waiting" + prev_status = s->status; // RUNNING, most likely + ASSERT_L( prev_status == RUNNING); // but check, just in case + s->status = WAITING; + ASSERT_L( s->waiting_on == NULL); + s->waiting_on = &linda->read_happened; + } + // could not send because no room: wait until some data was read before trying again, or until timeout is reached + try_again = SIGNAL_WAIT( &linda->read_happened, &K->keeper_cs, timeout); + if( s != NULL) + { + s->waiting_on = NULL; + s->status = prev_status; + } + } + } + STACK_END( KL, 0); + } + + if( pushed < 0) + { + return luaL_error( L, "tried to copy unsupported types"); + } + + switch( cancel) + { + case CANCEL_SOFT: + // if user wants to soft-cancel, the call returns lanes.cancel_error + push_unique_key( L, CANCEL_ERROR); + return 1; + + case CANCEL_HARD: + // raise an error interrupting execution only in case of hard cancel + return cancel_error( L); // raises an error and doesn't return + + default: + lua_pushboolean( L, ret); // true (success) or false (timeout) + return 1; + } } @@ -273,143 +273,143 @@ LUAG_FUNC( linda_send) #define BATCH_SENTINEL "270e6c9d-280f-4983-8fee-a7ecdda01475" LUAG_FUNC( linda_receive) { - struct s_Linda* linda = lua_toLinda( L, 1); - int pushed, expected_pushed_min, expected_pushed_max; - enum e_cancel_request cancel = CANCEL_NONE; - keeper_api_t keeper_receive; - - time_d timeout = -1.0; - uint_t key_i = 2; - - if( lua_type( L, 2) == LUA_TNUMBER) // we don't want to use lua_isnumber() because of autocoercion - { - timeout = SIGNAL_TIMEOUT_PREPARE( lua_tonumber( L, 2)); - ++ key_i; - } - else if( lua_isnil( L, 2)) // alternate explicit "no timeout" by passing nil before the key - { - ++ key_i; - } - - // are we in batched mode? 
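
For context while reviewing the reindented linda_send()/linda_receive() above, here is a minimal Lua sketch of the public API they implement (optional leading timeout, nil for "no timeout", linda.batched for grouped reads). The linda name and the "jobs" key are illustrative only; this is a usage example under those assumptions, not code belonging to this whitespace change.

    local lanes = require "lanes".configure()
    local linda = lanes.linda("demo")

    -- producer lane: pushes three values into the "jobs" slot
    local producer = lanes.gen("*", function()
        for i = 1, 3 do
            linda:send("jobs", i) -- no timeout given: block until the keeper has room
        end
        return true
    end)()

    -- plain receive: returns the key that was read, then the value
    local k, v = linda:receive(10, "jobs") -- wait at most 10 seconds
    print(k, v)                            --> jobs  1

    -- batched receive: nil means "no timeout"; returns the key, then exactly 2 values
    local _, a, b = linda:receive(nil, linda.batched, "jobs", 2)
    print(a, b)                            --> 2  3

    producer:join()
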
- { - int is_batched; - lua_pushliteral( L, BATCH_SENTINEL); - is_batched = lua501_equal( L, key_i, -1); - lua_pop( L, 1); - if( is_batched) - { - // no need to pass linda.batched in the keeper state - ++ key_i; - // make sure the keys are of a valid type - check_key_types( L, key_i, key_i); - // receive multiple values from a single slot - keeper_receive = KEEPER_API( receive_batched); - // we expect a user-defined amount of return value - expected_pushed_min = (int)luaL_checkinteger( L, key_i + 1); - expected_pushed_max = (int)luaL_optinteger( L, key_i + 2, expected_pushed_min); - // don't forget to count the key in addition to the values - ++ expected_pushed_min; - ++ expected_pushed_max; - if( expected_pushed_min > expected_pushed_max) - { - return luaL_error( L, "batched min/max error"); - } - } - else - { - // make sure the keys are of a valid type - check_key_types( L, key_i, lua_gettop( L)); - // receive a single value, checking multiple slots - keeper_receive = KEEPER_API( receive); - // we expect a single (value, key) pair of returned values - expected_pushed_min = expected_pushed_max = 2; - } - } - - { - bool_t try_again = TRUE; - Lane* const s = get_lane_from_registry( L); - Keeper* K = which_keeper( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); - if( K == NULL) return 0; - for( ;;) - { - if( s != NULL) - { - cancel = s->cancel_request; - } - cancel = (cancel != CANCEL_NONE) ? cancel : linda->simulate_cancel; - // if user wants to cancel, or looped because of a timeout, the call returns without sending anything - if( !try_again || cancel != CANCEL_NONE) - { - pushed = 0; - break; - } - - // all arguments of receive() but the first are passed to the keeper's receive function - pushed = keeper_call( linda->U, K->L, keeper_receive, L, linda, key_i); - if( pushed < 0) - { - break; - } - if( pushed > 0) - { - ASSERT_L( pushed >= expected_pushed_min && pushed <= expected_pushed_max); - // replace sentinels with real nils - keeper_toggle_nil_sentinels( L, lua_gettop( L) - pushed, eLM_FromKeeper); - // To be done from within the 'K' locking area - // - SIGNAL_ALL( &linda->read_happened); - break; - } - - if( timeout == 0.0) - { - break; /* instant timeout */ - } - - // nothing received, wait until timeout or signalled that we should try again - { - enum e_status prev_status = ERROR_ST; // prevent 'might be used uninitialized' warnings - if( s != NULL) - { - // change status of lane to "waiting" - prev_status = s->status; // RUNNING, most likely - ASSERT_L( prev_status == RUNNING); // but check, just in case - s->status = WAITING; - ASSERT_L( s->waiting_on == NULL); - s->waiting_on = &linda->write_happened; - } - // not enough data to read: wakeup when data was sent, or when timeout is reached - try_again = SIGNAL_WAIT( &linda->write_happened, &K->keeper_cs, timeout); - if( s != NULL) - { - s->waiting_on = NULL; - s->status = prev_status; - } - } - } - } - - if( pushed < 0) - { - return luaL_error( L, "tried to copy unsupported types"); - } - - switch( cancel) - { - case CANCEL_SOFT: - // if user wants to soft-cancel, the call returns CANCEL_ERROR - push_unique_key( L, CANCEL_ERROR); - return 1; - - case CANCEL_HARD: - // raise an error interrupting execution only in case of hard cancel - return cancel_error( L); // raises an error and doesn't return - - default: - return pushed; - } + struct s_Linda* linda = lua_toLinda( L, 1); + int pushed, expected_pushed_min, expected_pushed_max; + enum e_cancel_request cancel = CANCEL_NONE; + keeper_api_t keeper_receive; + + time_d timeout = 
-1.0; + uint_t key_i = 2; + + if( lua_type( L, 2) == LUA_TNUMBER) // we don't want to use lua_isnumber() because of autocoercion + { + timeout = SIGNAL_TIMEOUT_PREPARE( lua_tonumber( L, 2)); + ++ key_i; + } + else if( lua_isnil( L, 2)) // alternate explicit "no timeout" by passing nil before the key + { + ++ key_i; + } + + // are we in batched mode? + { + int is_batched; + lua_pushliteral( L, BATCH_SENTINEL); + is_batched = lua501_equal( L, key_i, -1); + lua_pop( L, 1); + if( is_batched) + { + // no need to pass linda.batched in the keeper state + ++ key_i; + // make sure the keys are of a valid type + check_key_types( L, key_i, key_i); + // receive multiple values from a single slot + keeper_receive = KEEPER_API( receive_batched); + // we expect a user-defined amount of return value + expected_pushed_min = (int)luaL_checkinteger( L, key_i + 1); + expected_pushed_max = (int)luaL_optinteger( L, key_i + 2, expected_pushed_min); + // don't forget to count the key in addition to the values + ++ expected_pushed_min; + ++ expected_pushed_max; + if( expected_pushed_min > expected_pushed_max) + { + return luaL_error( L, "batched min/max error"); + } + } + else + { + // make sure the keys are of a valid type + check_key_types( L, key_i, lua_gettop( L)); + // receive a single value, checking multiple slots + keeper_receive = KEEPER_API( receive); + // we expect a single (value, key) pair of returned values + expected_pushed_min = expected_pushed_max = 2; + } + } + + { + bool_t try_again = TRUE; + Lane* const s = get_lane_from_registry( L); + Keeper* K = which_keeper( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); + if( K == NULL) return 0; + for( ;;) + { + if( s != NULL) + { + cancel = s->cancel_request; + } + cancel = (cancel != CANCEL_NONE) ? cancel : linda->simulate_cancel; + // if user wants to cancel, or looped because of a timeout, the call returns without sending anything + if( !try_again || cancel != CANCEL_NONE) + { + pushed = 0; + break; + } + + // all arguments of receive() but the first are passed to the keeper's receive function + pushed = keeper_call( linda->U, K->L, keeper_receive, L, linda, key_i); + if( pushed < 0) + { + break; + } + if( pushed > 0) + { + ASSERT_L( pushed >= expected_pushed_min && pushed <= expected_pushed_max); + // replace sentinels with real nils + keeper_toggle_nil_sentinels( L, lua_gettop( L) - pushed, eLM_FromKeeper); + // To be done from within the 'K' locking area + // + SIGNAL_ALL( &linda->read_happened); + break; + } + + if( timeout == 0.0) + { + break; /* instant timeout */ + } + + // nothing received, wait until timeout or signalled that we should try again + { + enum e_status prev_status = ERROR_ST; // prevent 'might be used uninitialized' warnings + if( s != NULL) + { + // change status of lane to "waiting" + prev_status = s->status; // RUNNING, most likely + ASSERT_L( prev_status == RUNNING); // but check, just in case + s->status = WAITING; + ASSERT_L( s->waiting_on == NULL); + s->waiting_on = &linda->write_happened; + } + // not enough data to read: wakeup when data was sent, or when timeout is reached + try_again = SIGNAL_WAIT( &linda->write_happened, &K->keeper_cs, timeout); + if( s != NULL) + { + s->waiting_on = NULL; + s->status = prev_status; + } + } + } + } + + if( pushed < 0) + { + return luaL_error( L, "tried to copy unsupported types"); + } + + switch( cancel) + { + case CANCEL_SOFT: + // if user wants to soft-cancel, the call returns CANCEL_ERROR + push_unique_key( L, CANCEL_ERROR); + return 1; + + case CANCEL_HARD: + // raise an error 
interrupting execution only in case of hard cancel + return cancel_error( L); // raises an error and doesn't return + + default: + return pushed; + } } @@ -423,51 +423,51 @@ LUAG_FUNC( linda_receive) */ LUAG_FUNC( linda_set) { - struct s_Linda* const linda = lua_toLinda( L, 1); - int pushed; - bool_t has_value = lua_gettop( L) > 2; - - // make sure the key is of a valid type (throws an error if not the case) - check_key_types( L, 2, 2); - - { - Keeper* K = which_keeper( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); - - if( linda->simulate_cancel == CANCEL_NONE) - { - if( has_value) - { - // convert nils to some special non-nil sentinel in sent values - keeper_toggle_nil_sentinels( L, 3, eLM_ToKeeper); - } - pushed = keeper_call( linda->U, K->L, KEEPER_API( set), L, linda, 2); - if( pushed >= 0) // no error? - { - ASSERT_L( pushed == 0 || pushed == 1); - - if( has_value) - { - // we put some data in the slot, tell readers that they should wake - SIGNAL_ALL( &linda->write_happened); // To be done from within the 'K' locking area - } - if( pushed == 1) - { - // the key was full, but it is no longer the case, tell writers they should wake - ASSERT_L( lua_type( L, -1) == LUA_TBOOLEAN && lua_toboolean( L, -1) == 1); - SIGNAL_ALL( &linda->read_happened); // To be done from within the 'K' locking area - } - } - } - else // linda is cancelled - { - // do nothing and return lanes.cancel_error - push_unique_key( L, CANCEL_ERROR); - pushed = 1; - } - } - - // must trigger any error after keeper state has been released - return (pushed < 0) ? luaL_error( L, "tried to copy unsupported types") : pushed; + struct s_Linda* const linda = lua_toLinda( L, 1); + int pushed; + bool_t has_value = lua_gettop( L) > 2; + + // make sure the key is of a valid type (throws an error if not the case) + check_key_types( L, 2, 2); + + { + Keeper* K = which_keeper( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); + + if( linda->simulate_cancel == CANCEL_NONE) + { + if( has_value) + { + // convert nils to some special non-nil sentinel in sent values + keeper_toggle_nil_sentinels( L, 3, eLM_ToKeeper); + } + pushed = keeper_call( linda->U, K->L, KEEPER_API( set), L, linda, 2); + if( pushed >= 0) // no error? + { + ASSERT_L( pushed == 0 || pushed == 1); + + if( has_value) + { + // we put some data in the slot, tell readers that they should wake + SIGNAL_ALL( &linda->write_happened); // To be done from within the 'K' locking area + } + if( pushed == 1) + { + // the key was full, but it is no longer the case, tell writers they should wake + ASSERT_L( lua_type( L, -1) == LUA_TBOOLEAN && lua_toboolean( L, -1) == 1); + SIGNAL_ALL( &linda->read_happened); // To be done from within the 'K' locking area + } + } + } + else // linda is cancelled + { + // do nothing and return lanes.cancel_error + push_unique_key( L, CANCEL_ERROR); + pushed = 1; + } + } + + // must trigger any error after keeper state has been released + return (pushed < 0) ? 
luaL_error( L, "tried to copy unsupported types") : pushed; } @@ -478,21 +478,21 @@ LUAG_FUNC( linda_set) */ LUAG_FUNC( linda_count) { - struct s_Linda* linda = lua_toLinda( L, 1); - int pushed; - - // make sure the keys are of a valid type - check_key_types( L, 2, lua_gettop( L)); - - { - Keeper* K = which_keeper( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); - pushed = keeper_call( linda->U, K->L, KEEPER_API( count), L, linda, 2); - if( pushed < 0) - { - return luaL_error( L, "tried to count an invalid key"); - } - } - return pushed; + struct s_Linda* linda = lua_toLinda( L, 1); + int pushed; + + // make sure the keys are of a valid type + check_key_types( L, 2, lua_gettop( L)); + + { + Keeper* K = which_keeper( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); + pushed = keeper_call( linda->U, K->L, KEEPER_API( count), L, linda, 2); + if( pushed < 0) + { + return luaL_error( L, "tried to count an invalid key"); + } + } + return pushed; } @@ -503,39 +503,39 @@ LUAG_FUNC( linda_count) */ LUAG_FUNC( linda_get) { - struct s_Linda* const linda = lua_toLinda( L, 1); - int pushed; - lua_Integer count = luaL_optinteger( L, 3, 1); - luaL_argcheck( L, count >= 1, 3, "count should be >= 1"); - luaL_argcheck( L, lua_gettop( L) <= 3, 4, "too many arguments"); - - // make sure the key is of a valid type (throws an error if not the case) - check_key_types( L, 2, 2); - { - Keeper* K = which_keeper( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); - - if( linda->simulate_cancel == CANCEL_NONE) - { - pushed = keeper_call( linda->U, K->L, KEEPER_API( get), L, linda, 2); - if( pushed > 0) - { - keeper_toggle_nil_sentinels( L, lua_gettop( L) - pushed, eLM_FromKeeper); - } - } - else // linda is cancelled - { - // do nothing and return lanes.cancel_error - push_unique_key( L, CANCEL_ERROR); - pushed = 1; - } - // an error can be raised if we attempt to read an unregistered function - if( pushed < 0) - { - return luaL_error( L, "tried to copy unsupported types"); - } - } - - return pushed; + struct s_Linda* const linda = lua_toLinda( L, 1); + int pushed; + lua_Integer count = luaL_optinteger( L, 3, 1); + luaL_argcheck( L, count >= 1, 3, "count should be >= 1"); + luaL_argcheck( L, lua_gettop( L) <= 3, 4, "too many arguments"); + + // make sure the key is of a valid type (throws an error if not the case) + check_key_types( L, 2, 2); + { + Keeper* K = which_keeper( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); + + if( linda->simulate_cancel == CANCEL_NONE) + { + pushed = keeper_call( linda->U, K->L, KEEPER_API( get), L, linda, 2); + if( pushed > 0) + { + keeper_toggle_nil_sentinels( L, lua_gettop( L) - pushed, eLM_FromKeeper); + } + } + else // linda is cancelled + { + // do nothing and return lanes.cancel_error + push_unique_key( L, CANCEL_ERROR); + pushed = 1; + } + // an error can be raised if we attempt to read an unregistered function + if( pushed < 0) + { + return luaL_error( L, "tried to copy unsupported types"); + } + } + + return pushed; } @@ -547,38 +547,38 @@ LUAG_FUNC( linda_get) */ LUAG_FUNC( linda_limit) { - struct s_Linda* linda = lua_toLinda( L, 1); - int pushed; - - // make sure we got 3 arguments: the linda, a key and a limit - luaL_argcheck( L, lua_gettop( L) == 3, 2, "wrong number of arguments"); - // make sure we got a numeric limit - luaL_checknumber( L, 3); - // make sure the key is of a valid type - check_key_types( L, 2, 2); - - { - Keeper* K = which_keeper( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); - - if( linda->simulate_cancel == CANCEL_NONE) - { - pushed = 
keeper_call( linda->U, K->L, KEEPER_API( limit), L, linda, 2); - ASSERT_L( pushed == 0 || pushed == 1); // no error, optional boolean value saying if we should wake blocked writer threads - if( pushed == 1) - { - ASSERT_L( lua_type( L, -1) == LUA_TBOOLEAN && lua_toboolean( L, -1) == 1); - SIGNAL_ALL( &linda->read_happened); // To be done from within the 'K' locking area - } - } - else // linda is cancelled - { - // do nothing and return lanes.cancel_error - push_unique_key( L, CANCEL_ERROR); - pushed = 1; - } - } - // propagate pushed boolean if any - return pushed; + struct s_Linda* linda = lua_toLinda( L, 1); + int pushed; + + // make sure we got 3 arguments: the linda, a key and a limit + luaL_argcheck( L, lua_gettop( L) == 3, 2, "wrong number of arguments"); + // make sure we got a numeric limit + luaL_checknumber( L, 3); + // make sure the key is of a valid type + check_key_types( L, 2, 2); + + { + Keeper* K = which_keeper( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); + + if( linda->simulate_cancel == CANCEL_NONE) + { + pushed = keeper_call( linda->U, K->L, KEEPER_API( limit), L, linda, 2); + ASSERT_L( pushed == 0 || pushed == 1); // no error, optional boolean value saying if we should wake blocked writer threads + if( pushed == 1) + { + ASSERT_L( lua_type( L, -1) == LUA_TBOOLEAN && lua_toboolean( L, -1) == 1); + SIGNAL_ALL( &linda->read_happened); // To be done from within the 'K' locking area + } + } + else // linda is cancelled + { + // do nothing and return lanes.cancel_error + push_unique_key( L, CANCEL_ERROR); + pushed = 1; + } + } + // propagate pushed boolean if any + return pushed; } @@ -589,35 +589,35 @@ LUAG_FUNC( linda_limit) */ LUAG_FUNC( linda_cancel) { - struct s_Linda* linda = lua_toLinda( L, 1); - char const* who = luaL_optstring( L, 2, "both"); - - // make sure we got 3 arguments: the linda, a key and a limit - luaL_argcheck( L, lua_gettop( L) <= 2, 2, "wrong number of arguments"); - - linda->simulate_cancel = CANCEL_SOFT; - if( strcmp( who, "both") == 0) // tell everyone writers to wake up - { - SIGNAL_ALL( &linda->write_happened); - SIGNAL_ALL( &linda->read_happened); - } - else if( strcmp( who, "none") == 0) // reset flag - { - linda->simulate_cancel = CANCEL_NONE; - } - else if( strcmp( who, "read") == 0) // tell blocked readers to wake up - { - SIGNAL_ALL( &linda->write_happened); - } - else if( strcmp( who, "write") == 0) // tell blocked writers to wake up - { - SIGNAL_ALL( &linda->read_happened); - } - else - { - return luaL_error( L, "unknown wake hint '%s'", who); - } - return 0; + struct s_Linda* linda = lua_toLinda( L, 1); + char const* who = luaL_optstring( L, 2, "both"); + + // make sure we got 3 arguments: the linda, a key and a limit + luaL_argcheck( L, lua_gettop( L) <= 2, 2, "wrong number of arguments"); + + linda->simulate_cancel = CANCEL_SOFT; + if( strcmp( who, "both") == 0) // tell everyone writers to wake up + { + SIGNAL_ALL( &linda->write_happened); + SIGNAL_ALL( &linda->read_happened); + } + else if( strcmp( who, "none") == 0) // reset flag + { + linda->simulate_cancel = CANCEL_NONE; + } + else if( strcmp( who, "read") == 0) // tell blocked readers to wake up + { + SIGNAL_ALL( &linda->write_happened); + } + else if( strcmp( who, "write") == 0) // tell blocked writers to wake up + { + SIGNAL_ALL( &linda->read_happened); + } + else + { + return luaL_error( L, "unknown wake hint '%s'", who); + } + return 0; } @@ -633,9 +633,9 @@ LUAG_FUNC( linda_cancel) */ LUAG_FUNC( linda_deep) { - struct s_Linda* linda= lua_toLinda( L, 1); - 
lua_pushlightuserdata( L, linda); // just the address - return 1; + struct s_Linda* linda= lua_toLinda( L, 1); + lua_pushlightuserdata( L, linda); // just the address + return 1; } @@ -649,28 +649,28 @@ LUAG_FUNC( linda_deep) static int linda_tostring( lua_State* L, int idx_, bool_t opt_) { - struct s_Linda* linda = (struct s_Linda*) luaG_todeep( L, linda_id, idx_); - if( !opt_) - { - luaL_argcheck( L, linda, idx_, "expecting a linda object"); - } - if( linda != NULL) - { - char text[128]; - int len; - if( linda->name[0]) - len = sprintf( text, "Linda: %.*s", (int)sizeof(text) - 8, linda->name); - else - len = sprintf( text, "Linda: %p", linda); - lua_pushlstring( L, text, len); - return 1; - } - return 0; + struct s_Linda* linda = (struct s_Linda*) luaG_todeep( L, linda_id, idx_); + if( !opt_) + { + luaL_argcheck( L, linda, idx_, "expecting a linda object"); + } + if( linda != NULL) + { + char text[128]; + int len; + if( linda->name[0]) + len = sprintf( text, "Linda: %.*s", (int)sizeof(text) - 8, linda->name); + else + len = sprintf( text, "Linda: %p", linda); + lua_pushlstring( L, text, len); + return 1; + } + return 0; } LUAG_FUNC( linda_tostring) { - return linda_tostring( L, 1, FALSE); + return linda_tostring( L, 1, FALSE); } @@ -683,24 +683,24 @@ LUAG_FUNC( linda_tostring) */ LUAG_FUNC( linda_concat) { // linda1? linda2? - bool_t atLeastOneLinda = FALSE; - // Lua semantics enforce that one of the 2 arguments is a Linda, but not necessarily both. - if( linda_tostring( L, 1, TRUE)) - { - atLeastOneLinda = TRUE; - lua_replace( L, 1); - } - if( linda_tostring( L, 2, TRUE)) - { - atLeastOneLinda = TRUE; - lua_replace( L, 2); - } - if( !atLeastOneLinda) // should not be possible - { - return luaL_error( L, "internal error: linda_concat called on non-Linda"); - } - lua_concat( L, 2); - return 1; + bool_t atLeastOneLinda = FALSE; + // Lua semantics enforce that one of the 2 arguments is a Linda, but not necessarily both. 
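
Along the same lines, a short usage sketch for the set/count/get/limit/cancel operations reindented above, assuming the behaviour documented in their comments (set overwrites a slot, cancel("read") wakes blocked readers with lanes.cancel_error, cancel("none") clears the flag). The key names are made up for illustration.

    local lanes = require "lanes".configure()
    local linda = lanes.linda("cfg")

    linda:limit("slot", 2)           -- at most 2 values may be queued under "slot"
    linda:set("slot", "hello")       -- replace whatever the slot held
    print(linda:get("slot"))         --> hello
    print(linda:count("slot"))       --> 1

    -- wake any lane currently blocked in linda:receive() on this linda;
    -- their pending call returns lanes.cancel_error instead of data
    linda:cancel("read")
    linda:cancel("none")             -- reset the simulated-cancel flag
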
+ if( linda_tostring( L, 1, TRUE)) + { + atLeastOneLinda = TRUE; + lua_replace( L, 1); + } + if( linda_tostring( L, 2, TRUE)) + { + atLeastOneLinda = TRUE; + lua_replace( L, 2); + } + if( !atLeastOneLinda) // should not be possible + { + return luaL_error( L, "internal error: linda_concat called on non-Linda"); + } + lua_concat( L, 2); + return 1; } /* @@ -709,9 +709,9 @@ LUAG_FUNC( linda_concat) */ LUAG_FUNC( linda_dump) { - struct s_Linda* linda = lua_toLinda( L, 1); - ASSERT_L( linda->U == universe_get( L)); - return keeper_push_linda_storage( linda->U, L, linda, LINDA_KEEPER_HASHSEED( linda)); + struct s_Linda* linda = lua_toLinda( L, 1); + ASSERT_L( linda->U == universe_get( L)); + return keeper_push_linda_storage( linda->U, L, linda, LINDA_KEEPER_HASHSEED( linda)); } /* @@ -720,16 +720,16 @@ LUAG_FUNC( linda_dump) */ LUAG_FUNC( linda_towatch) { - struct s_Linda* linda = lua_toLinda( L, 1); - int pushed; - ASSERT_L( linda->U == universe_get( L)); - pushed = keeper_push_linda_storage( linda->U, L, linda, LINDA_KEEPER_HASHSEED( linda)); - if( pushed == 0) - { - // if the linda is empty, don't return nil - pushed = linda_tostring( L, 1, FALSE); - } - return pushed; + struct s_Linda* linda = lua_toLinda( L, 1); + int pushed; + ASSERT_L( linda->U == universe_get( L)); + pushed = keeper_push_linda_storage( linda->U, L, linda, LINDA_KEEPER_HASHSEED( linda)); + if( pushed == 0) + { + // if the linda is empty, don't return nil + pushed = linda_tostring( L, 1, FALSE); + } + return pushed; } /* @@ -758,160 +758,160 @@ LUAG_FUNC( linda_towatch) */ static void* linda_id( lua_State* L, DeepOp op_) { - switch( op_) - { - case eDO_new: - { - struct s_Linda* s; - size_t name_len = 0; - char const* linda_name = NULL; - unsigned long linda_group = 0; - // should have a string and/or a number of the stack as parameters (name and group) - switch( lua_gettop( L)) - { - default: // 0 - break; - - case 1: // 1 parameter, either a name or a group - if( lua_type( L, -1) == LUA_TSTRING) - { - linda_name = lua_tolstring( L, -1, &name_len); - } - else - { - linda_group = (unsigned long) lua_tointeger( L, -1); - } - break; - - case 2: // 2 parameters, a name and group, in that order - linda_name = lua_tolstring( L, -2, &name_len); - linda_group = (unsigned long) lua_tointeger( L, -1); - break; - } - - /* The deep data is allocated separately of Lua stack; we might no - * longer be around when last reference to it is being released. - * One can use any memory allocation scheme. - * just don't use L's allocF because we don't know which state will get the honor of GCing the linda - */ - s = (struct s_Linda*) malloc( sizeof(struct s_Linda) + name_len); // terminating 0 is already included - if( s) - { - s->prelude.magic.value = DEEP_VERSION.value; - SIGNAL_INIT( &s->read_happened); - SIGNAL_INIT( &s->write_happened); - s->U = universe_get( L); - s->simulate_cancel = CANCEL_NONE; - s->group = linda_group << KEEPER_MAGIC_SHIFT; - s->name[0] = 0; - memcpy( s->name, linda_name, name_len ? name_len + 1 : 0); - } - return s; - } - - case eDO_delete: - { - Keeper* K; - struct s_Linda* linda = lua_touserdata( L, 1); - ASSERT_L( linda); - - // Clean associated structures in the keeper state. - K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); - if( K && K->L) // can be NULL if this happens during main state shutdown (lanes is GC'ed -> no keepers -> no need to cleanup) - { - // hopefully this won't ever raise an error as we would jump to the closest pcall site while forgetting to release the keeper mutex... 
- keeper_call( linda->U, K->L, KEEPER_API( clear), L, linda, 0); - } - keeper_release( K); - - // There aren't any lanes waiting on these lindas, since all proxies have been gc'ed. Right? - SIGNAL_FREE( &linda->read_happened); - SIGNAL_FREE( &linda->write_happened); - free( linda); - return NULL; - } - - case eDO_metatable: - { - - STACK_CHECK( L, 0); - lua_newtable( L); - // metatable is its own index - lua_pushvalue( L, -1); - lua_setfield( L, -2, "__index"); - - // protect metatable from external access - lua_pushliteral( L, "Linda"); - lua_setfield( L, -2, "__metatable"); - - lua_pushcfunction( L, LG_linda_tostring); - lua_setfield( L, -2, "__tostring"); - - // Decoda __towatch support - lua_pushcfunction( L, LG_linda_towatch); - lua_setfield( L, -2, "__towatch"); - - lua_pushcfunction( L, LG_linda_concat); - lua_setfield( L, -2, "__concat"); - - // protected calls, to ensure associated keeper is always released even in case of error - // all function are the protected call wrapper, where the actual operation is provided as upvalue - // note that this kind of thing can break function lookup as we use the function pointer here and there - - lua_pushcfunction( L, LG_linda_send); - lua_pushcclosure( L, LG_linda_protected_call, 1); - lua_setfield( L, -2, "send"); - - lua_pushcfunction( L, LG_linda_receive); - lua_pushcclosure( L, LG_linda_protected_call, 1); - lua_setfield( L, -2, "receive"); - - lua_pushcfunction( L, LG_linda_limit); - lua_pushcclosure( L, LG_linda_protected_call, 1); - lua_setfield( L, -2, "limit"); - - lua_pushcfunction( L, LG_linda_set); - lua_pushcclosure( L, LG_linda_protected_call, 1); - lua_setfield( L, -2, "set"); - - lua_pushcfunction( L, LG_linda_count); - lua_pushcclosure( L, LG_linda_protected_call, 1); - lua_setfield( L, -2, "count"); - - lua_pushcfunction( L, LG_linda_get); - lua_pushcclosure( L, LG_linda_protected_call, 1); - lua_setfield( L, -2, "get"); - - lua_pushcfunction( L, LG_linda_cancel); - lua_setfield( L, -2, "cancel"); - - lua_pushcfunction( L, LG_linda_deep); - lua_setfield( L, -2, "deep"); - - lua_pushcfunction( L, LG_linda_dump); - lua_pushcclosure( L, LG_linda_protected_call, 1); - lua_setfield( L, -2, "dump"); - - // some constants - lua_pushliteral( L, BATCH_SENTINEL); - lua_setfield( L, -2, "batched"); - - push_unique_key( L, NIL_SENTINEL); - lua_setfield( L, -2, "null"); - - STACK_END( L, 1); - return NULL; - } - - case eDO_module: - // linda is a special case because we know lanes must be loaded from the main lua state - // to be able to ever get here, so we know it will remain loaded as long a the main state is around - // in other words, forever. - default: - { - return NULL; - } - } + switch( op_) + { + case eDO_new: + { + struct s_Linda* s; + size_t name_len = 0; + char const* linda_name = NULL; + unsigned long linda_group = 0; + // should have a string and/or a number of the stack as parameters (name and group) + switch( lua_gettop( L)) + { + default: // 0 + break; + + case 1: // 1 parameter, either a name or a group + if( lua_type( L, -1) == LUA_TSTRING) + { + linda_name = lua_tolstring( L, -1, &name_len); + } + else + { + linda_group = (unsigned long) lua_tointeger( L, -1); + } + break; + + case 2: // 2 parameters, a name and group, in that order + linda_name = lua_tolstring( L, -2, &name_len); + linda_group = (unsigned long) lua_tointeger( L, -1); + break; + } + + /* The deep data is allocated separately of Lua stack; we might no + * longer be around when last reference to it is being released. 
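
The eDO_new branch above accepts an optional name and/or keeper-group number; a sketch of the corresponding constructor calls follows. The nb_keepers setting and the exact __tostring output are assumptions inferred from this file, not something this patch guarantees.

    local lanes = require "lanes".configure{ nb_keepers = 4 }

    local anon    = lanes.linda()            -- no name, default keeper group
    local named   = lanes.linda("stats")     -- named linda
    local grouped = lanes.linda("queue", 2)  -- name + keeper group
    local bygroup = lanes.linda(3)           -- group only

    print(named)                             --> Linda: stats (via the __tostring above)
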
+ * One can use any memory allocation scheme. + * just don't use L's allocF because we don't know which state will get the honor of GCing the linda + */ + s = (struct s_Linda*) malloc( sizeof(struct s_Linda) + name_len); // terminating 0 is already included + if( s) + { + s->prelude.magic.value = DEEP_VERSION.value; + SIGNAL_INIT( &s->read_happened); + SIGNAL_INIT( &s->write_happened); + s->U = universe_get( L); + s->simulate_cancel = CANCEL_NONE; + s->group = linda_group << KEEPER_MAGIC_SHIFT; + s->name[0] = 0; + memcpy( s->name, linda_name, name_len ? name_len + 1 : 0); + } + return s; + } + + case eDO_delete: + { + Keeper* K; + struct s_Linda* linda = lua_touserdata( L, 1); + ASSERT_L( linda); + + // Clean associated structures in the keeper state. + K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); + if( K && K->L) // can be NULL if this happens during main state shutdown (lanes is GC'ed -> no keepers -> no need to cleanup) + { + // hopefully this won't ever raise an error as we would jump to the closest pcall site while forgetting to release the keeper mutex... + keeper_call( linda->U, K->L, KEEPER_API( clear), L, linda, 0); + } + keeper_release( K); + + // There aren't any lanes waiting on these lindas, since all proxies have been gc'ed. Right? + SIGNAL_FREE( &linda->read_happened); + SIGNAL_FREE( &linda->write_happened); + free( linda); + return NULL; + } + + case eDO_metatable: + { + + STACK_CHECK( L, 0); + lua_newtable( L); + // metatable is its own index + lua_pushvalue( L, -1); + lua_setfield( L, -2, "__index"); + + // protect metatable from external access + lua_pushliteral( L, "Linda"); + lua_setfield( L, -2, "__metatable"); + + lua_pushcfunction( L, LG_linda_tostring); + lua_setfield( L, -2, "__tostring"); + + // Decoda __towatch support + lua_pushcfunction( L, LG_linda_towatch); + lua_setfield( L, -2, "__towatch"); + + lua_pushcfunction( L, LG_linda_concat); + lua_setfield( L, -2, "__concat"); + + // protected calls, to ensure associated keeper is always released even in case of error + // all function are the protected call wrapper, where the actual operation is provided as upvalue + // note that this kind of thing can break function lookup as we use the function pointer here and there + + lua_pushcfunction( L, LG_linda_send); + lua_pushcclosure( L, LG_linda_protected_call, 1); + lua_setfield( L, -2, "send"); + + lua_pushcfunction( L, LG_linda_receive); + lua_pushcclosure( L, LG_linda_protected_call, 1); + lua_setfield( L, -2, "receive"); + + lua_pushcfunction( L, LG_linda_limit); + lua_pushcclosure( L, LG_linda_protected_call, 1); + lua_setfield( L, -2, "limit"); + + lua_pushcfunction( L, LG_linda_set); + lua_pushcclosure( L, LG_linda_protected_call, 1); + lua_setfield( L, -2, "set"); + + lua_pushcfunction( L, LG_linda_count); + lua_pushcclosure( L, LG_linda_protected_call, 1); + lua_setfield( L, -2, "count"); + + lua_pushcfunction( L, LG_linda_get); + lua_pushcclosure( L, LG_linda_protected_call, 1); + lua_setfield( L, -2, "get"); + + lua_pushcfunction( L, LG_linda_cancel); + lua_setfield( L, -2, "cancel"); + + lua_pushcfunction( L, LG_linda_deep); + lua_setfield( L, -2, "deep"); + + lua_pushcfunction( L, LG_linda_dump); + lua_pushcclosure( L, LG_linda_protected_call, 1); + lua_setfield( L, -2, "dump"); + + // some constants + lua_pushliteral( L, BATCH_SENTINEL); + lua_setfield( L, -2, "batched"); + + push_unique_key( L, NIL_SENTINEL); + lua_setfield( L, -2, "null"); + + STACK_END( L, 1); + return NULL; + } + + case eDO_module: + // linda is a special 
case because we know lanes must be loaded from the main lua state + // to be able to ever get here, so we know it will remain loaded as long a the main state is around + // in other words, forever. + default: + { + return NULL; + } + } } /* @@ -921,17 +921,17 @@ static void* linda_id( lua_State* L, DeepOp op_) */ LUAG_FUNC( linda) { - int const top = lua_gettop( L); - luaL_argcheck( L, top <= 2, top, "too many arguments"); - if( top == 1) - { - int const t = lua_type( L, 1); - luaL_argcheck( L, t == LUA_TSTRING || t == LUA_TNUMBER, 1, "wrong parameter (should be a string or a number)"); - } - else if( top == 2) - { - luaL_checktype( L, 1, LUA_TSTRING); - luaL_checktype( L, 2, LUA_TNUMBER); - } - return luaG_newdeepuserdata( L, linda_id, 0); + int const top = lua_gettop( L); + luaL_argcheck( L, top <= 2, top, "too many arguments"); + if( top == 1) + { + int const t = lua_type( L, 1); + luaL_argcheck( L, t == LUA_TSTRING || t == LUA_TNUMBER, 1, "wrong parameter (should be a string or a number)"); + } + else if( top == 2) + { + luaL_checktype( L, 1, LUA_TSTRING); + luaL_checktype( L, 2, LUA_TNUMBER); + } + return luaG_newdeepuserdata( L, linda_id, 0); } diff --git a/src/macros_and_utils.h b/src/macros_and_utils.h index dba0010..b67a7c4 100644 --- a/src/macros_and_utils.h +++ b/src/macros_and_utils.h @@ -40,40 +40,40 @@ extern char const* debugspew_indent; #define _ASSERT_L( L, cond_) if( (cond_) == 0) { (void) luaL_error( L, "ASSERT failed: %s:%d '%s'", __FILE__, __LINE__, #cond_);} #define STACK_CHECK( L, offset_) \ - { \ - int const L##_delta = offset_; \ - if( (L##_delta < 0) || (lua_gettop( L) < L##_delta)) \ - { \ - assert( FALSE); \ - (void) luaL_error( L, "STACK INIT ASSERT failed (%d not %d): %s:%d", lua_gettop( L), L##_delta, __FILE__, __LINE__); \ - } \ - int const L##_oldtop = lua_gettop( L) - L##_delta + { \ + int const L##_delta = offset_; \ + if( (L##_delta < 0) || (lua_gettop( L) < L##_delta)) \ + { \ + assert( FALSE); \ + (void) luaL_error( L, "STACK INIT ASSERT failed (%d not %d): %s:%d", lua_gettop( L), L##_delta, __FILE__, __LINE__); \ + } \ + int const L##_oldtop = lua_gettop( L) - L##_delta #define STACK_CHECK_ABS( L, offset_) \ - { \ - int const L##_pos = offset_; \ - if( lua_gettop( L) < L##_pos) \ - { \ - assert( FALSE); \ - (void) luaL_error( L, "STACK INIT ASSERT failed (%d not %d): %s:%d", lua_gettop( L), L##_pos, __FILE__, __LINE__); \ - } \ - int const L##_oldtop = 0 + { \ + int const L##_pos = offset_; \ + if( lua_gettop( L) < L##_pos) \ + { \ + assert( FALSE); \ + (void) luaL_error( L, "STACK INIT ASSERT failed (%d not %d): %s:%d", lua_gettop( L), L##_pos, __FILE__, __LINE__); \ + } \ + int const L##_oldtop = 0 #define STACK_MID( L, change) \ - do if( change != LUA_MULTRET) \ - { \ - int stack_check_a = lua_gettop( L) - L##_oldtop; \ - int stack_check_b = (change); \ - if( stack_check_a != stack_check_b) \ - { \ - assert( FALSE); \ - luaL_error( L, "STACK ASSERT failed (%d not %d): %s:%d", stack_check_a, stack_check_b, __FILE__, __LINE__); \ - } \ - } while( 0) + do if( change != LUA_MULTRET) \ + { \ + int stack_check_a = lua_gettop( L) - L##_oldtop; \ + int stack_check_b = (change); \ + if( stack_check_a != stack_check_b) \ + { \ + assert( FALSE); \ + luaL_error( L, "STACK ASSERT failed (%d not %d): %s:%d", stack_check_a, stack_check_b, __FILE__, __LINE__); \ + } \ + } while( 0) #define STACK_END( L, change) \ - STACK_MID( L, change); \ - } + STACK_MID( L, change); \ + } #define STACK_DUMP( L) luaG_dump( L) @@ -86,15 +86,15 @@ extern char const* 
debugspew_indent; // non-string keyed registry access #define REGISTRY_SET( L, key_, value_) \ { \ - push_unique_key( L, key_); \ - value_; \ - lua_rawset( L, LUA_REGISTRYINDEX); \ + push_unique_key( L, key_); \ + value_; \ + lua_rawset( L, LUA_REGISTRYINDEX); \ } #define REGISTRY_GET( L, key_) \ { \ - push_unique_key( L, key_); \ - lua_rawget( L, LUA_REGISTRYINDEX); \ + push_unique_key( L, key_); \ + lua_rawget( L, LUA_REGISTRYINDEX); \ } #define LUAG_FUNC( func_name) int LG_##func_name( lua_State* L) diff --git a/src/state.c b/src/state.c index 81371b7..9075c02 100644 --- a/src/state.c +++ b/src/state.c @@ -59,32 +59,32 @@ THE SOFTWARE. // static int luaG_new_require( lua_State* L) { - int rc; - int const args = lua_gettop( L); // args - Universe* U = universe_get( L); - //char const* modname = luaL_checkstring( L, 1); - - STACK_GROW( L, 1); - - lua_pushvalue( L, lua_upvalueindex( 1)); // args require - lua_insert( L, 1); // require args - - // Using 'lua_pcall()' to catch errors; otherwise a failing 'require' would - // leave us locked, blocking any future 'require' calls from other lanes. - - MUTEX_LOCK( &U->require_cs); - // starting with Lua 5.4, require may return a second optional value, so we need LUA_MULTRET - rc = lua_pcall( L, args, LUA_MULTRET, 0 /*errfunc*/ ); // err|result(s) - MUTEX_UNLOCK( &U->require_cs); - - // the required module (or an error message) is left on the stack as returned value by original require function - - if( rc != LUA_OK) // LUA_ERRRUN / LUA_ERRMEM ? - { - return lua_error( L); - } - // should be 1 for Lua <= 5.3, 1 or 2 starting with Lua 5.4 - return lua_gettop(L); // result(s) + int rc; + int const args = lua_gettop( L); // args + Universe* U = universe_get( L); + //char const* modname = luaL_checkstring( L, 1); + + STACK_GROW( L, 1); + + lua_pushvalue( L, lua_upvalueindex( 1)); // args require + lua_insert( L, 1); // require args + + // Using 'lua_pcall()' to catch errors; otherwise a failing 'require' would + // leave us locked, blocking any future 'require' calls from other lanes. + + MUTEX_LOCK( &U->require_cs); + // starting with Lua 5.4, require may return a second optional value, so we need LUA_MULTRET + rc = lua_pcall( L, args, LUA_MULTRET, 0 /*errfunc*/ ); // err|result(s) + MUTEX_UNLOCK( &U->require_cs); + + // the required module (or an error message) is left on the stack as returned value by original require function + + if( rc != LUA_OK) // LUA_ERRRUN / LUA_ERRMEM ? 
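
luaG_new_require() above serializes concurrent require() calls behind U->require_cs; here is a minimal sketch of the situation it protects, assuming lanes that open the package library. The lane bodies and module names (standard libraries only) are illustrative.

    local lanes = require "lanes".configure()

    -- both lanes call require() at the same time; the wrapper installed by
    -- serialize_require() makes those calls run one after the other
    local body = lanes.gen("*", function(mod)
        local m = require(mod) -- goes through luaG_new_require()
        return type(m)
    end)

    local h1, h2 = body("string"), body("os")
    print(h1[1], h2[1])        --> table  table
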
+ { + return lua_error( L); + } + // should be 1 for Lua <= 5.3, 1 or 2 starting with Lua 5.4 + return lua_gettop(L); // result(s) } /* @@ -92,26 +92,26 @@ static int luaG_new_require( lua_State* L) */ void serialize_require( DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State* L) { - STACK_GROW( L, 1); - STACK_CHECK( L, 0); - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "serializing require()\n" INDENT_END)); - - // Check 'require' is there and not already wrapped; if not, do nothing - // - lua_getglobal( L, "require"); - if( lua_isfunction( L, -1) && lua_tocfunction( L, -1) != luaG_new_require) - { - // [-1]: original 'require' function - lua_pushcclosure( L, luaG_new_require, 1 /*upvalues*/); - lua_setglobal( L, "require"); - } - else - { - // [-1]: nil - lua_pop( L, 1); - } - - STACK_END( L, 0); + STACK_GROW( L, 1); + STACK_CHECK( L, 0); + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "serializing require()\n" INDENT_END)); + + // Check 'require' is there and not already wrapped; if not, do nothing + // + lua_getglobal( L, "require"); + if( lua_isfunction( L, -1) && lua_tocfunction( L, -1) != luaG_new_require) + { + // [-1]: original 'require' function + lua_pushcclosure( L, luaG_new_require, 1 /*upvalues*/); + lua_setglobal( L, "require"); + } + else + { + // [-1]: nil + lua_pop( L, 1); + } + + STACK_END( L, 0); } // ################################################################################################ @@ -120,15 +120,15 @@ void serialize_require( DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State* L) static int require_lanes_core( lua_State* L) { - // leaves a copy of 'lanes.core' module table on the stack - luaL_requiref( L, "lanes.core", luaopen_lanes_core, 0); - return 1; + // leaves a copy of 'lanes.core' module table on the stack + luaL_requiref( L, "lanes.core", luaopen_lanes_core, 0); + return 1; } static const luaL_Reg libs[] = { - { LUA_LOADLIBNAME, luaopen_package}, + { LUA_LOADLIBNAME, luaopen_package}, { LUA_TABLIBNAME, luaopen_table}, { LUA_STRLIBNAME, luaopen_string}, { LUA_MATHLIBNAME, luaopen_math}, @@ -157,152 +157,152 @@ static const luaL_Reg libs[] = { LUA_DBLIBNAME, luaopen_debug}, { "lanes.core", require_lanes_core}, // So that we can open it like any base library (possible since we have access to the init function) - // + // { "base", NULL}, // ignore "base" (already acquired it) { NULL, NULL } }; static void open1lib( DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State* L, char const* name_, size_t len_) { - int i; - for( i = 0; libs[i].name; ++ i) - { - if( strncmp( name_, libs[i].name, len_) == 0) - { - lua_CFunction libfunc = libs[i].func; - name_ = libs[i].name; // note that the provided name_ doesn't necessarily ends with '\0', hence len_ - if( libfunc != NULL) - { - bool_t const isLanesCore = (libfunc == require_lanes_core) ? 
TRUE : FALSE; // don't want to create a global for "lanes.core" - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "opening %.*s library\n" INDENT_END, (int) len_, name_)); - STACK_CHECK( L, 0); - // open the library as if through require(), and create a global as well if necessary (the library table is left on the stack) - luaL_requiref( L, name_, libfunc, !isLanesCore); - // lanes.core doesn't declare a global, so scan it here and now - if( isLanesCore == TRUE) - { - populate_func_lookup_table( L, -1, name_); - } - lua_pop( L, 1); - STACK_END( L, 0); - } - break; - } - } + int i; + for( i = 0; libs[i].name; ++ i) + { + if( strncmp( name_, libs[i].name, len_) == 0) + { + lua_CFunction libfunc = libs[i].func; + name_ = libs[i].name; // note that the provided name_ doesn't necessarily ends with '\0', hence len_ + if( libfunc != NULL) + { + bool_t const isLanesCore = (libfunc == require_lanes_core) ? TRUE : FALSE; // don't want to create a global for "lanes.core" + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "opening %.*s library\n" INDENT_END, (int) len_, name_)); + STACK_CHECK( L, 0); + // open the library as if through require(), and create a global as well if necessary (the library table is left on the stack) + luaL_requiref( L, name_, libfunc, !isLanesCore); + // lanes.core doesn't declare a global, so scan it here and now + if( isLanesCore == TRUE) + { + populate_func_lookup_table( L, -1, name_); + } + lua_pop( L, 1); + STACK_END( L, 0); + } + break; + } + } } // just like lua_xmove, args are (from, to) static void copy_one_time_settings( Universe* U, lua_State* L, lua_State* L2) { - STACK_GROW( L, 2); - STACK_CHECK( L, 0); - STACK_CHECK( L2, 0); - - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "copy_one_time_settings()\n" INDENT_END)); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - - REGISTRY_GET( L, CONFIG_REGKEY); // config - // copy settings from from source to destination registry - if( luaG_inter_move( U, L, L2, 1, eLM_LaneBody) < 0) // // config - { - (void) luaL_error( L, "failed to copy settings when loading lanes.core"); - } - // set L2:_R[CONFIG_REGKEY] = settings - REGISTRY_SET( L2, CONFIG_REGKEY, lua_insert( L2, -2)); // - STACK_END( L2, 0); - STACK_END( L, 0); - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + STACK_GROW( L, 2); + STACK_CHECK( L, 0); + STACK_CHECK( L2, 0); + + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "copy_one_time_settings()\n" INDENT_END)); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + + REGISTRY_GET( L, CONFIG_REGKEY); // config + // copy settings from from source to destination registry + if( luaG_inter_move( U, L, L2, 1, eLM_LaneBody) < 0) // // config + { + (void) luaL_error( L, "failed to copy settings when loading lanes.core"); + } + // set L2:_R[CONFIG_REGKEY] = settings + REGISTRY_SET( L2, CONFIG_REGKEY, lua_insert( L2, -2)); // + STACK_END( L2, 0); + STACK_END( L, 0); + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); } void initialize_on_state_create( Universe* U, lua_State* L) { - STACK_CHECK( L, 0); - lua_getfield( L, -1, "on_state_create"); // settings on_state_create|nil - if( !lua_isnil( L, -1)) - { - // store C function pointer in an internal variable - U->on_state_create_func = lua_tocfunction( L, -1); // settings on_state_create - if( U->on_state_create_func != NULL) - { - // make sure the function doesn't have upvalues - char const* upname = lua_getupvalue( L, -1, 1); // settings on_state_create upval? 
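
initialize_on_state_create() above pulls the optional on_state_create callback out of the settings table: a C function must be upvalue-free, while a Lua function stays in the config table and is skipped in keeper states. A hedged sketch of passing that setting; the global set inside the callback is purely illustrative.

    local lanes = require "lanes".configure{
        with_timers = false,
        on_state_create = function()
            -- runs in every newly created lane state; keep it self-contained,
            -- since only the libraries requested by the lane are open here
            LANE_BOOTSTRAPPED = true -- hypothetical marker global, for illustration
        end,
    }
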
- if( upname != NULL) // should be "" for C functions with upvalues if any - { - (void) luaL_error( L, "on_state_create shouldn't have upvalues"); - } - // remove this C function from the config table so that it doesn't cause problems - // when we transfer the config table in newly created Lua states - lua_pushnil( L); // settings on_state_create nil - lua_setfield( L, -3, "on_state_create"); // settings on_state_create - } - else - { - // optim: store marker saying we have such a function in the config table - U->on_state_create_func = (lua_CFunction) initialize_on_state_create; - } - } - lua_pop( L, 1); // settings - STACK_END( L, 0); + STACK_CHECK( L, 0); + lua_getfield( L, -1, "on_state_create"); // settings on_state_create|nil + if( !lua_isnil( L, -1)) + { + // store C function pointer in an internal variable + U->on_state_create_func = lua_tocfunction( L, -1); // settings on_state_create + if( U->on_state_create_func != NULL) + { + // make sure the function doesn't have upvalues + char const* upname = lua_getupvalue( L, -1, 1); // settings on_state_create upval? + if( upname != NULL) // should be "" for C functions with upvalues if any + { + (void) luaL_error( L, "on_state_create shouldn't have upvalues"); + } + // remove this C function from the config table so that it doesn't cause problems + // when we transfer the config table in newly created Lua states + lua_pushnil( L); // settings on_state_create nil + lua_setfield( L, -3, "on_state_create"); // settings on_state_create + } + else + { + // optim: store marker saying we have such a function in the config table + U->on_state_create_func = (lua_CFunction) initialize_on_state_create; + } + } + lua_pop( L, 1); // settings + STACK_END( L, 0); } lua_State* create_state( Universe* U, lua_State* from_) { - lua_State* L; - if( U->provide_allocator != NULL) - { - lua_pushcclosure( from_, U->provide_allocator, 0); - lua_call( from_, 0, 1); - { - AllocatorDefinition* def = lua_touserdata( from_, -1); - L = lua_newstate( def->allocF, def->allocUD); - } - lua_pop( from_, 1); - } - else - { - L = luaL_newstate(); - } - - if( L == NULL) - { - (void) luaL_error( from_, "luaG_newstate() failed while creating state; out of memory"); - } - return L; + lua_State* L; + if( U->provide_allocator != NULL) + { + lua_pushcclosure( from_, U->provide_allocator, 0); + lua_call( from_, 0, 1); + { + AllocatorDefinition* def = lua_touserdata( from_, -1); + L = lua_newstate( def->allocF, def->allocUD); + } + lua_pop( from_, 1); + } + else + { + L = luaL_newstate(); + } + + if( L == NULL) + { + (void) luaL_error( from_, "luaG_newstate() failed while creating state; out of memory"); + } + return L; } void call_on_state_create( Universe* U, lua_State* L, lua_State* from_, LookupMode mode_) { - if( U->on_state_create_func != NULL) - { - STACK_CHECK( L, 0); - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "calling on_state_create()\n" INDENT_END)); - if( U->on_state_create_func != (lua_CFunction) initialize_on_state_create) - { - // C function: recreate a closure in the new state, bypassing the lookup scheme - lua_pushcfunction( L, U->on_state_create_func); // on_state_create() - } - else // Lua function located in the config table, copied when we opened "lanes.core" - { - if( mode_ != eLM_LaneBody) - { - // if attempting to call in a keeper state, do nothing because the function doesn't exist there - // this doesn't count as an error though - return; - } - REGISTRY_GET( L, CONFIG_REGKEY); // {} - STACK_MID( L, 1); - lua_getfield( L, -1, "on_state_create"); // {} 
on_state_create() - lua_remove( L, -2); // on_state_create() - } - STACK_MID( L, 1); - // capture error and raise it in caller state - if( lua_pcall( L, 0, 0, 0) != LUA_OK) - { - luaL_error( from_, "on_state_create failed: \"%s\"", lua_isstring( L, -1) ? lua_tostring( L, -1) : lua_typename( L, lua_type( L, -1))); - } - STACK_END( L, 0); - } + if( U->on_state_create_func != NULL) + { + STACK_CHECK( L, 0); + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "calling on_state_create()\n" INDENT_END)); + if( U->on_state_create_func != (lua_CFunction) initialize_on_state_create) + { + // C function: recreate a closure in the new state, bypassing the lookup scheme + lua_pushcfunction( L, U->on_state_create_func); // on_state_create() + } + else // Lua function located in the config table, copied when we opened "lanes.core" + { + if( mode_ != eLM_LaneBody) + { + // if attempting to call in a keeper state, do nothing because the function doesn't exist there + // this doesn't count as an error though + return; + } + REGISTRY_GET( L, CONFIG_REGKEY); // {} + STACK_MID( L, 1); + lua_getfield( L, -1, "on_state_create"); // {} on_state_create() + lua_remove( L, -2); // on_state_create() + } + STACK_MID( L, 1); + // capture error and raise it in caller state + if( lua_pcall( L, 0, 0, 0) != LUA_OK) + { + luaL_error( from_, "on_state_create failed: \"%s\"", lua_isstring( L, -1) ? lua_tostring( L, -1) : lua_typename( L, lua_type( L, -1))); + } + STACK_END( L, 0); + } } /* @@ -320,116 +320,116 @@ void call_on_state_create( Universe* U, lua_State* L, lua_State* from_, LookupMo */ lua_State* luaG_newstate( Universe* U, lua_State* from_, char const* libs_) { - lua_State* L = create_state( U, from_); - - STACK_GROW( L, 2); - STACK_CHECK_ABS( L, 0); - - // copy the universe as a light userdata (only the master state holds the full userdata) - // that way, if Lanes is required in this new state, we'll know we are part of this universe - universe_store( L, U); - STACK_MID( L, 0); - - // we'll need this every time we transfer some C function from/to this state - REGISTRY_SET( L, LOOKUP_REGKEY, lua_newtable( L)); - STACK_MID( L, 0); - - // neither libs (not even 'base') nor special init func: we are done - if( libs_ == NULL && U->on_state_create_func == NULL) - { - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "luaG_newstate(NULL)\n" INDENT_END)); - return L; - } - - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "luaG_newstate()\n" INDENT_END)); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - - // copy settings (for example because it may contain a Lua on_state_create function) - copy_one_time_settings( U, from_, L); - - // 'lua.c' stops GC during initialization so perhaps its a good idea. 
:) - lua_gc( L, LUA_GCSTOP, 0); - - - // Anything causes 'base' to be taken in - // - if( libs_ != NULL) - { - // special "*" case (mainly to help with LuaJIT compatibility) - // as we are called from luaopen_lanes_core() already, and that would deadlock - if( libs_[0] == '*' && libs_[1] == 0) - { - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "opening ALL standard libraries\n" INDENT_END)); - luaL_openlibs( L); - // don't forget lanes.core for regular lane states - open1lib( DEBUGSPEW_PARAM_COMMA( U) L, "lanes.core", 10); - libs_ = NULL; // done with libs - } - else - { - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "opening base library\n" INDENT_END)); + lua_State* L = create_state( U, from_); + + STACK_GROW( L, 2); + STACK_CHECK_ABS( L, 0); + + // copy the universe as a light userdata (only the master state holds the full userdata) + // that way, if Lanes is required in this new state, we'll know we are part of this universe + universe_store( L, U); + STACK_MID( L, 0); + + // we'll need this every time we transfer some C function from/to this state + REGISTRY_SET( L, LOOKUP_REGKEY, lua_newtable( L)); + STACK_MID( L, 0); + + // neither libs (not even 'base') nor special init func: we are done + if( libs_ == NULL && U->on_state_create_func == NULL) + { + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "luaG_newstate(NULL)\n" INDENT_END)); + return L; + } + + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "luaG_newstate()\n" INDENT_END)); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + + // copy settings (for example because it may contain a Lua on_state_create function) + copy_one_time_settings( U, from_, L); + + // 'lua.c' stops GC during initialization so perhaps its a good idea. :) + lua_gc( L, LUA_GCSTOP, 0); + + + // Anything causes 'base' to be taken in + // + if( libs_ != NULL) + { + // special "*" case (mainly to help with LuaJIT compatibility) + // as we are called from luaopen_lanes_core() already, and that would deadlock + if( libs_[0] == '*' && libs_[1] == 0) + { + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "opening ALL standard libraries\n" INDENT_END)); + luaL_openlibs( L); + // don't forget lanes.core for regular lane states + open1lib( DEBUGSPEW_PARAM_COMMA( U) L, "lanes.core", 10); + libs_ = NULL; // done with libs + } + else + { + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "opening base library\n" INDENT_END)); #if LUA_VERSION_NUM >= 502 - // open base library the same way as in luaL_openlibs() - luaL_requiref( L, "_G", luaopen_base, 1); - lua_pop( L, 1); + // open base library the same way as in luaL_openlibs() + luaL_requiref( L, "_G", luaopen_base, 1); + lua_pop( L, 1); #else // LUA_VERSION_NUM - lua_pushcfunction( L, luaopen_base); - lua_pushstring( L, ""); - lua_call( L, 1, 0); + lua_pushcfunction( L, luaopen_base); + lua_pushstring( L, ""); + lua_call( L, 1, 0); #endif // LUA_VERSION_NUM - } - } - STACK_END( L, 0); - - // scan all libraries, open them one by one - if( libs_) - { - char const* p; - unsigned int len = 0; - for( p = libs_; *p; p += len) - { - // skip delimiters ('.' 
can be part of name for "lanes.core") - while( *p && !isalnum( *p) && *p != '.') - ++ p; - // skip name - len = 0; - while( isalnum( p[len]) || p[len] == '.') - ++ len; - // open library - open1lib( DEBUGSPEW_PARAM_COMMA( U) L, p, len); - } - } - lua_gc( L, LUA_GCRESTART, 0); - - serialize_require( DEBUGSPEW_PARAM_COMMA( U) L); - - // call this after the base libraries are loaded and GC is restarted - // will raise an error in from_ in case of problem - call_on_state_create( U, L, from_, eLM_LaneBody); - - STACK_CHECK( L, 0); - // after all this, register everything we find in our name<->function database - lua_pushglobaltable( L); // Lua 5.2 no longer has LUA_GLOBALSINDEX: we must push globals table on the stack - populate_func_lookup_table( L, -1, NULL); + } + } + STACK_END( L, 0); + + // scan all libraries, open them one by one + if( libs_) + { + char const* p; + unsigned int len = 0; + for( p = libs_; *p; p += len) + { + // skip delimiters ('.' can be part of name for "lanes.core") + while( *p && !isalnum( *p) && *p != '.') + ++ p; + // skip name + len = 0; + while( isalnum( p[len]) || p[len] == '.') + ++ len; + // open library + open1lib( DEBUGSPEW_PARAM_COMMA( U) L, p, len); + } + } + lua_gc( L, LUA_GCRESTART, 0); + + serialize_require( DEBUGSPEW_PARAM_COMMA( U) L); + + // call this after the base libraries are loaded and GC is restarted + // will raise an error in from_ in case of problem + call_on_state_create( U, L, from_, eLM_LaneBody); + + STACK_CHECK( L, 0); + // after all this, register everything we find in our name<->function database + lua_pushglobaltable( L); // Lua 5.2 no longer has LUA_GLOBALSINDEX: we must push globals table on the stack + populate_func_lookup_table( L, -1, NULL); #if 0 && USE_DEBUG_SPEW - // dump the lookup database contents - lua_getfield( L, LUA_REGISTRYINDEX, LOOKUP_REGKEY); // {} - lua_pushnil( L); // {} nil - while( lua_next( L, -2)) // {} k v - { - lua_getglobal( L, "print"); // {} k v print - lua_pushlstring( L, debugspew_indent, U->debugspew_indent_depth); // {} k v print " " - lua_pushvalue( L, -4); // {} k v print " " k - lua_pushvalue( L, -4); // {} k v print " " k v - lua_call( L, 3, 0); // {} k v - lua_pop( L, 1); // {} k - } - lua_pop( L, 1); // {} + // dump the lookup database contents + lua_getfield( L, LUA_REGISTRYINDEX, LOOKUP_REGKEY); // {} + lua_pushnil( L); // {} nil + while( lua_next( L, -2)) // {} k v + { + lua_getglobal( L, "print"); // {} k v print + lua_pushlstring( L, debugspew_indent, U->debugspew_indent_depth); // {} k v print " " + lua_pushvalue( L, -4); // {} k v print " " k + lua_pushvalue( L, -4); // {} k v print " " k v + lua_call( L, 3, 0); // {} k v + lua_pop( L, 1); // {} k + } + lua_pop( L, 1); // {} #endif // USE_DEBUG_SPEW - lua_pop( L, 1); - STACK_END( L, 0); - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - return L; + lua_pop( L, 1); + STACK_END( L, 0); + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + return L; } diff --git a/src/threading.c b/src/threading.c index 84a6fcd..183dc87 100644 --- a/src/threading.c +++ b/src/threading.c @@ -104,16 +104,16 @@ THE SOFTWARE. static void FAIL( char const* funcname, int rc) { #if defined( PLATFORM_XBOX) - fprintf( stderr, "%s() failed! (%d)\n", funcname, rc ); + fprintf( stderr, "%s() failed! (%d)\n", funcname, rc ); #else // PLATFORM_XBOX - char buf[256]; - FormatMessageA( FORMAT_MESSAGE_FROM_SYSTEM, NULL, rc, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), buf, 256, NULL); - fprintf( stderr, "%s() failed! 
[GetLastError() -> %d] '%s'", funcname, rc, buf); + char buf[256]; + FormatMessageA( FORMAT_MESSAGE_FROM_SYSTEM, NULL, rc, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), buf, 256, NULL); + fprintf( stderr, "%s() failed! [GetLastError() -> %d] '%s'", funcname, rc, buf); #endif // PLATFORM_XBOX #ifdef _MSC_VER - __debugbreak(); // give a chance to the debugger! + __debugbreak(); // give a chance to the debugger! #endif // _MSC_VER - abort(); + abort(); } #endif // win32 build @@ -278,14 +278,14 @@ static void prepare_timeout( struct timespec *ts, time_d abs_secs ) { if (!CloseHandle(*ref)) FAIL( "CloseHandle (mutex)", GetLastError() ); *ref= NULL; } - void MUTEX_LOCK( MUTEX_T *ref ) - { - DWORD rc = WaitForSingleObject( *ref, INFINITE); - // ERROR_WAIT_NO_CHILDREN means a thread was killed (lane terminated because of error raised during a linda transfer for example) while having grabbed this mutex - // this is not a big problem as we will grab it just the same, so ignore this particular error - if( rc != 0 && rc != ERROR_WAIT_NO_CHILDREN) - FAIL( "WaitForSingleObject", (rc == WAIT_FAILED) ? GetLastError() : rc); - } + void MUTEX_LOCK( MUTEX_T *ref ) + { + DWORD rc = WaitForSingleObject( *ref, INFINITE); + // ERROR_WAIT_NO_CHILDREN means a thread was killed (lane terminated because of error raised during a linda transfer for example) while having grabbed this mutex + // this is not a big problem as we will grab it just the same, so ignore this particular error + if( rc != 0 && rc != ERROR_WAIT_NO_CHILDREN) + FAIL( "WaitForSingleObject", (rc == WAIT_FAILED) ? GetLastError() : rc); + } void MUTEX_UNLOCK( MUTEX_T *ref ) { if (!ReleaseMutex(*ref)) FAIL( "ReleaseMutex", GetLastError() ); @@ -294,13 +294,13 @@ static void prepare_timeout( struct timespec *ts, time_d abs_secs ) { static int const gs_prio_remap[] = { - THREAD_PRIORITY_IDLE, - THREAD_PRIORITY_LOWEST, - THREAD_PRIORITY_BELOW_NORMAL, - THREAD_PRIORITY_NORMAL, - THREAD_PRIORITY_ABOVE_NORMAL, - THREAD_PRIORITY_HIGHEST, - THREAD_PRIORITY_TIME_CRITICAL + THREAD_PRIORITY_IDLE, + THREAD_PRIORITY_LOWEST, + THREAD_PRIORITY_BELOW_NORMAL, + THREAD_PRIORITY_NORMAL, + THREAD_PRIORITY_ABOVE_NORMAL, + THREAD_PRIORITY_HIGHEST, + THREAD_PRIORITY_TIME_CRITICAL }; /* MSDN: "If you would like to use the CRT in ThreadProc, use the @@ -310,43 +310,43 @@ MSDN: "you can create at most 2028 threads" // Note: Visual C++ requires '__stdcall' where it is void THREAD_CREATE( THREAD_T* ref, THREAD_RETURN_T (__stdcall *func)( void*), void* data, int prio /* -3..+3 */) { - HANDLE h = (HANDLE) _beginthreadex( NULL, // security - _THREAD_STACK_SIZE, - func, - data, - 0, // flags (0/CREATE_SUSPENDED) - NULL // thread id (not used) - ); - - if( h == NULL) // _beginthreadex returns 0L on failure instead of -1L (like _beginthread) - { - FAIL( "CreateThread", GetLastError()); - } - - if (!SetThreadPriority( h, gs_prio_remap[prio + 3])) - { - FAIL( "SetThreadPriority", GetLastError()); - } - - *ref = h; + HANDLE h = (HANDLE) _beginthreadex( NULL, // security + _THREAD_STACK_SIZE, + func, + data, + 0, // flags (0/CREATE_SUSPENDED) + NULL // thread id (not used) + ); + + if( h == NULL) // _beginthreadex returns 0L on failure instead of -1L (like _beginthread) + { + FAIL( "CreateThread", GetLastError()); + } + + if (!SetThreadPriority( h, gs_prio_remap[prio + 3])) + { + FAIL( "SetThreadPriority", GetLastError()); + } + + *ref = h; } void THREAD_SET_PRIORITY( int prio) { - // prio range [-3,+3] was checked by the caller - if (!SetThreadPriority( GetCurrentThread(), 
gs_prio_remap[prio + 3])) - { - FAIL( "THREAD_SET_PRIORITY", GetLastError()); - } + // prio range [-3,+3] was checked by the caller + if (!SetThreadPriority( GetCurrentThread(), gs_prio_remap[prio + 3])) + { + FAIL( "THREAD_SET_PRIORITY", GetLastError()); + } } void THREAD_SET_AFFINITY( unsigned int aff) { - if( !SetThreadAffinityMask( GetCurrentThread(), aff)) - { - FAIL( "THREAD_SET_AFFINITY", GetLastError()); - } + if( !SetThreadAffinityMask( GetCurrentThread(), aff)) + { + FAIL( "THREAD_SET_AFFINITY", GetLastError()); + } } bool_t THREAD_WAIT_IMPL( THREAD_T *ref, double secs) @@ -366,200 +366,200 @@ bool_t THREAD_WAIT_IMPL( THREAD_T *ref, double secs) return TRUE; } // - void THREAD_KILL( THREAD_T *ref ) - { - // nonexistent on Xbox360, simply disable until a better solution is found - #if !defined( PLATFORM_XBOX) - // in theory no-one should call this as it is very dangerous (memory and mutex leaks, no notification of DLLs, etc.) - if (!TerminateThread( *ref, 0 )) FAIL("TerminateThread", GetLastError()); - #endif // PLATFORM_XBOX - *ref= NULL; - } - - void THREAD_MAKE_ASYNCH_CANCELLABLE() {} // nothing to do for windows threads, we can cancel them anytime we want + void THREAD_KILL( THREAD_T *ref ) + { + // nonexistent on Xbox360, simply disable until a better solution is found + #if !defined( PLATFORM_XBOX) + // in theory no-one should call this as it is very dangerous (memory and mutex leaks, no notification of DLLs, etc.) + if (!TerminateThread( *ref, 0 )) FAIL("TerminateThread", GetLastError()); + #endif // PLATFORM_XBOX + *ref= NULL; + } + + void THREAD_MAKE_ASYNCH_CANCELLABLE() {} // nothing to do for windows threads, we can cancel them anytime we want #if !defined __GNUC__ - //see http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx - #define MS_VC_EXCEPTION 0x406D1388 - #pragma pack(push,8) - typedef struct tagTHREADNAME_INFO - { - DWORD dwType; // Must be 0x1000. - LPCSTR szName; // Pointer to name (in user addr space). - DWORD dwThreadID; // Thread ID (-1=caller thread). - DWORD dwFlags; // Reserved for future use, must be zero. - } THREADNAME_INFO; - #pragma pack(pop) + //see http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx + #define MS_VC_EXCEPTION 0x406D1388 + #pragma pack(push,8) + typedef struct tagTHREADNAME_INFO + { + DWORD dwType; // Must be 0x1000. + LPCSTR szName; // Pointer to name (in user addr space). + DWORD dwThreadID; // Thread ID (-1=caller thread). + DWORD dwFlags; // Reserved for future use, must be zero. 
+ } THREADNAME_INFO; + #pragma pack(pop) #endif // !__GNUC__ - void THREAD_SETNAME( char const* _name) - { + void THREAD_SETNAME( char const* _name) + { #if !defined __GNUC__ - THREADNAME_INFO info; - info.dwType = 0x1000; - info.szName = _name; - info.dwThreadID = GetCurrentThreadId(); - info.dwFlags = 0; - - __try - { - RaiseException( MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(ULONG_PTR), (ULONG_PTR*)&info ); - } - __except(EXCEPTION_EXECUTE_HANDLER) - { - } + THREADNAME_INFO info; + info.dwType = 0x1000; + info.szName = _name; + info.dwThreadID = GetCurrentThreadId(); + info.dwFlags = 0; + + __try + { + RaiseException( MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(ULONG_PTR), (ULONG_PTR*)&info ); + } + __except(EXCEPTION_EXECUTE_HANDLER) + { + } #endif // !__GNUC__ - } + } #if _WIN32_WINNT < 0x0600 // CONDITION_VARIABLE aren't available - void SIGNAL_INIT( SIGNAL_T* ref) - { - InitializeCriticalSection( &ref->signalCS); - InitializeCriticalSection( &ref->countCS); - if( 0 == (ref->waitEvent = CreateEvent( 0, TRUE, FALSE, 0))) // manual-reset - FAIL( "CreateEvent", GetLastError()); - if( 0 == (ref->waitDoneEvent = CreateEvent( 0, FALSE, FALSE, 0))) // auto-reset - FAIL( "CreateEvent", GetLastError()); - ref->waitersCount = 0; - } - - void SIGNAL_FREE( SIGNAL_T* ref) - { - CloseHandle( ref->waitDoneEvent); - CloseHandle( ref->waitEvent); - DeleteCriticalSection( &ref->countCS); - DeleteCriticalSection( &ref->signalCS); - } - - bool_t SIGNAL_WAIT( SIGNAL_T* ref, MUTEX_T* mu_ref, time_d abs_secs) - { - DWORD errc; - DWORD ms; - - if( abs_secs < 0.0) - ms = INFINITE; - else if( abs_secs == 0.0) - ms = 0; - else - { - time_d msd = (abs_secs - now_secs()) * 1000.0 + 0.5; - // If the time already passed, still try once (ms==0). A short timeout - // may have turned negative or 0 because of the two time samples done. - ms = msd <= 0.0 ? 0 : (DWORD)msd; - } - - EnterCriticalSection( &ref->signalCS); - EnterCriticalSection( &ref->countCS); - ++ ref->waitersCount; - LeaveCriticalSection( &ref->countCS); - LeaveCriticalSection( &ref->signalCS); - - errc = SignalObjectAndWait( *mu_ref, ref->waitEvent, ms, FALSE); - - EnterCriticalSection( &ref->countCS); - if( 0 == -- ref->waitersCount) - { - // we're the last one leaving... 
- ResetEvent( ref->waitEvent); - SetEvent( ref->waitDoneEvent); - } - LeaveCriticalSection( &ref->countCS); - MUTEX_LOCK( mu_ref); - - switch( errc) - { - case WAIT_TIMEOUT: - return FALSE; - case WAIT_OBJECT_0: - return TRUE; - } - - FAIL( "SignalObjectAndWait", GetLastError()); - return FALSE; - } - - void SIGNAL_ALL( SIGNAL_T* ref) - { - DWORD errc = WAIT_OBJECT_0; - - EnterCriticalSection( &ref->signalCS); - EnterCriticalSection( &ref->countCS); - - if( ref->waitersCount > 0) - { - ResetEvent( ref->waitDoneEvent); - SetEvent( ref->waitEvent); - LeaveCriticalSection( &ref->countCS); - errc = WaitForSingleObject( ref->waitDoneEvent, INFINITE); - } - else - { - LeaveCriticalSection( &ref->countCS); - } - - LeaveCriticalSection( &ref->signalCS); - - if( WAIT_OBJECT_0 != errc) - FAIL( "WaitForSingleObject", GetLastError()); - } + void SIGNAL_INIT( SIGNAL_T* ref) + { + InitializeCriticalSection( &ref->signalCS); + InitializeCriticalSection( &ref->countCS); + if( 0 == (ref->waitEvent = CreateEvent( 0, TRUE, FALSE, 0))) // manual-reset + FAIL( "CreateEvent", GetLastError()); + if( 0 == (ref->waitDoneEvent = CreateEvent( 0, FALSE, FALSE, 0))) // auto-reset + FAIL( "CreateEvent", GetLastError()); + ref->waitersCount = 0; + } + + void SIGNAL_FREE( SIGNAL_T* ref) + { + CloseHandle( ref->waitDoneEvent); + CloseHandle( ref->waitEvent); + DeleteCriticalSection( &ref->countCS); + DeleteCriticalSection( &ref->signalCS); + } + + bool_t SIGNAL_WAIT( SIGNAL_T* ref, MUTEX_T* mu_ref, time_d abs_secs) + { + DWORD errc; + DWORD ms; + + if( abs_secs < 0.0) + ms = INFINITE; + else if( abs_secs == 0.0) + ms = 0; + else + { + time_d msd = (abs_secs - now_secs()) * 1000.0 + 0.5; + // If the time already passed, still try once (ms==0). A short timeout + // may have turned negative or 0 because of the two time samples done. + ms = msd <= 0.0 ? 0 : (DWORD)msd; + } + + EnterCriticalSection( &ref->signalCS); + EnterCriticalSection( &ref->countCS); + ++ ref->waitersCount; + LeaveCriticalSection( &ref->countCS); + LeaveCriticalSection( &ref->signalCS); + + errc = SignalObjectAndWait( *mu_ref, ref->waitEvent, ms, FALSE); + + EnterCriticalSection( &ref->countCS); + if( 0 == -- ref->waitersCount) + { + // we're the last one leaving... 
+ ResetEvent( ref->waitEvent); + SetEvent( ref->waitDoneEvent); + } + LeaveCriticalSection( &ref->countCS); + MUTEX_LOCK( mu_ref); + + switch( errc) + { + case WAIT_TIMEOUT: + return FALSE; + case WAIT_OBJECT_0: + return TRUE; + } + + FAIL( "SignalObjectAndWait", GetLastError()); + return FALSE; + } + + void SIGNAL_ALL( SIGNAL_T* ref) + { + DWORD errc = WAIT_OBJECT_0; + + EnterCriticalSection( &ref->signalCS); + EnterCriticalSection( &ref->countCS); + + if( ref->waitersCount > 0) + { + ResetEvent( ref->waitDoneEvent); + SetEvent( ref->waitEvent); + LeaveCriticalSection( &ref->countCS); + errc = WaitForSingleObject( ref->waitDoneEvent, INFINITE); + } + else + { + LeaveCriticalSection( &ref->countCS); + } + + LeaveCriticalSection( &ref->signalCS); + + if( WAIT_OBJECT_0 != errc) + FAIL( "WaitForSingleObject", GetLastError()); + } #else // CONDITION_VARIABLE are available, use them - // - void SIGNAL_INIT( SIGNAL_T *ref ) - { - InitializeConditionVariable( ref); - } - - void SIGNAL_FREE( SIGNAL_T *ref ) - { - // nothing to do - (void)ref; - } - - bool_t SIGNAL_WAIT( SIGNAL_T *ref, MUTEX_T *mu_ref, time_d abs_secs) - { - long ms; - - if( abs_secs < 0.0) - ms = INFINITE; - else if( abs_secs == 0.0) - ms = 0; - else - { - ms = (long) ((abs_secs - now_secs())*1000.0 + 0.5); - - // If the time already passed, still try once (ms==0). A short timeout - // may have turned negative or 0 because of the two time samples done. - // - if( ms < 0) - ms = 0; - } - - if( !SleepConditionVariableCS( ref, mu_ref, ms)) - { - if( GetLastError() == ERROR_TIMEOUT) - { - return FALSE; - } - else - { - FAIL( "SleepConditionVariableCS", GetLastError()); - } - } - return TRUE; - } - - void SIGNAL_ONE( SIGNAL_T *ref ) - { - WakeConditionVariable( ref); - } - - void SIGNAL_ALL( SIGNAL_T *ref ) - { - WakeAllConditionVariable( ref); - } + // + void SIGNAL_INIT( SIGNAL_T *ref ) + { + InitializeConditionVariable( ref); + } + + void SIGNAL_FREE( SIGNAL_T *ref ) + { + // nothing to do + (void)ref; + } + + bool_t SIGNAL_WAIT( SIGNAL_T *ref, MUTEX_T *mu_ref, time_d abs_secs) + { + long ms; + + if( abs_secs < 0.0) + ms = INFINITE; + else if( abs_secs == 0.0) + ms = 0; + else + { + ms = (long) ((abs_secs - now_secs())*1000.0 + 0.5); + + // If the time already passed, still try once (ms==0). A short timeout + // may have turned negative or 0 because of the two time samples done. + // + if( ms < 0) + ms = 0; + } + + if( !SleepConditionVariableCS( ref, mu_ref, ms)) + { + if( GetLastError() == ERROR_TIMEOUT) + { + return FALSE; + } + else + { + FAIL( "SleepConditionVariableCS", GetLastError()); + } + } + return TRUE; + } + + void SIGNAL_ONE( SIGNAL_T *ref ) + { + WakeConditionVariable( ref); + } + + void SIGNAL_ALL( SIGNAL_T *ref ) + { + WakeAllConditionVariable( ref); + } #endif // CONDITION_VARIABLE are available @@ -574,20 +574,20 @@ bool_t THREAD_WAIT_IMPL( THREAD_T *ref, double secs) # if (defined(__MINGW32__) || defined(__MINGW64__)) && defined pthread_attr_setschedpolicy # if pthread_attr_setschedpolicy( A, S) == ENOTSUP - // from the mingw-w64 team: - // Well, we support pthread_setschedparam by which you can specify - // threading-policy. Nevertheless, yes we lack this function. In - // general its implementation is pretty much trivial, as on Win32 target - // just SCHED_OTHER can be supported. 
- #undef pthread_attr_setschedpolicy - static int pthread_attr_setschedpolicy( pthread_attr_t* attr, int policy) - { - if( policy != SCHED_OTHER) - { - return ENOTSUP; - } - return 0; - } + // from the mingw-w64 team: + // Well, we support pthread_setschedparam by which you can specify + // threading-policy. Nevertheless, yes we lack this function. In + // general its implementation is pretty much trivial, as on Win32 target + // just SCHED_OTHER can be supported. + #undef pthread_attr_setschedpolicy + static int pthread_attr_setschedpolicy( pthread_attr_t* attr, int policy) + { + if( policy != SCHED_OTHER) + { + return ENOTSUP; + } + return 0; + } # endif // pthread_attr_setschedpolicy() # endif // defined(__MINGW32__) || defined(__MINGW64__) @@ -646,94 +646,94 @@ bool_t THREAD_WAIT_IMPL( THREAD_T *ref, double secs) // array of 7 thread priority values, hand-tuned by platform so that we offer a uniform [-3,+3] public priority range static int const gs_prio_remap[] = { - // NB: PThreads priority handling is about as twisty as one can get it - // (and then some). DON*T TRUST ANYTHING YOU READ ON THE NET!!! - - //--- - // "Select the scheduling policy for the thread: one of SCHED_OTHER - // (regular, non-real-time scheduling), SCHED_RR (real-time, - // round-robin) or SCHED_FIFO (real-time, first-in first-out)." - // - // "Using the RR policy ensures that all threads having the same - // priority level will be scheduled equally, regardless of their activity." - // - // "For SCHED_FIFO and SCHED_RR, the only required member of the - // sched_param structure is the priority sched_priority. For SCHED_OTHER, - // the affected scheduling parameters are implementation-defined." - // - // "The priority of a thread is specified as a delta which is added to - // the priority of the process." - // - // ".. priority is an integer value, in the range from 1 to 127. - // 1 is the least-favored priority, 127 is the most-favored." - // - // "Priority level 0 cannot be used: it is reserved for the system." - // - // "When you use specify a priority of -99 in a call to - // pthread_setschedparam(), the priority of the target thread is - // lowered to the lowest possible value." - // - // ... - - // ** CONCLUSION ** - // - // PThread priorities are _hugely_ system specific, and we need at - // least OS specific settings. Hopefully, Linuxes and OS X versions - // are uniform enough, among each other... - // + // NB: PThreads priority handling is about as twisty as one can get it + // (and then some). DON*T TRUST ANYTHING YOU READ ON THE NET!!! + + //--- + // "Select the scheduling policy for the thread: one of SCHED_OTHER + // (regular, non-real-time scheduling), SCHED_RR (real-time, + // round-robin) or SCHED_FIFO (real-time, first-in first-out)." + // + // "Using the RR policy ensures that all threads having the same + // priority level will be scheduled equally, regardless of their activity." + // + // "For SCHED_FIFO and SCHED_RR, the only required member of the + // sched_param structure is the priority sched_priority. For SCHED_OTHER, + // the affected scheduling parameters are implementation-defined." + // + // "The priority of a thread is specified as a delta which is added to + // the priority of the process." + // + // ".. priority is an integer value, in the range from 1 to 127. + // 1 is the least-favored priority, 127 is the most-favored." + // + // "Priority level 0 cannot be used: it is reserved for the system." 
+ // + // "When you use specify a priority of -99 in a call to + // pthread_setschedparam(), the priority of the target thread is + // lowered to the lowest possible value." + // + // ... + + // ** CONCLUSION ** + // + // PThread priorities are _hugely_ system specific, and we need at + // least OS specific settings. Hopefully, Linuxes and OS X versions + // are uniform enough, among each other... + // # if defined PLATFORM_OSX - // AK 10-Apr-07 (OS X PowerPC 10.4.9): - // - // With SCHED_RR, 26 seems to be the "normal" priority, where setting - // it does not seem to affect the order of threads processed. - // - // With SCHED_OTHER, the range 25..32 is normal (maybe the same 26, - // but the difference is not so clear with OTHER). - // - // 'sched_get_priority_min()' and '..max()' give 15, 47 as the - // priority limits. This could imply, user mode applications won't - // be able to use values outside of that range. - // + // AK 10-Apr-07 (OS X PowerPC 10.4.9): + // + // With SCHED_RR, 26 seems to be the "normal" priority, where setting + // it does not seem to affect the order of threads processed. + // + // With SCHED_OTHER, the range 25..32 is normal (maybe the same 26, + // but the difference is not so clear with OTHER). + // + // 'sched_get_priority_min()' and '..max()' give 15, 47 as the + // priority limits. This could imply, user mode applications won't + // be able to use values outside of that range. + // # define _PRIO_MODE SCHED_OTHER - // OS X 10.4.9 (PowerPC) gives ENOTSUP for process scope - //#define _PRIO_SCOPE PTHREAD_SCOPE_PROCESS + // OS X 10.4.9 (PowerPC) gives ENOTSUP for process scope + //#define _PRIO_SCOPE PTHREAD_SCOPE_PROCESS # define _PRIO_HI 32 // seems to work (_carefully_ picked!) # define _PRIO_0 26 // detected # define _PRIO_LO 1 // seems to work (tested) # elif defined PLATFORM_LINUX - // (based on Ubuntu Linux 2.6.15 kernel) - // - // SCHED_OTHER is the default policy, but does not allow for priorities. - // SCHED_RR allows priorities, all of which (1..99) are higher than - // a thread with SCHED_OTHER policy. - // - // - // - // - // - // Manuals suggest checking #ifdef _POSIX_THREAD_PRIORITY_SCHEDULING, - // but even Ubuntu does not seem to define it. - // + // (based on Ubuntu Linux 2.6.15 kernel) + // + // SCHED_OTHER is the default policy, but does not allow for priorities. + // SCHED_RR allows priorities, all of which (1..99) are higher than + // a thread with SCHED_OTHER policy. + // + // + // + // + // + // Manuals suggest checking #ifdef _POSIX_THREAD_PRIORITY_SCHEDULING, + // but even Ubuntu does not seem to define it. + // # define _PRIO_MODE SCHED_RR - // NTLP 2.5: only system scope allowed (being the basic reason why - // root privileges are required..) - //#define _PRIO_SCOPE PTHREAD_SCOPE_PROCESS + // NTLP 2.5: only system scope allowed (being the basic reason why + // root privileges are required..) + //#define _PRIO_SCOPE PTHREAD_SCOPE_PROCESS # define _PRIO_HI 99 # define _PRIO_0 50 # define _PRIO_LO 1 # elif defined(PLATFORM_BSD) - // - // - // - // "When control over the thread scheduling is desired, then FreeBSD - // with the libpthread implementation is by far the best choice .." - // + // + // + // + // "When control over the thread scheduling is desired, then FreeBSD + // with the libpthread implementation is by far the best choice .." 
+ // # define _PRIO_MODE SCHED_OTHER # define _PRIO_SCOPE PTHREAD_SCOPE_PROCESS # define _PRIO_HI 31 @@ -741,16 +741,16 @@ static int const gs_prio_remap[] = # define _PRIO_LO 1 # elif defined(PLATFORM_CYGWIN) - // - // TBD: Find right values for Cygwin - // + // + // TBD: Find right values for Cygwin + // # elif defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC) - // any other value not supported by win32-pthread as of version 2.9.1 + // any other value not supported by win32-pthread as of version 2.9.1 # define _PRIO_MODE SCHED_OTHER - // PTHREAD_SCOPE_PROCESS not supported by win32-pthread as of version 2.9.1 - //#define _PRIO_SCOPE PTHREAD_SCOPE_SYSTEM // but do we need this at all to start with? - THREAD_PRIORITY_IDLE, THREAD_PRIORITY_LOWEST, THREAD_PRIORITY_BELOW_NORMAL, THREAD_PRIORITY_NORMAL, THREAD_PRIORITY_ABOVE_NORMAL, THREAD_PRIORITY_HIGHEST, THREAD_PRIORITY_TIME_CRITICAL + // PTHREAD_SCOPE_PROCESS not supported by win32-pthread as of version 2.9.1 + //#define _PRIO_SCOPE PTHREAD_SCOPE_SYSTEM // but do we need this at all to start with? + THREAD_PRIORITY_IDLE, THREAD_PRIORITY_LOWEST, THREAD_PRIORITY_BELOW_NORMAL, THREAD_PRIORITY_NORMAL, THREAD_PRIORITY_ABOVE_NORMAL, THREAD_PRIORITY_HIGHEST, THREAD_PRIORITY_TIME_CRITICAL # else # error "Unknown OS: not implemented!" @@ -760,163 +760,163 @@ static int const gs_prio_remap[] = # define _PRIO_AN (_PRIO_0 + ((_PRIO_HI-_PRIO_0)/2)) # define _PRIO_BN (_PRIO_LO + ((_PRIO_0-_PRIO_LO)/2)) - _PRIO_LO, _PRIO_LO, _PRIO_BN, _PRIO_0, _PRIO_AN, _PRIO_HI, _PRIO_HI + _PRIO_LO, _PRIO_LO, _PRIO_BN, _PRIO_0, _PRIO_AN, _PRIO_HI, _PRIO_HI #endif // _PRIO_0 }; void THREAD_CREATE( THREAD_T* ref, THREAD_RETURN_T (*func)( void*), void* data, int prio /* -3..+3 */) { - pthread_attr_t a; - bool_t const normal = + pthread_attr_t a; + bool_t const normal = #if defined(PLATFORM_LINUX) && defined(LINUX_SCHED_RR) - !sudo; // with sudo, even normal thread must use SCHED_RR + !sudo; // with sudo, even normal thread must use SCHED_RR #else - (prio == 0); + (prio == 0); #endif - PT_CALL( pthread_attr_init( &a)); + PT_CALL( pthread_attr_init( &a)); #ifndef PTHREAD_TIMEDJOIN - // We create a NON-JOINABLE thread. This is mainly due to the lack of - // 'pthread_timedjoin()', but does offer other benefits (s.a. earlier - // freeing of the thread's resources). - // - PT_CALL( pthread_attr_setdetachstate( &a, PTHREAD_CREATE_DETACHED)); + // We create a NON-JOINABLE thread. This is mainly due to the lack of + // 'pthread_timedjoin()', but does offer other benefits (s.a. earlier + // freeing of the thread's resources). 
+ // + PT_CALL( pthread_attr_setdetachstate( &a, PTHREAD_CREATE_DETACHED)); #endif // PTHREAD_TIMEDJOIN - // Use this to find a system's default stack size (DEBUG) + // Use this to find a system's default stack size (DEBUG) #if 0 - { - size_t n; - pthread_attr_getstacksize( &a, &n); - fprintf( stderr, "Getstack: %u\n", (unsigned int)n); - } - // 524288 on OS X - // 2097152 on Linux x86 (Ubuntu 7.04) - // 1048576 on FreeBSD 6.2 SMP i386 + { + size_t n; + pthread_attr_getstacksize( &a, &n); + fprintf( stderr, "Getstack: %u\n", (unsigned int)n); + } + // 524288 on OS X + // 2097152 on Linux x86 (Ubuntu 7.04) + // 1048576 on FreeBSD 6.2 SMP i386 #endif // 0 #if defined _THREAD_STACK_SIZE && _THREAD_STACK_SIZE > 0 - PT_CALL( pthread_attr_setstacksize( &a, _THREAD_STACK_SIZE)); + PT_CALL( pthread_attr_setstacksize( &a, _THREAD_STACK_SIZE)); #endif - if( !normal) - { - struct sched_param sp; - // "The specified scheduling parameters are only used if the scheduling - // parameter inheritance attribute is PTHREAD_EXPLICIT_SCHED." - // + if( !normal) + { + struct sched_param sp; + // "The specified scheduling parameters are only used if the scheduling + // parameter inheritance attribute is PTHREAD_EXPLICIT_SCHED." + // #if !defined __ANDROID__ || ( defined __ANDROID__ && __ANDROID_API__ >= 28 ) - PT_CALL( pthread_attr_setinheritsched( &a, PTHREAD_EXPLICIT_SCHED)); + PT_CALL( pthread_attr_setinheritsched( &a, PTHREAD_EXPLICIT_SCHED)); #endif #ifdef _PRIO_SCOPE - PT_CALL( pthread_attr_setscope( &a, _PRIO_SCOPE)); + PT_CALL( pthread_attr_setscope( &a, _PRIO_SCOPE)); #endif // _PRIO_SCOPE - PT_CALL( pthread_attr_setschedpolicy( &a, _PRIO_MODE)); + PT_CALL( pthread_attr_setschedpolicy( &a, _PRIO_MODE)); - // prio range [-3,+3] was checked by the caller - sp.sched_priority = gs_prio_remap[ prio + 3]; - PT_CALL( pthread_attr_setschedparam( &a, &sp)); - } + // prio range [-3,+3] was checked by the caller + sp.sched_priority = gs_prio_remap[ prio + 3]; + PT_CALL( pthread_attr_setschedparam( &a, &sp)); + } - //--- - // Seems on OS X, _POSIX_THREAD_THREADS_MAX is some kind of system - // thread limit (not userland thread). Actual limit for us is way higher. - // PTHREAD_THREADS_MAX is not defined (even though man page refers to it!) - // + //--- + // Seems on OS X, _POSIX_THREAD_THREADS_MAX is some kind of system + // thread limit (not userland thread). Actual limit for us is way higher. + // PTHREAD_THREADS_MAX is not defined (even though man page refers to it!) 
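// A minimal sketch of how the uniform [-3,+3] public priority range discussed above maps
// onto a platform value: callers validate the range, then index gs_prio_remap with
// prio + 3, exactly as THREAD_CREATE and THREAD_SET_PRIORITY do. The helper name is
// illustrative only, not part of the Lanes API.
static int example_platform_priority( int prio /* -3..+3 */)
{
    // the real callers check the range beforehand; clamp here just to keep the sketch safe
    if( prio < -3) prio = -3;
    if( prio > +3) prio = +3;
    return gs_prio_remap[ prio + 3];
}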
+ // # ifndef THREAD_CREATE_RETRIES_MAX - // Don't bother with retries; a failure is a failure - // - { - int rc = pthread_create( ref, &a, func, data); - if( rc) _PT_FAIL( rc, "pthread_create()", __FILE__, __LINE__ - 1); - } + // Don't bother with retries; a failure is a failure + // + { + int rc = pthread_create( ref, &a, func, data); + if( rc) _PT_FAIL( rc, "pthread_create()", __FILE__, __LINE__ - 1); + } # else # error "This code deprecated" - /* - // Wait slightly if thread creation has exchausted the system - // - { uint_t retries; - for( retries=0; retries>= 1; - } + while( aff != 0) + { + if( aff & 1) + { + CPU_SET( bit, &cpuset); + } + ++ bit; + aff >>= 1; + } #ifdef __ANDROID__ - PT_CALL( sched_setaffinity( pthread_self(), sizeof(cpu_set_t), &cpuset)); + PT_CALL( sched_setaffinity( pthread_self(), sizeof(cpu_set_t), &cpuset)); #elif defined(__NetBSD__) - PT_CALL( pthread_setaffinity_np( pthread_self(), cpuset_size(cpuset), cpuset)); - cpuset_destroy( cpuset); + PT_CALL( pthread_setaffinity_np( pthread_self(), cpuset_size(cpuset), cpuset)); + cpuset_destroy( cpuset); #else - PT_CALL( pthread_setaffinity_np( pthread_self(), sizeof(cpu_set_t), &cpuset)); + PT_CALL( pthread_setaffinity_np( pthread_self(), sizeof(cpu_set_t), &cpuset)); #endif } @@ -986,47 +986,47 @@ bool_t THREAD_WAIT( THREAD_T *ref, double secs , SIGNAL_T *signal_ref, MUTEX_T * #endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR return done; } - // + // void THREAD_KILL( THREAD_T *ref ) { #ifdef __ANDROID__ - __android_log_print(ANDROID_LOG_WARN, LOG_TAG, "Cannot kill thread!"); + __android_log_print(ANDROID_LOG_WARN, LOG_TAG, "Cannot kill thread!"); #else pthread_cancel( *ref ); #endif } - void THREAD_MAKE_ASYNCH_CANCELLABLE() - { + void THREAD_MAKE_ASYNCH_CANCELLABLE() + { #ifdef __ANDROID__ - __android_log_print(ANDROID_LOG_WARN, LOG_TAG, "Cannot make thread async cancellable!"); + __android_log_print(ANDROID_LOG_WARN, LOG_TAG, "Cannot make thread async cancellable!"); #else - // that's the default, but just in case... - pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL); - // we want cancellation to take effect immediately if possible, instead of waiting for a cancellation point (which is the default) - pthread_setcanceltype( PTHREAD_CANCEL_ASYNCHRONOUS, NULL); + // that's the default, but just in case... + pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL); + // we want cancellation to take effect immediately if possible, instead of waiting for a cancellation point (which is the default) + pthread_setcanceltype( PTHREAD_CANCEL_ASYNCHRONOUS, NULL); #endif - } + } - void THREAD_SETNAME( char const* _name) - { - // exact API to set the thread name is platform-dependant - // if you need to fix the build, or if you know how to fill a hole, tell me (bnt.germain@gmail.com) so that I can submit the fix in github. + void THREAD_SETNAME( char const* _name) + { + // exact API to set the thread name is platform-dependant + // if you need to fix the build, or if you know how to fill a hole, tell me (bnt.germain@gmail.com) so that I can submit the fix in github. 
#if defined PLATFORM_BSD && !defined __NetBSD__ - pthread_set_name_np( pthread_self(), _name); + pthread_set_name_np( pthread_self(), _name); #elif defined PLATFORM_BSD && defined __NetBSD__ - pthread_setname_np( pthread_self(), "%s", (void *)_name); + pthread_setname_np( pthread_self(), "%s", (void *)_name); #elif defined PLATFORM_LINUX - #if LINUX_USE_PTHREAD_SETNAME_NP - pthread_setname_np( pthread_self(), _name); - #else // LINUX_USE_PTHREAD_SETNAME_NP - prctl(PR_SET_NAME, _name, 0, 0, 0); - #endif // LINUX_USE_PTHREAD_SETNAME_NP + #if LINUX_USE_PTHREAD_SETNAME_NP + pthread_setname_np( pthread_self(), _name); + #else // LINUX_USE_PTHREAD_SETNAME_NP + prctl(PR_SET_NAME, _name, 0, 0, 0); + #endif // LINUX_USE_PTHREAD_SETNAME_NP #elif defined PLATFORM_QNX || defined PLATFORM_CYGWIN - pthread_setname_np( pthread_self(), _name); + pthread_setname_np( pthread_self(), _name); #elif defined PLATFORM_OSX - pthread_setname_np(_name); + pthread_setname_np(_name); #elif defined PLATFORM_WIN32 || defined PLATFORM_POCKETPC - PT_CALL( pthread_setname_np( pthread_self(), _name)); + PT_CALL( pthread_setname_np( pthread_self(), _name)); #endif - } + } #endif // THREADAPI == THREADAPI_PTHREAD diff --git a/src/threading.h b/src/threading.h index 778b6a0..1224e08 100644 --- a/src/threading.h +++ b/src/threading.h @@ -66,41 +66,41 @@ enum e_status { PENDING, RUNNING, WAITING, DONE, ERROR_ST, CANCELLED }; // needed for use with the SIGNAL system. // - #if _WIN32_WINNT < 0x0600 // CONDITION_VARIABLE aren't available, use a signal + #if _WIN32_WINNT < 0x0600 // CONDITION_VARIABLE aren't available, use a signal - typedef struct - { - CRITICAL_SECTION signalCS; - CRITICAL_SECTION countCS; - HANDLE waitEvent; - HANDLE waitDoneEvent; - LONG waitersCount; - } SIGNAL_T; + typedef struct + { + CRITICAL_SECTION signalCS; + CRITICAL_SECTION countCS; + HANDLE waitEvent; + HANDLE waitDoneEvent; + LONG waitersCount; + } SIGNAL_T; - #define MUTEX_T HANDLE - void MUTEX_INIT( MUTEX_T* ref); - void MUTEX_FREE( MUTEX_T* ref); - void MUTEX_LOCK( MUTEX_T* ref); - void MUTEX_UNLOCK( MUTEX_T* ref); + #define MUTEX_T HANDLE + void MUTEX_INIT( MUTEX_T* ref); + void MUTEX_FREE( MUTEX_T* ref); + void MUTEX_LOCK( MUTEX_T* ref); + void MUTEX_UNLOCK( MUTEX_T* ref); - #else // CONDITION_VARIABLE are available, use them + #else // CONDITION_VARIABLE are available, use them - #define SIGNAL_T CONDITION_VARIABLE - #define MUTEX_T CRITICAL_SECTION - #define MUTEX_INIT( ref) InitializeCriticalSection( ref) - #define MUTEX_FREE( ref) DeleteCriticalSection( ref) - #define MUTEX_LOCK( ref) EnterCriticalSection( ref) - #define MUTEX_UNLOCK( ref) LeaveCriticalSection( ref) + #define SIGNAL_T CONDITION_VARIABLE + #define MUTEX_T CRITICAL_SECTION + #define MUTEX_INIT( ref) InitializeCriticalSection( ref) + #define MUTEX_FREE( ref) DeleteCriticalSection( ref) + #define MUTEX_LOCK( ref) EnterCriticalSection( ref) + #define MUTEX_UNLOCK( ref) LeaveCriticalSection( ref) - #endif // CONDITION_VARIABLE are available + #endif // CONDITION_VARIABLE are available #define MUTEX_RECURSIVE_INIT(ref) MUTEX_INIT(ref) /* always recursive in Win32 */ typedef unsigned int THREAD_RETURN_T; #define YIELD() Sleep(0) - #define THREAD_CALLCONV __stdcall + #define THREAD_CALLCONV __stdcall #else // THREADAPI == THREADAPI_PTHREAD // PThread (Linux, OS X, ...) 
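// A minimal usage sketch of the MUTEX_T / SIGNAL_T primitives declared in this header,
// mirroring how lanes.c pairs them (take the mutex, re-test the predicate after every
// wakeup, signal under the same mutex). The 'example_gate' type and helper names are
// illustrative only, not part of the Lanes API.
typedef struct
{
    MUTEX_T lock;
    SIGNAL_T cond;
    bool_t ready;
} example_gate;

static void example_gate_wait( example_gate* g)
{
    MUTEX_LOCK( &g->lock);
    while( !g->ready)
    {
        // a negative timeout means "no deadline" in the SIGNAL_WAIT implementations
        SIGNAL_WAIT( &g->cond, &g->lock, -1.0);
    }
    MUTEX_UNLOCK( &g->lock);
}

static void example_gate_open( example_gate* g)
{
    MUTEX_LOCK( &g->lock);
    g->ready = TRUE;
    SIGNAL_ALL( &g->cond); // wake every waiter, as thread cancellation does for pending linda operations
    MUTEX_UNLOCK( &g->lock);
}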
@@ -143,13 +143,10 @@ enum e_status { PENDING, RUNNING, WAITING, DONE, ERROR_ST, CANCELLED }; // #if defined( PLATFORM_OSX) #define YIELD() pthread_yield_np() -#elif defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC) || defined(__ANDROID__) || defined(__NetBSD__) // no PTHREAD for PLATFORM_XBOX - // for some reason win32-pthread doesn't have pthread_yield(), but sched_yield() - #define YIELD() sched_yield() #else #define YIELD() sched_yield() #endif - #define THREAD_CALLCONV + #define THREAD_CALLCONV #endif //THREADAPI == THREADAPI_PTHREAD void SIGNAL_INIT( SIGNAL_T *ref ); @@ -174,9 +171,9 @@ bool_t SIGNAL_WAIT( SIGNAL_T *ref, MUTEX_T *mu, time_d timeout ); #if THREADAPI == THREADAPI_WINDOWS - typedef HANDLE THREAD_T; + typedef HANDLE THREAD_T; # define THREAD_ISNULL( _h) (_h == 0) - void THREAD_CREATE( THREAD_T* ref, THREAD_RETURN_T (__stdcall *func)( void*), void* data, int prio /* -3..+3 */); + void THREAD_CREATE( THREAD_T* ref, THREAD_RETURN_T (__stdcall *func)( void*), void* data, int prio /* -3..+3 */); # define THREAD_PRIO_MIN (-3) # define THREAD_PRIO_MAX (+3) @@ -186,9 +183,9 @@ bool_t SIGNAL_WAIT( SIGNAL_T *ref, MUTEX_T *mu, time_d timeout ); #else // THREADAPI == THREADAPI_PTHREAD - /* Platforms that have a timed 'pthread_join()' can get away with a simpler - * implementation. Others will use a condition variable. - */ + /* Platforms that have a timed 'pthread_join()' can get away with a simpler + * implementation. Others will use a condition variable. + */ # if defined __WINPTHREADS_VERSION //# define USE_PTHREAD_TIMEDJOIN # endif // __WINPTHREADS_VERSION @@ -202,13 +199,13 @@ bool_t SIGNAL_WAIT( SIGNAL_T *ref, MUTEX_T *mu, time_d timeout ); # endif # endif - typedef pthread_t THREAD_T; + typedef pthread_t THREAD_T; # define THREAD_ISNULL( _h) 0 // pthread_t may be a structure: never 'null' by itself - void THREAD_CREATE( THREAD_T* ref, THREAD_RETURN_T (*func)( void*), void* data, int prio /* -3..+3 */); + void THREAD_CREATE( THREAD_T* ref, THREAD_RETURN_T (*func)( void*), void* data, int prio /* -3..+3 */); # if defined(PLATFORM_LINUX) - extern volatile bool_t sudo; + extern volatile bool_t sudo; # ifdef LINUX_SCHED_RR # define THREAD_PRIO_MIN (sudo ? -3 : 0) # else diff --git a/src/tools.c b/src/tools.c index 1436e8d..acb78e6 100644 --- a/src/tools.c +++ b/src/tools.c @@ -106,50 +106,50 @@ void push_registry_subtable( lua_State* L, UniqueKey key_) #ifdef _DEBUG void luaG_dump( lua_State* L) { - int top = lua_gettop( L); - int i; - - fprintf( stderr, "\n\tDEBUG STACK:\n"); - - if( top == 0) - fprintf( stderr, "\t(none)\n"); - - for( i = 1; i <= top; ++ i) - { - int type = lua_type( L, i); - - fprintf( stderr, "\t[%d]= (%s) ", i, lua_typename( L, type)); - - // Print item contents here... - // - // Note: this requires 'tostring()' to be defined. If it is NOT, - // enable it for more debugging. 
- // - STACK_CHECK( L, 0); - STACK_GROW( L, 2); - - lua_getglobal( L, "tostring"); - // - // [-1]: tostring function, or nil - - if( !lua_isfunction( L, -1)) - { - fprintf( stderr, "('tostring' not available)"); - } - else - { - lua_pushvalue( L, i); - lua_call( L, 1 /*args*/, 1 /*retvals*/); - - // Don't trust the string contents - // - fprintf( stderr, "%s", lua_tostring( L, -1)); - } - lua_pop( L, 1); - STACK_END( L, 0); - fprintf( stderr, "\n"); - } - fprintf( stderr, "\n"); + int top = lua_gettop( L); + int i; + + fprintf( stderr, "\n\tDEBUG STACK:\n"); + + if( top == 0) + fprintf( stderr, "\t(none)\n"); + + for( i = 1; i <= top; ++ i) + { + int type = lua_type( L, i); + + fprintf( stderr, "\t[%d]= (%s) ", i, lua_typename( L, type)); + + // Print item contents here... + // + // Note: this requires 'tostring()' to be defined. If it is NOT, + // enable it for more debugging. + // + STACK_CHECK( L, 0); + STACK_GROW( L, 2); + + lua_getglobal( L, "tostring"); + // + // [-1]: tostring function, or nil + + if( !lua_isfunction( L, -1)) + { + fprintf( stderr, "('tostring' not available)"); + } + else + { + lua_pushvalue( L, i); + lua_call( L, 1 /*args*/, 1 /*retvals*/); + + // Don't trust the string contents + // + fprintf( stderr, "%s", lua_tostring( L, -1)); + } + lua_pop( L, 1); + STACK_END( L, 0); + fprintf( stderr, "\n"); + } + fprintf( stderr, "\n"); } #endif // _DEBUG @@ -157,79 +157,79 @@ void luaG_dump( lua_State* L) static void* protected_lua_Alloc( void *ud, void *ptr, size_t osize, size_t nsize) { - void* p; - ProtectedAllocator* s = (ProtectedAllocator*) ud; - MUTEX_LOCK( &s->lock); - p = s->definition.allocF( s->definition.allocUD, ptr, osize, nsize); - MUTEX_UNLOCK( &s->lock); - return p; + void* p; + ProtectedAllocator* s = (ProtectedAllocator*) ud; + MUTEX_LOCK( &s->lock); + p = s->definition.allocF( s->definition.allocUD, ptr, osize, nsize); + MUTEX_UNLOCK( &s->lock); + return p; } static int luaG_provide_protected_allocator( lua_State* L) { - Universe* U = universe_get( L); - AllocatorDefinition* def = lua_newuserdatauv( L, sizeof(AllocatorDefinition), 0); - def->allocF = protected_lua_Alloc; - def->allocUD = &U->protected_allocator; - return 1; + Universe* U = universe_get( L); + AllocatorDefinition* def = lua_newuserdatauv( L, sizeof(AllocatorDefinition), 0); + def->allocF = protected_lua_Alloc; + def->allocUD = &U->protected_allocator; + return 1; } // Do I need to disable this when compiling for LuaJIT to prevent issues? void initialize_allocator_function( Universe* U, lua_State* L) { - STACK_CHECK( L, 0); - lua_getfield( L, -1, "allocator"); // settings allocator|nil|"protected" - if( !lua_isnil( L, -1)) - { - // store C function pointer in an internal variable - U->provide_allocator = lua_tocfunction( L, -1); // settings allocator - if( U->provide_allocator != NULL) - { - // make sure the function doesn't have upvalues - char const* upname = lua_getupvalue( L, -1, 1); // settings allocator upval? 
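// A minimal sketch (hypothetical embedder code, not part of Lanes) of the config.allocator
// protocol handled here: a C function that Lanes calls to obtain a full userdata holding an
// AllocatorDefinition, later consumed by create_state(). 'my_allocF' and
// 'my_provide_allocator' are assumed names for illustration only; realloc/free need <stdlib.h>.
static void* my_allocF( void* ud, void* ptr, size_t osize, size_t nsize)
{
    (void) ud; (void) osize; // same contract as a standard lua_Alloc
    if( nsize == 0)
    {
        free( ptr);
        return NULL;
    }
    return realloc( ptr, nsize);
}

static int my_provide_allocator( lua_State* L)
{
    AllocatorDefinition* def = lua_newuserdatauv( L, sizeof(AllocatorDefinition), 0);
    def->allocF = my_allocF;
    def->allocUD = NULL;
    return 1; // Lanes reads allocF/allocUD from this userdata when creating new states
}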
- if( upname != NULL) // should be "" for C functions with upvalues if any - { - (void) luaL_error( L, "config.allocator() shouldn't have upvalues"); - } - // remove this C function from the config table so that it doesn't cause problems - // when we transfer the config table in newly created Lua states - lua_pushnil( L); // settings allocator nil - lua_setfield( L, -3, "allocator"); // settings allocator - } - else if( lua_type( L, -1) == LUA_TSTRING) - { - // initialize all we need for the protected allocator - MUTEX_INIT( &U->protected_allocator.lock); // the mutex - // and the original allocator to call from inside protection by the mutex - U->protected_allocator.definition.allocF = lua_getallocf( L, &U->protected_allocator.definition.allocUD); - // before a state is created, this function will be called to obtain the allocator - U->provide_allocator = luaG_provide_protected_allocator; - - lua_setallocf( L, protected_lua_Alloc, &U->protected_allocator); - } - } - lua_pop( L, 1); // settings - STACK_END( L, 0); + STACK_CHECK( L, 0); + lua_getfield( L, -1, "allocator"); // settings allocator|nil|"protected" + if( !lua_isnil( L, -1)) + { + // store C function pointer in an internal variable + U->provide_allocator = lua_tocfunction( L, -1); // settings allocator + if( U->provide_allocator != NULL) + { + // make sure the function doesn't have upvalues + char const* upname = lua_getupvalue( L, -1, 1); // settings allocator upval? + if( upname != NULL) // should be "" for C functions with upvalues if any + { + (void) luaL_error( L, "config.allocator() shouldn't have upvalues"); + } + // remove this C function from the config table so that it doesn't cause problems + // when we transfer the config table in newly created Lua states + lua_pushnil( L); // settings allocator nil + lua_setfield( L, -3, "allocator"); // settings allocator + } + else if( lua_type( L, -1) == LUA_TSTRING) + { + // initialize all we need for the protected allocator + MUTEX_INIT( &U->protected_allocator.lock); // the mutex + // and the original allocator to call from inside protection by the mutex + U->protected_allocator.definition.allocF = lua_getallocf( L, &U->protected_allocator.definition.allocUD); + // before a state is created, this function will be called to obtain the allocator + U->provide_allocator = luaG_provide_protected_allocator; + + lua_setallocf( L, protected_lua_Alloc, &U->protected_allocator); + } + } + lua_pop( L, 1); // settings + STACK_END( L, 0); } void cleanup_allocator_function( Universe* U, lua_State* L) { - // remove the protected allocator, if any - if( U->protected_allocator.definition.allocF != NULL) - { - // install the non-protected allocator - lua_setallocf( L, U->protected_allocator.definition.allocF, U->protected_allocator.definition.allocUD); - // release the mutex - MUTEX_FREE( &U->protected_allocator.lock); - } + // remove the protected allocator, if any + if( U->protected_allocator.definition.allocF != NULL) + { + // install the non-protected allocator + lua_setallocf( L, U->protected_allocator.definition.allocF, U->protected_allocator.definition.allocUD); + // release the mutex + MUTEX_FREE( &U->protected_allocator.lock); + } } // ################################################################################################ static int dummy_writer( lua_State* L, void const* p, size_t sz, void* ud) { - (void)L; (void)p; (void)sz; (void) ud; // unused - return 666; + (void)L; (void)p; (void)sz; (void) ud; // unused + return 666; } @@ -250,42 +250,42 @@ static int dummy_writer( 
lua_State* L, void const* p, size_t sz, void* ud) typedef enum { - FST_Bytecode, - FST_Native, - FST_FastJIT + FST_Bytecode, + FST_Native, + FST_FastJIT } FuncSubType; FuncSubType luaG_getfuncsubtype( lua_State *L, int _i) { - if( lua_tocfunction( L, _i)) - { - return FST_Native; - } - { - int mustpush = 0, dumpres; - if( lua_absindex( L, _i) != lua_gettop( L)) - { - lua_pushvalue( L, _i); - mustpush = 1; - } - // the provided writer fails with code 666 - // therefore, anytime we get 666, this means that lua_dump() attempted a dump - // all other cases mean this is either a C or LuaJIT-fast function - dumpres = lua504_dump( L, dummy_writer, NULL, 0); - lua_pop( L, mustpush); - if( dumpres == 666) - { - return FST_Bytecode; - } - } - return FST_FastJIT; + if( lua_tocfunction( L, _i)) + { + return FST_Native; + } + { + int mustpush = 0, dumpres; + if( lua_absindex( L, _i) != lua_gettop( L)) + { + lua_pushvalue( L, _i); + mustpush = 1; + } + // the provided writer fails with code 666 + // therefore, anytime we get 666, this means that lua_dump() attempted a dump + // all other cases mean this is either a C or LuaJIT-fast function + dumpres = lua504_dump( L, dummy_writer, NULL, 0); + lua_pop( L, mustpush); + if( dumpres == 666) + { + return FST_Bytecode; + } + } + return FST_FastJIT; } static lua_CFunction luaG_tocfunction( lua_State *L, int _i, FuncSubType *_out) { - lua_CFunction p = lua_tocfunction( L, _i); - *_out = luaG_getfuncsubtype( L, _i); - return p; + lua_CFunction p = lua_tocfunction( L, _i); + *_out = luaG_getfuncsubtype( L, _i); + return p; } // crc64/we of string "LOOKUPCACHE_REGKEY" generated at http://www.nitrxgen.net/hashgen/ @@ -294,26 +294,26 @@ static DECLARE_CONST_UNIQUE_KEY( LOOKUPCACHE_REGKEY, 0x837a68dfc6fcb716); // inspired from tconcat() in ltablib.c static char const* luaG_pushFQN( lua_State* L, int t, int last, size_t* length) { - int i = 1; - luaL_Buffer b; - STACK_CHECK( L, 0); - // Lua 5.4 pushes &b as light userdata on the stack. be aware of it... - luaL_buffinit( L, &b); // ... {} ... &b? - for( ; i < last; ++ i) - { - lua_rawgeti( L, t, i); - luaL_addvalue( &b); - luaL_addlstring(&b, "/", 1); - } - if( i == last) // add last value (if interval was not empty) - { - lua_rawgeti( L, t, i); - luaL_addvalue( &b); - } - // &b is popped at that point (-> replaced by the result) - luaL_pushresult( &b); // ... {} ... "" - STACK_END( L, 1); - return lua_tolstring( L, -1, length); + int i = 1; + luaL_Buffer b; + STACK_CHECK( L, 0); + // Lua 5.4 pushes &b as light userdata on the stack. be aware of it... + luaL_buffinit( L, &b); // ... {} ... &b? + for( ; i < last; ++ i) + { + lua_rawgeti( L, t, i); + luaL_addvalue( &b); + luaL_addlstring(&b, "/", 1); + } + if( i == last) // add last value (if interval was not empty) + { + lua_rawgeti( L, t, i); + luaL_addvalue( &b); + } + // &b is popped at that point (-> replaced by the result) + luaL_pushresult( &b); // ... {} ... 
"" + STACK_END( L, 1); + return lua_tolstring( L, -1, length); } /* @@ -326,199 +326,199 @@ static char const* luaG_pushFQN( lua_State* L, int t, int last, size_t* length) */ static void update_lookup_entry( DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State* L, int _ctx_base, int _depth) { - // slot 1 in the stack contains the table that receives everything we found - int const dest = _ctx_base; - // slot 2 contains a table that, when concatenated, produces the fully qualified name of scanned elements in the table provided at slot _i - int const fqn = _ctx_base + 1; - - size_t prevNameLength, newNameLength; - char const* prevName; - DEBUGSPEW_CODE( char const *newName); - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "update_lookup_entry()\n" INDENT_END)); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - - STACK_CHECK( L, 0); - // first, raise an error if the function is already known - lua_pushvalue( L, -1); // ... {bfc} k o o - lua_rawget( L, dest); // ... {bfc} k o name? - prevName = lua_tolstring( L, -1, &prevNameLength); // NULL if we got nil (first encounter of this object) - // push name in fqn stack (note that concatenation will crash if name is a not string or a number) - lua_pushvalue( L, -3); // ... {bfc} k o name? k - ASSERT_L( lua_type( L, -1) == LUA_TNUMBER || lua_type( L, -1) == LUA_TSTRING); - ++ _depth; - lua_rawseti( L, fqn, _depth); // ... {bfc} k o name? - // generate name - DEBUGSPEW_CODE( newName =) luaG_pushFQN( L, fqn, _depth, &newNameLength); // ... {bfc} k o name? "f.q.n" - // Lua 5.2 introduced a hash randomizer seed which causes table iteration to yield a different key order - // on different VMs even when the tables are populated the exact same way. - // When Lua is built with compatibility options (such as LUA_COMPAT_ALL), - // this causes several base libraries to register functions under multiple names. - // This, with the randomizer, can cause the first generated name of an object to be different on different VMs, - // which breaks function transfer. - // Also, nothing prevents any external module from exposing a given object under several names, so... - // Therefore, when we encounter an object for which a name was previously registered, we need to select the names - // based on some sorting order so that we end up with the same name in all databases whatever order the table walk yielded - if( prevName != NULL && (prevNameLength < newNameLength || lua_lessthan( L, -2, -1))) - { - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%s '%s' remained named '%s'\n" INDENT_END, lua_typename( L, lua_type( L, -3)), newName, prevName)); - // the previous name is 'smaller' than the one we just generated: keep it! - lua_pop( L, 3); // ... {bfc} k - } - else - { - // the name we generated is either the first one, or a better fit for our purposes - if( prevName) - { - // clear the previous name for the database to avoid clutter - lua_insert( L, -2); // ... {bfc} k o "f.q.n" prevName - // t[prevName] = nil - lua_pushnil( L); // ... {bfc} k o "f.q.n" prevName nil - lua_rawset( L, dest); // ... {bfc} k o "f.q.n" - } - else - { - lua_remove( L, -2); // ... {bfc} k o "f.q.n" - } - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%s '%s'\n" INDENT_END, lua_typename( L, lua_type( L, -2)), newName)); - // prepare the stack for database feed - lua_pushvalue( L, -1); // ... {bfc} k o "f.q.n" "f.q.n" - lua_pushvalue( L, -3); // ... {bfc} k o "f.q.n" "f.q.n" o - ASSERT_L( lua_rawequal( L, -1, -4)); - ASSERT_L( lua_rawequal( L, -2, -3)); - // t["f.q.n"] = o - lua_rawset( L, dest); // ... 
{bfc} k o "f.q.n" - // t[o] = "f.q.n" - lua_rawset( L, dest); // ... {bfc} k - // remove table name from fqn stack - lua_pushnil( L); // ... {bfc} k nil - lua_rawseti( L, fqn, _depth); // ... {bfc} k - } - -- _depth; - STACK_END( L, -1); - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + // slot 1 in the stack contains the table that receives everything we found + int const dest = _ctx_base; + // slot 2 contains a table that, when concatenated, produces the fully qualified name of scanned elements in the table provided at slot _i + int const fqn = _ctx_base + 1; + + size_t prevNameLength, newNameLength; + char const* prevName; + DEBUGSPEW_CODE( char const *newName); + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "update_lookup_entry()\n" INDENT_END)); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + + STACK_CHECK( L, 0); + // first, raise an error if the function is already known + lua_pushvalue( L, -1); // ... {bfc} k o o + lua_rawget( L, dest); // ... {bfc} k o name? + prevName = lua_tolstring( L, -1, &prevNameLength); // NULL if we got nil (first encounter of this object) + // push name in fqn stack (note that concatenation will crash if name is a not string or a number) + lua_pushvalue( L, -3); // ... {bfc} k o name? k + ASSERT_L( lua_type( L, -1) == LUA_TNUMBER || lua_type( L, -1) == LUA_TSTRING); + ++ _depth; + lua_rawseti( L, fqn, _depth); // ... {bfc} k o name? + // generate name + DEBUGSPEW_CODE( newName =) luaG_pushFQN( L, fqn, _depth, &newNameLength); // ... {bfc} k o name? "f.q.n" + // Lua 5.2 introduced a hash randomizer seed which causes table iteration to yield a different key order + // on different VMs even when the tables are populated the exact same way. + // When Lua is built with compatibility options (such as LUA_COMPAT_ALL), + // this causes several base libraries to register functions under multiple names. + // This, with the randomizer, can cause the first generated name of an object to be different on different VMs, + // which breaks function transfer. + // Also, nothing prevents any external module from exposing a given object under several names, so... + // Therefore, when we encounter an object for which a name was previously registered, we need to select the names + // based on some sorting order so that we end up with the same name in all databases whatever order the table walk yielded + if( prevName != NULL && (prevNameLength < newNameLength || lua_lessthan( L, -2, -1))) + { + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%s '%s' remained named '%s'\n" INDENT_END, lua_typename( L, lua_type( L, -3)), newName, prevName)); + // the previous name is 'smaller' than the one we just generated: keep it! + lua_pop( L, 3); // ... {bfc} k + } + else + { + // the name we generated is either the first one, or a better fit for our purposes + if( prevName) + { + // clear the previous name for the database to avoid clutter + lua_insert( L, -2); // ... {bfc} k o "f.q.n" prevName + // t[prevName] = nil + lua_pushnil( L); // ... {bfc} k o "f.q.n" prevName nil + lua_rawset( L, dest); // ... {bfc} k o "f.q.n" + } + else + { + lua_remove( L, -2); // ... {bfc} k o "f.q.n" + } + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%s '%s'\n" INDENT_END, lua_typename( L, lua_type( L, -2)), newName)); + // prepare the stack for database feed + lua_pushvalue( L, -1); // ... {bfc} k o "f.q.n" "f.q.n" + lua_pushvalue( L, -3); // ... {bfc} k o "f.q.n" "f.q.n" o + ASSERT_L( lua_rawequal( L, -1, -4)); + ASSERT_L( lua_rawequal( L, -2, -3)); + // t["f.q.n"] = o + lua_rawset( L, dest); // ... 
{bfc} k o "f.q.n" + // t[o] = "f.q.n" + lua_rawset( L, dest); // ... {bfc} k + // remove table name from fqn stack + lua_pushnil( L); // ... {bfc} k nil + lua_rawseti( L, fqn, _depth); // ... {bfc} k + } + -- _depth; + STACK_END( L, -1); + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); } static void populate_func_lookup_table_recur( DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State* L, int _ctx_base, int _i, int _depth) { - lua_Integer visit_count; - // slot 2 contains a table that, when concatenated, produces the fully qualified name of scanned elements in the table provided at slot _i - int const fqn = _ctx_base + 1; - // slot 3 contains a cache that stores all already visited tables to avoid infinite recursion loops - int const cache = _ctx_base + 2; - // we need to remember subtables to process them after functions encountered at the current depth (breadth-first search) - int const breadth_first_cache = lua_gettop( L) + 1; - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "populate_func_lookup_table_recur()\n" INDENT_END)); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - - STACK_GROW( L, 6); - // slot _i contains a table where we search for functions (or a full userdata with a metatable) - STACK_CHECK( L, 0); // ... {_i} - - // if object is a userdata, replace it by its metatable - if( lua_type( L, _i) == LUA_TUSERDATA) - { - lua_getmetatable( L, _i); // ... {_i} mt - lua_replace( L, _i); // ... {_i} - } - - // if table is already visited, we are done - lua_pushvalue( L, _i); // ... {_i} {} - lua_rawget( L, cache); // ... {_i} nil|n - visit_count = lua_tointeger( L, -1); // 0 if nil, else n - lua_pop( L, 1); // ... {_i} - STACK_MID( L, 0); - if( visit_count > 0) - { - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "already visited\n" INDENT_END)); - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - return; - } - - // remember we visited this table (1-visit count) - lua_pushvalue( L, _i); // ... {_i} {} - lua_pushinteger( L, visit_count + 1); // ... {_i} {} 1 - lua_rawset( L, cache); // ... {_i} - STACK_MID( L, 0); - - // this table is at breadth_first_cache index - lua_newtable( L); // ... {_i} {bfc} - ASSERT_L( lua_gettop( L) == breadth_first_cache); - // iterate over all entries in the processed table - lua_pushnil( L); // ... {_i} {bfc} nil - while( lua_next( L, _i) != 0) // ... {_i} {bfc} k v - { - // just for debug, not actually needed - //char const* key = (lua_type( L, -2) == LUA_TSTRING) ? lua_tostring( L, -2) : "not a string"; - // subtable: process it recursively - if( lua_istable( L, -1)) // ... {_i} {bfc} k {} - { - // increment visit count to make sure we will actually scan it at this recursive level - lua_pushvalue( L, -1); // ... {_i} {bfc} k {} {} - lua_pushvalue( L, -1); // ... {_i} {bfc} k {} {} {} - lua_rawget( L, cache); // ... {_i} {bfc} k {} {} n? - visit_count = lua_tointeger( L, -1) + 1; // 1 if we got nil, else n+1 - lua_pop( L, 1); // ... {_i} {bfc} k {} {} - lua_pushinteger( L, visit_count); // ... {_i} {bfc} k {} {} n - lua_rawset( L, cache); // ... {_i} {bfc} k {} - // store the table in the breadth-first cache - lua_pushvalue( L, -2); // ... {_i} {bfc} k {} k - lua_pushvalue( L, -2); // ... {_i} {bfc} k {} k {} - lua_rawset( L, breadth_first_cache); // ... {_i} {bfc} k {} - // generate a name, and if we already had one name, keep whichever is the shorter - update_lookup_entry( DEBUGSPEW_PARAM_COMMA( U) L, _ctx_base, _depth); // ... {_i} {bfc} k - } - else if( lua_isfunction( L, -1) && (luaG_getfuncsubtype( L, -1) != FST_Bytecode)) // ... 
{_i} {bfc} k func - { - // generate a name, and if we already had one name, keep whichever is the shorter - update_lookup_entry( DEBUGSPEW_PARAM_COMMA( U) L, _ctx_base, _depth); // ... {_i} {bfc} k - } - else - { - lua_pop( L, 1); // ... {_i} {bfc} k - } - STACK_MID( L, 2); - } - // now process the tables we encountered at that depth - ++ _depth; - lua_pushnil( L); // ... {_i} {bfc} nil - while( lua_next( L, breadth_first_cache) != 0) // ... {_i} {bfc} k {} - { - DEBUGSPEW_CODE( char const* key = (lua_type( L, -2) == LUA_TSTRING) ? lua_tostring( L, -2) : "not a string"); - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "table '%s'\n" INDENT_END, key)); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - // un-visit this table in case we do need to process it - lua_pushvalue( L, -1); // ... {_i} {bfc} k {} {} - lua_rawget( L, cache); // ... {_i} {bfc} k {} n - ASSERT_L( lua_type( L, -1) == LUA_TNUMBER); - visit_count = lua_tointeger( L, -1) - 1; - lua_pop( L, 1); // ... {_i} {bfc} k {} - lua_pushvalue( L, -1); // ... {_i} {bfc} k {} {} - if( visit_count > 0) - { - lua_pushinteger( L, visit_count); // ... {_i} {bfc} k {} {} n - } - else - { - lua_pushnil( L); // ... {_i} {bfc} k {} {} nil - } - lua_rawset( L, cache); // ... {_i} {bfc} k {} - // push table name in fqn stack (note that concatenation will crash if name is a not string!) - lua_pushvalue( L, -2); // ... {_i} {bfc} k {} k - lua_rawseti( L, fqn, _depth); // ... {_i} {bfc} k {} - populate_func_lookup_table_recur( DEBUGSPEW_PARAM_COMMA( U) L, _ctx_base, lua_gettop( L), _depth); - lua_pop( L, 1); // ... {_i} {bfc} k - STACK_MID( L, 2); - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - } - // remove table name from fqn stack - lua_pushnil( L); // ... {_i} {bfc} nil - lua_rawseti( L, fqn, _depth); // ... {_i} {bfc} - -- _depth; - // we are done with our cache - lua_pop( L, 1); // ... {_i} - STACK_END( L, 0); - // we are done // ... {_i} {bfc} - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + lua_Integer visit_count; + // slot 2 contains a table that, when concatenated, produces the fully qualified name of scanned elements in the table provided at slot _i + int const fqn = _ctx_base + 1; + // slot 3 contains a cache that stores all already visited tables to avoid infinite recursion loops + int const cache = _ctx_base + 2; + // we need to remember subtables to process them after functions encountered at the current depth (breadth-first search) + int const breadth_first_cache = lua_gettop( L) + 1; + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "populate_func_lookup_table_recur()\n" INDENT_END)); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + + STACK_GROW( L, 6); + // slot _i contains a table where we search for functions (or a full userdata with a metatable) + STACK_CHECK( L, 0); // ... {_i} + + // if object is a userdata, replace it by its metatable + if( lua_type( L, _i) == LUA_TUSERDATA) + { + lua_getmetatable( L, _i); // ... {_i} mt + lua_replace( L, _i); // ... {_i} + } + + // if table is already visited, we are done + lua_pushvalue( L, _i); // ... {_i} {} + lua_rawget( L, cache); // ... {_i} nil|n + visit_count = lua_tointeger( L, -1); // 0 if nil, else n + lua_pop( L, 1); // ... {_i} + STACK_MID( L, 0); + if( visit_count > 0) + { + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "already visited\n" INDENT_END)); + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + return; + } + + // remember we visited this table (1-visit count) + lua_pushvalue( L, _i); // ... {_i} {} + lua_pushinteger( L, visit_count + 1); // ... 
{_i} {} 1 + lua_rawset( L, cache); // ... {_i} + STACK_MID( L, 0); + + // this table is at breadth_first_cache index + lua_newtable( L); // ... {_i} {bfc} + ASSERT_L( lua_gettop( L) == breadth_first_cache); + // iterate over all entries in the processed table + lua_pushnil( L); // ... {_i} {bfc} nil + while( lua_next( L, _i) != 0) // ... {_i} {bfc} k v + { + // just for debug, not actually needed + //char const* key = (lua_type( L, -2) == LUA_TSTRING) ? lua_tostring( L, -2) : "not a string"; + // subtable: process it recursively + if( lua_istable( L, -1)) // ... {_i} {bfc} k {} + { + // increment visit count to make sure we will actually scan it at this recursive level + lua_pushvalue( L, -1); // ... {_i} {bfc} k {} {} + lua_pushvalue( L, -1); // ... {_i} {bfc} k {} {} {} + lua_rawget( L, cache); // ... {_i} {bfc} k {} {} n? + visit_count = lua_tointeger( L, -1) + 1; // 1 if we got nil, else n+1 + lua_pop( L, 1); // ... {_i} {bfc} k {} {} + lua_pushinteger( L, visit_count); // ... {_i} {bfc} k {} {} n + lua_rawset( L, cache); // ... {_i} {bfc} k {} + // store the table in the breadth-first cache + lua_pushvalue( L, -2); // ... {_i} {bfc} k {} k + lua_pushvalue( L, -2); // ... {_i} {bfc} k {} k {} + lua_rawset( L, breadth_first_cache); // ... {_i} {bfc} k {} + // generate a name, and if we already had one name, keep whichever is the shorter + update_lookup_entry( DEBUGSPEW_PARAM_COMMA( U) L, _ctx_base, _depth); // ... {_i} {bfc} k + } + else if( lua_isfunction( L, -1) && (luaG_getfuncsubtype( L, -1) != FST_Bytecode)) // ... {_i} {bfc} k func + { + // generate a name, and if we already had one name, keep whichever is the shorter + update_lookup_entry( DEBUGSPEW_PARAM_COMMA( U) L, _ctx_base, _depth); // ... {_i} {bfc} k + } + else + { + lua_pop( L, 1); // ... {_i} {bfc} k + } + STACK_MID( L, 2); + } + // now process the tables we encountered at that depth + ++ _depth; + lua_pushnil( L); // ... {_i} {bfc} nil + while( lua_next( L, breadth_first_cache) != 0) // ... {_i} {bfc} k {} + { + DEBUGSPEW_CODE( char const* key = (lua_type( L, -2) == LUA_TSTRING) ? lua_tostring( L, -2) : "not a string"); + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "table '%s'\n" INDENT_END, key)); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + // un-visit this table in case we do need to process it + lua_pushvalue( L, -1); // ... {_i} {bfc} k {} {} + lua_rawget( L, cache); // ... {_i} {bfc} k {} n + ASSERT_L( lua_type( L, -1) == LUA_TNUMBER); + visit_count = lua_tointeger( L, -1) - 1; + lua_pop( L, 1); // ... {_i} {bfc} k {} + lua_pushvalue( L, -1); // ... {_i} {bfc} k {} {} + if( visit_count > 0) + { + lua_pushinteger( L, visit_count); // ... {_i} {bfc} k {} {} n + } + else + { + lua_pushnil( L); // ... {_i} {bfc} k {} {} nil + } + lua_rawset( L, cache); // ... {_i} {bfc} k {} + // push table name in fqn stack (note that concatenation will crash if name is a not string!) + lua_pushvalue( L, -2); // ... {_i} {bfc} k {} k + lua_rawseti( L, fqn, _depth); // ... {_i} {bfc} k {} + populate_func_lookup_table_recur( DEBUGSPEW_PARAM_COMMA( U) L, _ctx_base, lua_gettop( L), _depth); + lua_pop( L, 1); // ... {_i} {bfc} k + STACK_MID( L, 2); + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + } + // remove table name from fqn stack + lua_pushnil( L); // ... {_i} {bfc} nil + lua_rawseti( L, fqn, _depth); // ... {_i} {bfc} + -- _depth; + // we are done with our cache + lua_pop( L, 1); // ... {_i} + STACK_END( L, 0); + // we are done // ... 
{_i} {bfc} + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); } /* @@ -526,63 +526,63 @@ static void populate_func_lookup_table_recur( DEBUGSPEW_PARAM_COMMA( Universe* U */ void populate_func_lookup_table( lua_State* L, int _i, char const* name_) { - int const ctx_base = lua_gettop( L) + 1; - int const in_base = lua_absindex( L, _i); - int start_depth = 0; - DEBUGSPEW_CODE( Universe* U = universe_get( L)); - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: populate_func_lookup_table('%s')\n" INDENT_END, L, name_ ? name_ : "NULL")); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - STACK_GROW( L, 3); - STACK_CHECK( L, 0); - REGISTRY_GET( L, LOOKUP_REGKEY); // {} - STACK_MID( L, 1); - ASSERT_L( lua_istable( L, -1)); - if( lua_type( L, in_base) == LUA_TFUNCTION) // for example when a module is a simple function - { - name_ = name_ ? name_ : "NULL"; - lua_pushvalue( L, in_base); // {} f - lua_pushstring( L, name_); // {} f _name - lua_rawset( L, -3); // {} - lua_pushstring( L, name_); // {} _name - lua_pushvalue( L, in_base); // {} _name f - lua_rawset( L, -3); // {} - lua_pop( L, 1); // - } - else if( lua_type( L, in_base) == LUA_TTABLE) - { - lua_newtable( L); // {} {fqn} - if( name_) - { - STACK_MID( L, 2); - lua_pushstring( L, name_); // {} {fqn} "name" - // generate a name, and if we already had one name, keep whichever is the shorter - lua_pushvalue( L, in_base); // {} {fqn} "name" t - update_lookup_entry( DEBUGSPEW_PARAM_COMMA( U) L, ctx_base, start_depth); // {} {fqn} "name" - // don't forget to store the name at the bottom of the fqn stack - ++ start_depth; - lua_rawseti( L, -2, start_depth); // {} {fqn} - STACK_MID( L, 2); - } - // retrieve the cache, create it if we haven't done it yet - REGISTRY_GET( L, LOOKUPCACHE_REGKEY); // {} {fqn} {cache}? - if( lua_isnil( L, -1)) - { - lua_pop( L, 1); // {} {fqn} - lua_newtable( L); // {} {fqn} {cache} - REGISTRY_SET( L, LOOKUPCACHE_REGKEY, lua_pushvalue( L, -2)); - STACK_MID( L, 3); - } - // process everything we find in that table, filling in lookup data for all functions and tables we see there - populate_func_lookup_table_recur( DEBUGSPEW_PARAM_COMMA( U) L, ctx_base, in_base, start_depth); - lua_pop( L, 3); - } - else - { - lua_pop( L, 1); // - (void) luaL_error( L, "unsupported module type %s", lua_typename( L, lua_type( L, in_base))); - } - STACK_END( L, 0); - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + int const ctx_base = lua_gettop( L) + 1; + int const in_base = lua_absindex( L, _i); + int start_depth = 0; + DEBUGSPEW_CODE( Universe* U = universe_get( L)); + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: populate_func_lookup_table('%s')\n" INDENT_END, L, name_ ? name_ : "NULL")); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + STACK_GROW( L, 3); + STACK_CHECK( L, 0); + REGISTRY_GET( L, LOOKUP_REGKEY); // {} + STACK_MID( L, 1); + ASSERT_L( lua_istable( L, -1)); + if( lua_type( L, in_base) == LUA_TFUNCTION) // for example when a module is a simple function + { + name_ = name_ ? 
name_ : "NULL"; + lua_pushvalue( L, in_base); // {} f + lua_pushstring( L, name_); // {} f _name + lua_rawset( L, -3); // {} + lua_pushstring( L, name_); // {} _name + lua_pushvalue( L, in_base); // {} _name f + lua_rawset( L, -3); // {} + lua_pop( L, 1); // + } + else if( lua_type( L, in_base) == LUA_TTABLE) + { + lua_newtable( L); // {} {fqn} + if( name_) + { + STACK_MID( L, 2); + lua_pushstring( L, name_); // {} {fqn} "name" + // generate a name, and if we already had one name, keep whichever is the shorter + lua_pushvalue( L, in_base); // {} {fqn} "name" t + update_lookup_entry( DEBUGSPEW_PARAM_COMMA( U) L, ctx_base, start_depth); // {} {fqn} "name" + // don't forget to store the name at the bottom of the fqn stack + ++ start_depth; + lua_rawseti( L, -2, start_depth); // {} {fqn} + STACK_MID( L, 2); + } + // retrieve the cache, create it if we haven't done it yet + REGISTRY_GET( L, LOOKUPCACHE_REGKEY); // {} {fqn} {cache}? + if( lua_isnil( L, -1)) + { + lua_pop( L, 1); // {} {fqn} + lua_newtable( L); // {} {fqn} {cache} + REGISTRY_SET( L, LOOKUPCACHE_REGKEY, lua_pushvalue( L, -2)); + STACK_MID( L, 3); + } + // process everything we find in that table, filling in lookup data for all functions and tables we see there + populate_func_lookup_table_recur( DEBUGSPEW_PARAM_COMMA( U) L, ctx_base, in_base, start_depth); + lua_pop( L, 3); + } + else + { + lua_pop( L, 1); // + (void) luaL_error( L, "unsupported module type %s", lua_typename( L, lua_type( L, in_base))); + } + STACK_END( L, 0); + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); } /*---=== Inter-state copying ===---*/ @@ -595,61 +595,61 @@ static DECLARE_CONST_UNIQUE_KEY( REG_MTID, 0x2e68f9b4751584dc); */ static lua_Integer get_mt_id( Universe* U, lua_State* L, int i) { - lua_Integer id; + lua_Integer id; - i = lua_absindex( L, i); + i = lua_absindex( L, i); - STACK_GROW( L, 3); + STACK_GROW( L, 3); - STACK_CHECK( L, 0); - push_registry_subtable( L, REG_MTID); // ... _R[REG_MTID] - lua_pushvalue( L, i); // ... _R[REG_MTID] {mt} - lua_rawget( L, -2); // ... _R[REG_MTID] mtk? + STACK_CHECK( L, 0); + push_registry_subtable( L, REG_MTID); // ... _R[REG_MTID] + lua_pushvalue( L, i); // ... _R[REG_MTID] {mt} + lua_rawget( L, -2); // ... _R[REG_MTID] mtk? - id = lua_tointeger( L, -1); // 0 for nil - lua_pop( L, 1); // ... _R[REG_MTID] - STACK_MID( L, 1); - - if( id == 0) - { - MUTEX_LOCK( &U->mtid_lock); - id = ++ U->last_mt_id; - MUTEX_UNLOCK( &U->mtid_lock); - - /* Create two-way references: id_uint <-> table - */ - lua_pushvalue( L, i); // ... _R[REG_MTID] {mt} - lua_pushinteger( L, id); // ... _R[REG_MTID] {mt} id - lua_rawset( L, -3); // ... _R[REG_MTID] + id = lua_tointeger( L, -1); // 0 for nil + lua_pop( L, 1); // ... _R[REG_MTID] + STACK_MID( L, 1); - lua_pushinteger( L, id); // ... _R[REG_MTID] id - lua_pushvalue( L, i); // ... _R[REG_MTID] id {mt} - lua_rawset( L, -3); // ... _R[REG_MTID] - } - lua_pop( L, 1); // ... + if( id == 0) + { + MUTEX_LOCK( &U->mtid_lock); + id = ++ U->last_mt_id; + MUTEX_UNLOCK( &U->mtid_lock); + + /* Create two-way references: id_uint <-> table + */ + lua_pushvalue( L, i); // ... _R[REG_MTID] {mt} + lua_pushinteger( L, id); // ... _R[REG_MTID] {mt} id + lua_rawset( L, -3); // ... _R[REG_MTID] + + lua_pushinteger( L, id); // ... _R[REG_MTID] id + lua_pushvalue( L, i); // ... _R[REG_MTID] id {mt} + lua_rawset( L, -3); // ... _R[REG_MTID] + } + lua_pop( L, 1); // ... 
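The get_mt_id() hunk above maintains a two-way mapping in the REG_MTID registry subtable: the metatable maps to its numeric id and the id maps back to the metatable, so either end of a transfer can resolve the other. A minimal sketch of that pattern, where map_idx stands in for the REG_MTID subtable and next_id for U->last_mt_id plus its mutex (both are assumptions of this sketch, not the patched code):

#include "lua.h"

// map_idx: index of the id<->table subtable
// next_id: stand-in for U->last_mt_id (the real code takes U->mtid_lock around the increment)
static lua_Integer map_table_to_id( lua_State* L, int t, int map_idx, lua_Integer* next_id)
{
    lua_Integer id;
    t = lua_absindex( L, t);
    map_idx = lua_absindex( L, map_idx);
    lua_pushvalue( L, t);                          // ... mt
    lua_rawget( L, map_idx);                       // ... id|nil
    id = lua_tointeger( L, -1);                    // 0 when nil
    lua_pop( L, 1);                                // ...
    if( id == 0)                                   // first time we see this table: create both links
    {
        id = ++ *next_id;
        lua_pushvalue( L, t);                      // ... mt
        lua_pushinteger( L, id);                   // ... mt id
        lua_rawset( L, map_idx);                   // ...        map[mt] = id
        lua_pushinteger( L, id);                   // ... id
        lua_pushvalue( L, t);                      // ... id mt
        lua_rawset( L, map_idx);                   // ...        map[id] = mt
    }
    return id;
}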
- STACK_END( L, 0); + STACK_END( L, 0); - return id; + return id; } // function sentinel used to transfer native functions from/to keeper states static int func_lookup_sentinel( lua_State* L) { - return luaL_error( L, "function lookup sentinel for %s, should never be called", lua_tostring( L, lua_upvalueindex( 1))); + return luaL_error( L, "function lookup sentinel for %s, should never be called", lua_tostring( L, lua_upvalueindex( 1))); } // function sentinel used to transfer native table from/to keeper states static int table_lookup_sentinel( lua_State* L) { - return luaL_error( L, "table lookup sentinel for %s, should never be called", lua_tostring( L, lua_upvalueindex( 1))); + return luaL_error( L, "table lookup sentinel for %s, should never be called", lua_tostring( L, lua_upvalueindex( 1))); } // function sentinel used to transfer cloned full userdata from/to keeper states static int userdata_clone_sentinel( lua_State* L) { - return luaL_error( L, "userdata clone sentinel for %s, should never be called", lua_tostring( L, lua_upvalueindex( 1))); + return luaL_error( L, "userdata clone sentinel for %s, should never be called", lua_tostring( L, lua_upvalueindex( 1))); } /* @@ -657,70 +657,70 @@ static int userdata_clone_sentinel( lua_State* L) */ static char const* find_lookup_name( lua_State* L, uint_t i, LookupMode mode_, char const* upName_, size_t* len_) { - DEBUGSPEW_CODE( Universe* const U = universe_get( L)); - char const* fqn; - ASSERT_L( lua_isfunction( L, i) || lua_istable( L, i)); // ... v ... - STACK_CHECK( L, 0); - STACK_GROW( L, 3); // up to 3 slots are necessary on error - if( mode_ == eLM_FromKeeper) - { - lua_CFunction f = lua_tocfunction( L, i); // should *always* be func_lookup_sentinel or table_lookup_sentinel! - if( f == func_lookup_sentinel || f == table_lookup_sentinel || f == userdata_clone_sentinel) - { - lua_getupvalue( L, i, 1); // ... v ... "f.q.n" - } - else - { - // if this is not a sentinel, this is some user-created table we wanted to lookup - ASSERT_L( NULL == f && lua_istable( L, i)); - // push anything that will convert to NULL string - lua_pushnil( L); // ... v ... nil - } - } - else - { - // fetch the name from the source state's lookup table - REGISTRY_GET( L, LOOKUP_REGKEY); // ... v ... {} - STACK_MID( L, 1); - ASSERT_L( lua_istable( L, -1)); - lua_pushvalue( L, i); // ... v ... {} v - lua_rawget( L, -2); // ... v ... {} "f.q.n" - } - fqn = lua_tolstring( L, -1, len_); - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "function [C] %s \n" INDENT_END, fqn)); - // popping doesn't invalidate the pointer since this is an interned string gotten from the lookup database - lua_pop( L, (mode_ == eLM_FromKeeper) ? 1 : 2); // ... v ... - STACK_MID( L, 0); - if( NULL == fqn && !lua_istable( L, i)) // raise an error if we try to send an unknown function (but not for tables) - { - char const *from, *typewhat, *what, *gotchaA, *gotchaB; - // try to discover the name of the function we want to send - lua_getglobal( L, "decoda_name"); // ... v ... decoda_name - from = lua_tostring( L, -1); - lua_pushcfunction( L, luaG_nameof); // ... v ... decoda_name luaG_nameof - lua_pushvalue( L, i); // ... v ... decoda_name luaG_nameof t - lua_call( L, 1, 2); // ... v ... decoda_name "type" "name"|nil - typewhat = (lua_type( L, -2) == LUA_TSTRING) ? lua_tostring( L, -2) : luaL_typename( L, -2); - // second return value can be nil if the table was not found - // probable reason: the function was removed from the source Lua state before Lanes was required. 
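The three sentinels above are the whole trick for moving functions, tables and clonable userdata through a keeper state: on the way in, the value is replaced by a C closure whose single upvalue is its lookup name; on the way out, find_lookup_name() recognizes the closure and reads the name back. A condensed sketch of that round trip (my_sentinel, push_sentinel and read_sentinel_name are illustrative names, not part of the patch):

#include "lua.h"
#include "lauxlib.h"

// raises if ever actually called; its only job is to carry the name
static int my_sentinel( lua_State* L)
{
    return luaL_error( L, "sentinel for %s, should never be called", lua_tostring( L, lua_upvalueindex( 1)));
}

// going into the keeper: replace the value by a closure holding its name
static void push_sentinel( lua_State* L, char const* fqn, size_t len)
{
    lua_pushlstring( L, fqn, len);                 // "f.q.n"
    lua_pushcclosure( L, my_sentinel, 1);          // closure
}

// coming out of the keeper: recognize the closure and expose the name
static char const* read_sentinel_name( lua_State* L, int i)
{
    if( lua_tocfunction( L, i) == my_sentinel)
    {
        lua_getupvalue( L, i, 1);                  // ... "f.q.n" (left on the stack for the caller)
        return lua_tostring( L, -1);
    }
    return NULL;
}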
- if( lua_isnil( L, -1)) - { - gotchaA = " referenced by"; - gotchaB = "\n(did you remove it from the source Lua state before requiring Lanes?)"; - what = upName_; - } - else - { - gotchaA = ""; - gotchaB = ""; - what = (lua_type( L, -1) == LUA_TSTRING) ? lua_tostring( L, -1) : luaL_typename( L, -1); - } - (void) luaL_error( L, "%s%s '%s' not found in %s origin transfer database.%s", typewhat, gotchaA, what, from ? from : "main", gotchaB); - *len_ = 0; - return NULL; - } - STACK_END( L, 0); - return fqn; + DEBUGSPEW_CODE( Universe* const U = universe_get( L)); + char const* fqn; + ASSERT_L( lua_isfunction( L, i) || lua_istable( L, i)); // ... v ... + STACK_CHECK( L, 0); + STACK_GROW( L, 3); // up to 3 slots are necessary on error + if( mode_ == eLM_FromKeeper) + { + lua_CFunction f = lua_tocfunction( L, i); // should *always* be func_lookup_sentinel or table_lookup_sentinel! + if( f == func_lookup_sentinel || f == table_lookup_sentinel || f == userdata_clone_sentinel) + { + lua_getupvalue( L, i, 1); // ... v ... "f.q.n" + } + else + { + // if this is not a sentinel, this is some user-created table we wanted to lookup + ASSERT_L( NULL == f && lua_istable( L, i)); + // push anything that will convert to NULL string + lua_pushnil( L); // ... v ... nil + } + } + else + { + // fetch the name from the source state's lookup table + REGISTRY_GET( L, LOOKUP_REGKEY); // ... v ... {} + STACK_MID( L, 1); + ASSERT_L( lua_istable( L, -1)); + lua_pushvalue( L, i); // ... v ... {} v + lua_rawget( L, -2); // ... v ... {} "f.q.n" + } + fqn = lua_tolstring( L, -1, len_); + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "function [C] %s \n" INDENT_END, fqn)); + // popping doesn't invalidate the pointer since this is an interned string gotten from the lookup database + lua_pop( L, (mode_ == eLM_FromKeeper) ? 1 : 2); // ... v ... + STACK_MID( L, 0); + if( NULL == fqn && !lua_istable( L, i)) // raise an error if we try to send an unknown function (but not for tables) + { + char const *from, *typewhat, *what, *gotchaA, *gotchaB; + // try to discover the name of the function we want to send + lua_getglobal( L, "decoda_name"); // ... v ... decoda_name + from = lua_tostring( L, -1); + lua_pushcfunction( L, luaG_nameof); // ... v ... decoda_name luaG_nameof + lua_pushvalue( L, i); // ... v ... decoda_name luaG_nameof t + lua_call( L, 1, 2); // ... v ... decoda_name "type" "name"|nil + typewhat = (lua_type( L, -2) == LUA_TSTRING) ? lua_tostring( L, -2) : luaL_typename( L, -2); + // second return value can be nil if the table was not found + // probable reason: the function was removed from the source Lua state before Lanes was required. + if( lua_isnil( L, -1)) + { + gotchaA = " referenced by"; + gotchaB = "\n(did you remove it from the source Lua state before requiring Lanes?)"; + what = upName_; + } + else + { + gotchaA = ""; + gotchaB = ""; + what = (lua_type( L, -1) == LUA_TSTRING) ? lua_tostring( L, -1) : luaL_typename( L, -1); + } + (void) luaL_error( L, "%s%s '%s' not found in %s origin transfer database.%s", typewhat, gotchaA, what, from ? 
from : "main", gotchaB); + *len_ = 0; + return NULL; + } + STACK_END( L, 0); + return fqn; } @@ -729,67 +729,67 @@ static char const* find_lookup_name( lua_State* L, uint_t i, LookupMode mode_, c */ static bool_t lookup_table( lua_State* L2, lua_State* L, uint_t i, LookupMode mode_, char const* upName_) { - // get the name of the table we want to send - size_t len; - char const* fqn = find_lookup_name( L, i, mode_, upName_, &len); - if( NULL == fqn) // name not found, it is some user-created table - { - return FALSE; - } - // push the equivalent table in the destination's stack, retrieved from the lookup table - STACK_CHECK( L2, 0); // L // L2 - STACK_GROW( L2, 3); // up to 3 slots are necessary on error - switch( mode_) - { - default: // shouldn't happen, in theory... - (void) luaL_error( L, "internal error: unknown lookup mode"); - return FALSE; - - case eLM_ToKeeper: - // push a sentinel closure that holds the lookup name as upvalue - lua_pushlstring( L2, fqn, len); // "f.q.n" - lua_pushcclosure( L2, table_lookup_sentinel, 1); // f - break; - - case eLM_LaneBody: - case eLM_FromKeeper: - REGISTRY_GET( L2, LOOKUP_REGKEY); // {} - STACK_MID( L2, 1); - ASSERT_L( lua_istable( L2, -1)); - lua_pushlstring( L2, fqn, len); // {} "f.q.n" - lua_rawget( L2, -2); // {} t - // we accept destination lookup failures in the case of transfering the Lanes body function (this will result in the source table being cloned instead) - // but not when we extract something out of a keeper, as there is nothing to clone! - if( lua_isnil( L2, -1) && mode_ == eLM_LaneBody) - { - lua_pop( L2, 2); // - STACK_MID( L2, 0); - return FALSE; - } - else if( !lua_istable( L2, -1)) - { - char const* from, *to; - lua_getglobal( L, "decoda_name"); // ... t ... decoda_name - from = lua_tostring( L, -1); - lua_pop( L, 1); // ... t ... - lua_getglobal( L2, "decoda_name"); // {} t decoda_name - to = lua_tostring( L2, -1); - lua_pop( L2, 1); // {} t - // when mode_ == eLM_FromKeeper, L is a keeper state and L2 is not, therefore L2 is the state where we want to raise the error - (void) luaL_error( - (mode_ == eLM_FromKeeper) ? L2 : L - , "INTERNAL ERROR IN %s: table '%s' not found in %s destination transfer database." - , from ? from : "main" - , fqn - , to ? to : "main" - ); - return FALSE; - } - lua_remove( L2, -2); // t - break; - } - STACK_END( L2, 1); - return TRUE; + // get the name of the table we want to send + size_t len; + char const* fqn = find_lookup_name( L, i, mode_, upName_, &len); + if( NULL == fqn) // name not found, it is some user-created table + { + return FALSE; + } + // push the equivalent table in the destination's stack, retrieved from the lookup table + STACK_CHECK( L2, 0); // L // L2 + STACK_GROW( L2, 3); // up to 3 slots are necessary on error + switch( mode_) + { + default: // shouldn't happen, in theory... 
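lookup_table() never deep-copies a registered table: it resolves the table to its fully qualified name in the source state's lookup database, then resolves that same name in the destination's database. A stripped-down sketch of that round trip, using a plain string registry key "LOOKUP_DB" in place of the real LOOKUP_REGKEY unique key and skipping the keeper and error paths:

#include "lua.h"

// pushes on L2 the table registered under the same name as the table at index i of L
// returns 0 (and pushes nothing) when either side has no entry
static int transfer_by_name( lua_State* L, int i, lua_State* L2)
{
    size_t len;
    char const* fqn;
    i = lua_absindex( L, i);
    lua_getfield( L, LUA_REGISTRYINDEX, "LOOKUP_DB");      // L: ... {db}
    lua_pushvalue( L, i);                                  // L: ... {db} t
    lua_rawget( L, -2);                                    // L: ... {db} "f.q.n"|nil
    fqn = lua_tolstring( L, -1, &len);                     // NULL when the table isn't registered
    lua_pop( L, 2);                                        // L: ...   (the db itself keeps the string alive)
    if( fqn == NULL)
    {
        return 0;                                          // unknown table: caller falls back to cloning
    }
    lua_getfield( L2, LUA_REGISTRYINDEX, "LOOKUP_DB");     // L2: {db}
    lua_pushlstring( L2, fqn, len);                        // L2: {db} "f.q.n"
    lua_rawget( L2, -2);                                   // L2: {db} t|nil
    lua_remove( L2, -2);                                   // L2: t|nil
    if( lua_isnil( L2, -1))
    {
        lua_pop( L2, 1);                                   // L2:
        return 0;                                          // name not registered on the destination side
    }
    return 1;                                              // L2: t
}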
+ (void) luaL_error( L, "internal error: unknown lookup mode"); + return FALSE; + + case eLM_ToKeeper: + // push a sentinel closure that holds the lookup name as upvalue + lua_pushlstring( L2, fqn, len); // "f.q.n" + lua_pushcclosure( L2, table_lookup_sentinel, 1); // f + break; + + case eLM_LaneBody: + case eLM_FromKeeper: + REGISTRY_GET( L2, LOOKUP_REGKEY); // {} + STACK_MID( L2, 1); + ASSERT_L( lua_istable( L2, -1)); + lua_pushlstring( L2, fqn, len); // {} "f.q.n" + lua_rawget( L2, -2); // {} t + // we accept destination lookup failures in the case of transfering the Lanes body function (this will result in the source table being cloned instead) + // but not when we extract something out of a keeper, as there is nothing to clone! + if( lua_isnil( L2, -1) && mode_ == eLM_LaneBody) + { + lua_pop( L2, 2); // + STACK_MID( L2, 0); + return FALSE; + } + else if( !lua_istable( L2, -1)) + { + char const* from, *to; + lua_getglobal( L, "decoda_name"); // ... t ... decoda_name + from = lua_tostring( L, -1); + lua_pop( L, 1); // ... t ... + lua_getglobal( L2, "decoda_name"); // {} t decoda_name + to = lua_tostring( L2, -1); + lua_pop( L2, 1); // {} t + // when mode_ == eLM_FromKeeper, L is a keeper state and L2 is not, therefore L2 is the state where we want to raise the error + (void) luaL_error( + (mode_ == eLM_FromKeeper) ? L2 : L + , "INTERNAL ERROR IN %s: table '%s' not found in %s destination transfer database." + , from ? from : "main" + , fqn + , to ? to : "main" + ); + return FALSE; + } + lua_remove( L2, -2); // t + break; + } + STACK_END( L2, 1); + return TRUE; } @@ -805,33 +805,33 @@ static bool_t lookup_table( lua_State* L2, lua_State* L, uint_t i, LookupMode mo */ static bool_t push_cached_table( lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i) { - bool_t not_found_in_cache; // L2 - DECLARE_CONST_UNIQUE_KEY( p, lua_topointer( L, i)); - - ASSERT_L( L2_cache_i != 0); - STACK_GROW( L2, 3); - STACK_CHECK( L2, 0); - - // We don't need to use the from state ('L') in ID since the life span - // is only for the duration of a copy (both states are locked). - // push a light userdata uniquely representing the table - push_unique_key( L2, p); // ... p - - //fprintf( stderr, "<< ID: %s >>\n", lua_tostring( L2, -1)); - - lua_rawget( L2, L2_cache_i); // ... {cached|nil} - not_found_in_cache = lua_isnil( L2, -1); - if( not_found_in_cache) - { - lua_pop( L2, 1); // ... - lua_newtable( L2); // ... {} - push_unique_key( L2, p); // ... {} p - lua_pushvalue( L2, -2); // ... {} p {} - lua_rawset( L2, L2_cache_i); // ... {} - } - STACK_END( L2, 1); - ASSERT_L( lua_istable( L2, -1)); - return !not_found_in_cache; + bool_t not_found_in_cache; // L2 + DECLARE_CONST_UNIQUE_KEY( p, lua_topointer( L, i)); + + ASSERT_L( L2_cache_i != 0); + STACK_GROW( L2, 3); + STACK_CHECK( L2, 0); + + // We don't need to use the from state ('L') in ID since the life span + // is only for the duration of a copy (both states are locked). + // push a light userdata uniquely representing the table + push_unique_key( L2, p); // ... p + + //fprintf( stderr, "<< ID: %s >>\n", lua_tostring( L2, -1)); + + lua_rawget( L2, L2_cache_i); // ... {cached|nil} + not_found_in_cache = lua_isnil( L2, -1); + if( not_found_in_cache) + { + lua_pop( L2, 1); // ... + lua_newtable( L2); // ... {} + push_unique_key( L2, p); // ... {} p + lua_pushvalue( L2, -2); // ... {} p {} + lua_rawset( L2, L2_cache_i); // ... 
{} + } + STACK_END( L2, 1); + ASSERT_L( lua_istable( L2, -1)); + return !not_found_in_cache; } @@ -840,144 +840,144 @@ static bool_t push_cached_table( lua_State* L2, uint_t L2_cache_i, lua_State* L, */ static int discover_object_name_recur( lua_State* L, int shortest_, int depth_) { - int const what = 1; // o "r" {c} {fqn} ... {?} - int const result = 2; - int const cache = 3; - int const fqn = 4; - // no need to scan this table if the name we will discover is longer than one we already know - if( shortest_ <= depth_ + 1) - { - return shortest_; - } - STACK_GROW( L, 3); - STACK_CHECK( L, 0); - // stack top contains the table to search in - lua_pushvalue( L, -1); // o "r" {c} {fqn} ... {?} {?} - lua_rawget( L, cache); // o "r" {c} {fqn} ... {?} nil/1 - // if table is already visited, we are done - if( !lua_isnil( L, -1)) - { - lua_pop( L, 1); // o "r" {c} {fqn} ... {?} - return shortest_; - } - // examined table is not in the cache, add it now - lua_pop( L, 1); // o "r" {c} {fqn} ... {?} - lua_pushvalue( L, -1); // o "r" {c} {fqn} ... {?} {?} - lua_pushinteger( L, 1); // o "r" {c} {fqn} ... {?} {?} 1 - lua_rawset( L, cache); // o "r" {c} {fqn} ... {?} - // scan table contents - lua_pushnil( L); // o "r" {c} {fqn} ... {?} nil - while( lua_next( L, -2)) // o "r" {c} {fqn} ... {?} k v - { - //char const *const strKey = (lua_type( L, -2) == LUA_TSTRING) ? lua_tostring( L, -2) : NULL; // only for debugging - //lua_Number const numKey = (lua_type( L, -2) == LUA_TNUMBER) ? lua_tonumber( L, -2) : -6666; // only for debugging - STACK_MID( L, 2); - // append key name to fqn stack - ++ depth_; - lua_pushvalue( L, -2); // o "r" {c} {fqn} ... {?} k v k - lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k v - if( lua_rawequal( L, -1, what)) // is it what we are looking for? - { - STACK_MID( L, 2); - // update shortest name - if( depth_ < shortest_) - { - shortest_ = depth_; - luaG_pushFQN( L, fqn, depth_, NULL); // o "r" {c} {fqn} ... {?} k v "fqn" - lua_replace( L, result); // o "r" {c} {fqn} ... {?} k v - } - // no need to search further at this level - lua_pop( L, 2); // o "r" {c} {fqn} ... {?} - STACK_MID( L, 0); - break; - } - switch( lua_type( L, -1)) // o "r" {c} {fqn} ... {?} k v - { - default: // nil, boolean, light userdata, number and string aren't identifiable - break; - - case LUA_TTABLE: // o "r" {c} {fqn} ... {?} k {} - STACK_MID( L, 2); - shortest_ = discover_object_name_recur( L, shortest_, depth_); - // search in the table's metatable too - if( lua_getmetatable( L, -1)) // o "r" {c} {fqn} ... {?} k {} {mt} - { - if( lua_istable( L, -1)) - { - ++ depth_; - lua_pushliteral( L, "__metatable"); // o "r" {c} {fqn} ... {?} k {} {mt} "__metatable" - lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k {} {mt} - shortest_ = discover_object_name_recur( L, shortest_, depth_); - lua_pushnil( L); // o "r" {c} {fqn} ... {?} k {} {mt} nil - lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k {} {mt} - -- depth_; - } - lua_pop( L, 1); // o "r" {c} {fqn} ... {?} k {} - } - STACK_MID( L, 2); - break; - - case LUA_TTHREAD: // o "r" {c} {fqn} ... {?} k T - // TODO: explore the thread's stack frame looking for our culprit? - break; - - case LUA_TUSERDATA: // o "r" {c} {fqn} ... {?} k U - STACK_MID( L, 2); - // search in the object's metatable (some modules are built that way) - if( lua_getmetatable( L, -1)) // o "r" {c} {fqn} ... {?} k U {mt} - { - if( lua_istable( L, -1)) - { - ++ depth_; - lua_pushliteral( L, "__metatable"); // o "r" {c} {fqn} ... 
{?} k U {mt} "__metatable" - lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k U {mt} - shortest_ = discover_object_name_recur( L, shortest_, depth_); - lua_pushnil( L); // o "r" {c} {fqn} ... {?} k U {mt} nil - lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k U {mt} - -- depth_; - } - lua_pop( L, 1); // o "r" {c} {fqn} ... {?} k U - } - STACK_MID( L, 2); - // search in the object's uservalues - { - int uvi = 1; - while( lua_getiuservalue( L, -1, uvi) != LUA_TNONE) // o "r" {c} {fqn} ... {?} k U {u} - { - if( lua_istable( L, -1)) // if it is a table, look inside - { - ++ depth_; - lua_pushliteral( L, "uservalue"); // o "r" {c} {fqn} ... {?} k v {u} "uservalue" - lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k v {u} - shortest_ = discover_object_name_recur( L, shortest_, depth_); - lua_pushnil( L); // o "r" {c} {fqn} ... {?} k v {u} nil - lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k v {u} - -- depth_; - } - lua_pop( L, 1); // o "r" {c} {fqn} ... {?} k U - ++ uvi; - } - // when lua_getiuservalue() returned LUA_TNONE, it pushed a nil. pop it now - lua_pop( L, 1); // o "r" {c} {fqn} ... {?} k U - } - STACK_MID( L, 2); - break; - } - // make ready for next iteration - lua_pop( L, 1); // o "r" {c} {fqn} ... {?} k - // remove name from fqn stack - lua_pushnil( L); // o "r" {c} {fqn} ... {?} k nil - lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k - STACK_MID( L, 1); - -- depth_; - } // o "r" {c} {fqn} ... {?} - STACK_MID( L, 0); - // remove the visited table from the cache, in case a shorter path to the searched object exists - lua_pushvalue( L, -1); // o "r" {c} {fqn} ... {?} {?} - lua_pushnil( L); // o "r" {c} {fqn} ... {?} {?} nil - lua_rawset( L, cache); // o "r" {c} {fqn} ... {?} - STACK_END( L, 0); - return shortest_; + int const what = 1; // o "r" {c} {fqn} ... {?} + int const result = 2; + int const cache = 3; + int const fqn = 4; + // no need to scan this table if the name we will discover is longer than one we already know + if( shortest_ <= depth_ + 1) + { + return shortest_; + } + STACK_GROW( L, 3); + STACK_CHECK( L, 0); + // stack top contains the table to search in + lua_pushvalue( L, -1); // o "r" {c} {fqn} ... {?} {?} + lua_rawget( L, cache); // o "r" {c} {fqn} ... {?} nil/1 + // if table is already visited, we are done + if( !lua_isnil( L, -1)) + { + lua_pop( L, 1); // o "r" {c} {fqn} ... {?} + return shortest_; + } + // examined table is not in the cache, add it now + lua_pop( L, 1); // o "r" {c} {fqn} ... {?} + lua_pushvalue( L, -1); // o "r" {c} {fqn} ... {?} {?} + lua_pushinteger( L, 1); // o "r" {c} {fqn} ... {?} {?} 1 + lua_rawset( L, cache); // o "r" {c} {fqn} ... {?} + // scan table contents + lua_pushnil( L); // o "r" {c} {fqn} ... {?} nil + while( lua_next( L, -2)) // o "r" {c} {fqn} ... {?} k v + { + //char const *const strKey = (lua_type( L, -2) == LUA_TSTRING) ? lua_tostring( L, -2) : NULL; // only for debugging + //lua_Number const numKey = (lua_type( L, -2) == LUA_TNUMBER) ? lua_tonumber( L, -2) : -6666; // only for debugging + STACK_MID( L, 2); + // append key name to fqn stack + ++ depth_; + lua_pushvalue( L, -2); // o "r" {c} {fqn} ... {?} k v k + lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k v + if( lua_rawequal( L, -1, what)) // is it what we are looking for? + { + STACK_MID( L, 2); + // update shortest name + if( depth_ < shortest_) + { + shortest_ = depth_; + luaG_pushFQN( L, fqn, depth_, NULL); // o "r" {c} {fqn} ... {?} k v "fqn" + lua_replace( L, result); // o "r" {c} {fqn} ... 
{?} k v + } + // no need to search further at this level + lua_pop( L, 2); // o "r" {c} {fqn} ... {?} + STACK_MID( L, 0); + break; + } + switch( lua_type( L, -1)) // o "r" {c} {fqn} ... {?} k v + { + default: // nil, boolean, light userdata, number and string aren't identifiable + break; + + case LUA_TTABLE: // o "r" {c} {fqn} ... {?} k {} + STACK_MID( L, 2); + shortest_ = discover_object_name_recur( L, shortest_, depth_); + // search in the table's metatable too + if( lua_getmetatable( L, -1)) // o "r" {c} {fqn} ... {?} k {} {mt} + { + if( lua_istable( L, -1)) + { + ++ depth_; + lua_pushliteral( L, "__metatable"); // o "r" {c} {fqn} ... {?} k {} {mt} "__metatable" + lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k {} {mt} + shortest_ = discover_object_name_recur( L, shortest_, depth_); + lua_pushnil( L); // o "r" {c} {fqn} ... {?} k {} {mt} nil + lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k {} {mt} + -- depth_; + } + lua_pop( L, 1); // o "r" {c} {fqn} ... {?} k {} + } + STACK_MID( L, 2); + break; + + case LUA_TTHREAD: // o "r" {c} {fqn} ... {?} k T + // TODO: explore the thread's stack frame looking for our culprit? + break; + + case LUA_TUSERDATA: // o "r" {c} {fqn} ... {?} k U + STACK_MID( L, 2); + // search in the object's metatable (some modules are built that way) + if( lua_getmetatable( L, -1)) // o "r" {c} {fqn} ... {?} k U {mt} + { + if( lua_istable( L, -1)) + { + ++ depth_; + lua_pushliteral( L, "__metatable"); // o "r" {c} {fqn} ... {?} k U {mt} "__metatable" + lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k U {mt} + shortest_ = discover_object_name_recur( L, shortest_, depth_); + lua_pushnil( L); // o "r" {c} {fqn} ... {?} k U {mt} nil + lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k U {mt} + -- depth_; + } + lua_pop( L, 1); // o "r" {c} {fqn} ... {?} k U + } + STACK_MID( L, 2); + // search in the object's uservalues + { + int uvi = 1; + while( lua_getiuservalue( L, -1, uvi) != LUA_TNONE) // o "r" {c} {fqn} ... {?} k U {u} + { + if( lua_istable( L, -1)) // if it is a table, look inside + { + ++ depth_; + lua_pushliteral( L, "uservalue"); // o "r" {c} {fqn} ... {?} k v {u} "uservalue" + lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k v {u} + shortest_ = discover_object_name_recur( L, shortest_, depth_); + lua_pushnil( L); // o "r" {c} {fqn} ... {?} k v {u} nil + lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k v {u} + -- depth_; + } + lua_pop( L, 1); // o "r" {c} {fqn} ... {?} k U + ++ uvi; + } + // when lua_getiuservalue() returned LUA_TNONE, it pushed a nil. pop it now + lua_pop( L, 1); // o "r" {c} {fqn} ... {?} k U + } + STACK_MID( L, 2); + break; + } + // make ready for next iteration + lua_pop( L, 1); // o "r" {c} {fqn} ... {?} k + // remove name from fqn stack + lua_pushnil( L); // o "r" {c} {fqn} ... {?} k nil + lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k + STACK_MID( L, 1); + -- depth_; + } // o "r" {c} {fqn} ... {?} + STACK_MID( L, 0); + // remove the visited table from the cache, in case a shorter path to the searched object exists + lua_pushvalue( L, -1); // o "r" {c} {fqn} ... {?} {?} + lua_pushnil( L); // o "r" {c} {fqn} ... {?} {?} nil + lua_rawset( L, cache); // o "r" {c} {fqn} ... 
{?} + STACK_END( L, 0); + return shortest_; } @@ -986,46 +986,46 @@ static int discover_object_name_recur( lua_State* L, int shortest_, int depth_) */ int luaG_nameof( lua_State* L) { - int what = lua_gettop( L); - if( what > 1) - { - luaL_argerror( L, what, "too many arguments."); - } - - // nil, boolean, light userdata, number and string aren't identifiable - if( lua_type( L, 1) < LUA_TTABLE) - { - lua_pushstring( L, luaL_typename( L, 1)); // o "type" - lua_insert( L, -2); // "type" o - return 2; - } - - STACK_GROW( L, 4); - STACK_CHECK( L, 0); - // this slot will contain the shortest name we found when we are done - lua_pushnil( L); // o nil - // push a cache that will contain all already visited tables - lua_newtable( L); // o nil {c} - // push a table whose contents are strings that, when concatenated, produce unique name - lua_newtable( L); // o nil {c} {fqn} - lua_pushliteral( L, "_G"); // o nil {c} {fqn} "_G" - lua_rawseti( L, -2, 1); // o nil {c} {fqn} - // this is where we start the search - lua_pushglobaltable( L); // o nil {c} {fqn} _G - (void) discover_object_name_recur( L, 6666, 1); - if( lua_isnil( L, 2)) // try again with registry, just in case... - { - lua_pop( L, 1); // o nil {c} {fqn} - lua_pushliteral( L, "_R"); // o nil {c} {fqn} "_R" - lua_rawseti( L, -2, 1); // o nil {c} {fqn} - lua_pushvalue( L, LUA_REGISTRYINDEX); // o nil {c} {fqn} _R - (void) discover_object_name_recur( L, 6666, 1); - } - lua_pop( L, 3); // o "result" - STACK_END( L, 1); - lua_pushstring( L, luaL_typename( L, 1)); // o "result" "type" - lua_replace( L, -3); // "type" "result" - return 2; + int what = lua_gettop( L); + if( what > 1) + { + luaL_argerror( L, what, "too many arguments."); + } + + // nil, boolean, light userdata, number and string aren't identifiable + if( lua_type( L, 1) < LUA_TTABLE) + { + lua_pushstring( L, luaL_typename( L, 1)); // o "type" + lua_insert( L, -2); // "type" o + return 2; + } + + STACK_GROW( L, 4); + STACK_CHECK( L, 0); + // this slot will contain the shortest name we found when we are done + lua_pushnil( L); // o nil + // push a cache that will contain all already visited tables + lua_newtable( L); // o nil {c} + // push a table whose contents are strings that, when concatenated, produce unique name + lua_newtable( L); // o nil {c} {fqn} + lua_pushliteral( L, "_G"); // o nil {c} {fqn} "_G" + lua_rawseti( L, -2, 1); // o nil {c} {fqn} + // this is where we start the search + lua_pushglobaltable( L); // o nil {c} {fqn} _G + (void) discover_object_name_recur( L, 6666, 1); + if( lua_isnil( L, 2)) // try again with registry, just in case... 
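luaG_nameof() is meant to be called like any Lua function: push it, push the object, call with two results. This is exactly how find_lookup_name() uses it when building its error message; a small illustrative wrapper (describe_object is an assumed name):

#include <stdio.h>
#include "lua.h"

int luaG_nameof( lua_State* L);                    // the function defined in the hunk above

static void describe_object( lua_State* L, int i)
{
    i = lua_absindex( L, i);
    lua_pushcfunction( L, luaG_nameof);            // ... luaG_nameof
    lua_pushvalue( L, i);                          // ... luaG_nameof o
    lua_call( L, 1, 2);                            // ... "type" "name"|nil
    fprintf( stderr, "%s '%s'\n", lua_tostring( L, -2), lua_isnil( L, -1) ? "<unknown>" : lua_tostring( L, -1));
    lua_pop( L, 2);                                // ...
}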
+ { + lua_pop( L, 1); // o nil {c} {fqn} + lua_pushliteral( L, "_R"); // o nil {c} {fqn} "_R" + lua_rawseti( L, -2, 1); // o nil {c} {fqn} + lua_pushvalue( L, LUA_REGISTRYINDEX); // o nil {c} {fqn} _R + (void) discover_object_name_recur( L, 6666, 1); + } + lua_pop( L, 3); // o "result" + STACK_END( L, 1); + lua_pushstring( L, luaL_typename( L, 1)); // o "result" "type" + lua_replace( L, -3); // "type" "result" + return 2; } @@ -1034,73 +1034,73 @@ int luaG_nameof( lua_State* L) */ static void lookup_native_func( lua_State* L2, lua_State* L, uint_t i, LookupMode mode_, char const* upName_) { - // get the name of the function we want to send - size_t len; - char const* fqn = find_lookup_name( L, i, mode_, upName_, &len); - // push the equivalent function in the destination's stack, retrieved from the lookup table - STACK_CHECK( L2, 0); // L // L2 - STACK_GROW( L2, 3); // up to 3 slots are necessary on error - switch( mode_) - { - default: // shouldn't happen, in theory... - (void) luaL_error( L, "internal error: unknown lookup mode"); - return; - - case eLM_ToKeeper: - // push a sentinel closure that holds the lookup name as upvalue - lua_pushlstring( L2, fqn, len); // "f.q.n" - lua_pushcclosure( L2, func_lookup_sentinel, 1); // f - break; - - case eLM_LaneBody: - case eLM_FromKeeper: - REGISTRY_GET( L2, LOOKUP_REGKEY); // {} - STACK_MID( L2, 1); - ASSERT_L( lua_istable( L2, -1)); - lua_pushlstring( L2, fqn, len); // {} "f.q.n" - lua_rawget( L2, -2); // {} f - // nil means we don't know how to transfer stuff: user should do something - // anything other than function or table should not happen! - if( !lua_isfunction( L2, -1) && !lua_istable( L2, -1)) - { - char const* from, * to; - lua_getglobal( L, "decoda_name"); // ... f ... decoda_name - from = lua_tostring( L, -1); - lua_pop( L, 1); // ... f ... - lua_getglobal( L2, "decoda_name"); // {} f decoda_name - to = lua_tostring( L2, -1); - lua_pop( L2, 1); // {} f - // when mode_ == eLM_FromKeeper, L is a keeper state and L2 is not, therefore L2 is the state where we want to raise the error - (void) luaL_error( - (mode_ == eLM_FromKeeper) ? L2 : L - , "%s%s: function '%s' not found in %s destination transfer database." - , lua_isnil( L2, -1) ? "" : "INTERNAL ERROR IN " - , from ? from : "main" - , fqn - , to ? to : "main" - ); - return; - } - lua_remove( L2, -2); // f - break; - - /* keep it in case I need it someday, who knows... - case eLM_RawFunctions: - { - int n; - char const* upname; - lua_CFunction f = lua_tocfunction( L, i); - // copy upvalues - for( n = 0; (upname = lua_getupvalue( L, i, 1 + n)) != NULL; ++ n) - { - luaG_inter_move( U, L, L2, 1, mode_); // [up[,up ...]] - } - lua_pushcclosure( L2, f, n); // - } - break; - */ - } - STACK_END( L2, 1); + // get the name of the function we want to send + size_t len; + char const* fqn = find_lookup_name( L, i, mode_, upName_, &len); + // push the equivalent function in the destination's stack, retrieved from the lookup table + STACK_CHECK( L2, 0); // L // L2 + STACK_GROW( L2, 3); // up to 3 slots are necessary on error + switch( mode_) + { + default: // shouldn't happen, in theory... 
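lookup_native_func() can only succeed if both states ran populate_func_lookup_table() over the same module table, so that each lookup database maps f <-> "module/function" identically. Lanes normally drives this itself when modules are required through its own machinery; the sketch below shows the registration step in isolation (register_module_for_transfer is an assumed name, and it presumes a state where Lanes has already created its lookup database):

#include "lua.h"

void populate_func_lookup_table( lua_State* L, int _i, char const* name_);   // from the hunk above

static void register_module_for_transfer( lua_State* L, char const* modname)
{
    lua_getglobal( L, "require");                  // require
    lua_pushstring( L, modname);                   // require "modname"
    lua_call( L, 1, 1);                            // {module}
    populate_func_lookup_table( L, -1, modname);   // feeds f <-> "modname/..." into the database
    lua_pop( L, 1);                                //
}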
+ (void) luaL_error( L, "internal error: unknown lookup mode"); + return; + + case eLM_ToKeeper: + // push a sentinel closure that holds the lookup name as upvalue + lua_pushlstring( L2, fqn, len); // "f.q.n" + lua_pushcclosure( L2, func_lookup_sentinel, 1); // f + break; + + case eLM_LaneBody: + case eLM_FromKeeper: + REGISTRY_GET( L2, LOOKUP_REGKEY); // {} + STACK_MID( L2, 1); + ASSERT_L( lua_istable( L2, -1)); + lua_pushlstring( L2, fqn, len); // {} "f.q.n" + lua_rawget( L2, -2); // {} f + // nil means we don't know how to transfer stuff: user should do something + // anything other than function or table should not happen! + if( !lua_isfunction( L2, -1) && !lua_istable( L2, -1)) + { + char const* from, * to; + lua_getglobal( L, "decoda_name"); // ... f ... decoda_name + from = lua_tostring( L, -1); + lua_pop( L, 1); // ... f ... + lua_getglobal( L2, "decoda_name"); // {} f decoda_name + to = lua_tostring( L2, -1); + lua_pop( L2, 1); // {} f + // when mode_ == eLM_FromKeeper, L is a keeper state and L2 is not, therefore L2 is the state where we want to raise the error + (void) luaL_error( + (mode_ == eLM_FromKeeper) ? L2 : L + , "%s%s: function '%s' not found in %s destination transfer database." + , lua_isnil( L2, -1) ? "" : "INTERNAL ERROR IN " + , from ? from : "main" + , fqn + , to ? to : "main" + ); + return; + } + lua_remove( L2, -2); // f + break; + + /* keep it in case I need it someday, who knows... + case eLM_RawFunctions: + { + int n; + char const* upname; + lua_CFunction f = lua_tocfunction( L, i); + // copy upvalues + for( n = 0; (upname = lua_getupvalue( L, i, 1 + n)) != NULL; ++ n) + { + luaG_inter_move( U, L, L2, 1, mode_); // [up[,up ...]] + } + lua_pushcclosure( L2, f, n); // + } + break; + */ + } + STACK_END( L2, 1); } @@ -1112,23 +1112,23 @@ static void lookup_native_func( lua_State* L2, lua_State* L, uint_t i, LookupMod #if USE_DEBUG_SPEW static char const* lua_type_names[] = { - "LUA_TNIL" - , "LUA_TBOOLEAN" - , "LUA_TLIGHTUSERDATA" - , "LUA_TNUMBER" - , "LUA_TSTRING" - , "LUA_TTABLE" - , "LUA_TFUNCTION" - , "LUA_TUSERDATA" - , "LUA_TTHREAD" - , "" // not really a type - , "LUA_TJITCDATA" // LuaJIT specific + "LUA_TNIL" + , "LUA_TBOOLEAN" + , "LUA_TLIGHTUSERDATA" + , "LUA_TNUMBER" + , "LUA_TSTRING" + , "LUA_TTABLE" + , "LUA_TFUNCTION" + , "LUA_TUSERDATA" + , "LUA_TTHREAD" + , "" // not really a type + , "LUA_TJITCDATA" // LuaJIT specific }; static char const* vt_names[] = { - "VT_NORMAL" - , "VT_KEY" - , "VT_METATABLE" + "VT_NORMAL" + , "VT_KEY" + , "VT_METATABLE" }; #endif // USE_DEBUG_SPEW @@ -1149,148 +1149,148 @@ static int buf_writer( lua_State* L, void const* b, size_t size, void* ud) static void copy_func( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, LookupMode mode_, char const* upName_) { - int n, needToPush; - luaL_Buffer B; - B.L = NULL; - - ASSERT_L( L2_cache_i != 0); // ... {cache} ... p - STACK_GROW( L, 2); - STACK_CHECK( L, 0); - - - // 'lua_dump()' needs the function at top of stack - // if already on top of the stack, no need to push again - needToPush = (i != (uint_t)lua_gettop( L)); - if( needToPush) - { - lua_pushvalue( L, i); // ... 
f - } - - // - // "value returned is the error code returned by the last call - // to the writer" (and we only return 0) - // not sure this could ever fail but for memory shortage reasons - // last parameter is Lua 5.4-specific (no stripping) - if( lua504_dump( L, buf_writer, &B, 0) != 0) - { - luaL_error( L, "internal error: function dump failed."); - } - - // pushes dumped string on 'L' - luaL_pushresult( &B); // ... f b - - // if not pushed, no need to pop - if( needToPush) - { - lua_remove( L, -2); // ... b - } - - // transfer the bytecode, then the upvalues, to create a similar closure - { - char const* name = NULL; - - #if LOG_FUNC_INFO - // "To get information about a function you push it onto the - // stack and start the what string with the character '>'." - // - { - lua_Debug ar; - lua_pushvalue( L, i); // ... b f - // fills 'name' 'namewhat' and 'linedefined', pops function - lua_getinfo( L, ">nS", &ar); // ... b - name = ar.namewhat; - fprintf( stderr, INDENT_BEGIN "FNAME: %s @ %d\n", i, s_indent, ar.short_src, ar.linedefined); // just gives NULL - } - #endif // LOG_FUNC_INFO - { - size_t sz; - char const* s = lua_tolstring( L, -1, &sz); // ... b - ASSERT_L( s && sz); - STACK_GROW( L2, 2); - // Note: Line numbers seem to be taken precisely from the - // original function. 'name' is not used since the chunk - // is precompiled (it seems...). - // - // TBD: Can we get the function's original name through, as well? - // - if( luaL_loadbuffer( L2, s, sz, name) != 0) // ... {cache} ... p function - { - // chunk is precompiled so only LUA_ERRMEM can happen - // "Otherwise, it pushes an error message" - // - STACK_GROW( L, 1); - luaL_error( L, "%s: %s", upName_, lua_tostring( L2, -1)); - } - // remove the dumped string - lua_pop( L, 1); // ... - // now set the cache as soon as we can. - // this is necessary if one of the function's upvalues references it indirectly - // we need to find it in the cache even if it isn't fully transfered yet - lua_insert( L2, -2); // ... {cache} ... function p - lua_pushvalue( L2, -2); // ... {cache} ... function p function - // cache[p] = function - lua_rawset( L2, L2_cache_i); // ... {cache} ... function - } - STACK_MID( L, 0); - - /* push over any upvalues; references to this function will come from - * cache so we don't end up in eternal loop. - * Lua5.2 and Lua5.3: one of the upvalues is _ENV, which we don't want to copy! - * instead, the function shall have LUA_RIDX_GLOBALS taken in the destination state! - */ - { - char const* upname; + int n, needToPush; + luaL_Buffer B; + B.L = NULL; + + ASSERT_L( L2_cache_i != 0); // ... {cache} ... p + STACK_GROW( L, 2); + STACK_CHECK( L, 0); + + + // 'lua_dump()' needs the function at top of stack + // if already on top of the stack, no need to push again + needToPush = (i != (uint_t)lua_gettop( L)); + if( needToPush) + { + lua_pushvalue( L, i); // ... f + } + + // + // "value returned is the error code returned by the last call + // to the writer" (and we only return 0) + // not sure this could ever fail but for memory shortage reasons + // last parameter is Lua 5.4-specific (no stripping) + if( lua504_dump( L, buf_writer, &B, 0) != 0) + { + luaL_error( L, "internal error: function dump failed."); + } + + // pushes dumped string on 'L' + luaL_pushresult( &B); // ... f b + + // if not pushed, no need to pop + if( needToPush) + { + lua_remove( L, -2); // ... 
b + } + + // transfer the bytecode, then the upvalues, to create a similar closure + { + char const* name = NULL; + + #if LOG_FUNC_INFO + // "To get information about a function you push it onto the + // stack and start the what string with the character '>'." + // + { + lua_Debug ar; + lua_pushvalue( L, i); // ... b f + // fills 'name' 'namewhat' and 'linedefined', pops function + lua_getinfo( L, ">nS", &ar); // ... b + name = ar.namewhat; + fprintf( stderr, INDENT_BEGIN "FNAME: %s @ %d\n", i, s_indent, ar.short_src, ar.linedefined); // just gives NULL + } + #endif // LOG_FUNC_INFO + { + size_t sz; + char const* s = lua_tolstring( L, -1, &sz); // ... b + ASSERT_L( s && sz); + STACK_GROW( L2, 2); + // Note: Line numbers seem to be taken precisely from the + // original function. 'name' is not used since the chunk + // is precompiled (it seems...). + // + // TBD: Can we get the function's original name through, as well? + // + if( luaL_loadbuffer( L2, s, sz, name) != 0) // ... {cache} ... p function + { + // chunk is precompiled so only LUA_ERRMEM can happen + // "Otherwise, it pushes an error message" + // + STACK_GROW( L, 1); + luaL_error( L, "%s: %s", upName_, lua_tostring( L2, -1)); + } + // remove the dumped string + lua_pop( L, 1); // ... + // now set the cache as soon as we can. + // this is necessary if one of the function's upvalues references it indirectly + // we need to find it in the cache even if it isn't fully transfered yet + lua_insert( L2, -2); // ... {cache} ... function p + lua_pushvalue( L2, -2); // ... {cache} ... function p function + // cache[p] = function + lua_rawset( L2, L2_cache_i); // ... {cache} ... function + } + STACK_MID( L, 0); + + /* push over any upvalues; references to this function will come from + * cache so we don't end up in eternal loop. + * Lua5.2 and Lua5.3: one of the upvalues is _ENV, which we don't want to copy! + * instead, the function shall have LUA_RIDX_GLOBALS taken in the destination state! + */ + { + char const* upname; #if LUA_VERSION_NUM >= 502 - // Starting with Lua 5.2, each Lua function gets its environment as one of its upvalues (named LUA_ENV, aka "_ENV" by default) - // Generally this is LUA_RIDX_GLOBALS, which we don't want to copy from the source to the destination state... - // -> if we encounter an upvalue equal to the global table in the source, bind it to the destination's global table - lua_pushglobaltable( L); // ... _G + // Starting with Lua 5.2, each Lua function gets its environment as one of its upvalues (named LUA_ENV, aka "_ENV" by default) + // Generally this is LUA_RIDX_GLOBALS, which we don't want to copy from the source to the destination state... + // -> if we encounter an upvalue equal to the global table in the source, bind it to the destination's global table + lua_pushglobaltable( L); // ... _G #endif // LUA_VERSION_NUM - for( n = 0; (upname = lua_getupvalue( L, i, 1 + n)) != NULL; ++ n) - { // ... _G up[n] - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "UPNAME[%d]: %s -> " INDENT_END, n, upname)); + for( n = 0; (upname = lua_getupvalue( L, i, 1 + n)) != NULL; ++ n) + { // ... _G up[n] + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "UPNAME[%d]: %s -> " INDENT_END, n, upname)); #if LUA_VERSION_NUM >= 502 - if( lua_rawequal( L, -1, -2)) // is the upvalue equal to the global table? - { - DEBUGSPEW_CODE( fprintf( stderr, "pushing destination global scope\n")); - lua_pushglobaltable( L2); // ... {cache} ... function - } - else + if( lua_rawequal( L, -1, -2)) // is the upvalue equal to the global table? 
+ { + DEBUGSPEW_CODE( fprintf( stderr, "pushing destination global scope\n")); + lua_pushglobaltable( L2); // ... {cache} ... function + } + else #endif // LUA_VERSION_NUM - { - DEBUGSPEW_CODE( fprintf( stderr, "copying value\n")); - if( !inter_copy_one( U, L2, L2_cache_i, L, lua_gettop( L), VT_NORMAL, mode_, upname)) // ... {cache} ... function - { - luaL_error( L, "Cannot copy upvalue type '%s'", luaL_typename( L, -1)); - } - } - lua_pop( L, 1); // ... _G - } + { + DEBUGSPEW_CODE( fprintf( stderr, "copying value\n")); + if( !inter_copy_one( U, L2, L2_cache_i, L, lua_gettop( L), VT_NORMAL, mode_, upname)) // ... {cache} ... function + { + luaL_error( L, "Cannot copy upvalue type '%s'", luaL_typename( L, -1)); + } + } + lua_pop( L, 1); // ... _G + } #if LUA_VERSION_NUM >= 502 - lua_pop( L, 1); // ... + lua_pop( L, 1); // ... #endif // LUA_VERSION_NUM - } - // L2: function + 'n' upvalues (>=0) - - STACK_MID( L, 0); - - // Set upvalues (originally set to 'nil' by 'lua_load') - { - int func_index = lua_gettop( L2) - n; - for( ; n > 0; -- n) - { - char const* rc = lua_setupvalue( L2, func_index, n); // ... {cache} ... function - // - // "assigns the value at the top of the stack to the upvalue and returns its name. - // It also pops the value from the stack." - - ASSERT_L( rc); // not having enough slots? - } - // once all upvalues have been set we are left - // with the function at the top of the stack // ... {cache} ... function - } - } - STACK_END( L, 0); + } + // L2: function + 'n' upvalues (>=0) + + STACK_MID( L, 0); + + // Set upvalues (originally set to 'nil' by 'lua_load') + { + int func_index = lua_gettop( L2) - n; + for( ; n > 0; -- n) + { + char const* rc = lua_setupvalue( L2, func_index, n); // ... {cache} ... function + // + // "assigns the value at the top of the stack to the upvalue and returns its name. + // It also pops the value from the stack." + + ASSERT_L( rc); // not having enough slots? + } + // once all upvalues have been set we are left + // with the function at the top of the stack // ... {cache} ... function + } + } + STACK_END( L, 0); } /* @@ -1301,168 +1301,168 @@ static void copy_func( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* */ static void copy_cached_func( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, LookupMode mode_, char const* upName_) { - FuncSubType funcSubType; - /*lua_CFunction cfunc =*/ luaG_tocfunction( L, i, &funcSubType); // NULL for LuaJIT-fast && bytecode functions - if( funcSubType == FST_Bytecode) - { - void* const aspointer = (void*)lua_topointer( L, i); - // TBD: Merge this and same code for tables - ASSERT_L( L2_cache_i != 0); - - STACK_GROW( L2, 2); - - // L2_cache[id_str]= function - // - STACK_CHECK( L2, 0); - - // We don't need to use the from state ('L') in ID since the life span - // is only for the duration of a copy (both states are locked). - // - - // push a light userdata uniquely representing the function - lua_pushlightuserdata( L2, aspointer); // ... {cache} ... p - - //fprintf( stderr, "<< ID: %s >>\n", lua_tostring( L2, -1)); - - lua_pushvalue( L2, -1); // ... {cache} ... p p - lua_rawget( L2, L2_cache_i); // ... {cache} ... p function|nil|true - - if( lua_isnil( L2, -1)) // function is unknown - { - lua_pop( L2, 1); // ... {cache} ... p - - // Set to 'true' for the duration of creation; need to find self-references - // via upvalues - // - // pushes a copy of the func, stores a reference in the cache - copy_func( U, L2, L2_cache_i, L, i, mode_, upName_); // ... {cache} ... 
function - } - else // found function in the cache - { - lua_remove( L2, -2); // ... {cache} ... function - } - STACK_END( L2, 1); - ASSERT_L( lua_isfunction( L2, -1)); - } - else // function is native/LuaJIT: no need to cache - { - lookup_native_func( L2, L, i, mode_, upName_); // ... {cache} ... function - // if the function was in fact a lookup sentinel, we can either get a function or a table here - ASSERT_L( lua_isfunction( L2, -1) || lua_istable( L2, -1)); - } + FuncSubType funcSubType; + /*lua_CFunction cfunc =*/ luaG_tocfunction( L, i, &funcSubType); // NULL for LuaJIT-fast && bytecode functions + if( funcSubType == FST_Bytecode) + { + void* const aspointer = (void*)lua_topointer( L, i); + // TBD: Merge this and same code for tables + ASSERT_L( L2_cache_i != 0); + + STACK_GROW( L2, 2); + + // L2_cache[id_str]= function + // + STACK_CHECK( L2, 0); + + // We don't need to use the from state ('L') in ID since the life span + // is only for the duration of a copy (both states are locked). + // + + // push a light userdata uniquely representing the function + lua_pushlightuserdata( L2, aspointer); // ... {cache} ... p + + //fprintf( stderr, "<< ID: %s >>\n", lua_tostring( L2, -1)); + + lua_pushvalue( L2, -1); // ... {cache} ... p p + lua_rawget( L2, L2_cache_i); // ... {cache} ... p function|nil|true + + if( lua_isnil( L2, -1)) // function is unknown + { + lua_pop( L2, 1); // ... {cache} ... p + + // Set to 'true' for the duration of creation; need to find self-references + // via upvalues + // + // pushes a copy of the func, stores a reference in the cache + copy_func( U, L2, L2_cache_i, L, i, mode_, upName_); // ... {cache} ... function + } + else // found function in the cache + { + lua_remove( L2, -2); // ... {cache} ... function + } + STACK_END( L2, 1); + ASSERT_L( lua_isfunction( L2, -1)); + } + else // function is native/LuaJIT: no need to cache + { + lookup_native_func( L2, L, i, mode_, upName_); // ... {cache} ... function + // if the function was in fact a lookup sentinel, we can either get a function or a table here + ASSERT_L( lua_isfunction( L2, -1) || lua_istable( L2, -1)); + } } static bool_t push_cached_metatable( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, enum eLookupMode mode_, char const* upName_) { - STACK_CHECK( L, 0); - if( lua_getmetatable( L, i)) // ... mt - { - lua_Integer const mt_id = get_mt_id( U, L, -1); // Unique id for the metatable - - STACK_CHECK( L2, 0); - STACK_GROW( L2, 4); - // do we already know this metatable? - push_registry_subtable( L2, REG_MTID); // _R[REG_MTID] - lua_pushinteger( L2, mt_id); // _R[REG_MTID] id - lua_rawget( L2, -2); // _R[REG_MTID] mt? - - STACK_MID( L2, 2); - - if( lua_isnil( L2, -1)) - { // L2 did not know the metatable - lua_pop( L2, 1); // _R[REG_MTID] - if( inter_copy_one( U, L2, L2_cache_i, L, lua_gettop( L), VT_METATABLE, mode_, upName_)) // _R[REG_MTID] mt - { - STACK_MID( L2, 2); - // mt_id -> metatable - lua_pushinteger( L2, mt_id); // _R[REG_MTID] mt id - lua_pushvalue( L2, -2); // _R[REG_MTID] mt id mt - lua_rawset( L2, -4); // _R[REG_MTID] mt - - // metatable -> mt_id - lua_pushvalue( L2, -1); // _R[REG_MTID] mt mt - lua_pushinteger( L2, mt_id); // _R[REG_MTID] mt mt id - lua_rawset( L2, -4); // _R[REG_MTID] mt - } - else - { - (void) luaL_error( L, "Error copying a metatable"); - } - STACK_MID( L2, 2); - } - lua_remove( L2, -2); // mt - - lua_pop( L, 1); // ... 
- STACK_END( L2, 1); - STACK_MID( L, 0); - return TRUE; - } - STACK_END( L, 0); - return FALSE; + STACK_CHECK( L, 0); + if( lua_getmetatable( L, i)) // ... mt + { + lua_Integer const mt_id = get_mt_id( U, L, -1); // Unique id for the metatable + + STACK_CHECK( L2, 0); + STACK_GROW( L2, 4); + // do we already know this metatable? + push_registry_subtable( L2, REG_MTID); // _R[REG_MTID] + lua_pushinteger( L2, mt_id); // _R[REG_MTID] id + lua_rawget( L2, -2); // _R[REG_MTID] mt? + + STACK_MID( L2, 2); + + if( lua_isnil( L2, -1)) + { // L2 did not know the metatable + lua_pop( L2, 1); // _R[REG_MTID] + if( inter_copy_one( U, L2, L2_cache_i, L, lua_gettop( L), VT_METATABLE, mode_, upName_)) // _R[REG_MTID] mt + { + STACK_MID( L2, 2); + // mt_id -> metatable + lua_pushinteger( L2, mt_id); // _R[REG_MTID] mt id + lua_pushvalue( L2, -2); // _R[REG_MTID] mt id mt + lua_rawset( L2, -4); // _R[REG_MTID] mt + + // metatable -> mt_id + lua_pushvalue( L2, -1); // _R[REG_MTID] mt mt + lua_pushinteger( L2, mt_id); // _R[REG_MTID] mt mt id + lua_rawset( L2, -4); // _R[REG_MTID] mt + } + else + { + (void) luaL_error( L, "Error copying a metatable"); + } + STACK_MID( L2, 2); + } + lua_remove( L2, -2); // mt + + lua_pop( L, 1); // ... + STACK_END( L2, 1); + STACK_MID( L, 0); + return TRUE; + } + STACK_END( L, 0); + return FALSE; } static void inter_copy_keyvaluepair( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, enum e_vt vt, LookupMode mode_, char const* upName_) { - uint_t val_i = lua_gettop( L); - uint_t key_i = val_i - 1; - - // Only basic key types are copied over; others ignored - if( inter_copy_one( U, L2, 0 /*key*/, L, key_i, VT_KEY, mode_, upName_)) - { - char* valPath = (char*) upName_; - if( U->verboseErrors) - { - // for debug purposes, let's try to build a useful name - if( lua_type( L, key_i) == LUA_TSTRING) - { - char const* key = lua_tostring( L, key_i); - size_t const keyRawLen = lua_rawlen( L, key_i); - size_t const bufLen = strlen( upName_) + keyRawLen + 2; - valPath = (char*) alloca( bufLen); - sprintf( valPath, "%s.%*s", upName_, (int) keyRawLen, key); - key = NULL; - } + uint_t val_i = lua_gettop( L); + uint_t key_i = val_i - 1; + + // Only basic key types are copied over; others ignored + if( inter_copy_one( U, L2, 0 /*key*/, L, key_i, VT_KEY, mode_, upName_)) + { + char* valPath = (char*) upName_; + if( U->verboseErrors) + { + // for debug purposes, let's try to build a useful name + if( lua_type( L, key_i) == LUA_TSTRING) + { + char const* key = lua_tostring( L, key_i); + size_t const keyRawLen = lua_rawlen( L, key_i); + size_t const bufLen = strlen( upName_) + keyRawLen + 2; + valPath = (char*) alloca( bufLen); + sprintf( valPath, "%s.%*s", upName_, (int) keyRawLen, key); + key = NULL; + } #if defined LUA_LNUM || LUA_VERSION_NUM >= 503 - else if( lua_isinteger( L, key_i)) - { - lua_Integer key = lua_tointeger( L, key_i); - valPath = (char*) alloca( strlen( upName_) + 32 + 3); - sprintf( valPath, "%s[" LUA_INTEGER_FMT "]", upName_, key); - } + else if( lua_isinteger( L, key_i)) + { + lua_Integer key = lua_tointeger( L, key_i); + valPath = (char*) alloca( strlen( upName_) + 32 + 3); + sprintf( valPath, "%s[" LUA_INTEGER_FMT "]", upName_, key); + } #endif // defined LUA_LNUM || LUA_VERSION_NUM >= 503 - else if( lua_type( L, key_i) == LUA_TNUMBER) - { - lua_Number key = lua_tonumber( L, key_i); - valPath = (char*) alloca( strlen( upName_) + 32 + 3); - sprintf( valPath, "%s[" LUA_NUMBER_FMT "]", upName_, key); - } - else if( lua_type( L, key_i) == LUA_TLIGHTUSERDATA) - { - 
void* key = lua_touserdata( L, key_i); - valPath = (char*) alloca( strlen( upName_) + 16 + 5); - sprintf( valPath, "%s[U:%p]", upName_, key); - } - else if( lua_type( L, key_i) == LUA_TBOOLEAN) - { - int key = lua_toboolean( L, key_i); - valPath = (char*) alloca( strlen( upName_) + 8); - sprintf( valPath, "%s[%s]", upName_, key ? "true" : "false"); - } - } - /* - * Contents of metatables are copied with cache checking; - * important to detect loops. - */ - if( inter_copy_one( U, L2, L2_cache_i, L, val_i, VT_NORMAL, mode_, valPath)) - { - ASSERT_L( lua_istable( L2, -3)); - lua_rawset( L2, -3); // add to table (pops key & val) - } - else - { - luaL_error( L, "Unable to copy %s entry '%s' because of value is of type '%s'", (vt == VT_NORMAL) ? "table" : "metatable", valPath, luaL_typename( L, val_i)); - } - } + else if( lua_type( L, key_i) == LUA_TNUMBER) + { + lua_Number key = lua_tonumber( L, key_i); + valPath = (char*) alloca( strlen( upName_) + 32 + 3); + sprintf( valPath, "%s[" LUA_NUMBER_FMT "]", upName_, key); + } + else if( lua_type( L, key_i) == LUA_TLIGHTUSERDATA) + { + void* key = lua_touserdata( L, key_i); + valPath = (char*) alloca( strlen( upName_) + 16 + 5); + sprintf( valPath, "%s[U:%p]", upName_, key); + } + else if( lua_type( L, key_i) == LUA_TBOOLEAN) + { + int key = lua_toboolean( L, key_i); + valPath = (char*) alloca( strlen( upName_) + 8); + sprintf( valPath, "%s[%s]", upName_, key ? "true" : "false"); + } + } + /* + * Contents of metatables are copied with cache checking; + * important to detect loops. + */ + if( inter_copy_one( U, L2, L2_cache_i, L, val_i, VT_NORMAL, mode_, valPath)) + { + ASSERT_L( lua_istable( L2, -3)); + lua_rawset( L2, -3); // add to table (pops key & val) + } + else + { + luaL_error( L, "Unable to copy %s entry '%s' because of value is of type '%s'", (vt == VT_NORMAL) ? "table" : "metatable", valPath, luaL_typename( L, val_i)); + } + } } /* @@ -1473,330 +1473,330 @@ static DECLARE_CONST_UNIQUE_KEY( CLONABLES_CACHE_KEY, 0xD04EE018B3DEE8F5); static bool_t copyclone( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, LookupMode mode_, char const* upName_) { - void* const source = lua_touserdata( L, i); - - STACK_CHECK( L, 0); - STACK_CHECK( L2, 0); - - // Check if the source was already cloned during this copy - lua_pushlightuserdata( L2, source); // ... source - lua_rawget( L2, L2_cache_i); // ... clone? - if ( !lua_isnil( L2, -1)) - { - STACK_MID( L2, 1); - return TRUE; - } - else - { - lua_pop( L2, 1); // ... - } - STACK_MID( L2, 0); - - // no metatable? -> not clonable - if( !lua_getmetatable( L, i)) // ... mt? - { - STACK_MID( L, 0); - return FALSE; - } - - // no __lanesclone? -> not clonable - lua_getfield( L, -1, "__lanesclone"); // ... mt __lanesclone? - if( lua_isnil( L, -1)) - { - lua_pop( L, 2); // ... - STACK_MID( L, 0); - return FALSE; - } - - { - int const mt = lua_absindex( L, -2); - size_t userdata_size = 0; - void* clone = NULL; - lua_pushvalue( L, -1); // ... mt __lanesclone __lanesclone - // call the cloning function with 1 argument, should return the number of bytes to allocate for the clone - lua_pushlightuserdata( L, source); // ... mt __lanesclone __lanesclone source - lua_call( L, 1, 1); // ... mt __lanesclone size - STACK_MID( L, 3); - userdata_size = (size_t) lua_tointeger( L, -1); // ... mt __lanesclone size - lua_pop( L, 1); // ... 
mt __lanesclone - // we need to copy over the uservalues of the userdata as well - { - // extract all the uservalues, but don't transfer them yet - int uvi = 0; - while( lua_getiuservalue( L, i, uvi + 1) != LUA_TNONE) // ... mt __lanesclone [uv]+ nil - { - ++ uvi; - } - // when lua_getiuservalue() returned LUA_TNONE, it pushed a nil. pop it now - lua_pop( L, 1); // ... mt __lanesclone [uv]+ - // create the clone userdata with the required number of uservalue slots - clone = lua_newuserdatauv( L2, userdata_size, uvi); // ... u - // copy the metatable in the target state, and give it to the clone we put there - if( inter_copy_one( U, L2, L2_cache_i, L, mt, VT_NORMAL, mode_, upName_)) // ... u mt|sentinel - { - if( eLM_ToKeeper == mode_) // ... u sentinel - { - ASSERT_L( lua_tocfunction( L2, -1) == table_lookup_sentinel); - // we want to create a new closure with a 'clone sentinel' function, where the upvalues are the userdata and the metatable fqn - lua_getupvalue( L2, -1, 1); // ... u sentinel fqn - lua_remove( L2, -2); // ... u fqn - lua_insert( L2, -2); // ... fqn u - lua_pushcclosure( L2, userdata_clone_sentinel, 2); // ... userdata_clone_sentinel - } - else // from keeper or direct // ... u mt - { - ASSERT_L( lua_istable( L2, -1)); - lua_setmetatable( L2, -2); // ... u - } - STACK_MID( L2, 1); - } - else - { - (void) luaL_error( L, "Error copying a metatable"); - } - // first, add the entry in the cache (at this point it is either the actual userdata or the keeper sentinel - lua_pushlightuserdata( L2, source); // ... u source - lua_pushvalue( L2, -2); // ... u source u - lua_rawset( L2, L2_cache_i); // ... u - // make sure we have the userdata now - if( eLM_ToKeeper == mode_) // ... userdata_clone_sentinel - { - lua_getupvalue( L2, -1, 2); // ... userdata_clone_sentinel u - } - // assign uservalues - while( uvi > 0) - { - inter_copy_one( U, L2, L2_cache_i, L, lua_absindex( L, -1), VT_NORMAL, mode_, upName_); // ... u uv - lua_pop( L, 1); // ... mt __lanesclone [uv]* - // this pops the value from the stack - lua_setiuservalue( L2, -2, uvi); // ... u - -- uvi; - } - // when we are done, all uservalues are popped from the source stack, and we want only the single transferred value in the destination - if( eLM_ToKeeper == mode_) // ... userdata_clone_sentinel u - { - lua_pop( L2, 1); // ... userdata_clone_sentinel - } - STACK_MID( L2, 1); - STACK_MID( L, 2); - // call cloning function in source state to perform the actual memory cloning - lua_pushlightuserdata( L, clone); // ... mt __lanesclone clone - lua_pushlightuserdata( L, source); // ... mt __lanesclone clone source - lua_call( L, 2, 0); // ... mt - STACK_MID( L, 1); - } - } - - STACK_END( L2, 1); - lua_pop( L, 1); // ... - STACK_END( L, 0); - return TRUE; + void* const source = lua_touserdata( L, i); + + STACK_CHECK( L, 0); + STACK_CHECK( L2, 0); + + // Check if the source was already cloned during this copy + lua_pushlightuserdata( L2, source); // ... source + lua_rawget( L2, L2_cache_i); // ... clone? + if ( !lua_isnil( L2, -1)) + { + STACK_MID( L2, 1); + return TRUE; + } + else + { + lua_pop( L2, 1); // ... + } + STACK_MID( L2, 0); + + // no metatable? -> not clonable + if( !lua_getmetatable( L, i)) // ... mt? + { + STACK_MID( L, 0); + return FALSE; + } + + // no __lanesclone? -> not clonable + lua_getfield( L, -1, "__lanesclone"); // ... mt __lanesclone? + if( lua_isnil( L, -1)) + { + lua_pop( L, 2); // ... 
+ STACK_MID( L, 0); + return FALSE; + } + + { + int const mt = lua_absindex( L, -2); + size_t userdata_size = 0; + void* clone = NULL; + lua_pushvalue( L, -1); // ... mt __lanesclone __lanesclone + // call the cloning function with 1 argument, should return the number of bytes to allocate for the clone + lua_pushlightuserdata( L, source); // ... mt __lanesclone __lanesclone source + lua_call( L, 1, 1); // ... mt __lanesclone size + STACK_MID( L, 3); + userdata_size = (size_t) lua_tointeger( L, -1); // ... mt __lanesclone size + lua_pop( L, 1); // ... mt __lanesclone + // we need to copy over the uservalues of the userdata as well + { + // extract all the uservalues, but don't transfer them yet + int uvi = 0; + while( lua_getiuservalue( L, i, uvi + 1) != LUA_TNONE) // ... mt __lanesclone [uv]+ nil + { + ++ uvi; + } + // when lua_getiuservalue() returned LUA_TNONE, it pushed a nil. pop it now + lua_pop( L, 1); // ... mt __lanesclone [uv]+ + // create the clone userdata with the required number of uservalue slots + clone = lua_newuserdatauv( L2, userdata_size, uvi); // ... u + // copy the metatable in the target state, and give it to the clone we put there + if( inter_copy_one( U, L2, L2_cache_i, L, mt, VT_NORMAL, mode_, upName_)) // ... u mt|sentinel + { + if( eLM_ToKeeper == mode_) // ... u sentinel + { + ASSERT_L( lua_tocfunction( L2, -1) == table_lookup_sentinel); + // we want to create a new closure with a 'clone sentinel' function, where the upvalues are the userdata and the metatable fqn + lua_getupvalue( L2, -1, 1); // ... u sentinel fqn + lua_remove( L2, -2); // ... u fqn + lua_insert( L2, -2); // ... fqn u + lua_pushcclosure( L2, userdata_clone_sentinel, 2); // ... userdata_clone_sentinel + } + else // from keeper or direct // ... u mt + { + ASSERT_L( lua_istable( L2, -1)); + lua_setmetatable( L2, -2); // ... u + } + STACK_MID( L2, 1); + } + else + { + (void) luaL_error( L, "Error copying a metatable"); + } + // first, add the entry in the cache (at this point it is either the actual userdata or the keeper sentinel + lua_pushlightuserdata( L2, source); // ... u source + lua_pushvalue( L2, -2); // ... u source u + lua_rawset( L2, L2_cache_i); // ... u + // make sure we have the userdata now + if( eLM_ToKeeper == mode_) // ... userdata_clone_sentinel + { + lua_getupvalue( L2, -1, 2); // ... userdata_clone_sentinel u + } + // assign uservalues + while( uvi > 0) + { + inter_copy_one( U, L2, L2_cache_i, L, lua_absindex( L, -1), VT_NORMAL, mode_, upName_); // ... u uv + lua_pop( L, 1); // ... mt __lanesclone [uv]* + // this pops the value from the stack + lua_setiuservalue( L2, -2, uvi); // ... u + -- uvi; + } + // when we are done, all uservalues are popped from the source stack, and we want only the single transferred value in the destination + if( eLM_ToKeeper == mode_) // ... userdata_clone_sentinel u + { + lua_pop( L2, 1); // ... userdata_clone_sentinel + } + STACK_MID( L2, 1); + STACK_MID( L, 2); + // call cloning function in source state to perform the actual memory cloning + lua_pushlightuserdata( L, clone); // ... mt __lanesclone clone + lua_pushlightuserdata( L, source); // ... mt __lanesclone clone source + lua_call( L, 2, 0); // ... mt + STACK_MID( L, 1); + } + } + + STACK_END( L2, 1); + lua_pop( L, 1); // ... 
+ STACK_END( L, 0); + return TRUE; } static bool_t inter_copy_userdata( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, enum e_vt vt, LookupMode mode_, char const* upName_) { - STACK_CHECK( L, 0); - STACK_CHECK( L2, 0); - if( vt == VT_KEY) - { - return FALSE; - } - - // try clonable userdata first - if( copyclone( U, L2, L2_cache_i, L, i, mode_, upName_)) - { - STACK_MID( L, 0); - STACK_MID( L2, 1); - return TRUE; - } - - STACK_MID( L, 0); - STACK_MID( L2, 0); - - // Allow only deep userdata entities to be copied across - DEBUGSPEW_CODE( fprintf( stderr, "USERDATA\n")); - if( copydeep( U, L2, L2_cache_i, L, i, mode_, upName_)) - { - STACK_MID( L, 0); - STACK_MID( L2, 1); - return TRUE; - } - - STACK_MID( L, 0); - STACK_MID( L2, 0); - - // Not a deep or clonable full userdata - if( U->demoteFullUserdata) // attempt demotion to light userdata - { - void* lud = lua_touserdata( L, i); - lua_pushlightuserdata( L2, lud); - } - else // raise an error - { - (void) luaL_error( L, "can't copy non-deep full userdata across lanes"); - } - - STACK_END( L2, 1); - STACK_END( L, 0); - return TRUE; + STACK_CHECK( L, 0); + STACK_CHECK( L2, 0); + if( vt == VT_KEY) + { + return FALSE; + } + + // try clonable userdata first + if( copyclone( U, L2, L2_cache_i, L, i, mode_, upName_)) + { + STACK_MID( L, 0); + STACK_MID( L2, 1); + return TRUE; + } + + STACK_MID( L, 0); + STACK_MID( L2, 0); + + // Allow only deep userdata entities to be copied across + DEBUGSPEW_CODE( fprintf( stderr, "USERDATA\n")); + if( copydeep( U, L2, L2_cache_i, L, i, mode_, upName_)) + { + STACK_MID( L, 0); + STACK_MID( L2, 1); + return TRUE; + } + + STACK_MID( L, 0); + STACK_MID( L2, 0); + + // Not a deep or clonable full userdata + if( U->demoteFullUserdata) // attempt demotion to light userdata + { + void* lud = lua_touserdata( L, i); + lua_pushlightuserdata( L2, lud); + } + else // raise an error + { + (void) luaL_error( L, "can't copy non-deep full userdata across lanes"); + } + + STACK_END( L2, 1); + STACK_END( L, 0); + return TRUE; } static bool_t inter_copy_function( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, enum e_vt vt, LookupMode mode_, char const* upName_) { - if( vt == VT_KEY) - { - return FALSE; - } - - STACK_CHECK( L, 0); - STACK_CHECK( L2, 0); - DEBUGSPEW_CODE( fprintf( stderr, "FUNCTION %s\n", upName_)); - - if( lua_tocfunction( L, i) == userdata_clone_sentinel) // we are actually copying a clonable full userdata from a keeper - { - // clone the full userdata again - size_t userdata_size = 0; - void* source; - void* clone; - - // let's see if we already restored this userdata - lua_getupvalue( L, i, 2); // ... u - source = lua_touserdata( L, -1); - lua_pushlightuserdata( L2, source); // ... source - lua_rawget( L2, L2_cache_i); // ... u? - if( !lua_isnil( L2, -1)) - { - lua_pop( L, 1); // ... - STACK_MID( L, 0); - STACK_MID( L2, 1); - return TRUE; - } - lua_pop( L2, 1); // ... - - // this function has 2 upvalues: the fqn of its metatable, and the userdata itself - lookup_table( L2, L, i, mode_, upName_); // ... mt - // __lanesclone should always exist because we wouldn't be restoring data from a userdata_clone_sentinel closure to begin with - lua_getfield( L2, -1, "__lanesclone"); // ... mt __lanesclone - lua_pushvalue( L2, -1); // ... 
mt __lanesclone __lanesclone - // 'i' slot is the closure, but from now on it is the actual userdata - i = lua_gettop( L); - source = lua_touserdata( L, -1); - // call the cloning function with 1 argument, should return the number of bytes to allocate for the clone - lua_pushlightuserdata( L2, source); // ... mt __lanesclone __lanesclone source - lua_call( L2, 1, 1); // ... mt __lanesclone size - userdata_size = (size_t) lua_tointeger( L2, -1); // ... mt __lanesclone size - lua_pop( L2, 1); // ... mt __lanesclone - { - // extract uservalues (don't transfer them yet) - int uvi = 0; - while( lua_getiuservalue( L, i, uvi + 1) != LUA_TNONE) // ... u uv - { - ++ uvi; - } - // when lua_getiuservalue() returned LUA_TNONE, it pushed a nil. pop it now - lua_pop( L, 1); // ... u [uv]* - STACK_MID( L, uvi + 1); - // create the clone userdata with the required number of uservalue slots - clone = lua_newuserdatauv( L2, userdata_size, uvi); // ... mt __lanesclone u - // add it in the cache - lua_pushlightuserdata( L2, source); // ... mt __lanesclone u source - lua_pushvalue( L2, -2); // ... mt __lanesclone u source u - lua_rawset( L2, L2_cache_i); // ... mt __lanesclone u - // set metatable - lua_pushvalue( L2, -3); // ... mt __lanesclone u mt - lua_setmetatable( L2, -2); // ... mt __lanesclone u - // transfer and assign uservalues - while( uvi > 0) - { - inter_copy_one( U, L2, L2_cache_i, L, lua_absindex( L, -1), vt, mode_, upName_); // ... mt __lanesclone u uv - lua_pop( L, 1); // ... u [uv]* - // this pops the value from the stack - lua_setiuservalue( L2, -2, uvi); // ... mt __lanesclone u - -- uvi; - } - // when we are done, all uservalues are popped from the stack - lua_pop( L, 1); // ... - STACK_MID( L, 0); - STACK_MID( L2, 3); // ... mt __lanesclone u - } - // perform the custom cloning part - lua_replace( L2, -3); // ... u __lanesclone - lua_pushlightuserdata( L2, clone); // ... u __lanesclone clone - lua_pushlightuserdata( L2, source); // ... u __lanesclone clone source - lua_call( L2, 2, 0); // ... u - } - else - { - DEBUGSPEW_CODE( fprintf( stderr, "FUNCTION %s\n", upName_)); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - STACK_CHECK( L2, 0); - copy_cached_func( U, L2, L2_cache_i, L, i, mode_, upName_); - STACK_END( L2, 1); - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - } - STACK_END( L2, 1); - STACK_END( L, 0); - return TRUE; + if( vt == VT_KEY) + { + return FALSE; + } + + STACK_CHECK( L, 0); + STACK_CHECK( L2, 0); + DEBUGSPEW_CODE( fprintf( stderr, "FUNCTION %s\n", upName_)); + + if( lua_tocfunction( L, i) == userdata_clone_sentinel) // we are actually copying a clonable full userdata from a keeper + { + // clone the full userdata again + size_t userdata_size = 0; + void* source; + void* clone; + + // let's see if we already restored this userdata + lua_getupvalue( L, i, 2); // ... u + source = lua_touserdata( L, -1); + lua_pushlightuserdata( L2, source); // ... source + lua_rawget( L2, L2_cache_i); // ... u? + if( !lua_isnil( L2, -1)) + { + lua_pop( L, 1); // ... + STACK_MID( L, 0); + STACK_MID( L2, 1); + return TRUE; + } + lua_pop( L2, 1); // ... + + // this function has 2 upvalues: the fqn of its metatable, and the userdata itself + lookup_table( L2, L, i, mode_, upName_); // ... mt + // __lanesclone should always exist because we wouldn't be restoring data from a userdata_clone_sentinel closure to begin with + lua_getfield( L2, -1, "__lanesclone"); // ... mt __lanesclone + lua_pushvalue( L2, -1); // ... 
mt __lanesclone __lanesclone + // 'i' slot is the closure, but from now on it is the actual userdata + i = lua_gettop( L); + source = lua_touserdata( L, -1); + // call the cloning function with 1 argument, should return the number of bytes to allocate for the clone + lua_pushlightuserdata( L2, source); // ... mt __lanesclone __lanesclone source + lua_call( L2, 1, 1); // ... mt __lanesclone size + userdata_size = (size_t) lua_tointeger( L2, -1); // ... mt __lanesclone size + lua_pop( L2, 1); // ... mt __lanesclone + { + // extract uservalues (don't transfer them yet) + int uvi = 0; + while( lua_getiuservalue( L, i, uvi + 1) != LUA_TNONE) // ... u uv + { + ++ uvi; + } + // when lua_getiuservalue() returned LUA_TNONE, it pushed a nil. pop it now + lua_pop( L, 1); // ... u [uv]* + STACK_MID( L, uvi + 1); + // create the clone userdata with the required number of uservalue slots + clone = lua_newuserdatauv( L2, userdata_size, uvi); // ... mt __lanesclone u + // add it in the cache + lua_pushlightuserdata( L2, source); // ... mt __lanesclone u source + lua_pushvalue( L2, -2); // ... mt __lanesclone u source u + lua_rawset( L2, L2_cache_i); // ... mt __lanesclone u + // set metatable + lua_pushvalue( L2, -3); // ... mt __lanesclone u mt + lua_setmetatable( L2, -2); // ... mt __lanesclone u + // transfer and assign uservalues + while( uvi > 0) + { + inter_copy_one( U, L2, L2_cache_i, L, lua_absindex( L, -1), vt, mode_, upName_); // ... mt __lanesclone u uv + lua_pop( L, 1); // ... u [uv]* + // this pops the value from the stack + lua_setiuservalue( L2, -2, uvi); // ... mt __lanesclone u + -- uvi; + } + // when we are done, all uservalues are popped from the stack + lua_pop( L, 1); // ... + STACK_MID( L, 0); + STACK_MID( L2, 3); // ... mt __lanesclone u + } + // perform the custom cloning part + lua_replace( L2, -3); // ... u __lanesclone + lua_pushlightuserdata( L2, clone); // ... u __lanesclone clone + lua_pushlightuserdata( L2, source); // ... u __lanesclone clone source + lua_call( L2, 2, 0); // ... u + } + else + { + DEBUGSPEW_CODE( fprintf( stderr, "FUNCTION %s\n", upName_)); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + STACK_CHECK( L2, 0); + copy_cached_func( U, L2, L2_cache_i, L, i, mode_, upName_); + STACK_END( L2, 1); + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + } + STACK_END( L2, 1); + STACK_END( L, 0); + return TRUE; } static bool_t inter_copy_table( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, enum e_vt vt, LookupMode mode_, char const* upName_) { - if( vt == VT_KEY) - { - return FALSE; - } - - STACK_CHECK( L, 0); - STACK_CHECK( L2, 0); - DEBUGSPEW_CODE( fprintf( stderr, "TABLE %s\n", upName_)); - - /* - * First, let's try to see if this table is special (aka is it some table that we registered in our lookup databases during module registration?) - * Note that this table CAN be a module table, but we just didn't register it, in which case we'll send it through the table cloning mechanism - */ - if( lookup_table( L2, L, i, mode_, upName_)) - { - ASSERT_L( lua_istable( L2, -1) || (lua_tocfunction( L2, -1) == table_lookup_sentinel)); // from lookup datables // can also be table_lookup_sentinel if this is a table we know - return TRUE; - } - - /* Check if we've already copied the same table from 'L' (during this transmission), and - * reuse the old copy. This allows table upvalues shared by multiple - * local functions to point to the same table, also in the target. 
- * Also, this takes care of cyclic tables and multiple references - * to the same subtable. - * - * Note: Even metatables need to go through this test; to detect - * loops such as those in required module tables (getmetatable(lanes).lanes == lanes) - */ - if( push_cached_table( L2, L2_cache_i, L, i)) - { - ASSERT_L( lua_istable( L2, -1)); // from cache - return TRUE; - } - ASSERT_L( lua_istable( L2, -1)); - - STACK_GROW( L, 2); - STACK_GROW( L2, 2); - - lua_pushnil( L); // start iteration - while( lua_next( L, i)) - { - // need a function to prevent overflowing the stack with verboseErrors-induced alloca() - inter_copy_keyvaluepair( U, L2, L2_cache_i, L, vt, mode_, upName_); - lua_pop( L, 1); // pop value (next round) - } - STACK_MID( L, 0); - STACK_MID( L2, 1); - - // Metatables are expected to be immutable, and copied only once. - if( push_cached_metatable( U, L2, L2_cache_i, L, i, mode_, upName_)) // ... t mt? - { - lua_setmetatable( L2, -2); // ... t - } - STACK_END( L2, 1); - STACK_END( L, 0); - return TRUE; + if( vt == VT_KEY) + { + return FALSE; + } + + STACK_CHECK( L, 0); + STACK_CHECK( L2, 0); + DEBUGSPEW_CODE( fprintf( stderr, "TABLE %s\n", upName_)); + + /* + * First, let's try to see if this table is special (aka is it some table that we registered in our lookup databases during module registration?) + * Note that this table CAN be a module table, but we just didn't register it, in which case we'll send it through the table cloning mechanism + */ + if( lookup_table( L2, L, i, mode_, upName_)) + { + ASSERT_L( lua_istable( L2, -1) || (lua_tocfunction( L2, -1) == table_lookup_sentinel)); // from lookup datables // can also be table_lookup_sentinel if this is a table we know + return TRUE; + } + + /* Check if we've already copied the same table from 'L' (during this transmission), and + * reuse the old copy. This allows table upvalues shared by multiple + * local functions to point to the same table, also in the target. + * Also, this takes care of cyclic tables and multiple references + * to the same subtable. + * + * Note: Even metatables need to go through this test; to detect + * loops such as those in required module tables (getmetatable(lanes).lanes == lanes) + */ + if( push_cached_table( L2, L2_cache_i, L, i)) + { + ASSERT_L( lua_istable( L2, -1)); // from cache + return TRUE; + } + ASSERT_L( lua_istable( L2, -1)); + + STACK_GROW( L, 2); + STACK_GROW( L2, 2); + + lua_pushnil( L); // start iteration + while( lua_next( L, i)) + { + // need a function to prevent overflowing the stack with verboseErrors-induced alloca() + inter_copy_keyvaluepair( U, L2, L2_cache_i, L, vt, mode_, upName_); + lua_pop( L, 1); // pop value (next round) + } + STACK_MID( L, 0); + STACK_MID( L2, 1); + + // Metatables are expected to be immutable, and copied only once. + if( push_cached_metatable( U, L2, L2_cache_i, L, i, mode_, upName_)) // ... t mt? + { + lua_setmetatable( L2, -2); // ... 
t + } + STACK_END( L2, 1); + STACK_END( L, 0); + return TRUE; } /* @@ -1811,118 +1811,118 @@ static bool_t inter_copy_table( Universe* U, lua_State* L2, uint_t L2_cache_i, l */ bool_t inter_copy_one( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, enum e_vt vt, LookupMode mode_, char const* upName_) { - bool_t ret = TRUE; - int val_type = lua_type( L, i); - static int const pod_mask = (1 << LUA_TNIL) | (1 << LUA_TBOOLEAN) | (1 << LUA_TLIGHTUSERDATA) | (1 << LUA_TNUMBER) | (1 << LUA_TSTRING); - STACK_GROW( L2, 1); - STACK_CHECK( L, 0); // L // L2 - STACK_CHECK( L2, 0); // L // L2 - - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "inter_copy_one()\n" INDENT_END)); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%s %s: " INDENT_END, lua_type_names[val_type], vt_names[vt])); - - // Non-POD can be skipped if its metatable contains { __lanesignore = true } - if( ((1 << val_type) & pod_mask) == 0) - { - if( lua_getmetatable( L, i)) // ... mt - { - lua_getfield( L, -1, "__lanesignore"); // ... mt ignore? - if( lua_isboolean( L, -1) && lua_toboolean( L, -1)) - { - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "__lanesignore -> LUA_TNIL\n" INDENT_END)); - val_type = LUA_TNIL; - } - lua_pop( L, 2); // ... - } - } - STACK_MID( L, 0); - - /* Lets push nil to L2 if the object should be ignored */ - switch( val_type) - { - /* Basic types allowed both as values, and as table keys */ - - case LUA_TBOOLEAN: - { - bool_t v = lua_toboolean( L, i); - DEBUGSPEW_CODE( fprintf( stderr, "%s\n", v ? "true" : "false")); - lua_pushboolean( L2, v); - } - break; - - case LUA_TNUMBER: - /* LNUM patch support (keeping integer accuracy) */ + bool_t ret = TRUE; + int val_type = lua_type( L, i); + static int const pod_mask = (1 << LUA_TNIL) | (1 << LUA_TBOOLEAN) | (1 << LUA_TLIGHTUSERDATA) | (1 << LUA_TNUMBER) | (1 << LUA_TSTRING); + STACK_GROW( L2, 1); + STACK_CHECK( L, 0); // L // L2 + STACK_CHECK( L2, 0); // L // L2 + + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "inter_copy_one()\n" INDENT_END)); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%s %s: " INDENT_END, lua_type_names[val_type], vt_names[vt])); + + // Non-POD can be skipped if its metatable contains { __lanesignore = true } + if( ((1 << val_type) & pod_mask) == 0) + { + if( lua_getmetatable( L, i)) // ... mt + { + lua_getfield( L, -1, "__lanesignore"); // ... mt ignore? + if( lua_isboolean( L, -1) && lua_toboolean( L, -1)) + { + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "__lanesignore -> LUA_TNIL\n" INDENT_END)); + val_type = LUA_TNIL; + } + lua_pop( L, 2); // ... + } + } + STACK_MID( L, 0); + + /* Lets push nil to L2 if the object should be ignored */ + switch( val_type) + { + /* Basic types allowed both as values, and as table keys */ + + case LUA_TBOOLEAN: + { + bool_t v = lua_toboolean( L, i); + DEBUGSPEW_CODE( fprintf( stderr, "%s\n", v ? 
"true" : "false")); + lua_pushboolean( L2, v); + } + break; + + case LUA_TNUMBER: + /* LNUM patch support (keeping integer accuracy) */ #if defined LUA_LNUM || LUA_VERSION_NUM >= 503 - if( lua_isinteger( L, i)) - { - lua_Integer v = lua_tointeger( L, i); - DEBUGSPEW_CODE( fprintf( stderr, LUA_INTEGER_FMT "\n", v)); - lua_pushinteger( L2, v); - break; - } - else + if( lua_isinteger( L, i)) + { + lua_Integer v = lua_tointeger( L, i); + DEBUGSPEW_CODE( fprintf( stderr, LUA_INTEGER_FMT "\n", v)); + lua_pushinteger( L2, v); + break; + } + else #endif // defined LUA_LNUM || LUA_VERSION_NUM >= 503 - { - lua_Number v = lua_tonumber( L, i); - DEBUGSPEW_CODE( fprintf( stderr, LUA_NUMBER_FMT "\n", v)); - lua_pushnumber( L2, v); - } - break; - - case LUA_TSTRING: - { - size_t len; - char const* s = lua_tolstring( L, i, &len); - DEBUGSPEW_CODE( fprintf( stderr, "'%s'\n", s)); - lua_pushlstring( L2, s, len); - } - break; - - case LUA_TLIGHTUSERDATA: - { - void* p = lua_touserdata( L, i); - DEBUGSPEW_CODE( fprintf( stderr, "%p\n", p)); - lua_pushlightuserdata( L2, p); - } - break; - - /* The following types are not allowed as table keys */ - - case LUA_TUSERDATA: - ret = inter_copy_userdata( U, L2, L2_cache_i, L, i, vt, mode_, upName_); - break; - - case LUA_TNIL: - if( vt == VT_KEY) - { - ret = FALSE; - break; - } - lua_pushnil( L2); - break; - - case LUA_TFUNCTION: - ret = inter_copy_function( U, L2, L2_cache_i, L, i, vt, mode_, upName_); - break; - - case LUA_TTABLE: - ret = inter_copy_table( U, L2, L2_cache_i, L, i, vt, mode_, upName_); - break; - - /* The following types cannot be copied */ - - case 10: // LuaJIT CDATA - case LUA_TTHREAD: - ret = FALSE; - break; - } - - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - - STACK_END( L2, ret ? 1 : 0); - STACK_END( L, 0); - return ret; + { + lua_Number v = lua_tonumber( L, i); + DEBUGSPEW_CODE( fprintf( stderr, LUA_NUMBER_FMT "\n", v)); + lua_pushnumber( L2, v); + } + break; + + case LUA_TSTRING: + { + size_t len; + char const* s = lua_tolstring( L, i, &len); + DEBUGSPEW_CODE( fprintf( stderr, "'%s'\n", s)); + lua_pushlstring( L2, s, len); + } + break; + + case LUA_TLIGHTUSERDATA: + { + void* p = lua_touserdata( L, i); + DEBUGSPEW_CODE( fprintf( stderr, "%p\n", p)); + lua_pushlightuserdata( L2, p); + } + break; + + /* The following types are not allowed as table keys */ + + case LUA_TUSERDATA: + ret = inter_copy_userdata( U, L2, L2_cache_i, L, i, vt, mode_, upName_); + break; + + case LUA_TNIL: + if( vt == VT_KEY) + { + ret = FALSE; + break; + } + lua_pushnil( L2); + break; + + case LUA_TFUNCTION: + ret = inter_copy_function( U, L2, L2_cache_i, L, i, vt, mode_, upName_); + break; + + case LUA_TTABLE: + ret = inter_copy_table( U, L2, L2_cache_i, L, i, vt, mode_, upName_); + break; + + /* The following types cannot be copied */ + + case 10: // LuaJIT CDATA + case LUA_TTHREAD: + ret = FALSE; + break; + } + + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + + STACK_END( L2, ret ? 1 : 0); + STACK_END( L, 0); + return ret; } /* @@ -1934,122 +1934,122 @@ bool_t inter_copy_one( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* */ int luaG_inter_copy( Universe* U, lua_State* L, lua_State* L2, uint_t n, LookupMode mode_) { - uint_t top_L = lua_gettop( L); // ... {}n - uint_t top_L2 = lua_gettop( L2); // ... - uint_t i, j; - char tmpBuf[16]; - char const* pBuf = U->verboseErrors ? 
tmpBuf : "?"; - bool_t copyok = TRUE; - - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "luaG_inter_copy()\n" INDENT_END)); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - - if( n > top_L) - { - // requesting to copy more than is available? - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "nothing to copy()\n" INDENT_END)); - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - return -1; - } - - STACK_CHECK( L2, 0); - STACK_GROW( L2, n + 1); - - /* - * Make a cache table for the duration of this copy. Collects tables and - * function entries, avoiding the same entries to be passed on as multiple - * copies. ESSENTIAL i.e. for handling upvalue tables in the right manner! - */ - lua_newtable( L2); // ... cache - - STACK_CHECK( L, 0); - for( i = top_L - n + 1, j = 1; i <= top_L; ++ i, ++ j) - { - if( U->verboseErrors) - { - sprintf( tmpBuf, "arg_%d", j); - } - copyok = inter_copy_one( U, L2, top_L2 + 1, L, i, VT_NORMAL, mode_, pBuf); // ... cache {}n - if( !copyok) - { - break; - } - } - STACK_END( L, 0); - - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - - if( copyok) - { - STACK_MID( L2, n + 1); - // Remove the cache table. Persistent caching would cause i.e. multiple - // messages passed in the same table to use the same table also in receiving end. - lua_remove( L2, top_L2 + 1); - return 0; - } - - // error -> pop everything from the target state stack - lua_settop( L2, top_L2); - STACK_END( L2, 0); - return -2; + uint_t top_L = lua_gettop( L); // ... {}n + uint_t top_L2 = lua_gettop( L2); // ... + uint_t i, j; + char tmpBuf[16]; + char const* pBuf = U->verboseErrors ? tmpBuf : "?"; + bool_t copyok = TRUE; + + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "luaG_inter_copy()\n" INDENT_END)); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + + if( n > top_L) + { + // requesting to copy more than is available? + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "nothing to copy()\n" INDENT_END)); + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + return -1; + } + + STACK_CHECK( L2, 0); + STACK_GROW( L2, n + 1); + + /* + * Make a cache table for the duration of this copy. Collects tables and + * function entries, avoiding the same entries to be passed on as multiple + * copies. ESSENTIAL i.e. for handling upvalue tables in the right manner! + */ + lua_newtable( L2); // ... cache + + STACK_CHECK( L, 0); + for( i = top_L - n + 1, j = 1; i <= top_L; ++ i, ++ j) + { + if( U->verboseErrors) + { + sprintf( tmpBuf, "arg_%d", j); + } + copyok = inter_copy_one( U, L2, top_L2 + 1, L, i, VT_NORMAL, mode_, pBuf); // ... cache {}n + if( !copyok) + { + break; + } + } + STACK_END( L, 0); + + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + + if( copyok) + { + STACK_MID( L2, n + 1); + // Remove the cache table. Persistent caching would cause i.e. multiple + // messages passed in the same table to use the same table also in receiving end. 
+ lua_remove( L2, top_L2 + 1); + return 0; + } + + // error -> pop everything from the target state stack + lua_settop( L2, top_L2); + STACK_END( L2, 0); + return -2; } int luaG_inter_move( Universe* U, lua_State* L, lua_State* L2, uint_t n, LookupMode mode_) { - int ret = luaG_inter_copy( U, L, L2, n, mode_); - lua_pop( L, (int) n); - return ret; + int ret = luaG_inter_copy( U, L, L2, n, mode_); + lua_pop( L, (int) n); + return ret; } int luaG_inter_copy_package( Universe* U, lua_State* L, lua_State* L2, int package_idx_, LookupMode mode_) { - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "luaG_inter_copy_package()\n" INDENT_END)); - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - // package - STACK_CHECK( L, 0); - STACK_CHECK( L2, 0); - package_idx_ = lua_absindex( L, package_idx_); - if( lua_type( L, package_idx_) != LUA_TTABLE) - { - lua_pushfstring( L, "expected package as table, got %s", luaL_typename( L, package_idx_)); - STACK_MID( L, 1); - // raise the error when copying from lane to lane, else just leave it on the stack to be raised later - return ( mode_ == eLM_LaneBody) ? lua_error( L) : 1; - } - lua_getglobal( L2, "package"); - if( !lua_isnil( L2, -1)) // package library not loaded: do nothing - { - int i; - // package.loaders is renamed package.searchers in Lua 5.2 - // but don't copy it anyway, as the function names change depending on the slot index! - // users should provide an on_state_create function to setup custom loaders instead - // don't copy package.preload in keeper states (they don't know how to translate functions) - char const* entries[] = { "path", "cpath", (mode_ == eLM_LaneBody) ? "preload" : NULL/*, (LUA_VERSION_NUM == 501) ? "loaders" : "searchers"*/, NULL}; - for( i = 0; entries[i]; ++ i) - { - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "package.%s\n" INDENT_END, entries[i])); - lua_getfield( L, package_idx_, entries[i]); - if( lua_isnil( L, -1)) - { - lua_pop( L, 1); - } - else - { - DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); - luaG_inter_move( U, L, L2, 1, mode_); // moves the entry to L2 - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - lua_setfield( L2, -2, entries[i]); // set package[entries[i]] - } - } - } - else - { - DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "'package' not loaded, nothing to do\n" INDENT_END)); - } - lua_pop( L2, 1); - STACK_END( L2, 0); - STACK_END( L, 0); - DEBUGSPEW_CODE( -- U->debugspew_indent_depth); - return 0; + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "luaG_inter_copy_package()\n" INDENT_END)); + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + // package + STACK_CHECK( L, 0); + STACK_CHECK( L2, 0); + package_idx_ = lua_absindex( L, package_idx_); + if( lua_type( L, package_idx_) != LUA_TTABLE) + { + lua_pushfstring( L, "expected package as table, got %s", luaL_typename( L, package_idx_)); + STACK_MID( L, 1); + // raise the error when copying from lane to lane, else just leave it on the stack to be raised later + return ( mode_ == eLM_LaneBody) ? lua_error( L) : 1; + } + lua_getglobal( L2, "package"); + if( !lua_isnil( L2, -1)) // package library not loaded: do nothing + { + int i; + // package.loaders is renamed package.searchers in Lua 5.2 + // but don't copy it anyway, as the function names change depending on the slot index! + // users should provide an on_state_create function to setup custom loaders instead + // don't copy package.preload in keeper states (they don't know how to translate functions) + char const* entries[] = { "path", "cpath", (mode_ == eLM_LaneBody) ? 
"preload" : NULL/*, (LUA_VERSION_NUM == 501) ? "loaders" : "searchers"*/, NULL}; + for( i = 0; entries[i]; ++ i) + { + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "package.%s\n" INDENT_END, entries[i])); + lua_getfield( L, package_idx_, entries[i]); + if( lua_isnil( L, -1)) + { + lua_pop( L, 1); + } + else + { + DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); + luaG_inter_move( U, L, L2, 1, mode_); // moves the entry to L2 + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + lua_setfield( L2, -2, entries[i]); // set package[entries[i]] + } + } + } + else + { + DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "'package' not loaded, nothing to do\n" INDENT_END)); + } + lua_pop( L2, 1); + STACK_END( L2, 0); + STACK_END( L, 0); + DEBUGSPEW_CODE( -- U->debugspew_indent_depth); + return 0; } diff --git a/src/tools.h b/src/tools.h index 3bf5a02..a0893e4 100644 --- a/src/tools.h +++ b/src/tools.h @@ -27,9 +27,9 @@ void push_registry_subtable( lua_State* L, UniqueKey key_); enum e_vt { - VT_NORMAL, - VT_KEY, - VT_METATABLE + VT_NORMAL, + VT_KEY, + VT_METATABLE }; bool_t inter_copy_one( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, enum e_vt vt, LookupMode mode_, char const* upName_); diff --git a/src/uniquekey.h b/src/uniquekey.h index 0cef3a1..ff3d45d 100644 --- a/src/uniquekey.h +++ b/src/uniquekey.h @@ -6,7 +6,7 @@ // Lua light userdata can hold a pointer. struct s_UniqueKey { - void* value; + void* value; }; typedef struct s_UniqueKey UniqueKey; diff --git a/src/universe.c b/src/universe.c index e1cd38f..9f84baf 100644 --- a/src/universe.c +++ b/src/universe.c @@ -43,33 +43,33 @@ static DECLARE_CONST_UNIQUE_KEY( UNIVERSE_REGKEY, 0x9f877b2cf078f17f); Universe* universe_create( lua_State* L) { - Universe* U = (Universe*) lua_newuserdatauv( L, sizeof(Universe), 0); // universe - memset( U, 0, sizeof( Universe)); - STACK_CHECK( L, 1); - REGISTRY_SET( L, UNIVERSE_REGKEY, lua_pushvalue(L, -2)); // universe - STACK_END( L, 1); - return U; + Universe* U = (Universe*) lua_newuserdatauv( L, sizeof(Universe), 0); // universe + memset( U, 0, sizeof( Universe)); + STACK_CHECK( L, 1); + REGISTRY_SET( L, UNIVERSE_REGKEY, lua_pushvalue(L, -2)); // universe + STACK_END( L, 1); + return U; } // ################################################################################################ void universe_store( lua_State* L, Universe* U) { - STACK_CHECK( L, 0); - REGISTRY_SET( L, UNIVERSE_REGKEY, (NULL != U) ? lua_pushlightuserdata( L, U) : lua_pushnil( L)); - STACK_END( L, 0); + STACK_CHECK( L, 0); + REGISTRY_SET( L, UNIVERSE_REGKEY, (NULL != U) ? 
lua_pushlightuserdata( L, U) : lua_pushnil( L)); + STACK_END( L, 0); } // ################################################################################################ Universe* universe_get( lua_State* L) { - Universe* universe; - STACK_GROW( L, 2); - STACK_CHECK( L, 0); - REGISTRY_GET( L, UNIVERSE_REGKEY); - universe = lua_touserdata( L, -1); // NULL if nil - lua_pop( L, 1); - STACK_END( L, 0); - return universe; + Universe* universe; + STACK_GROW( L, 2); + STACK_CHECK( L, 0); + REGISTRY_GET( L, UNIVERSE_REGKEY); + universe = lua_touserdata( L, -1); // NULL if nil + lua_pop( L, 1); + STACK_END( L, 0); + return universe; } diff --git a/src/universe.h b/src/universe.h index 0ef5a93..248a117 100644 --- a/src/universe.h +++ b/src/universe.h @@ -27,16 +27,16 @@ typedef struct s_Lane Lane; // everything we need to provide to lua_newstate() struct AllocatorDefinition_s { - lua_Alloc allocF; - void* allocUD; + lua_Alloc allocF; + void* allocUD; }; typedef struct AllocatorDefinition_s AllocatorDefinition; // mutex-protected allocator for use with Lua states that share a non-threadsafe allocator struct ProtectedAllocator_s { - AllocatorDefinition definition; - MUTEX_T lock; + AllocatorDefinition definition; + MUTEX_T lock; }; typedef struct ProtectedAllocator_s ProtectedAllocator; @@ -47,51 +47,51 @@ typedef struct ProtectedAllocator_s ProtectedAllocator; // don't forget to initialize all members in LG_configure() struct s_Universe { - // for verbose errors - bool_t verboseErrors; + // for verbose errors + bool_t verboseErrors; - bool_t demoteFullUserdata; + bool_t demoteFullUserdata; - // before a state is created, this function will be called to obtain the allocator - lua_CFunction provide_allocator; + // before a state is created, this function will be called to obtain the allocator + lua_CFunction provide_allocator; - // after a state is created, this function will be called right after the bases libraries are loaded - lua_CFunction on_state_create_func; + // after a state is created, this function will be called right after the bases libraries are loaded + lua_CFunction on_state_create_func; - // Initialized and used only if allocator="protected" is found in the configuration settings - // contains a mutex and the original allocator definition - ProtectedAllocator protected_allocator; + // Initialized and used only if allocator="protected" is found in the configuration settings + // contains a mutex and the original allocator definition + ProtectedAllocator protected_allocator; - Keepers* keepers; + Keepers* keepers; - // Initialized by 'init_once_LOCKED()': the deep userdata Linda object - // used for timers (each lane will get a proxy to this) - volatile DeepPrelude* timer_deep; // = NULL + // Initialized by 'init_once_LOCKED()': the deep userdata Linda object + // used for timers (each lane will get a proxy to this) + volatile DeepPrelude* timer_deep; // = NULL #if HAVE_LANE_TRACKING - MUTEX_T tracking_cs; - Lane* volatile tracking_first; // will change to TRACKING_END if we want to activate tracking + MUTEX_T tracking_cs; + Lane* volatile tracking_first; // will change to TRACKING_END if we want to activate tracking #endif // HAVE_LANE_TRACKING - MUTEX_T selfdestruct_cs; + MUTEX_T selfdestruct_cs; - // require() serialization - MUTEX_T require_cs; + // require() serialization + MUTEX_T require_cs; - // Lock for reference counter inc/dec locks (to be initialized by outside code) TODO: get rid of this and use atomics instead! 
- MUTEX_T deep_lock; - MUTEX_T mtid_lock; + // Lock for reference counter inc/dec locks (to be initialized by outside code) TODO: get rid of this and use atomics instead! + MUTEX_T deep_lock; + MUTEX_T mtid_lock; - lua_Integer last_mt_id; + lua_Integer last_mt_id; #if USE_DEBUG_SPEW - int debugspew_indent_depth; + int debugspew_indent_depth; #endif // USE_DEBUG_SPEW - Lane* volatile selfdestruct_first; - // After a lane has removed itself from the chain, it still performs some processing. - // The terminal desinit sequence should wait for all such processing to terminate before force-killing threads - int volatile selfdestructing_count; + Lane* volatile selfdestruct_first; + // After a lane has removed itself from the chain, it still performs some processing. + // The terminal desinit sequence should wait for all such processing to terminate before force-killing threads + int volatile selfdestructing_count; }; typedef struct s_Universe Universe; -- cgit v1.2.3-55-g6feb