Diffstat (limited to 'src/universe.h')
-rw-r--r--   src/universe.h   47
1 file changed, 22 insertions(+), 25 deletions(-)
diff --git a/src/universe.h b/src/universe.h
index 34cef33..a6beb68 100644
--- a/src/universe.h
+++ b/src/universe.h
@@ -8,9 +8,10 @@ extern "C" {
 }
 #endif // __cplusplus
 
-#include "threading.h"
 #include "macros_and_utils.h"
 
+#include <mutex>
+
 // forwards
 struct DeepPrelude;
 struct Keepers;
@@ -28,15 +29,15 @@ struct Lane;
 // everything we need to provide to lua_newstate()
 struct AllocatorDefinition
 {
-    lua_Alloc allocF;
-    void* allocUD;
+    lua_Alloc allocF{ nullptr };
+    void* allocUD{ nullptr };
 };
 
 // mutex-protected allocator for use with Lua states that share a non-threadsafe allocator
 struct ProtectedAllocator
 {
     AllocatorDefinition definition;
-    MUTEX_T lock;
+    std::mutex lock;
 };
 
 // ################################################################################################
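
[note] With the lock now a std::mutex (and <mutex> included above), a wrapper allocation function can serialize calls to the wrapped lua_Alloc with a std::lock_guard. The following is only a minimal sketch of that pattern under assumed names (protected_lua_Alloc is hypothetical), not the actual Lanes implementation:

// sketch: a lua_Alloc-compatible entry point whose userdata points at a ProtectedAllocator
#include <mutex>

extern "C" {
#include "lua.h"
}

struct AllocatorDefinition
{
    lua_Alloc allocF{ nullptr };
    void* allocUD{ nullptr };
};

struct ProtectedAllocator
{
    AllocatorDefinition definition;
    std::mutex lock;
};

static void* protected_lua_Alloc(void* ud_, void* ptr_, size_t osize_, size_t nsize_)
{
    ProtectedAllocator* const allocator{ static_cast<ProtectedAllocator*>(ud_) };
    // hold the mutex only for the duration of the underlying allocator call
    std::lock_guard<std::mutex> guard{ allocator->lock };
    return allocator->definition.allocF(allocator->definition.allocUD, ptr_, osize_, nsize_);
}
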
@@ -47,15 +48,15 @@ struct ProtectedAllocator
 struct Universe
 {
     // for verbose errors
-    bool verboseErrors;
+    bool verboseErrors{ false };
 
-    bool demoteFullUserdata;
+    bool demoteFullUserdata{ false };
 
     // before a state is created, this function will be called to obtain the allocator
-    lua_CFunction provide_allocator;
+    lua_CFunction provide_allocator{ nullptr };
 
     // after a state is created, this function will be called right after the bases libraries are loaded
-    lua_CFunction on_state_create_func;
+    lua_CFunction on_state_create_func{ nullptr };
 
     // if allocator="protected" is found in the configuration settings, a wrapper allocator will protect all allocator calls with a mutex
     // contains a mutex and the original allocator definition
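
[note] The fields in this hunk gain C++11 default member initializers instead of relying on the creation code to fill them in. A tiny self-contained sketch (illustrative names, not Lanes code) of the guarantee this buys:

#include <cassert>

// with default member initializers, default construction alone leaves
// every field in a known state; no separate assignment pass is needed
struct ExampleUniverse
{
    bool verboseErrors{ false };
    void* provide_allocator{ nullptr };
    int debugspew_indent_depth{ 0 };
};

int main()
{
    ExampleUniverse u; // the implicit default constructor runs the initializers
    assert(!u.verboseErrors && u.provide_allocator == nullptr && u.debugspew_indent_depth == 0);
    return 0;
}
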
@@ -63,38 +64,34 @@ struct Universe
 
     AllocatorDefinition internal_allocator;
 
-    Keepers* keepers;
+    Keepers* keepers{ nullptr };
 
     // Initialized by 'init_once_LOCKED()': the deep userdata Linda object
     // used for timers (each lane will get a proxy to this)
-    volatile DeepPrelude* timer_deep; // = nullptr
+    volatile DeepPrelude* timer_deep{ nullptr }; // = nullptr
 
 #if HAVE_LANE_TRACKING()
-    MUTEX_T tracking_cs;
-    Lane* volatile tracking_first; // will change to TRACKING_END if we want to activate tracking
+    std::mutex tracking_cs;
+    Lane* volatile tracking_first{ nullptr }; // will change to TRACKING_END if we want to activate tracking
 #endif // HAVE_LANE_TRACKING()
 
-    MUTEX_T selfdestruct_cs;
+    std::mutex selfdestruct_cs;
 
     // require() serialization
-    MUTEX_T require_cs;
-
-    // Lock for reference counter inc/dec locks (to be initialized by outside code) TODO: get rid of this and use atomics instead!
-    MUTEX_T deep_lock;
-    MUTEX_T mtid_lock;
+    std::recursive_mutex require_cs;
 
-    lua_Integer last_mt_id;
+    std::atomic<lua_Integer> last_mt_id{ 0 };
 
 #if USE_DEBUG_SPEW()
-    int debugspew_indent_depth;
+    int debugspew_indent_depth{ 0 };
 #endif // USE_DEBUG_SPEW()
 
-    Lane* volatile selfdestruct_first;
+    Lane* volatile selfdestruct_first{ nullptr };
     // After a lane has removed itself from the chain, it still performs some processing.
     // The terminal desinit sequence should wait for all such processing to terminate before force-killing threads
-    int volatile selfdestructing_count;
+    int volatile selfdestructing_count{ 0 };
 };
 
-Universe* universe_get( lua_State* L);
-Universe* universe_create( lua_State* L);
-void universe_store( lua_State* L, Universe* U);
+Universe* universe_get(lua_State* L);
+Universe* universe_create(lua_State* L);
+void universe_store(lua_State* L, Universe* U);
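
[note] Dropping deep_lock/mtid_lock together with switching last_mt_id to std::atomic<lua_Integer> follows the old TODO about replacing those locks with atomics. A minimal sketch (not the actual Lanes code; next_mt_id is a hypothetical helper) of the lock-free pattern this enables:

#include <atomic>

extern "C" {
#include "lua.h"
}

static std::atomic<lua_Integer> last_mt_id{ 0 };

// returns a process-wide unique metatable id, safe to call from any lane
static lua_Integer next_mt_id()
{
    // fetch_add returns the previous value; +1 yields the freshly reserved id,
    // with no mutex held around the increment
    return last_mt_id.fetch_add(1, std::memory_order_relaxed) + 1;
}

The remaining std::mutex members (tracking_cs, selfdestruct_cs) and the std::recursive_mutex require_cs are still locked explicitly where needed, typically via std::lock_guard as in the allocator sketch above.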