aboutsummaryrefslogtreecommitdiff
path: root/src/universe.h
diff options
context:
space:
mode:
Diffstat (limited to 'src/universe.h')
-rw-r--r--src/universe.h34
1 files changed, 25 insertions, 9 deletions
diff --git a/src/universe.h b/src/universe.h
index 6a65888..113ed21 100644
--- a/src/universe.h
+++ b/src/universe.h
@@ -35,7 +35,7 @@ class AllocatorDefinition
35 lua_Alloc m_allocF{ nullptr }; 35 lua_Alloc m_allocF{ nullptr };
36 void* m_allocUD{ nullptr }; 36 void* m_allocUD{ nullptr };
37 37
38 static void* operator new(size_t size_, lua_State* L) noexcept { return lua_newuserdatauv(L, size_, 0); } 38 [[nodiscard]] static void* operator new(size_t size_, lua_State* L) noexcept { return lua_newuserdatauv(L, size_, 0); }
39 // always embedded somewhere else or "in-place constructed" as a full userdata 39 // always embedded somewhere else or "in-place constructed" as a full userdata
40 // can't actually delete the operator because the compiler generates stack unwinding code that could call it in case of exception 40 // can't actually delete the operator because the compiler generates stack unwinding code that could call it in case of exception
41 static void operator delete([[maybe_unused]] void* p_, lua_State* L) { ASSERT_L(!"should never be called") }; 41 static void operator delete([[maybe_unused]] void* p_, lua_State* L) { ASSERT_L(!"should never be called") };
@@ -81,7 +81,7 @@ class ProtectedAllocator : public AllocatorDefinition
81 81
82 std::mutex m_lock; 82 std::mutex m_lock;
83 83
84 static void* protected_lua_Alloc(void* ud_, void* ptr_, size_t osize_, size_t nsize_) 84 [[nodiscard]] static void* protected_lua_Alloc(void* ud_, void* ptr_, size_t osize_, size_t nsize_)
85 { 85 {
86 ProtectedAllocator* const allocator{ static_cast<ProtectedAllocator*>(ud_) }; 86 ProtectedAllocator* const allocator{ static_cast<ProtectedAllocator*>(ud_) };
87 std::lock_guard<std::mutex> guard{ allocator->m_lock }; 87 std::lock_guard<std::mutex> guard{ allocator->m_lock };
@@ -91,7 +91,7 @@ class ProtectedAllocator : public AllocatorDefinition
91 public: 91 public:
92 92
93 // we are not like our base class: we can't be created inside a full userdata (or we would have to install a metatable and __gc handler to destroy ourselves properly) 93 // we are not like our base class: we can't be created inside a full userdata (or we would have to install a metatable and __gc handler to destroy ourselves properly)
94 static void* operator new(size_t size_, lua_State* L) noexcept = delete; 94 [[nodiscard]] static void* operator new(size_t size_, lua_State* L) noexcept = delete;
95 static void operator delete(void* p_, lua_State* L) = delete; 95 static void operator delete(void* p_, lua_State* L) = delete;
96 96
97 AllocatorDefinition makeDefinition() 97 AllocatorDefinition makeDefinition()
@@ -119,9 +119,17 @@ class ProtectedAllocator : public AllocatorDefinition
119 119
120// everything regarding the Lanes universe is stored in that global structure 120// everything regarding the Lanes universe is stored in that global structure
121// held as a full userdata in the master Lua state that required it for the first time 121// held as a full userdata in the master Lua state that required it for the first time
122// don't forget to initialize all members in LG_configure() 122class Universe
123struct Universe
124{ 123{
124 public:
125
126#ifdef PLATFORM_LINUX
127 // Linux needs to check, whether it's been run as root
128 bool const m_sudo{ geteuid() == 0 };
129#else
130 bool const m_sudo{ false };
131#endif // PLATFORM_LINUX
132
125 // for verbose errors 133 // for verbose errors
126 bool verboseErrors{ false }; 134 bool verboseErrors{ false };
127 135
@@ -155,20 +163,28 @@ struct Universe
155 // require() serialization 163 // require() serialization
156 std::recursive_mutex require_cs; 164 std::recursive_mutex require_cs;
157 165
166 // metatable unique identifiers
158 std::atomic<lua_Integer> next_mt_id{ 1 }; 167 std::atomic<lua_Integer> next_mt_id{ 1 };
159 168
160#if USE_DEBUG_SPEW() 169#if USE_DEBUG_SPEW()
161 int debugspew_indent_depth{ 0 }; 170 std::atomic<int> debugspew_indent_depth{ 0 };
162#endif // USE_DEBUG_SPEW() 171#endif // USE_DEBUG_SPEW()
163 172
164 Lane* volatile selfdestruct_first{ nullptr }; 173 Lane* volatile selfdestruct_first{ nullptr };
165 // After a lane has removed itself from the chain, it still performs some processing. 174 // After a lane has removed itself from the chain, it still performs some processing.
166 // The terminal deinit sequence should wait for all such processing to terminate before force-killing threads 175 // The terminal deinit sequence should wait for all such processing to terminate before force-killing threads
167 int volatile selfdestructing_count{ 0 }; 176 std::atomic<int> selfdestructing_count{ 0 };
177
178 Universe();
179 ~Universe() = default;
180 Universe(Universe const&) = delete;
181 Universe(Universe&&) = delete;
182 Universe& operator=(Universe const&) = delete;
183 Universe& operator=(Universe&&) = delete;
168}; 184};
169 185
170// ################################################################################################ 186// ################################################################################################
171 187
172Universe* universe_get(lua_State* L); 188[[nodiscard]] Universe* universe_get(lua_State* L);
173Universe* universe_create(lua_State* L); 189[[nodiscard]] Universe* universe_create(lua_State* L);
174void universe_store(lua_State* L, Universe* U); 190void universe_store(lua_State* L, Universe* U);