Diffstat (limited to 'src/universe.h')

 src/universe.h | 60 ++++++++++++++++++++++++++++++------------------------------
 1 file changed, 30 insertions(+), 30 deletions(-)
diff --git a/src/universe.h b/src/universe.h
index c6c9c03..b2107af 100644
--- a/src/universe.h
+++ b/src/universe.h
@@ -30,8 +30,8 @@ class Lane;
 class AllocatorDefinition
 {
     public:
-    lua_Alloc m_allocF{ nullptr };
-    void* m_allocUD{ nullptr };
+    lua_Alloc allocF{ nullptr };
+    void* allocUD{ nullptr };
 
     [[nodiscard]] static void* operator new(size_t size_) noexcept = delete; // can't create one outside of a Lua state
     [[nodiscard]] static void* operator new(size_t size_, lua_State* L_) noexcept { return lua_newuserdatauv(L_, size_, 0); }
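Note (editor's sketch, not part of the commit): because the plain operator new is deleted and only the lua_State* overload remains, an AllocatorDefinition can only be constructed in-place inside a Lua full userdata. Assuming a valid lua_State* L and an allocator pair allocF/allocUD, a call site could look like this:

    // placement-new into a full userdata; resolves to operator new(sizeof(AllocatorDefinition), L)
    AllocatorDefinition* const def{ new (L) AllocatorDefinition{ allocF, allocUD } };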
@@ -40,8 +40,8 @@ class AllocatorDefinition
     static void operator delete([[maybe_unused]] void* p_, lua_State* L_) { LUA_ASSERT(L_, !"should never be called"); }
 
     AllocatorDefinition(lua_Alloc allocF_, void* allocUD_) noexcept
-    : m_allocF{ allocF_ }
-    , m_allocUD{ allocUD_ }
+    : allocF{ allocF_ }
+    , allocUD{ allocUD_ }
     {
     }
     AllocatorDefinition() = default;
@@ -52,22 +52,22 @@ class AllocatorDefinition
 
     void initFrom(lua_State* L_)
     {
-        m_allocF = lua_getallocf(L_, &m_allocUD);
+        allocF = lua_getallocf(L_, &allocUD);
     }
 
     void* lua_alloc(void* ptr_, size_t osize_, size_t nsize_)
     {
-        m_allocF(m_allocUD, ptr_, osize_, nsize_);
+        allocF(allocUD, ptr_, osize_, nsize_);
     }
 
     void* alloc(size_t nsize_)
     {
-        return m_allocF(m_allocUD, nullptr, 0, nsize_);
+        return allocF(allocUD, nullptr, 0, nsize_);
     }
 
     void free(void* ptr_, size_t osize_)
     {
-        std::ignore = m_allocF(m_allocUD, ptr_, osize_, 0);
+        std::ignore = allocF(allocUD, ptr_, osize_, 0);
     }
 };
 
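For context, a short usage sketch of AllocatorDefinition as renamed above, assuming a valid lua_State* L; this is illustrative only and not part of the commit:

    AllocatorDefinition def;
    def.initFrom(L);                  // allocF/allocUD now mirror L's allocator (lua_getallocf)
    void* const p{ def.alloc(128) };  // forwards to allocF(allocUD, nullptr, 0, 128)
    def.free(p, 128);                 // forwards to allocF(allocUD, p, 128, 0)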
@@ -78,13 +78,13 @@ class ProtectedAllocator
 : public AllocatorDefinition
 {
     private:
-    std::mutex m_lock;
+    std::mutex mutex;
 
     [[nodiscard]] static void* protected_lua_Alloc(void* ud_, void* ptr_, size_t osize_, size_t nsize_)
     {
         ProtectedAllocator* const allocator{ static_cast<ProtectedAllocator*>(ud_) };
-        std::lock_guard<std::mutex> guard{ allocator->m_lock };
-        return allocator->m_allocF(allocator->m_allocUD, ptr_, osize_, nsize_);
+        std::lock_guard<std::mutex> guard{ allocator->mutex };
+        return allocator->allocF(allocator->allocUD, ptr_, osize_, nsize_);
     }
 
     public:
@@ -105,9 +105,9 @@ class ProtectedAllocator
     void removeFrom(lua_State* L_)
     {
         // remove the protected allocator, if any
-        if (m_allocF != nullptr) {
+        if (allocF != nullptr) {
             // install the non-protected allocator
-            lua_setallocf(L_, m_allocF, m_allocUD);
+            lua_setallocf(L_, allocF, allocUD);
         }
     }
 };
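Only the removal path is touched by this diff; the matching install step is not shown. A hypothetical counterpart, sketched here for context (it would have to be a member function, since protected_lua_Alloc is private), might look like:

    void installIn(lua_State* L_)
    {
        // route every allocation of L_ through protected_lua_Alloc, serialized by 'mutex'
        lua_setallocf(L_, protected_lua_Alloc, this);
    }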
@@ -121,9 +121,9 @@ class Universe
     public:
 #ifdef PLATFORM_LINUX
     // Linux needs to check, whether it's been run as root
-    bool const m_sudo{ geteuid() == 0 };
+    bool const sudo{ geteuid() == 0 };
 #else
-    bool const m_sudo{ false };
+    bool const sudo{ false };
 #endif // PLATFORM_LINUX
 
     // for verbose errors
@@ -132,44 +132,44 @@ class Universe
     bool demoteFullUserdata{ false };
 
     // before a state is created, this function will be called to obtain the allocator
-    lua_CFunction provide_allocator{ nullptr };
+    lua_CFunction provideAllocator{ nullptr };
 
     // after a state is created, this function will be called right after the bases libraries are loaded
-    lua_CFunction on_state_create_func{ nullptr };
+    lua_CFunction onStateCreateFunc{ nullptr };
 
     // if allocator="protected" is found in the configuration settings, a wrapper allocator will protect all allocator calls with a mutex
     // contains a mutex and the original allocator definition
-    ProtectedAllocator protected_allocator;
+    ProtectedAllocator protectedAllocator;
 
-    AllocatorDefinition internal_allocator;
+    AllocatorDefinition internalAllocator;
 
     Keepers* keepers{ nullptr };
 
     // Initialized by 'init_once_LOCKED()': the deep userdata Linda object
     // used for timers (each lane will get a proxy to this)
-    DeepPrelude* timer_deep{ nullptr };
+    DeepPrelude* timerLinda{ nullptr };
 
 #if HAVE_LANE_TRACKING()
-    std::mutex tracking_cs;
-    Lane* volatile tracking_first{ nullptr }; // will change to TRACKING_END if we want to activate tracking
+    std::mutex trackingMutex;
+    Lane* volatile trackingFirst{ nullptr }; // will change to TRACKING_END if we want to activate tracking
 #endif // HAVE_LANE_TRACKING()
 
-    std::mutex selfdestruct_cs;
+    std::mutex selfdestructMutex;
 
     // require() serialization
-    std::recursive_mutex require_cs;
+    std::recursive_mutex requireMutex;
 
     // metatable unique identifiers
-    std::atomic<lua_Integer> next_mt_id{ 1 };
+    std::atomic<lua_Integer> nextMetatableId{ 1 };
 
 #if USE_DEBUG_SPEW()
-    std::atomic<int> debugspew_indent_depth{ 0 };
+    std::atomic<int> debugspewIndentDepth{ 0 };
 #endif // USE_DEBUG_SPEW()
 
-    Lane* volatile selfdestruct_first{ nullptr };
+    Lane* volatile selfdestructFirst{ nullptr };
     // After a lane has removed itself from the chain, it still performs some processing.
     // The terminal desinit sequence should wait for all such processing to terminate before force-killing threads
-    std::atomic<int> selfdestructing_count{ 0 };
+    std::atomic<int> selfdestructingCount{ 0 };
 
     Universe();
     ~Universe() = default;
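Among the renamed members, nextMetatableId is an atomic counter for metatable unique identifiers. A sketch of how a caller would presumably reserve an id (the call site itself is not part of this diff):

    // assumed usage: grab the next id without taking any lock
    lua_Integer const id{ U->nextMetatableId.fetch_add(1, std::memory_order_relaxed) };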
@@ -201,13 +201,13 @@ class DebugSpewIndentScope
     : U{ U_ }
     {
         if (U)
-            U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed);
+            U->debugspewIndentDepth.fetch_add(1, std::memory_order_relaxed);
     }
 
     ~DebugSpewIndentScope()
     {
         if (U)
-            U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed);
+            U->debugspewIndentDepth.fetch_sub(1, std::memory_order_relaxed);
     }
 };
 #endif // USE_DEBUG_SPEW()
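DebugSpewIndentScope is a small RAII helper around the renamed debugspewIndentDepth counter. A usage sketch, assuming a Universe* U (the constructor parameter type is inferred from the initializer list above):

    {
        DebugSpewIndentScope scope{ U }; // fetch_add(1) on debugspewIndentDepth
        // ... emit debug spew one level deeper ...
    } // destructor runs fetch_sub(1), restoring the previous depth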