path: root/src/tools.cpp
Diffstat (limited to 'src/tools.cpp')
-rw-r--r--    src/tools.cpp    18
1 files changed, 5 insertions, 13 deletions
diff --git a/src/tools.cpp b/src/tools.cpp
index 1e38144..68846ba 100644
--- a/src/tools.cpp
+++ b/src/tools.cpp
@@ -174,9 +174,9 @@ static void* protected_lua_Alloc( void *ud, void *ptr, size_t osize, size_t nsize)
 {
     void* p;
     ProtectedAllocator* s = (ProtectedAllocator*) ud;
-    MUTEX_LOCK( &s->lock);
+    s->lock.lock();
     p = s->definition.allocF( s->definition.allocUD, ptr, osize, nsize);
-    MUTEX_UNLOCK( &s->lock);
+    s->lock.unlock();
     return p;
 }
 
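The hunk above swaps the MUTEX_LOCK/MUTEX_UNLOCK macros for direct std::mutex lock()/unlock() calls around the wrapped allocator. Below is a minimal sketch of the same critical section written with std::lock_guard instead; it is not the committed code, and the struct layout is only assumed from the field names visible in the diff (ProtectedAllocator, definition.allocF, definition.allocUD, lock).

#include <mutex>
#include "lua.h"

// Assumed shapes, reconstructed from the member accesses in the diff.
struct AllocatorDefinition
{
    lua_Alloc allocF;
    void* allocUD;
};

struct ProtectedAllocator
{
    AllocatorDefinition definition;
    std::mutex lock;
};

static void* protected_lua_Alloc_sketch( void* ud, void* ptr, size_t osize, size_t nsize)
{
    ProtectedAllocator* s = static_cast<ProtectedAllocator*>( ud);
    std::lock_guard<std::mutex> guard( s->lock); // released on every return path
    return s->definition.allocF( s->definition.allocUD, ptr, osize, nsize);
}

The explicit lock()/unlock() pair in the commit is equivalent here, since the wrapped C allocator cannot throw; the guard simply makes that invariant irrelevant.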
@@ -214,9 +214,7 @@ void initialize_allocator_function( Universe* U, lua_State* L)
     }
     else if( lua_type( L, -1) == LUA_TSTRING) // should be "protected"
     {
-        // initialize all we need for the protected allocator
-        MUTEX_INIT( &U->protected_allocator.lock); // the mutex
-        // and the original allocator to call from inside protection by the mutex
+        // set the original allocator to call from inside protection by the mutex
         U->protected_allocator.definition.allocF = lua_getallocf( L, &U->protected_allocator.definition.allocUD);
         // before a state is created, this function will be called to obtain the allocator
         U->provide_allocator = luaG_provide_protected_allocator;
@@ -226,8 +224,6 @@ void initialize_allocator_function( Universe* U, lua_State* L)
     }
     else
    {
-        // initialize the mutex even if we are not going to use it, because cleanup_allocator_function will deinitialize it
-        MUTEX_INIT( &U->protected_allocator.lock);
         // just grab whatever allocator was provided to lua_newstate
         U->protected_allocator.definition.allocF = lua_getallocf( L, &U->protected_allocator.definition.allocUD);
     }
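Both branches above can drop MUTEX_INIT because a std::mutex member is ready to use as soon as its default constructor has run, and the MUTEX_FREE removed from cleanup_allocator_function below becomes unnecessary for the same reason: the destructor releases the mutex when the owning Universe goes away. A tiny illustration, with hypothetical reduced types:

#include <mutex>

// Hypothetical reduced types, for illustration only.
struct ProtectedAllocatorSketch
{
    std::mutex lock; // default-constructed: nothing like MUTEX_INIT is needed
};

struct UniverseSketch
{
    ProtectedAllocatorSketch protected_allocator;
};

int main()
{
    UniverseSketch U; // the mutex is usable right away
    std::lock_guard<std::mutex> guard( U.protected_allocator.lock);
    return 0;         // ~std::mutex runs when U leaves scope, no MUTEX_FREE
}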
@@ -258,8 +254,6 @@ void cleanup_allocator_function( Universe* U, lua_State* L)
     {
         // install the non-protected allocator
         lua_setallocf( L, U->protected_allocator.definition.allocF, U->protected_allocator.definition.allocUD);
-        // release the mutex
-        MUTEX_FREE( &U->protected_allocator.lock);
     }
 }
 
@@ -645,15 +639,13 @@ static lua_Integer get_mt_id( Universe* U, lua_State* L, int i)
     lua_pushvalue( L, i);                            // ... _R[REG_MTID] {mt}
     lua_rawget( L, -2);                              // ... _R[REG_MTID] mtk?
 
     id = lua_tointeger( L, -1); // 0 for nil
     lua_pop( L, 1);                                  // ... _R[REG_MTID]
     STACK_CHECK( L, 1);
 
     if( id == 0)
     {
-        MUTEX_LOCK( &U->mtid_lock);
-        id = ++ U->last_mt_id;
-        MUTEX_UNLOCK( &U->mtid_lock);
+        id = U->last_mt_id.fetch_add(1, std::memory_order_relaxed);
 
         /* Create two-way references: id_uint <-> table
         */
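The last hunk replaces the dedicated mtid_lock mutex around the metatable-id counter with a single atomic increment. One subtlety worth noting: fetch_add returns the value held before the increment, while the removed 'id = ++ U->last_mt_id' produced the value after it, and get_mt_id treats 0 as "no id assigned yet". The diff does not show how last_mt_id is initialized, so the sketch below assumes a starting value of 1 to keep the first returned id nonzero; only uniqueness matters, which is also why relaxed memory ordering is sufficient.

#include <atomic>
#include <cstdint>

// std::int64_t stands in for lua_Integer in this sketch.
static std::atomic<std::int64_t> last_mt_id{ 1 }; // assumed initial value, not visible in the diff

static std::int64_t next_mt_id()
{
    // relaxed ordering is enough: the ids only need to be unique,
    // nothing else is synchronized through this counter
    return last_mt_id.fetch_add( 1, std::memory_order_relaxed);
}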