Diffstat:
 -rw-r--r--  src/lanes.c | 68
 1 file changed, 11 insertions(+), 57 deletions(-)
diff --git a/src/lanes.c b/src/lanes.c
index c8e012c..ccb32c0 100644
--- a/src/lanes.c
+++ b/src/lanes.c
@@ -580,25 +580,6 @@ static bool_t selfdestruct_remove( Lane* s)
 }
 
 /*
-** mutex-protected allocator for use with Lua states that have non-threadsafe allocators (such as LuaJIT)
-*/
-struct ProtectedAllocator_s
-{
-    lua_Alloc allocF;
-    void* allocUD;
-    MUTEX_T lock;
-};
-void * protected_lua_Alloc( void *ud, void *ptr, size_t osize, size_t nsize)
-{
-    void* p;
-    struct ProtectedAllocator_s* s = (struct ProtectedAllocator_s*) ud;
-    MUTEX_LOCK( &s->lock);
-    p = s->allocF( s->allocUD, ptr, osize, nsize);
-    MUTEX_UNLOCK( &s->lock);
-    return p;
-}
-
-/*
 * Process end; cancel any still free-running threads
 */
 static int selfdestruct_gc( lua_State* L)
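The block removed above is the wrapper itself: ProtectedAllocator_s captures the state's original allocator, and protected_lua_Alloc forwards every request to it under a mutex, so states whose stock allocator is not thread-safe (LuaJIT) can still serve concurrent lanes. The wrapper leans on the standard lua_Alloc contract, where one callback plays the role of malloc, realloc and free. For reference, a minimal conforming allocator in the pattern of the Lua reference manual (not part of this commit):

#include <stddef.h>
#include <stdlib.h>

/* Minimal lua_Alloc-conforming allocator: nsize == 0 means free(ptr),
 * anything else means (re)allocate ptr to nsize bytes. This contract is
 * why code later in this diff can call allocF( allocUD, NULL, 0, size)
 * to allocate and allocF( allocUD, s, size, 0) to free. */
static void* l_alloc( void* ud, void* ptr, size_t osize, size_t nsize)
{
    (void) ud;
    (void) osize; /* not needed when backing onto realloc/free */
    if( nsize == 0)
    {
        free( ptr);
        return NULL;
    }
    return realloc( ptr, nsize);
}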
@@ -679,15 +660,9 @@ static int selfdestruct_gc( lua_State* L)
 
     // If some lanes are currently cleaning after themselves, wait until they are done.
     // They are no longer listed in the selfdestruct chain, but they still have to lua_close().
+    while( U->selfdestructing_count > 0)
     {
-        bool_t again = TRUE;
-        do
-        {
-            MUTEX_LOCK( &U->selfdestruct_cs);
-            again = (U->selfdestructing_count > 0) ? TRUE : FALSE;
-            MUTEX_UNLOCK( &U->selfdestruct_cs);
-            YIELD();
-        } while( again);
+        YIELD();
     }
 
     //---
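The old loop took U->selfdestruct_cs just to read an integer counter; the new one polls the counter directly and yields between probes. A standalone sketch of the same wait pattern, using C11 atomics to make the lock-free read well-defined (an assumption on my part; Lanes itself reads a plain int and uses its own YIELD() macro, and POSIX sched_yield stands in for it here):

#include <stdatomic.h>
#include <sched.h>

/* Counter decremented by each lane as it finishes its own lua_close(). */
static atomic_int selfdestructing_count;

/* Spin until every self-destructing lane is done, yielding the CPU so
 * the lanes being waited on can actually make progress. */
static void wait_for_selfdestructing_lanes( void)
{
    while( atomic_load( &selfdestructing_count) > 0)
    {
        sched_yield();
    }
}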
@@ -727,6 +702,13 @@ static int selfdestruct_gc( lua_State* L)
         }
     }
 
+    // If some lanes are currently cleaning after themselves, wait until they are done.
+    // They are no longer listed in the selfdestruct chain, but they still have to lua_close().
+    while( U->selfdestructing_count > 0)
+    {
+        YIELD();
+    }
+
     // necessary so that calling free_deep_prelude doesn't crash because linda_id expects a linda lightuserdata at absolute slot 1
     lua_settop( L, 0);
     // no need to mutex-protect this as all threads in the universe are gone at that point
@@ -740,18 +722,7 @@ static int selfdestruct_gc( lua_State* L)
     close_keepers( U, L);
 
     // remove the protected allocator, if any
-    {
-        void* ud;
-        lua_Alloc allocF = lua_getallocf( L, &ud);
-
-        if( allocF == protected_lua_Alloc)
-        {
-            struct ProtectedAllocator_s* s = (struct ProtectedAllocator_s*) ud;
-            lua_setallocf( L, s->allocF, s->allocUD);
-            MUTEX_FREE( &s->lock);
-            s->allocF( s->allocUD, s, sizeof( struct ProtectedAllocator_s), 0);
-        }
-    }
+    cleanup_allocator_function( U, L);
 
 #if HAVE_LANE_TRACKING
     MUTEX_FREE( &U->tracking_cs);
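cleanup_allocator_function() is not defined anywhere in this diff; a plausible sketch, assuming it simply hoists the deleted block verbatim (the Universe* parameter hints the wrapper state may instead move into U, so treat the body below as illustrative only):

/* Hypothetical reconstruction -- mirrors the block deleted above. */
void cleanup_allocator_function( Universe* U, lua_State* L)
{
    void* ud;
    lua_Alloc allocF = lua_getallocf( L, &ud);
    (void) U; /* unused in this literal hoisting of the old code */
    if( allocF == protected_lua_Alloc) // only if the wrapper was installed
    {
        struct ProtectedAllocator_s* s = (struct ProtectedAllocator_s*) ud;
        lua_setallocf( L, s->allocF, s->allocUD); // restore the original allocator
        MUTEX_FREE( &s->lock);
        s->allocF( s->allocUD, s, sizeof( struct ProtectedAllocator_s), 0); // free s through it
    }
}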
@@ -2097,24 +2068,6 @@ LUAG_FUNC( configure)
     DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: lanes.configure() BEGIN\n" INDENT_END, L));
     DEBUGSPEW_CODE( if( U) ++ U->debugspew_indent_depth);
 
-    lua_getfield( L, 1, "protect_allocator");                    // settings protect_allocator
-    if( lua_toboolean( L, -1))
-    {
-        void* allocUD;
-        lua_Alloc allocF = lua_getallocf( L, &allocUD);
-        if( allocF != protected_lua_Alloc) // just in case
-        {
-            struct ProtectedAllocator_s* s = (struct ProtectedAllocator_s*) allocF( allocUD, NULL, 0, sizeof( struct ProtectedAllocator_s));
-            s->allocF = allocF;
-            s->allocUD = allocUD;
-            MUTEX_INIT( &s->lock);
-            lua_setallocf( L, protected_lua_Alloc, s);
-        }
-    }
-    lua_pop( L, 1);                                              // settings
-    STACK_MID( L, 1);
-
-    // grab or create the universe
     if( U == NULL)
     {
         U = universe_create( L);                                 // settings universe
@@ -2144,6 +2097,7 @@ LUAG_FUNC( configure)
     MUTEX_INIT( &U->deep_lock);
     MUTEX_INIT( &U->mtid_lock);
     U->selfdestruct_first = SELFDESTRUCT_END;
+    initialize_allocator_function( U, L);
     initialize_on_state_create( U, L);
     init_keepers( U, L);
     STACK_MID( L, 1);
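Likewise, initialize_allocator_function() is only called here, not defined; presumably it absorbs the protect_allocator handling deleted from configure() above, now running during universe creation instead. A hedged sketch along those lines (names copied from the removed code; the real definition may differ):

/* Hypothetical reconstruction of the extracted setup helper. Reads the
 * "protect_allocator" flag from the settings table at stack slot 1 and,
 * if set, swaps in the mutex-protected allocator wrapper. */
void initialize_allocator_function( Universe* U, lua_State* L)
{
    (void) U; /* unused if the wrapper state stays heap-allocated as before */
    lua_getfield( L, 1, "protect_allocator");  // settings protect_allocator
    if( lua_toboolean( L, -1))
    {
        void* allocUD;
        lua_Alloc allocF = lua_getallocf( L, &allocUD);
        if( allocF != protected_lua_Alloc) // don't wrap an already-wrapped allocator
        {
            struct ProtectedAllocator_s* s = (struct ProtectedAllocator_s*) allocF( allocUD, NULL, 0, sizeof( struct ProtectedAllocator_s));
            s->allocF = allocF;
            s->allocUD = allocUD;
            MUTEX_INIT( &s->lock);
            lua_setallocf( L, protected_lua_Alloc, s);
        }
    }
    lua_pop( L, 1);  // settings
}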