| author | Benoit Germain <bnt.germain@gmail.com> | 2012-12-17 11:29:34 +0100 |
|---|---|---|
| committer | Benoit Germain <bnt.germain@gmail.com> | 2012-12-17 11:29:34 +0100 |
| commit | f741cac68de584f2e16507b2b84fc734ffcc3bb6 (patch) | |
| tree | c4ae6002d523b741b4d0794dbb485e6d30cfe3a0 /src/lanes.c | |
| parent | 00bfa229100a38687a0b76fbc53adf06d8b04eed (diff) | |
| download | lanes-f741cac68de584f2e16507b2b84fc734ffcc3bb6.tar.gz, lanes-f741cac68de584f2e16507b2b84fc734ffcc3bb6.tar.bz2, lanes-f741cac68de584f2e16507b2b84fc734ffcc3bb6.zip | |
Fixed lane tracking feature
Diffstat (limited to 'src/lanes.c')
-rw-r--r-- | src/lanes.c | 192 |
1 file changed, 128 insertions, 64 deletions
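Reading guide for the diff below: the patch gives lane tracking its own intrusive chain (`tracking_first` / `tracking_next`, guarded by `tracking_cs`) instead of piggybacking on the selfdestruct chain, and folds the duplicated thread teardown into a new `lane_cleanup()` helper. The chain is terminated by a sentinel rather than NULL so that a single pointer can double as the enable flag: `tracking_first == NULL` means tracking is off, `TRACKING_END` (`(struct s_lane*)(-1)`) means tracking is on but the chain is empty. The standalone sketch below only illustrates that convention; it is a simplified illustration, not library code — the `MUTEX_LOCK( &tracking_cs)` / `MUTEX_UNLOCK` guards and `bool_t` plumbing of the real `tracking_add()` / `tracking_remove()` are dropped, and `main()` and the `debug_name` field exist purely for the demo.

```c
/* Standalone sketch of the sentinel-terminated tracking chain (simplified;
 * the real code wraps every chain access in MUTEX_LOCK(&tracking_cs)). */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct s_lane
{
    struct s_lane* tracking_next;            // intrusive link, NULL while unlinked
    char const* debug_name;                  // demo-only stand-in for the real field
};

#define TRACKING_END ((struct s_lane*)(-1))  // sentinel: "tracking enabled, end of chain"
static struct s_lane* tracking_first = NULL; // NULL: tracking disabled

static void tracking_add( struct s_lane* s)  // push a newly started lane at the head
{
    assert( s->tracking_next == NULL);
    s->tracking_next = tracking_first;
    tracking_first = s;
}

static int tracking_remove( struct s_lane* s) // unlink a finished lane, if still chained
{
    int found = 0;
    if( s->tracking_next != NULL)
    {
        struct s_lane** ref = &tracking_first;
        while( *ref != TRACKING_END)
        {
            if( *ref == s)
            {
                *ref = s->tracking_next;
                s->tracking_next = NULL;
                found = 1;
                break;
            }
            ref = &(*ref)->tracking_next;
        }
    }
    return found;
}

int main( void)
{
    struct s_lane a = { NULL, "lane A" }, b = { NULL, "lane B" };
    tracking_first = TRACKING_END;            // what init_once_LOCKED does when tracking is requested
    tracking_add( &a);
    tracking_add( &b);
    for( struct s_lane* s = tracking_first; s != TRACKING_END; s = s->tracking_next)
        printf( "tracked: %s\n", s->debug_name); // mirrors the traversal in LUAG_FUNC( threads)
    int ok = tracking_remove( &a);
    ok = tracking_remove( &b) && ok;
    assert( ok && tracking_first == TRACKING_END);
    (void) ok;
    return 0;
}
```

Terminating with a sentinel instead of NULL is what lets `lane_main()` and `lane_cleanup()` use a plain `if( tracking_first)` as a cheap "is tracking enabled at all" check before touching the chain.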
diff --git a/src/lanes.c b/src/lanes.c
index 161de59..b8f7eed 100644
--- a/src/lanes.c
+++ b/src/lanes.c
@@ -169,6 +169,12 @@ struct s_lane {
 // M: sets to non-NULL if facing lane handle '__gc' cycle but the lane
 // is still running
 // S: cleans up after itself if non-NULL at lane exit
+
+#if HAVE_LANE_TRACKING
+struct s_lane * volatile tracking_next;
+#endif // HAVE_LANE_TRACKING
+//
+// For tracking only
 };
 
 static bool_t cancel_test( lua_State*L );
@@ -233,6 +239,91 @@ static bool_t push_registry_table( lua_State*L, void *key, bool_t create ) {
 return TRUE; // table pushed
 }
 
+#if HAVE_LANE_TRACKING
+
+static MUTEX_T tracking_cs;
+struct s_lane* volatile tracking_first = NULL; // will change to TRACKING_END if we want to activate tracking
+
+// The chain is ended by '(struct s_lane*)(-1)', not NULL:
+// 'tracking_first -> ... -> ... -> (-1)'
+#define TRACKING_END ((struct s_lane *)(-1))
+
+/*
+* Add the lane to tracking chain; the ones still running at the end of the
+* whole process will be cancelled.
+*/
+static void tracking_add( struct s_lane *s)
+{
+
+MUTEX_LOCK( &tracking_cs);
+{
+assert( s->tracking_next == NULL);
+
+s->tracking_next = tracking_first;
+tracking_first = s;
+}
+MUTEX_UNLOCK( &tracking_cs);
+}
+
+/*
+* A free-running lane has ended; remove it from tracking chain
+*/
+static bool_t tracking_remove( struct s_lane *s )
+{
+bool_t found = FALSE;
+MUTEX_LOCK( &tracking_cs);
+{
+// Make sure (within the MUTEX) that we actually are in the chain
+// still (at process exit they will remove us from chain and then
+// cancel/kill).
+//
+if (s->tracking_next != NULL)
+{
+struct s_lane **ref= (struct s_lane **) &tracking_first;
+
+while( *ref != TRACKING_END)
+{
+if( *ref == s)
+{
+*ref = s->tracking_next;
+s->tracking_next = NULL;
+found = TRUE;
+break;
+}
+ref = (struct s_lane **) &((*ref)->tracking_next);
+}
+assert( found);
+}
+}
+MUTEX_UNLOCK( &tracking_cs);
+return found;
+}
+
+#endif // HAVE_LANE_TRACKING
+
+//---
+// low-level cleanup
+
+static void lane_cleanup( struct s_lane* s)
+{
+// Clean up after a (finished) thread
+//
+#if THREADWAIT_METHOD == THREADWAIT_CONDVAR
+SIGNAL_FREE( &s->done_signal);
+MUTEX_FREE( &s->done_lock);
+#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
+
+#if HAVE_LANE_TRACKING
+if( tracking_first)
+{
+// Lane was cleaned up, no need to handle at process termination
+tracking_remove( s);
+}
+#endif // HAVE_LANE_TRACKING
+
+free( s);
+}
+
 /*
 * ###############################################################################################
 * ############################################ Linda ############################################
@@ -843,27 +934,27 @@ static void linda_id( lua_State*L, char const * const which)
 
 lua_pushlightuserdata( L, s );
 }
-else if (strcmp( which, "delete" )==0)
+else if( strcmp( which, "delete" ) == 0)
 {
-struct s_Keeper *K;
-struct s_Linda *s= lua_touserdata(L,1);
-ASSERT_L(s);
+struct s_Keeper* K;
+struct s_Linda* l= lua_touserdata( L, 1);
+ASSERT_L( l);
 
 /* Clean associated structures in the keeper state.
 */
-K= keeper_acquire(s);
+K = keeper_acquire( l);
 if( K && K->L) // can be NULL if this happens during main state shutdown (lanes is GC'ed -> no keepers -> no need to cleanup)
 {
-keeper_call( K->L, KEEPER_API( clear), L, s, 0 );
+keeper_call( K->L, KEEPER_API( clear), L, l, 0);
 keeper_release( K);
 }
 
 /* There aren't any lanes waiting on these lindas, since all proxies
 * have been gc'ed. Right?
 */
-SIGNAL_FREE( &s->read_happened );
-SIGNAL_FREE( &s->write_happened );
-free(s);
+SIGNAL_FREE( &l->read_happened);
+SIGNAL_FREE( &l->write_happened);
+free( l);
 }
 else if (strcmp( which, "metatable" )==0)
 {
@@ -1126,12 +1217,12 @@ static MUTEX_T selfdestruct_cs;
 // The chain is ended by '(struct s_lane*)(-1)', not NULL:
 // 'selfdestruct_first -> ... -> ... -> (-1)'
 
-struct s_lane * volatile selfdestruct_first= SELFDESTRUCT_END;
+struct s_lane* volatile selfdestruct_first = SELFDESTRUCT_END;
 
 /*
 * Add the lane to selfdestruct chain; the ones still running at the end of the
 * whole process will be cancelled.
 */
 static void selfdestruct_add( struct s_lane *s ) {
 
 MUTEX_LOCK( &selfdestruct_cs );
@@ -1145,8 +1236,8 @@ static void selfdestruct_add( struct s_lane *s ) {
 }
 
 /*
-* A free-running lane has ended; remove it from selfdestruct chain
-*/
+* A free-running lane has ended; remove it from selfdestruct chain
+*/
 static bool_t selfdestruct_remove( struct s_lane *s )
 {
 bool_t found = FALSE;
@@ -1310,11 +1401,7 @@ static int selfdestruct_gc( lua_State*L)
 #endif // THREADAPI == THREADAPI_PTHREAD
 }
 // NO lua_close() in this case because we don't know where execution of the state was interrupted
-#if THREADWAIT_METHOD == THREADWAIT_CONDVAR
-SIGNAL_FREE( &s->done_signal);
-MUTEX_FREE( &s->done_lock);
-#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
-free( s);
+lane_cleanup( s);
 s = next_s;
 n++;
 }
@@ -1567,10 +1654,6 @@ LUAG_FUNC( set_debug_threadname)
 return 0;
 }
 
-#if HAVE_LANE_TRACKING
-static bool_t GTrackLanes = FALSE;
-#endif // HAVE_LANE_TRACKING
-
 //---
 static THREAD_RETURN_T THREAD_CALLCONV lane_main( void *vs)
 {
@@ -1579,11 +1662,9 @@ static THREAD_RETURN_T THREAD_CALLCONV lane_main( void *vs)
 lua_State*L= s->L;
 
 #if HAVE_LANE_TRACKING
-if( GTrackLanes)
+if( tracking_first)
 {
-// If we track lanes, we add them right now to the list so that its traversal hits all known lanes
-// (else we get only the still running lanes for which GC was called, IOW not accessible anymore from a script)
-selfdestruct_add( s);
+tracking_add( s);
 }
 #endif // HAVE_LANE_TRACKING
 
@@ -1684,11 +1765,7 @@ static THREAD_RETURN_T THREAD_CALLCONV lane_main( void *vs)
 lua_close( s->L );
 s->L = L = 0;
 
-#if THREADWAIT_METHOD == THREADWAIT_CONDVAR
-SIGNAL_FREE( &s->done_signal);
-MUTEX_FREE( &s->done_lock);
-#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
-free(s);
+lane_cleanup( s);
 
 }
 else
@@ -1955,6 +2032,9 @@ LUAG_FUNC( thread_new )
 #endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
 s->mstatus= NORMAL;
 s->selfdestruct_next= NULL;
+#if HAVE_LANE_TRACKING
+s->tracking_next = NULL;
+#endif // HAVE_LANE_TRACKING
 
 // Set metatable for the userdata
 //
@@ -2024,14 +2104,9 @@ LUAG_FUNC( thread_gc)
 }
 else if( s->status < DONE)
 {
-#if HAVE_LANE_TRACKING
-if( !GTrackLanes)
-#endif // HAVE_LANE_TRACKING
-{
-// still running: will have to be cleaned up later
-selfdestruct_add( s);
-assert( s->selfdestruct_next);
-}
+// still running: will have to be cleaned up later
+selfdestruct_add( s);
+assert( s->selfdestruct_next);
 return 0;
 
 }
@@ -2042,22 +2117,8 @@ LUAG_FUNC( thread_gc)
 s->L = 0;
 }
 
-#if HAVE_LANE_TRACKING
-if( GTrackLanes)
-{
-// Lane was cleaned up, no need to handle at process termination
-selfdestruct_remove( s);
-}
-#endif // HAVE_LANE_TRACKING
-
 // Clean up after a (finished) thread
-//
-#if THREADWAIT_METHOD == THREADWAIT_CONDVAR
-SIGNAL_FREE( &s->done_signal);
-MUTEX_FREE( &s->done_lock);
-#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
-
-free( s);
+lane_cleanup( s);
 return 0;
 }
 
@@ -2381,12 +2442,12 @@ LUAG_FUNC( threads)
 int const top = lua_gettop( L);
 // List _all_ still running threads
 //
-MUTEX_LOCK( &selfdestruct_cs);
-if( selfdestruct_first != SELFDESTRUCT_END)
+MUTEX_LOCK( &tracking_cs);
+if( tracking_first && tracking_first != TRACKING_END)
 {
-struct s_lane* s = selfdestruct_first;
+struct s_lane* s = tracking_first;
 lua_newtable( L); // {}
-while( s != SELFDESTRUCT_END)
+while( s != TRACKING_END)
 {
 if( s->debug_name)
 lua_pushstring( L, s->debug_name); // {} "name"
@@ -2394,10 +2455,10 @@ LUAG_FUNC( threads)
 lua_pushfstring( L, "Lane %p", s); // {} "name"
 push_thread_status( L, s); // {} "name" "status"
 lua_rawset( L, -3); // {}
-s = s->selfdestruct_next;
+s = s->tracking_next;
 }
 }
-MUTEX_UNLOCK( &selfdestruct_cs);
+MUTEX_UNLOCK( &tracking_cs);
 return lua_gettop( L) - top;
 }
 #endif // HAVE_LANE_TRACKING
@@ -2499,7 +2560,7 @@ static void init_once_LOCKED( lua_State* L, volatile DEEP_PRELUDE** timer_deep_r
 #endif
 
 #if HAVE_LANE_TRACKING
-GTrackLanes = _track_lanes;
+tracking_first = _track_lanes ? TRACKING_END : NULL;
 #endif // HAVE_LANE_TRACKING
 
 // Locks for 'tools.c' inc/dec counters
@@ -2513,9 +2574,12 @@ static void init_once_LOCKED( lua_State* L, volatile DEEP_PRELUDE** timer_deep_r
 
 serialize_require( L );
 
-// Selfdestruct chain handling
+// Linked chains handling
 //
 MUTEX_INIT( &selfdestruct_cs );
+#if HAVE_LANE_TRACKING
+MUTEX_INIT( &tracking_cs);
+#endif // HAVE_LANE_TRACKING
 
 //---
 // Linux needs SCHED_RR to change thread priorities, and that is only
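A note on the runtime switch, as far as these hunks show it: `tracking_first` now doubles as the enable flag that the removed `GTrackLanes` global used to provide — `init_once_LOCKED()` seeds it with `TRACKING_END` when `_track_lanes` is set (presumably surfaced to scripts as a `track_lanes` configuration option) and leaves it NULL otherwise — and `tracking_cs` is initialised alongside `selfdestruct_cs`. `LUAG_FUNC( threads)` then walks this dedicated chain to build its name/status table, so the lane listing no longer has to share the selfdestruct chain with the GC and cleanup machinery.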