author    Benoit Germain <bnt.germain@gmail.com>  2013-03-13 20:48:01 +0100
committer Benoit Germain <bnt.germain@gmail.com>  2013-03-13 20:48:01 +0100
commit    8fb8dc1edeceae9fff65463cd80da05d8995fb7f (patch)
tree      290c3e7010d0f5a4674fa29141c0978b045e92c4 /src
parent    6261eab8d81a6901201cb711283da6281db193ec (diff)
download  lanes-8fb8dc1edeceae9fff65463cd80da05d8995fb7f.tar.gz (also .tar.bz2, .zip)

version 3.5.2
* stricter validation of the with_timers config option: the validator was accepting any non-boolean value
* new configuration option protect_allocator, for VMs whose allocator is not thread-safe (such as LuaJIT)
* removed some obsolete bits of dead code
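The new option travels through lanes.configure() like the existing settings (see the src/lanes.lua hunks below). A minimal usage sketch, not part of this commit, assuming the usual require-then-configure entry point of Lanes 3.x:

    -- enable the mutex-protected allocator when the host VM's allocator
    -- is not thread-safe (e.g. LuaJIT); the option defaults to false
    local lanes = require "lanes"
    lanes.configure{ protect_allocator = true }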
Diffstat (limited to 'src')
-rw-r--r--  src/lanes.c    467
-rw-r--r--  src/lanes.lua   12
2 files changed, 252 insertions(+), 227 deletions(-)
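The core of the change is an allocator wrapper: init_once_LOCKED() reads the state's current allocator with lua_getallocf(), stores it next to a mutex, and installs a locking shim via lua_setallocf(); selfdestruct_gc() restores the original allocator at process end. A standalone sketch of the same pattern, using plain pthreads instead of the project's MUTEX_T macros (ProtectedAllocator and install_protected_allocator are illustrative names, not the patch's):

    #include <pthread.h>
    #include <lua.h>

    /* the original allocator, plus the lock that serializes calls to it */
    typedef struct
    {
        lua_Alloc allocf;     /* allocator originally installed in the state */
        void* ud;             /* its userdata */
        pthread_mutex_t lock;
    } ProtectedAllocator;

    /* lua_Alloc-compatible shim: delegate to the real allocator under the lock */
    static void* protected_alloc( void* ud, void* ptr, size_t osize, size_t nsize)
    {
        ProtectedAllocator* s = (ProtectedAllocator*) ud;
        void* p;
        pthread_mutex_lock( &s->lock);
        p = s->allocf( s->ud, ptr, osize, nsize);
        pthread_mutex_unlock( &s->lock);
        return p;
    }

    /* wrap a state's allocator; the original allocator provides the storage */
    static void install_protected_allocator( lua_State* L)
    {
        void* ud;
        lua_Alloc allocf = lua_getallocf( L, &ud);
        ProtectedAllocator* s = (ProtectedAllocator*) allocf( ud, NULL, 0, sizeof( ProtectedAllocator));
        s->allocf = allocf;
        s->ud = ud;
        pthread_mutex_init( &s->lock, NULL);
        lua_setallocf( L, protected_alloc, s);
    }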
diff --git a/src/lanes.c b/src/lanes.c
index f792293..d058cde 100644
--- a/src/lanes.c
+++ b/src/lanes.c
@@ -52,7 +52,7 @@
  * ...
  */
 
-char const* VERSION = "3.5.1";
+char const* VERSION = "3.5.2";
 
 /*
 ===============================================================================
@@ -1275,150 +1275,156 @@ static bool_t selfdestruct_remove( struct s_lane *s )
 volatile DEEP_PRELUDE* timer_deep; // = NULL
 
 /*
+** mutex-protected allocator for use with Lua states that have non-threadsafe allocators (such as LuaJIT)
+*/
+struct ProtectedAllocator_s
+{
+    lua_Alloc allocf;
+    void* ud;
+    MUTEX_T lock;
+};
+void * protected_lua_Alloc( void *ud, void *ptr, size_t osize, size_t nsize)
+{
+    void* p;
+    struct ProtectedAllocator_s* s = (struct ProtectedAllocator_s*) ud;
+    MUTEX_LOCK( &s->lock);
+    p = s->allocf( s->ud, ptr, osize, nsize);
+    MUTEX_UNLOCK( &s->lock);
+    return p;
+}
+
+/*
 * Process end; cancel any still free-running threads
 */
-static int selfdestruct_gc( lua_State*L)
+static int selfdestruct_gc( lua_State* L)
 {
-    (void)L; // unused
-    if (selfdestruct_first == SELFDESTRUCT_END) return 0; // no free-running threads
+    while( selfdestruct_first != SELFDESTRUCT_END) // true at most once!
+    {
+        // Signal _all_ still running threads to exit (including the timer thread)
+        //
+        MUTEX_LOCK( &selfdestruct_cs);
+        {
+            struct s_lane* s = selfdestruct_first;
+            while( s != SELFDESTRUCT_END)
+            {
+                // attempt a regular unforced cancel with a small timeout
+                bool_t cancelled = THREAD_ISNULL( s->thread) || thread_cancel( s, 0.0001, FALSE);
+                // if we failed, and we know the thread is waiting on a linda
+                if( cancelled == FALSE && s->status == WAITING && s->waiting_on != NULL)
+                {
+                    // signal the linda the wake up the thread so that it can react to the cancel query
+                    // let us hope we never land here with a pointer on a linda that has been destroyed...
+                    SIGNAL_T* waiting_on = s->waiting_on;
+                    //s->waiting_on = NULL; // useful, or not?
+                    SIGNAL_ALL( waiting_on);
+                }
+                s = s->selfdestruct_next;
+            }
+        }
+        MUTEX_UNLOCK( &selfdestruct_cs);
 
-    // Signal _all_ still running threads to exit (including the timer thread)
-    //
-    MUTEX_LOCK( &selfdestruct_cs );
-    {
-        struct s_lane *s= selfdestruct_first;
-        while( s != SELFDESTRUCT_END )
-        {
-            // attempt a regular unforced cancel with a small timeout
-            bool_t cancelled = THREAD_ISNULL( s->thread) || thread_cancel( s, 0.0001, FALSE);
-            // if we failed, and we know the thread is waiting on a linda
-            if( cancelled == FALSE && s->status == WAITING && s->waiting_on != NULL)
-            {
-                // signal the linda the wake up the thread so that it can react to the cancel query
-                // let us hope we never land here with a pointer on a linda that has been destroyed...
-                SIGNAL_T *waiting_on = s->waiting_on;
-                //s->waiting_on = NULL; // useful, or not?
-                SIGNAL_ALL( waiting_on);
-            }
-            s = s->selfdestruct_next;
-        }
-    }
-    MUTEX_UNLOCK( &selfdestruct_cs );
+        // When noticing their cancel, the lanes will remove themselves from
+        // the selfdestruct chain.
 
-    // When noticing their cancel, the lanes will remove themselves from
-    // the selfdestruct chain.
-
-    // TBD: Not sure if Windows (multi core) will require the timed approach,
-    //      or single Yield. I don't have machine to test that (so leaving
-    //      for timed approach). -- AKa 25-Oct-2008
-
-#if 0 // def PLATFORM_LINUX
-    // It seems enough for Linux to have a single yield here, which allows
-    // other threads (timer lane) to proceed. Without the yield, there is
-    // segfault.
-    //
-    YIELD();
-#else
-    // OS X 10.5 (Intel) needs more to avoid segfaults.
-    //
-    // "make test" is okay. 100's of "make require" are okay.
-    //
-    // Tested on MacBook Core Duo 2GHz and 10.5.5:
-    // -- AKa 25-Oct-2008
-    //
-    {
-        lua_Number const shutdown_timeout = lua_tonumber( L, lua_upvalueindex( 1));
-        double const t_until = now_secs() + shutdown_timeout;
+        // TBD: Not sure if Windows (multi core) will require the timed approach,
+        //      or single Yield. I don't have machine to test that (so leaving
+        //      for timed approach). -- AKa 25-Oct-2008
 
-        while( selfdestruct_first != SELFDESTRUCT_END )
-        {
-            YIELD(); // give threads time to act on their cancel
-            {
-                // count the number of cancelled thread that didn't have the time to act yet
-                int n = 0;
-                double t_now = 0.0;
-                MUTEX_LOCK( &selfdestruct_cs );
-                {
-                    struct s_lane *s = selfdestruct_first;
-                    while( s != SELFDESTRUCT_END)
-                    {
-                        if( s->cancel_request)
-                            ++ n;
-                        s = s->selfdestruct_next;
-                    }
-                }
-                MUTEX_UNLOCK( &selfdestruct_cs );
-                // if timeout elapsed, or we know all threads have acted, stop waiting
-                t_now = now_secs();
-                if( n == 0 || ( t_now >= t_until))
-                {
-                    DEBUGSPEW_CODE( fprintf( stderr, "%d uncancelled lane(s) remain after waiting %fs at process end.\n", n, shutdown_timeout - (t_until - t_now)));
-                    break;
-                }
-            }
-        }
-    }
-#endif
+        // OS X 10.5 (Intel) needs more to avoid segfaults.
+        //
+        // "make test" is okay. 100's of "make require" are okay.
+        //
+        // Tested on MacBook Core Duo 2GHz and 10.5.5:
+        // -- AKa 25-Oct-2008
+        //
+        {
+            lua_Number const shutdown_timeout = lua_tonumber( L, lua_upvalueindex( 1));
+            double const t_until = now_secs() + shutdown_timeout;
 
-    //---
-    // Kill the still free running threads
-    //
-    if ( selfdestruct_first != SELFDESTRUCT_END ) {
-        unsigned n=0;
-#if 0
-        MUTEX_LOCK( &selfdestruct_cs );
-        {
-            struct s_lane *s= selfdestruct_first;
-            while( s != SELFDESTRUCT_END ) {
-                n++;
-                s= s->selfdestruct_next;
-            }
-        }
-        MUTEX_UNLOCK( &selfdestruct_cs );
+            while( selfdestruct_first != SELFDESTRUCT_END)
+            {
+                YIELD(); // give threads time to act on their cancel
+                {
+                    // count the number of cancelled thread that didn't have the time to act yet
+                    int n = 0;
+                    double t_now = 0.0;
+                    MUTEX_LOCK( &selfdestruct_cs);
+                    {
+                        struct s_lane* s = selfdestruct_first;
+                        while( s != SELFDESTRUCT_END)
+                        {
+                            if( s->cancel_request)
+                                ++ n;
+                            s = s->selfdestruct_next;
+                        }
+                    }
+                    MUTEX_UNLOCK( &selfdestruct_cs);
+                    // if timeout elapsed, or we know all threads have acted, stop waiting
+                    t_now = now_secs();
+                    if( n == 0 || ( t_now >= t_until))
+                    {
+                        DEBUGSPEW_CODE( fprintf( stderr, "%d uncancelled lane(s) remain after waiting %fs at process end.\n", n, shutdown_timeout - (t_until - t_now)));
+                        break;
+                    }
+                }
+            }
+        }
 
-    // Linux (at least 64-bit): CAUSES A SEGFAULT IF THIS BLOCK IS ENABLED
-    // and works without the block (so let's leave those lanes running)
-    //
-//we want to free memory and such when we exit.
-    // 2.0.2: at least timer lane is still here
-    //
-    DEBUGSPEW_CODE( fprintf( stderr, "Left %d lane(s) with cancel request at process end.\n", n ));
-    n=0;
-#else
-    // first thing we did was to raise the linda signals the threads were waiting on (if any)
-    // therefore, any well-behaved thread should be in CANCELLED state
-    // these are not running, and the state can be closed
-    MUTEX_LOCK( &selfdestruct_cs );
-    {
-        struct s_lane *s= selfdestruct_first;
-        while( s != SELFDESTRUCT_END)
-        {
-            struct s_lane *next_s= s->selfdestruct_next;
-            s->selfdestruct_next= NULL; // detach from selfdestruct chain
-            if( !THREAD_ISNULL( s->thread)) // can be NULL if previous 'soft' termination succeeded
-            {
-                THREAD_KILL( &s->thread);
-#if THREADAPI == THREADAPI_PTHREAD
-                // pthread: make sure the thread is really stopped!
-                THREAD_WAIT( &s->thread, -1, &s->done_signal, &s->done_lock, &s->status);
-#endif // THREADAPI == THREADAPI_PTHREAD
-            }
-            // NO lua_close() in this case because we don't know where execution of the state was interrupted
-            lane_cleanup( s);
-            s = next_s;
-            n++;
-        }
-        selfdestruct_first= SELFDESTRUCT_END;
-    }
-    MUTEX_UNLOCK( &selfdestruct_cs );
-
-    DEBUGSPEW_CODE( fprintf( stderr, "Killed %d lane(s) at process end.\n", n));
-#endif
-    }
+        //---
+        // Kill the still free running threads
+        //
+        if( selfdestruct_first != SELFDESTRUCT_END)
+        {
+            unsigned int n = 0;
+            // first thing we did was to raise the linda signals the threads were waiting on (if any)
+            // therefore, any well-behaved thread should be in CANCELLED state
+            // these are not running, and the state can be closed
+            MUTEX_LOCK( &selfdestruct_cs);
+            {
+                struct s_lane* s= selfdestruct_first;
+                while( s != SELFDESTRUCT_END)
+                {
+                    struct s_lane* next_s = s->selfdestruct_next;
+                    s->selfdestruct_next = NULL; // detach from selfdestruct chain
+                    if( !THREAD_ISNULL( s->thread)) // can be NULL if previous 'soft' termination succeeded
+                    {
+                        THREAD_KILL( &s->thread);
+#if THREADAPI == THREADAPI_PTHREAD
+                        // pthread: make sure the thread is really stopped!
+                        THREAD_WAIT( &s->thread, -1, &s->done_signal, &s->done_lock, &s->status);
+#endif // THREADAPI == THREADAPI_PTHREAD
+                    }
+                    // NO lua_close() in this case because we don't know where execution of the state was interrupted
+                    lane_cleanup( s);
+                    s = next_s;
+                    ++ n;
+                }
+                selfdestruct_first = SELFDESTRUCT_END;
+            }
+            MUTEX_UNLOCK( &selfdestruct_cs);
+
+            DEBUGSPEW_CODE( fprintf( stderr, "Killed %d lane(s) at process end.\n", n));
+        }
+    }
 #if !HAVE_KEEPER_ATEXIT_DESINIT
     close_keepers();
 #endif // !HAVE_KEEPER_ATEXIT_DESINIT
-    return 0;
+
+    // remove the protected allocator, if any
+    {
+        void* ud;
+        lua_Alloc allocf = lua_getallocf( L, &ud);
+
+        if( allocf == protected_lua_Alloc)
+        {
+            struct ProtectedAllocator_s* s = (struct ProtectedAllocator_s*) ud;
+            lua_setallocf( L, s->allocf, s->ud);
+            MUTEX_FREE( &s->lock);
+            s->allocf( s->ud, s, sizeof( struct ProtectedAllocator_s), 0);
+        }
+    }
+
+    return 0;
 }
 
 
@@ -2603,110 +2609,121 @@ void register_core_libfuncs_for_keeper( lua_State* L)
 }
 
 /*
-* One-time initializations
+** One-time initializations
 */
-static void init_once_LOCKED( lua_State* L, int const _on_state_create, int const nbKeepers, lua_Number _shutdown_timeout, bool_t _track_lanes)
+static void init_once_LOCKED( lua_State* L, int const _on_state_create, int const nbKeepers, lua_Number _shutdown_timeout, bool_t _track_lanes, bool_t _protect_allocator)
 {
-    char const* err;
-
 #if (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC)
     now_secs(); // initialize 'now_secs()' internal offset
 #endif
 
 #if (defined PLATFORM_OSX) && (defined _UTILBINDTHREADTOCPU)
     chudInitialize();
 #endif
 
+    if( _protect_allocator)
+    {
+        void* ud;
+        lua_Alloc allocf = lua_getallocf( L, &ud);
+        struct ProtectedAllocator_s* s = (struct ProtectedAllocator_s*) allocf( ud, NULL, 0, sizeof( struct ProtectedAllocator_s));
+        s->allocf = allocf;
+        s->ud = ud;
+        MUTEX_INIT( &s->lock);
+        lua_setallocf( L, protected_lua_Alloc, s);
+    }
+
 #if HAVE_LANE_TRACKING
     tracking_first = _track_lanes ? TRACKING_END : NULL;
 #endif // HAVE_LANE_TRACKING
 
     // Locks for 'tools.c' inc/dec counters
     //
     MUTEX_INIT( &deep_lock );
     MUTEX_INIT( &mtid_lock );
 
     // Serialize calls to 'require' from now on, also in the primary state
     //
     MUTEX_RECURSIVE_INIT( &require_cs );
 
     serialize_require( L);
 
     // Linked chains handling
     //
     MUTEX_INIT( &selfdestruct_cs );
 #if HAVE_LANE_TRACKING
     MUTEX_INIT( &tracking_cs);
 #endif // HAVE_LANE_TRACKING
 
     //---
     // Linux needs SCHED_RR to change thread priorities, and that is only
     // allowed for sudo'ers. SCHED_OTHER (default) has no priorities.
     // SCHED_OTHER threads are always lower priority than SCHED_RR.
     //
     // ^-- those apply to 2.6 kernel. IF **wishful thinking** these
     //     constraints will change in the future, non-sudo priorities can
     //     be enabled also for Linux.
     //
 #ifdef PLATFORM_LINUX
     sudo= geteuid()==0; // we are root?
-
-    // If lower priorities (-2..-1) are wanted, we need to lift the main
-    // thread to SCHED_RR and 50 (medium) level. Otherwise, we're always below
-    // the launched threads (even -2).
-    //
-    #ifdef LINUX_SCHED_RR
-    if (sudo) {
-        struct sched_param sp= {0}; sp.sched_priority= _PRIO_0;
-        PT_CALL( pthread_setschedparam( pthread_self(), SCHED_RR, &sp) );
-    }
-    #endif
-#endif
-    err = init_keepers( L, _on_state_create, nbKeepers);
-    if (err)
-    {
-        (void) luaL_error( L, "Unable to initialize: %s", err );
-    }
 
-    // Initialize 'timer_deep'; a common Linda object shared by all states
-    //
-    ASSERT_L( timer_deep == NULL);
+    // If lower priorities (-2..-1) are wanted, we need to lift the main
+    // thread to SCHED_RR and 50 (medium) level. Otherwise, we're always below
+    // the launched threads (even -2).
+    //
+#ifdef LINUX_SCHED_RR
+    if (sudo) {
+        struct sched_param sp= {0}; sp.sched_priority= _PRIO_0;
+        PT_CALL( pthread_setschedparam( pthread_self(), SCHED_RR, &sp) );
+    }
+#endif // LINUX_SCHED_RR
+#endif // PLATFORM_LINUX
+    {
+        char const* err = init_keepers( L, _on_state_create, nbKeepers);
+        if (err)
+        {
+            (void) luaL_error( L, "Unable to initialize: %s", err );
+        }
+    }
 
-    STACK_CHECK( L);
-    {
-        // proxy_ud= deep_userdata( idfunc )
-        //
-        lua_pushliteral( L, "lanes-timer"); // push a name for debug purposes
-        luaG_deep_userdata( L, linda_id);
-        STACK_MID( L, 2);
-        lua_remove( L, -2); // remove the name as we no longer need it
+    // Initialize 'timer_deep'; a common Linda object shared by all states
+    //
+    ASSERT_L( timer_deep == NULL);
 
-        ASSERT_L( lua_isuserdata(L,-1) );
-
-        // Proxy userdata contents is only a 'DEEP_PRELUDE*' pointer
-        //
-        timer_deep = * (DEEP_PRELUDE**) lua_touserdata( L, -1);
-        ASSERT_L( timer_deep && (timer_deep->refcount == 1) && timer_deep->deep);
+    STACK_CHECK( L);
+    {
+        // proxy_ud= deep_userdata( idfunc )
+        //
+        lua_pushliteral( L, "lanes-timer"); // push a name for debug purposes
+        luaG_deep_userdata( L, linda_id);
+        STACK_MID( L, 2);
+        lua_remove( L, -2); // remove the name as we no longer need it
 
-    // The host Lua state must always have a reference to this Linda object in order for our 'timer_deep_ref' to be valid.
-    // So store a reference that we will never actually use.
-    // at the same time, use this object as a 'desinit' marker:
-    // when the main lua State is closed, this object will be GC'ed
-    {
-        lua_newuserdata( L, 1);
-        lua_newtable( L);
-        lua_pushnumber( L, _shutdown_timeout);
-        lua_pushcclosure( L, selfdestruct_gc, 1);
-        lua_setfield( L, -2, "__gc");
-        lua_pushliteral( L, "AtExit");
-        lua_setfield( L, -2, "__metatable");
-        lua_setmetatable( L, -2);
-    }
-    lua_insert( L, -2); // Swap key with the Linda object
-    lua_rawset( L, LUA_REGISTRYINDEX);
+        ASSERT_L( lua_isuserdata(L,-1) );
 
-    }
-    STACK_END( L, 0);
+        // Proxy userdata contents is only a 'DEEP_PRELUDE*' pointer
+        //
+        timer_deep = * (DEEP_PRELUDE**) lua_touserdata( L, -1);
+        ASSERT_L( timer_deep && (timer_deep->refcount == 1) && timer_deep->deep);
+
+        // The host Lua state must always have a reference to this Linda object in order for the timer_deep pointer to be valid.
+        // So store a reference that we will never actually use.
+        // at the same time, use this object as a 'desinit' marker:
+        // when the main lua State is closed, this object will be GC'ed
+        {
+            lua_newuserdata( L, 1);
+            lua_newtable( L);
+            lua_pushnumber( L, _shutdown_timeout);
+            lua_pushcclosure( L, selfdestruct_gc, 1);
+            lua_setfield( L, -2, "__gc");
+            lua_pushliteral( L, "AtExit");
+            lua_setfield( L, -2, "__metatable");
+            lua_setmetatable( L, -2);
+        }
+        lua_insert( L, -2); // Swap key with the Linda object
+        lua_rawset( L, LUA_REGISTRYINDEX);
+
+    }
+    STACK_END( L, 0);
 }
 
 static volatile long s_initCount = 0;
@@ -2721,6 +2738,7 @@ LUAG_FUNC( configure)
     int const on_state_create = lua_isfunction( L, 2) ? 2 : 0;
     lua_Number shutdown_timeout = lua_tonumber( L, 3);
     bool_t track_lanes = lua_toboolean( L, 4);
+    bool_t protect_allocator = lua_toboolean( L, 5);
 
     DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: lanes.configure() BEGIN\n" INDENT_END, L));
     DEBUGSPEW_CODE( ++ debugspew_indent_depth);
@@ -2786,21 +2804,20 @@ LUAG_FUNC( configure)
 
     STACK_MID( L, 1);
     /*
-    * Making one-time initializations.
-    *
-    * When the host application is single-threaded (and all threading happens via Lanes)
-    * there is no problem. But if the host is multithreaded, we need to lock around the
-    * initializations.
-    *
-    * we must do this after the populate_func_lookup_table is called, else populating the keepers will fail
-    * because this makes a copy of packages.loaders, which requires the lookup tables to exist!
+    ** Making one-time initializations.
+    **
+    ** When the host application is single-threaded (and all threading happens via Lanes)
+    ** there is no problem. But if the host is multithreaded, we need to lock around the
+    ** initializations.
+    ** we must do this after the populate_func_lookup_table is called, else populating the keepers will fail
+    ** because this makes a copy of packages.loaders, which requires the lookup tables to exist!
     */
 #if THREADAPI == THREADAPI_WINDOWS
     {
         static volatile int /*bool*/ go_ahead; // = 0
         if( InterlockedCompareExchange( &s_initCount, 1, 0) == 0)
         {
-            init_once_LOCKED( L, on_state_create, nbKeepers, shutdown_timeout, track_lanes);
+            init_once_LOCKED( L, on_state_create, nbKeepers, shutdown_timeout, track_lanes, protect_allocator);
             go_ahead = 1; // let others pass
         }
         else
@@ -2818,16 +2835,16 @@ LUAG_FUNC( configure)
         //
         if( s_initCount == 0)
         {
-            init_once_LOCKED( L, on_state_create, nbKeepers, shutdown_timeout, track_lanes);
+            init_once_LOCKED( L, on_state_create, nbKeepers, shutdown_timeout, track_lanes, protect_allocator);
             s_initCount = 1;
         }
     }
     pthread_mutex_unlock( &my_lock);
     }
 #endif // THREADAPI == THREADAPI_PTHREAD
-    assert( timer_deep != NULL);
     STACK_MID( L, 1);
 
+    assert( timer_deep != NULL);
     // init_once_LOCKED initializes timer_deep, so we must do this after, of course
     luaG_push_proxy( L, linda_id, (DEEP_PRELUDE*) timer_deep); // ... M timer_deep
     lua_setfield( L, -2, "timer_gateway"); // ... M
diff --git a/src/lanes.lua b/src/lanes.lua
index c1c641a..c02281d 100644
--- a/src/lanes.lua
+++ b/src/lanes.lua
@@ -62,7 +62,7 @@ lanes.configure = function( _params)
     local tostring = assert( tostring)
     local error = assert( error)
 
-    local default_params = { nb_keepers = 1, on_state_create = nil, shutdown_timeout = 0.25, with_timers = true, track_lanes = nil}
+    local default_params = { nb_keepers = 1, on_state_create = nil, shutdown_timeout = 0.25, with_timers = true, track_lanes = nil, protect_allocator = false}
     local param_checkers =
     {
         nb_keepers = function( _val)
@@ -77,6 +77,14 @@ lanes.configure = function( _params)
                 return true -- _val is either false or nil
             end
         end,
+        protect_allocator = function( _val)
+            -- protect_allocator may be nil or boolean
+            if _val then
+                return type( _val) == "boolean"
+            else
+                return true -- _val is either false or nil
+            end
+        end,
         on_state_create = function( _val)
             -- on_state_create may be nil or a function
             return _val and type( _val) == "function" or true
@@ -121,7 +129,7 @@ lanes.configure = function( _params)
     assert( type( core)=="table")
 
     -- configure() is available only the first time lanes.core is required process-wide, and we *must* call it to have the other functions in the interface
-    if core.configure then core.configure( _params.nb_keepers, _params.on_state_create, _params.shutdown_timeout, _params.track_lanes) end
+    if core.configure then core.configure( _params.nb_keepers, _params.on_state_create, _params.shutdown_timeout, _params.track_lanes, _params.protect_allocator) end
 
     local thread_new = assert( core.thread_new)
 