path: root/src/lanes.c
author     Peter Drahoš <drahosp@gmail.com>   2010-10-01 03:22:32 +0200
committer  Peter Drahoš <drahosp@gmail.com>   2010-10-01 03:22:32 +0200
commit     89d9c98af1ac352ba4d49d660e61b0853d6e1a86 (patch)
tree       15c56d2ce66b4ab147171c0f674cdb4a435ff13f /src/lanes.c
Import to git
Diffstat (limited to 'src/lanes.c')
-rw-r--r--  src/lanes.c  1849
1 file changed, 1849 insertions(+), 0 deletions(-)
diff --git a/src/lanes.c b/src/lanes.c
new file mode 100644
index 0000000..9b36e4d
--- /dev/null
+++ b/src/lanes.c
@@ -0,0 +1,1849 @@
/*
 * LANES.C                              Copyright (c) 2007-08, Asko Kauppi
 *
 * Multithreading in Lua.
 *
 * History:
 *    20-Oct-08 (2.0.2): Added closing of free-running threads, but it does
 *                       not seem to eliminate the occasional segfaults at
 *                       process exit.
 *        ...
 *    24-Jun-08 .. 14-Aug-08 AKa: Major revise, Lanes 2008 version (2.0 rc1)
 *        ...
 *    18-Sep-06 AKa: Started the module.
 *
 * Platforms (tested internally):
 *      OS X (10.5.4 PowerPC/Intel)
 *      Linux x86 (Ubuntu 8.04)
 *      Win32 (Windows XP Home SP2, Visual C++ 2005/2008 Express)
 *      PocketPC (TBD)
 *
 * Platforms (tested externally):
 *      Win32 (MSYS) by Ross Berteig.
 *
 * Platforms (testers appreciated):
 *      Win64 - should work???
 *      Linux x64 - should work
 *      FreeBSD - should work
 *      QNX - porting shouldn't be hard
 *      Sun Solaris - porting shouldn't be hard
 *
 * References:
 *      "Porting multithreaded applications from Win32 to Mac OS X":
 *      <http://developer.apple.com/macosx/multithreadedprogramming.html>
 *
 *      Pthreads:
 *      <http://vergil.chemistry.gatech.edu/resources/programming/threads.html>
 *
 *      MSDN: <http://msdn2.microsoft.com/en-us/library/ms686679.aspx>
 *
 *      <http://ridiculousfish.com/blog/archives/2007/02/17/barrier>
 *
 * Defines:
 *      -DLINUX_SCHED_RR: all threads are lifted to SCHED_RR category, to
 *          allow negative priorities (-2,-1) to be used. Even without this,
 *          using priorities will require 'sudo' privileges on Linux.
 *
 *      -DUSE_PTHREAD_TIMEDJOIN: use 'pthread_timedjoin_np()' for waiting
 *          for threads with a timeout. This changes the thread cleanup
 *          mechanism slightly (cleans up at the join, not once the thread
 *          has finished). May or may not be a good idea to use it.
 *          Available only in selected operating systems (Linux).
 *
 * Bugs:
 *
 * To-do:
 *
 *      ...
 */

const char *VERSION= "2.0.3";

/*
===============================================================================

Copyright (C) 2007-08 Asko Kauppi <akauppi@gmail.com>

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

===============================================================================
*/
#include <string.h>
#include <stdio.h>
#include <ctype.h>
#include <stdlib.h>
#include <time.h>       /* struct tm, mktime(): used by 'wakeup_conv' */

#include "lua.h"
#include "lauxlib.h"

#include "threading.h"
#include "tools.h"

#if !((defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC))
# include <sys/time.h>
#endif

/* geteuid() */
#ifdef PLATFORM_LINUX
# include <unistd.h>
# include <sys/types.h>
#endif

/* The selected number is not optimal; needs to be tested. Even using just
* one keeper state may be good enough (depends on the number of Lindas used
* in the application).
*/
#define KEEPER_STATES_N 1   // 6

/* Do you want full call stacks, or just the line where the error happened?
*
* TBD: The full stack feature does not seem to work (try 'make error').
*/
#define ERROR_FULL_STACK

#ifdef ERROR_FULL_STACK
# define STACK_TRACE_KEY ((void*)lane_error)    // used as registry key
#endif

/*
* Lua code for the keeper states (baked in)
*/
static char keeper_chunk[]=
#include "keeper.lch"

struct s_lane;
static bool_t cancel_test( lua_State *L );
static void cancel_error( lua_State *L );

#define CANCEL_TEST_KEY ((void*)cancel_test)    // used as registry key
#define CANCEL_ERROR ((void*)cancel_error)      // 'cancel_error' sentinel

/*
* registry[FINALIZER_REG_KEY] is either nil (no finalizers) or a table
* of functions that Lanes will call after the executing 'pcall' has ended.
*
* We're NOT using the GC system for finalizers, mainly because providing the
* error (and maybe stack trace) parameters to the finalizer functions would
* in any case complicate that approach.
*/
#define FINALIZER_REG_KEY ((void*)LG_set_finalizer)

struct s_Linda;

#if 1
# define DEBUG_SIGNAL( msg, signal_ref ) /* */
#else
# define DEBUG_SIGNAL( msg, signal_ref ) \
    { int i; unsigned char *ptr; char buf[999]; \
      sprintf( buf, ">>> " msg ": %p\t", (signal_ref) ); \
      ptr= (unsigned char *)signal_ref; \
      for( i=0; i<sizeof(*signal_ref); i++ ) { \
        sprintf( strchr(buf,'\0'), "%02x %c ", ptr[i], ptr[i] ); \
      } \
      fprintf( stderr, "%s\n", buf ); \
    }
#endif

static bool_t thread_cancel( struct s_lane *s, double secs, bool_t force );


/*
* Push a table stored in registry onto Lua stack.
*
* If there is no existing table, create one if 'create' is TRUE.
*
* Returns: TRUE if a table was pushed
*          FALSE if no table found, not created, and nothing pushed
*/
static bool_t push_registry_table( lua_State *L, void *key, bool_t create ) {

    STACK_GROW(L,3);

    lua_pushlightuserdata( L, key );
    lua_gettable( L, LUA_REGISTRYINDEX );

    if (lua_isnil(L,-1)) {
        lua_pop(L,1);

        if (!create) return FALSE;  // nothing pushed

        lua_newtable(L);
        lua_pushlightuserdata( L, key );
        lua_pushvalue(L,-2);    // duplicate of the table
        lua_settable( L, LUA_REGISTRYINDEX );

        // [-1]: table that's also bound in registry
    }
    return TRUE;    // table pushed
}


/*---=== Serialize require ===---
*/

static MUTEX_T require_cs;

//---
// [val]= new_require( ... )
//
// Call 'old_require' but only one lane at a time.
//
// Upvalues: [1]: original 'require' function
//
static int new_require( lua_State *L ) {
    int rc;
    int args= lua_gettop(L);

    STACK_GROW(L,1);
    STACK_CHECK(L)

    // Using 'lua_pcall()' to catch errors; otherwise a failing 'require' would
    // leave us locked, blocking any future 'require' calls from other lanes.
    //
    MUTEX_LOCK( &require_cs );
    {
        lua_pushvalue( L, lua_upvalueindex(1) );
        lua_insert( L, 1 );

        rc= lua_pcall( L, args, 1 /*retvals*/, 0 /*errfunc*/ );
        //
        // LUA_ERRRUN / LUA_ERRMEM
    }
    MUTEX_UNLOCK( &require_cs );

    if (rc) lua_error(L);   // error message already at [-1]

    STACK_END(L,0)
    return 1;
}

/*
* Serialize calls to 'require', if it exists
*/
static
void serialize_require( lua_State *L ) {

    STACK_GROW(L,1);
    STACK_CHECK(L)

    // Check that 'require' is there; if not, do nothing
    //
    lua_getglobal( L, "require" );
    if (lua_isfunction( L, -1 )) {
        // [-1]: original 'require' function

        lua_pushcclosure( L, new_require, 1 /*upvalues*/ );
        lua_setglobal( L, "require" );

    } else {
        // [-1]: nil
        lua_pop(L,1);
    }

    STACK_END(L,0)
}
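
/*
* A sketch of the effect (hypothetical lane-side Lua; 'module_x' is a
* placeholder name): two lanes evaluating
*
*       local m= require "module_x"
*
* at the same time will run the original 'require' one at a time, never
* concurrently. Errors from the original 'require' still propagate to the
* caller; the mutex is released first (see the 'lua_pcall' use above).
*/
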
/*---=== Keeper states ===---
*/

/*
* Pool of keeper states
*
* Access to keeper states is locked (only one OS thread at a time) so the
* bigger the pool, the smaller the chance of unnecessary waits. Lindas map
* to the keepers randomly, by a hash.
*/
struct s_Keeper {
    MUTEX_T lock_;
    lua_State *L;
} keeper[ KEEPER_STATES_N ];

/* We could use an empty table in 'keeper.lua' as the sentinel, but maybe
* checking for a lightuserdata is faster.
*/
static bool_t nil_sentinel;

/*
* Initialize keeper states
*
* If there is a problem, return an error message (NULL for okay).
*
* Note: Any problems would be design flaws; the created Lua state is left
*       unclosed, because it does not really matter. In production code, this
*       function never fails.
*/
static const char *init_keepers(void) {
    unsigned int i;
    for( i=0; i<KEEPER_STATES_N; i++ ) {

        // Initialize Keeper states with bare minimum of libs (those required
        // by 'keeper.lua')
        //
        lua_State *L= luaL_newstate();
        if (!L) return "out of memory";

        luaG_openlibs( L, "io,table" );     // 'io' for debugging messages

        lua_pushlightuserdata( L, &nil_sentinel );
        lua_setglobal( L, "nil_sentinel" );

        // Read in the preloaded chunk (and run it)
        //
        if (luaL_loadbuffer( L, keeper_chunk, sizeof(keeper_chunk), "=lanes_keeper" ))
            return "luaL_loadbuffer() failed";  // LUA_ERRMEM

        if (lua_pcall( L, 0 /*args*/, 0 /*results*/, 0 /*errfunc*/ )) {
            // LUA_ERRRUN / LUA_ERRMEM / LUA_ERRERR
            //
            const char *err= lua_tostring(L,-1);
            assert(err);
            return err;
        }

        MUTEX_INIT( &keeper[i].lock_ );
        keeper[i].L= L;
    }
    return NULL;    // ok
}

static
struct s_Keeper *keeper_acquire( const void *ptr ) {
    /*
     * Any hashing will do that maps pointers to 0..KEEPER_STATES_N-1
     * consistently.
     *
     * Pointers are often aligned by 8 or so - ignore the low order bits
     */
    unsigned int i= ((unsigned long)(ptr) >> 3) % KEEPER_STATES_N;
    struct s_Keeper *K= &keeper[i];

    MUTEX_LOCK( &K->lock_ );
    return K;
}

static
void keeper_release( struct s_Keeper *K ) {
    MUTEX_UNLOCK( &K->lock_ );
}

/*
* Call a function ('func_name') in the keeper state, and pass on the returned
* values to 'L'.
*
* 'linda':          deep Linda pointer (used only as a unique table key, first parameter)
* 'starting_index': first of the rest of parameters (none if 0)
*
* Returns: number of return values (pushed to 'L')
*/
static
int keeper_call( lua_State* K, const char *func_name,
                 lua_State *L, struct s_Linda *linda, uint_t starting_index ) {

    int args= starting_index ? (lua_gettop(L) - starting_index +1) : 0;
    int Ktos= lua_gettop(K);
    int retvals;

    lua_getglobal( K, func_name );
    ASSERT_L( lua_isfunction(K,-1) );

    STACK_GROW( K, 1 );
    lua_pushlightuserdata( K, linda );

    luaG_inter_copy( L,K, args );   // L->K
    lua_call( K, 1+args, LUA_MULTRET );

    retvals= lua_gettop(K) - Ktos;

    luaG_inter_move( K,L, retvals );    // K->L
    return retvals;
}
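
/*
* The calling pattern used by the Linda operations below is (sketch):
*
*       struct s_Keeper *K= keeper_acquire( linda );    // locks one keeper
*       int pushed= keeper_call( K->L, "send", L, linda, key_i );
*       keeper_release( K );                            // unlocks it
*
* i.e. the keeper lock is held for the whole cross-state call.
*/
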
/*---=== Linda ===---
*/

/*
* Actual data is kept within a keeper state, which is hashed by the 's_Linda'
* pointer (which is the same for all userdatas pointing to it).
*/
struct s_Linda {
    SIGNAL_T read_happened;
    SIGNAL_T write_happened;
};

static int LG_linda_id( lua_State* );

#define lua_toLinda(L,n) ((struct s_Linda *)luaG_todeep( L, LG_linda_id, n ))


/*
* bool= linda_send( linda_ud, [timeout_secs=-1,] key_num|str|bool|lightuserdata, ... )
*
* Send one or more values to a Linda. If there is a limit, all values must fit.
*
* Returns:  'true' if the value was queued
*           'false' for timeout (only happens when the queue size is limited)
*/
LUAG_FUNC( linda_send ) {
    struct s_Linda *linda= lua_toLinda( L, 1 );
    bool_t ret;
    bool_t cancel= FALSE;
    struct s_Keeper *K;
    time_d timeout= -1.0;
    uint_t key_i= 2;    // index of first key, if timeout not there

    if (lua_isnumber(L,2)) {
        timeout= SIGNAL_TIMEOUT_PREPARE( lua_tonumber(L,2) );
        key_i++;
    } else if (lua_isnil(L,2))
        key_i++;

    if (lua_isnil(L,key_i))
        luaL_error( L, "nil key" );

    STACK_GROW(L,1);

    K= keeper_acquire( linda );
    {
        lua_State *KL= K->L;    // need to do this for 'STACK_CHECK'
STACK_CHECK(KL)
        while(TRUE) {
            int pushed;

STACK_MID(KL,0)
            pushed= keeper_call( K->L, "send", L, linda, key_i );
            ASSERT_L( pushed==1 );

            ret= lua_toboolean(L,-1);
            lua_pop(L,1);

            if (ret) {
                // Wake up ALL waiting threads
                //
                SIGNAL_ALL( &linda->write_happened );
                break;

            } else if (timeout==0.0) {
                break;  /* no wait; instant timeout */

            } else {
                /* limit faced; push until timeout */

                cancel= cancel_test( L );   // testing here causes no delays
                if (cancel) break;

                // K lock will be released for the duration of wait and re-acquired
                //
                if (!SIGNAL_WAIT( &linda->read_happened, &K->lock_, timeout ))
                    break;  // timeout
            }
        }
STACK_END(KL,0)
    }
    keeper_release(K);

    if (cancel)
        cancel_error(L);

    lua_pushboolean( L, ret );
    return 1;
}
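
/*
* Lua-side usage sketch (via the proxy metatable set up in 'linda_id';
* key and value names are placeholders):
*
*       linda:send( "key", v )                  -- waits forever if a limit is hit
*       local ok= linda:send( 2.0, "key", v )   -- 'false' on 2 s timeout
*/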

/*
* [val, key]= linda_receive( linda_ud, [timeout_secs_num=-1], key_num|str|bool|lightuserdata [, ...] )
*
* Receive a value from Linda, consuming it.
*
* Returns:  value received (which is consumed from the slot)
*           key which had it
*/
LUAG_FUNC( linda_receive ) {
    struct s_Linda *linda= lua_toLinda( L, 1 );
    int pushed;
    bool_t cancel= FALSE;
    struct s_Keeper *K;
    time_d timeout= -1.0;
    uint_t key_i= 2;

    if (lua_isnumber(L,2)) {
        timeout= SIGNAL_TIMEOUT_PREPARE( lua_tonumber(L,2) );
        key_i++;
    } else if (lua_isnil(L,2))
        key_i++;

    K= keeper_acquire( linda );
    {
        while(TRUE) {
            pushed= keeper_call( K->L, "receive", L, linda, key_i );
            if (pushed) {
                ASSERT_L( pushed==2 );

                // To be done from within the 'K' locking area
                //
                SIGNAL_ALL( &linda->read_happened );
                break;

            } else if (timeout==0.0) {
                break;  /* instant timeout */

            } else {    /* nothing received; wait until timeout */

                cancel= cancel_test( L );   // testing here causes no delays
                if (cancel) break;

                // Release the K lock for the duration of wait, and re-acquire
                //
                if (!SIGNAL_WAIT( &linda->write_happened, &K->lock_, timeout ))
                    break;
            }
        }
    }
    keeper_release(K);

    if (cancel)
        cancel_error(L);

    return pushed;
}
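
/*
* Lua-side usage sketch (names are placeholders):
*
*       local val, key= linda:receive( "key" )          -- waits forever
*       local val, key= linda:receive( 0.5, "a", "b" )  -- first value found in
*                                                       -- either slot; 0.5 s timeout
*/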

/*
* = linda_set( linda_ud, key_num|str|bool|lightuserdata [,value] )
*
* Set a value in the Linda.
*
* Existing slot value is replaced, and possible queue entries removed.
*/
LUAG_FUNC( linda_set ) {
    struct s_Linda *linda= lua_toLinda( L, 1 );
    bool_t has_value= !lua_isnil(L,3);

    struct s_Keeper *K= keeper_acquire( linda );
    {
        int pushed= keeper_call( K->L, "set", L, linda, 2 );
        ASSERT_L( pushed==0 );

        /* Set the signal from within 'K' locking.
         */
        if (has_value) {
            SIGNAL_ALL( &linda->write_happened );
        }
    }
    keeper_release(K);

    return 0;
}


/*
* [val]= linda_get( linda_ud, key_num|str|bool|lightuserdata )
*
* Get a value from the Linda.
*/
LUAG_FUNC( linda_get ) {
    struct s_Linda *linda= lua_toLinda( L, 1 );
    int pushed;

    struct s_Keeper *K= keeper_acquire( linda );
    {
        pushed= keeper_call( K->L, "get", L, linda, 2 );
        ASSERT_L( pushed==0 || pushed==1 );
    }
    keeper_release(K);

    return pushed;
}


/*
* = linda_limit( linda_ud, key_num|str|bool|lightuserdata, uint [, ...] )
*
* Set limits to 1 or more Linda keys.
*/
LUAG_FUNC( linda_limit ) {
    struct s_Linda *linda= lua_toLinda( L, 1 );

    struct s_Keeper *K= keeper_acquire( linda );
    {
        int pushed= keeper_call( K->L, "limit", L, linda, 2 );
        ASSERT_L( pushed==0 );
    }
    keeper_release(K);

    return 0;
}
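
/*
* Lua-side usage sketch of the non-queued accessors and limits (names and
* values are placeholders):
*
*       linda:set( "config", v )        -- replace whatever was there
*       local v= linda:get( "config" )  -- read without consuming
*       linda:limit( "jobs", 4 )        -- senders block once 4 values queue up
*/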

/*
* lightuserdata= linda_deep( linda_ud )
*
* Return the 'deep' userdata pointer, identifying the Linda.
*
* This is needed for using Lindas as key indices (the timer system needs it);
* separately created proxies of the same underlying deep object will have
* different userdata and won't be known to be essentially the same deep one
* without this.
*/
LUAG_FUNC( linda_deep ) {
    struct s_Linda *linda= lua_toLinda( L, 1 );
    lua_pushlightuserdata( L, linda );  // just the address
    return 1;
}


/*
* Identity function of a shared userdata object.
*
*   lightuserdata= linda_id( "new" [, ...] )
*               =  linda_id( "delete", lightuserdata )
*
* Creation and cleanup of actual 'deep' objects. 'luaG_...' will wrap them
* into regular userdata proxies, in each state using the deep data.
*
*   tbl= linda_id( "metatable" )
*
* Returns a metatable for the proxy objects ('__gc' method not needed; will
* be added by 'luaG_...')
*
*   = linda_id( str, ... )
*
* For any other strings, the ID function must not react at all. This allows
* future extensions of the system.
*/
LUAG_FUNC( linda_id ) {
    const char *which= lua_tostring(L,1);

    if (strcmp( which, "new" )==0) {
        struct s_Linda *s;

        /* We don't use any parameters, but one could (they're at [2..TOS])
         */
        ASSERT_L( lua_gettop(L)==1 );

        /* The deep data is allocated separately from the Lua stack; we might
         * no longer be around when the last reference to it is released.
         * One can use any memory allocation scheme.
         */
        s= (struct s_Linda *) malloc( sizeof(struct s_Linda) );
        ASSERT_L(s);

        SIGNAL_INIT( &s->read_happened );
        SIGNAL_INIT( &s->write_happened );

        lua_pushlightuserdata( L, s );
        return 1;

    } else if (strcmp( which, "delete" )==0) {
        struct s_Keeper *K;
        struct s_Linda *s= lua_touserdata(L,2);
        ASSERT_L(s);

        /* Clean associated structures in the keeper state.
         */
        K= keeper_acquire(s);
        {
            keeper_call( K->L, "clear", L, s, 0 );
        }
        keeper_release(K);

        /* There aren't any lanes waiting on these lindas, since all proxies
         * have been gc'ed. Right?
         */
        SIGNAL_FREE( &s->read_happened );
        SIGNAL_FREE( &s->write_happened );
        free(s);

        return 0;

    } else if (strcmp( which, "metatable" )==0) {

        STACK_CHECK(L)
        lua_newtable(L);
        lua_newtable(L);
        //
        // [-2]: linda metatable
        // [-1]: metatable's to-be .__index table

        lua_pushcfunction( L, LG_linda_send );
        lua_setfield( L, -2, "send" );

        lua_pushcfunction( L, LG_linda_receive );
        lua_setfield( L, -2, "receive" );

        lua_pushcfunction( L, LG_linda_limit );
        lua_setfield( L, -2, "limit" );

        lua_pushcfunction( L, LG_linda_set );
        lua_setfield( L, -2, "set" );

        lua_pushcfunction( L, LG_linda_get );
        lua_setfield( L, -2, "get" );

        lua_pushcfunction( L, LG_linda_deep );
        lua_setfield( L, -2, "deep" );

        lua_setfield( L, -2, "__index" );
        STACK_END(L,1)

        return 1;
    }

    return 0;   // unknown request, be quiet
}


/*---=== Finalizer ===---
*/

//---
// void= finalizer( finalizer_func )
//
// finalizer_func( [err, stack_tbl] )
//
// Add a function that will be called when exiting the lane, either via
// normal return or an error.
//
LUAG_FUNC( set_finalizer )
{
    STACK_GROW(L,3);

    // Get the current finalizer table (if any)
    //
    push_registry_table( L, FINALIZER_REG_KEY, TRUE /*do create if none*/ );

    lua_pushinteger( L, lua_objlen(L,-1)+1 );
    lua_pushvalue( L, 1 );  // copy of the function
    lua_settable( L, -3 );

    lua_pop(L,1);
    return 0;
}
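
/*
* Lane-side usage sketch; the finalizer runs after the lane body, whether it
* returned normally or raised an error:
*
*       set_finalizer( function( err, stack_tbl )
*           if err then io.stderr:write( "lane ended in error\n" ) end
*       end )
*/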

//---
// Run finalizers - if any - with the given parameters
//
// If 'rc' is nonzero, the error message and stack trace are available as:
//      [-1]: stack trace (table)
//      [-2]: error message (any type)
//
// Returns:
//      0 if finalizers were run without error (or there were none)
//      LUA_ERRxxx return code if any of the finalizers failed
//
// TBD: should we add stack trace on failing finalizer, wouldn't be hard..
//
static int run_finalizers( lua_State *L, int lua_rc )
{
    unsigned error_index, tbl_index;
    unsigned n;
    int rc= 0;

    if (!push_registry_table(L, FINALIZER_REG_KEY, FALSE /*don't create one*/))
        return 0;   // no finalizers

    tbl_index= lua_gettop(L);
    error_index= (lua_rc!=0) ? tbl_index-1 : 0;     // absolute indices

    STACK_GROW(L,4);

    // [-1]: { func [, ...] }
    //
    for( n= lua_objlen(L,-1); n>0; n-- ) {
        unsigned args= 0;
        lua_pushinteger( L,n );
        lua_gettable( L, -2 );

        // [-1]: function
        // [-2]: finalizers table

        if (error_index) {
            lua_pushvalue( L, error_index );
            lua_pushvalue( L, error_index+1 );  // stack trace
            args= 2;
        }

        rc= lua_pcall( L, args, 0 /*retvals*/, 0 /*no errfunc*/ );
        //
        // LUA_ERRRUN / LUA_ERRMEM

        if (rc!=0) {
            // [-1]: error message
            //
            // If one finalizer fails, don't run the others. Return this
            // as the 'real' error, replacing what we may (or may not) have
            // had from the actual code.
            //
            break;
        }
    }

    lua_remove(L,tbl_index);    // take finalizer table out of stack

    return rc;
}

/*---=== Threads ===---
*/

// NOTE: values to be changed by either thread, during execution, without
//       locking, are marked "volatile"
//
struct s_lane {
    THREAD_T thread;
    //
    // M: sub-thread OS thread
    // S: not used

    lua_State *L;
    //
    // M: prepares the state, and reads results
    // S: while S is running, M must keep out of modifying the state

    volatile enum e_status status;
    //
    // M: sets to PENDING (before launching)
    // S: updates -> RUNNING/WAITING -> DONE/ERROR_ST/CANCELLED

    volatile bool_t cancel_request;
    //
    // M: sets to FALSE, flags TRUE for cancel request
    // S: reads to see if cancel is requested

#if !( (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC) || (defined PTHREAD_TIMEDJOIN) )
    SIGNAL_T done_signal_;
    //
    // M: Waited upon at lane ending (if Posix with no PTHREAD_TIMEDJOIN)
    // S: sets the signal once cancellation is noticed (avoids a kill)

    MUTEX_T done_lock_;
    //
    // Lock required by 'done_signal' condition variable, protecting
    // lane status changes to DONE/ERROR_ST/CANCELLED.
#endif

    volatile enum {
        NORMAL,     // normal master side state
        KILLED      // issued an OS kill
    } mstatus;
    //
    // M: sets to NORMAL, if issued a kill changes to KILLED
    // S: not used

    struct s_lane * volatile selfdestruct_next;
    //
    // M: sets to non-NULL if facing lane handle '__gc' cycle but the lane
    //    is still running
    // S: cleans up after itself if non-NULL at lane exit
};

static MUTEX_T selfdestruct_cs;
    //
    // Protects modifying the selfdestruct chain

#define SELFDESTRUCT_END ((struct s_lane *)(-1))
    //
    // The chain is ended by '(struct s_lane*)(-1)', not NULL:
    //      'selfdestruct_first -> ... -> ... -> (-1)'

struct s_lane * volatile selfdestruct_first= SELFDESTRUCT_END;

/*
* Add the lane to selfdestruct chain; the ones still running at the end of the
* whole process will be cancelled.
*/
static void selfdestruct_add( struct s_lane *s ) {

    MUTEX_LOCK( &selfdestruct_cs );
    {
        assert( s->selfdestruct_next == NULL );

        s->selfdestruct_next= selfdestruct_first;
        selfdestruct_first= s;
    }
    MUTEX_UNLOCK( &selfdestruct_cs );
}

/*
* A free-running lane has ended; remove it from selfdestruct chain
*/
static void selfdestruct_remove( struct s_lane *s ) {

    MUTEX_LOCK( &selfdestruct_cs );
    {
        // Make sure (within the MUTEX) that we actually are in the chain
        // still (at process exit they will remove us from chain and then
        // cancel/kill).
        //
        if (s->selfdestruct_next != NULL) {
            struct s_lane **ref= (struct s_lane **) &selfdestruct_first;
            bool_t found= FALSE;

            while( *ref != SELFDESTRUCT_END ) {
                if (*ref == s) {
                    *ref= s->selfdestruct_next;
                    s->selfdestruct_next= NULL;
                    found= TRUE;
                    break;
                }
                ref= (struct s_lane **) &((*ref)->selfdestruct_next);
            }
            assert( found );
        }
    }
    MUTEX_UNLOCK( &selfdestruct_cs );
}

/*
* Process end; cancel any still free-running threads
*/
static void selfdestruct_atexit( void ) {

    if (selfdestruct_first == SELFDESTRUCT_END) return;     // no free-running threads

    // Signal _all_ still running threads to exit
    //
    MUTEX_LOCK( &selfdestruct_cs );
    {
        struct s_lane *s= selfdestruct_first;
        while( s != SELFDESTRUCT_END ) {
            s->cancel_request= TRUE;
            s= s->selfdestruct_next;
        }
    }
    MUTEX_UNLOCK( &selfdestruct_cs );

    // When noticing their cancel, the lanes will remove themselves from
    // the selfdestruct chain.

    // TBD: Not sure if Windows (multi core) will require the timed approach,
    //      or a single Yield. I don't have a machine to test that (so leaving
    //      the timed approach). -- AKa 25-Oct-2008

#ifdef PLATFORM_LINUX
    // It seems enough for Linux to have a single yield here, which allows
    // other threads (timer lane) to proceed. Without the yield, there is
    // a segfault.
    //
    YIELD();
#else
    // OS X 10.5 (Intel) needs more to avoid segfaults.
    //
    // "make test" is okay. 100's of "make require" are okay.
    //
    // Tested on MacBook Core Duo 2GHz and 10.5.5:
    //  -- AKa 25-Oct-2008
    //
    #ifndef ATEXIT_WAIT_SECS
    # define ATEXIT_WAIT_SECS (0.1)
    #endif
    {
        double t_until= now_secs() + ATEXIT_WAIT_SECS;

        while( selfdestruct_first != SELFDESTRUCT_END ) {
            YIELD();    // give threads time to act on their cancel

            if (now_secs() >= t_until) break;
        }
    }
#endif

    //---
    // Kill the still free-running threads
    //
    if ( selfdestruct_first != SELFDESTRUCT_END ) {
        unsigned n=0;
        MUTEX_LOCK( &selfdestruct_cs );
        {
            struct s_lane *s= selfdestruct_first;
            while( s != SELFDESTRUCT_END ) {
                n++;
                s= s->selfdestruct_next;
            }
        }
        MUTEX_UNLOCK( &selfdestruct_cs );

        // Linux (at least 64-bit): CAUSES A SEGFAULT IF THIS BLOCK IS ENABLED
        // and works without the block (so let's leave those lanes running)
        //
#if 1
        // 2.0.2: at least timer lane is still here
        //
        //fprintf( stderr, "Left %d lane(s) with cancel request at process end.\n", n );
#else
        MUTEX_LOCK( &selfdestruct_cs );
        {
            struct s_lane *s= selfdestruct_first;
            while( s != SELFDESTRUCT_END ) {
                struct s_lane *next_s= s->selfdestruct_next;
                s->selfdestruct_next= NULL;     // detach from selfdestruct chain

                THREAD_KILL( &s->thread );
                s= next_s;
                n++;
            }
            selfdestruct_first= SELFDESTRUCT_END;
        }
        MUTEX_UNLOCK( &selfdestruct_cs );

        fprintf( stderr, "Killed %d lane(s) at process end.\n", n );
#endif
    }
}

// To allow free-running threads (longer lifespan than the handle's),
// 'struct s_lane' is malloc/free'd and the handle only carries a pointer.
// This is not deep userdata since the handle is not portable among lanes.
//
#define lua_toLane(L,i) (* ((struct s_lane**) lua_touserdata(L,i)))


/*
* Check if the thread in question ('L') has been signalled for cancel.
*
* Called by cancellation hooks and/or pending Linda operations (because then
* the check won't affect performance).
*
* Returns TRUE if the caller should exit any locks and call 'cancel_error()',
* making execution of the lane end.
*/
static bool_t cancel_test( lua_State *L ) {
    struct s_lane *s;

    STACK_GROW(L,1);

    STACK_CHECK(L)
    lua_pushlightuserdata( L, CANCEL_TEST_KEY );
    lua_rawget( L, LUA_REGISTRYINDEX );
    s= lua_touserdata( L, -1 );     // lightuserdata (true 's_lane' pointer) / nil
    lua_pop(L,1);
    STACK_END(L,0)

    // 's' is NULL for the original main state (no-one can cancel that)
    //
    return s && s->cancel_request;
}

static void cancel_error( lua_State *L ) {
    STACK_GROW(L,1);
    lua_pushlightuserdata( L, CANCEL_ERROR );   // special error value
    lua_error(L);   // no return
}

static void cancel_hook( lua_State *L, lua_Debug *ar ) {
    (void)ar;
    if (cancel_test(L)) cancel_error(L);
}
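
/*
* The hook is wired up per lane in 'thread_new' (see below); with a nonzero
* 'cancelstep' it makes 'cancel_test' run every 'cs' VM instructions:
*
*       lua_sethook( L2, cancel_hook, LUA_MASKCOUNT, cs );
*/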

//---
// = _single( [cores_uint=1] )
//
// Limits the process to use only 'cores' CPU cores. To be used for performance
// testing on multicore devices. DEBUGGING ONLY!
//
LUAG_FUNC( _single ) {
    uint_t cores= luaG_optunsigned(L,1,1);

#ifdef PLATFORM_OSX
  #ifdef _UTILBINDTHREADTOCPU
    if (cores > 1) luaL_error( L, "Limiting to N>1 cores not possible." );
    // requires 'chudInitialize()'
    utilBindThreadToCPU(0);     // # of CPU to run on (we cannot limit to 2..N CPUs?)
  #else
    luaL_error( L, "Not available: compile with _UTILBINDTHREADTOCPU" );
  #endif
#else
    luaL_error( L, "not implemented!" );
#endif
    (void)cores;

    return 0;
}

/*
* str= lane_error( error_val|str )
*
* Called if there's an error in some lane; add call stack to error message
* just like 'lua.c' normally does.
*
* ".. will be called with the error message and its return value will be the
* message returned on the stack by lua_pcall."
*
* Note: Rather than modifying the error message itself, it would be better
*       to provide the call stack (as string) completely separated. This would
*       work great with non-string error values as well (the current system
*       does not). (This is NOT possible with the Lua 5.1 'lua_pcall()'; we
*       could of course implement a Lanes-specific 'pcall' of our own that
*       does this). TBD!!! :)
*       --AKa 22-Jan-2009
*/
#ifdef ERROR_FULL_STACK

static int lane_error( lua_State *L ) {
    lua_Debug ar;
    unsigned lev,n;

    // [1]: error message (any type)

    assert( lua_gettop(L)==1 );

    // Don't do stack survey for cancelled lanes.
    //
#if 1
    if (lua_touserdata(L,1) == CANCEL_ERROR)
        return 1;   // just pass on
#endif

    // Place stack trace at 'registry[lane_error]' for the 'lua_pcall()'
    // caller to fetch. This bypasses the Lua 5.1 limitation of only one
    // return value from error handler to 'lua_pcall()' caller.

    // It's adequate to push the stack trace as a table. This gives the
    // receiver of the stack the best means to format it to their liking.
    // Also, it allows us to add more stack info later, if needed.
    //
    // table of { "sourcefile.lua:<line>", ... }
    //
    STACK_GROW(L,3);
    lua_newtable(L);

    // Best to start from level 1, but in some cases it might be a C function
    // and we don't get '.currentline' for that. It's okay - just keep level
    // and table index growing separate. --AKa 22-Jan-2009
    //
    lev= 0;
    n=1;
    while( lua_getstack(L, ++lev, &ar ) ) {
        lua_getinfo(L, "Sl", &ar);
        if (ar.currentline > 0) {
            lua_pushinteger( L, n++ );
            lua_pushfstring( L, "%s:%d", ar.short_src, ar.currentline );
            lua_settable( L, -3 );
        }
    }

    lua_pushlightuserdata( L, STACK_TRACE_KEY );
    lua_insert(L,-2);
    lua_settable( L, LUA_REGISTRYINDEX );

    assert( lua_gettop(L)== 1 );

    return 1;   // the untouched error value
}
#endif

//---
#if (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC)
  static THREAD_RETURN_T __stdcall lane_main( void *vs )
#else
  static THREAD_RETURN_T lane_main( void *vs )
#endif
{
    struct s_lane *s= (struct s_lane *)vs;
    int rc, rc2;
    lua_State *L= s->L;

    s->status= RUNNING;     // PENDING -> RUNNING

    // Tie "set_finalizer()" to the state
    //
    lua_pushcfunction( L, LG_set_finalizer );
    lua_setglobal( L, "set_finalizer" );

#ifdef ERROR_FULL_STACK
    STACK_GROW( L, 1 );
    lua_pushcfunction( L, lane_error );
    lua_insert( L, 1 );

    // [1]: error handler
    // [2]: function to run
    // [3..top]: parameters
    //
    rc= lua_pcall( L, lua_gettop(L)-2, LUA_MULTRET, 1 /*error handler*/ );
        // 0: no error
        // LUA_ERRRUN: a runtime error (error pushed on stack)
        // LUA_ERRMEM: memory allocation error
        // LUA_ERRERR: error while running the error handler (if any)

    assert( rc!=LUA_ERRERR );   // since we've authored it

    lua_remove(L,1);    // remove error handler

    // Lua 5.1 error handler is limited to one return value; taking stack trace
    // via registry
    //
    if (rc!=0) {
        STACK_GROW(L,1);
        lua_pushlightuserdata( L, STACK_TRACE_KEY );
        lua_gettable(L, LUA_REGISTRYINDEX);

        // For cancellation, a stack trace isn't placed
        //
        assert( lua_istable(L,2) || (lua_touserdata(L,1)==CANCEL_ERROR) );

        // Just leaving the stack trace table on the stack is enough to get
        // it through to the master.
    }

#else
    // This code does not use 'lane_error'
    //
    // [1]: function to run
    // [2..top]: parameters
    //
    rc= lua_pcall( L, lua_gettop(L)-1, LUA_MULTRET, 0 /*no error handler*/ );
        // 0: no error
        // LUA_ERRRUN: a runtime error (error pushed on stack)
        // LUA_ERRMEM: memory allocation error
#endif

//STACK_DUMP(L);
    // Call finalizers, if the script has set them up.
    //
    rc2= run_finalizers(L,rc);
    if (rc2!=0) {
        // Error within a finalizer!
        //
        // [-1]: error message

        rc= rc2;    // we're overruling the earlier script error or normal return

        lua_insert( L,1 );  // make error message [1]
        lua_settop( L,1 );  // remove all rest

        // Place an empty stack table just to keep the API simple (always when
        // there's an error, there's also a stack table - though it may be empty).
        //
        lua_newtable(L);
    }

    if (s->selfdestruct_next != NULL) {
        // We're a free-running thread and no-one's there to clean us up.
        //
        lua_close( s->L );
        L= 0;

    #if !( (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC) || (defined PTHREAD_TIMEDJOIN) )
        SIGNAL_FREE( &s->done_signal_ );
        MUTEX_FREE( &s->done_lock_ );
    #endif
        selfdestruct_remove(s);     // away from selfdestruct chain
        free(s);

    } else {
        // leave results (1..top) or error message + stack trace (1..2) on the
        // stack - master will copy them

        enum e_status st=
            (rc==0) ? DONE
            : (lua_touserdata(L,1)==CANCEL_ERROR) ? CANCELLED
            : ERROR_ST;

        // Posix no PTHREAD_TIMEDJOIN:
        // 'done_lock' protects the -> DONE|ERROR_ST|CANCELLED state change
        //
    #if (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC) || (defined PTHREAD_TIMEDJOIN)
        s->status= st;
    #else
        MUTEX_LOCK( &s->done_lock_ );
        {
            s->status= st;
            SIGNAL_ONE( &s->done_signal_ );     // wake up master (while 's->done_lock' is on)
        }
        MUTEX_UNLOCK( &s->done_lock_ );
    #endif
    }

    return 0;   // ignored
}

//---
// lane_ud= thread_new( function, [libs_str],
//                      [cancelstep_uint=0],
//                      [prio_int=0],
//                      [globals_tbl],
//                      [... args ...] )
//
// Upvalues: metatable to use for 'lane_ud'
//
LUAG_FUNC( thread_new )
{
    lua_State *L2;
    struct s_lane *s;
    struct s_lane **ud;

    const char *libs= lua_tostring( L, 2 );
    uint_t cs= luaG_optunsigned( L, 3,0);
    int prio= luaL_optinteger( L, 4,0);
    uint_t glob= luaG_isany(L,5) ? 5:0;

    #define FIXED_ARGS (5)
    uint_t args= lua_gettop(L) - FIXED_ARGS;

    if (prio < THREAD_PRIO_MIN || prio > THREAD_PRIO_MAX) {
        luaL_error( L, "Priority out of range: %d..+%d (%d)",
                        THREAD_PRIO_MIN, THREAD_PRIO_MAX, prio );
    }

    /* --- Create and prepare the sub state --- */

    L2 = luaL_newstate();   // uses standard 'realloc()'-based allocator,
                            // sets the panic callback

    if (!L2) luaL_error( L, "'luaL_newstate()' failed; out of memory" );

    STACK_GROW( L,2 );

    // Setting the globals table (needs to be done before loading stdlibs,
    // and the lane function)
    //
    if (glob!=0) {
STACK_CHECK(L)
        if (!lua_istable(L,glob))
            luaL_error( L, "Expected table, got %s", luaG_typename(L,glob) );

        lua_pushvalue( L, glob );
        luaG_inter_move( L,L2, 1 );     // moves the table to L2

        // L2 [-1]: table of globals

        // "You can change the global environment of a Lua thread using lua_replace"
        // (refman-5.0.pdf p. 30)
        //
        lua_replace( L2, LUA_GLOBALSINDEX );
STACK_END(L,0)
    }

    // Selected libraries
    //
    if (libs) {
        const char *err= luaG_openlibs( L2, libs );
        ASSERT_L( !err );   // bad libs should have been noticed by 'lanes.lua'

        serialize_require( L2 );
    }

    // Lane main function
    //
STACK_CHECK(L)
    lua_pushvalue( L, 1 );
    luaG_inter_move( L,L2, 1 );     // L->L2
STACK_MID(L,0)

    ASSERT_L( lua_gettop(L2) == 1 );
    ASSERT_L( lua_isfunction(L2,1) );

    // revive arguments
    //
    if (args) luaG_inter_copy( L,L2, args );    // L->L2
STACK_MID(L,0)

ASSERT_L( (uint_t)lua_gettop(L2) == 1+args );
ASSERT_L( lua_isfunction(L2,1) );

    // 's' is allocated from heap, not Lua, since its life span may surpass
    // the handle's (if free-running thread)
    //
    ud= lua_newuserdata( L, sizeof(struct s_lane*) );
    ASSERT_L(ud);

    s= *ud= malloc( sizeof(struct s_lane) );
    ASSERT_L(s);

    //memset( s, 0, sizeof(struct s_lane) );
    s->L= L2;
    s->status= PENDING;
    s->cancel_request= FALSE;

#if !( (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC) || (defined PTHREAD_TIMEDJOIN) )
    MUTEX_INIT( &s->done_lock_ );
    SIGNAL_INIT( &s->done_signal_ );
#endif
    s->mstatus= NORMAL;
    s->selfdestruct_next= NULL;

    // Set metatable for the userdata
    //
    lua_pushvalue( L, lua_upvalueindex(1) );
    lua_setmetatable( L, -2 );
STACK_MID(L,1)

    // Place 's' to registry, for 'cancel_test()' (even if 'cs'==0 we still
    // do cancel tests at pending send/receive).
    //
    lua_pushlightuserdata( L2, CANCEL_TEST_KEY );
    lua_pushlightuserdata( L2, s );
    lua_rawset( L2, LUA_REGISTRYINDEX );

    if (cs) {
        lua_sethook( L2, cancel_hook, LUA_MASKCOUNT, cs );
    }

    THREAD_CREATE( &s->thread, lane_main, s, prio );
STACK_END(L,1)

    return 1;
}
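
/*
* Lua-side usage sketch (normally called via the 'lanes.lua' wrapper;
* parameter values are placeholders):
*
*       local h= thread_new( lane_func,     -- function to run
*                            "io,math",     -- stdlibs to open in the lane
*                            100,           -- cancel test every 100 VM instructions
*                            0,             -- default priority
*                            nil,           -- no globals table
*                            "arg1", 2 )    -- arguments to 'lane_func'
*/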

//---
// = thread_gc( lane_ud )
//
// Cleanup for a thread userdata. If the thread is still executing, leave it
// alive as a free-running thread (will clean up itself).
//
// * Why NOT cancel/kill a loose thread:
//
// At least the timer system uses a free-running thread, so they should be
// handy, and cancelling/killing threads at gc is not very nice either
// (it would easily cause waits at the gc cycle, which we don't want).
//
// * Why YES kill a loose thread:
//
// The current way causes segfaults at program exit, if free-running threads
// are in certain stages. Details are not clear, but this is the core reason.
// If gc killed threads, then at process exit only one thread would remain.
//
// Todo: Maybe we should have a clear #define for selecting either behaviour.
//
LUAG_FUNC( thread_gc ) {
    struct s_lane *s= lua_toLane(L,1);

    // We can read 's->status' without locks, but not wait for it
    //
    if (s->status < DONE) {
        //
        selfdestruct_add(s);
        assert( s->selfdestruct_next );
        return 0;

    } else if (s->mstatus==KILLED) {
        // Make sure a kill has proceeded, before cleaning up the data structure.
        //
        // If not doing 'THREAD_WAIT()' we should close the Lua state here
        // (can it be out of order, since we killed the lane abruptly?)
        //
#if 0
        lua_close( s->L );
#else
fprintf( stderr, "** Joining with a killed thread (needs testing) **" );
#if (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC) || (defined PTHREAD_TIMEDJOIN)
        THREAD_WAIT( &s->thread, -1 );
#else
        THREAD_WAIT( &s->thread, &s->done_signal_, &s->done_lock_, &s->status, -1 );
#endif
fprintf( stderr, "** Joined ok **" );
#endif
    }

    // Clean up after a (finished) thread
    //
#if (! ((defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC) || (defined PTHREAD_TIMEDJOIN)))
    SIGNAL_FREE( &s->done_signal_ );
    MUTEX_FREE( &s->done_lock_ );
    free(s);
#endif

    return 0;
}


//---
// = thread_cancel( lane_ud [,timeout_secs=0.0] [,force_kill_bool=false] )
//
// The originator thread asking us specifically to cancel the other thread.
//
// 'timeout': <0: wait forever, until the lane is finished
//            0.0: just signal it to cancel, no time waited
//            >0: time to wait for the lane to detect cancellation
//
// 'force_kill': if true, and lane does not detect cancellation within timeout,
//            it is forcefully killed. Using this with 0.0 timeout means just kill
//            (unless the lane is already finished).
//
// Returns: true if the lane was already finished (DONE/ERROR_ST/CANCELLED) or
//          if we managed to cancel it.
//          false if the cancellation timed out, or a kill was needed.
//
LUAG_FUNC( thread_cancel )
{
    struct s_lane *s= lua_toLane(L,1);
    double secs= 0.0;
    uint_t force_i=2;
    bool_t force, done= TRUE;

    if (lua_isnumber(L,2)) {
        secs= lua_tonumber(L,2);
        force_i++;
    } else if (lua_isnil(L,2))
        force_i++;

    force= lua_toboolean(L,force_i);    // FALSE if nothing there

    // We can read 's->status' without locks, but not wait for it (if Posix no PTHREAD_TIMEDJOIN)
    //
    if (s->status < DONE) {
        s->cancel_request= TRUE;    // it's now signalled to stop

        done= thread_cancel( s, secs, force );
    }

    lua_pushboolean( L, done );
    return 1;
}

static bool_t thread_cancel( struct s_lane *s, double secs, bool_t force )
{
    bool_t done=
#if (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC) || (defined PTHREAD_TIMEDJOIN)
        THREAD_WAIT( &s->thread, secs );
#else
        THREAD_WAIT( &s->thread, &s->done_signal_, &s->done_lock_, &s->status, secs );
#endif

    if ((!done) && force) {
        // Killing is asynchronous; we _will_ wait for it to be done at
        // GC, to make sure the data structure can be released (alternative
        // would be use of "cancellation cleanup handlers" that at least
        // PThread seems to have).
        //
        THREAD_KILL( &s->thread );
        s->mstatus= KILLED;     // mark 'gc' to wait for it
    }
    return done;
}
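
/*
* Lua-side usage sketch:
*
*       thread_cancel( h )              -- just raise the cancel request
*       thread_cancel( h, 1.0 )         -- signal, then wait up to 1 s for it
*       thread_cancel( h, 0.0, true )   -- kill outright, unless already done
*/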

//---
// str= thread_status( lane_ud )
//
// Returns: "pending"   not started yet
//      -> "running"    started, doing its work..
//     <-> "waiting"    blocked in a receive()
//      -> "done"       finished, results are there
//       / "error"      finished at an error, error value is there
//       / "cancelled"  execution cancelled by M (state gone)
//
LUAG_FUNC( thread_status )
{
    struct s_lane *s= lua_toLane(L,1);
    enum e_status st= s->status;    // read just once (volatile)
    const char *str;

    if (s->mstatus == KILLED)
        st= CANCELLED;

    str= (st==PENDING) ? "pending" :
         (st==RUNNING) ? "running" :    // like in 'co.status()'
         (st==WAITING) ? "waiting" :
         (st==DONE) ? "done" :
         (st==ERROR_ST) ? "error" :
         (st==CANCELLED) ? "cancelled" : NULL;
    ASSERT_L(str);

    lua_pushstring( L, str );
    return 1;
}


//---
// [...] | [nil, err_any, stack_tbl]= thread_join( lane_ud [, wait_secs=-1] )
//
//  timeout:   returns nil
//  done:      returns return values (0..N)
//  error:     returns nil + error value + stack table
//  cancelled: returns nil
//
LUAG_FUNC( thread_join )
{
    struct s_lane *s= lua_toLane(L,1);
    double wait_secs= luaL_optnumber(L,2,-1.0);
    lua_State *L2= s->L;
    int ret;

    bool_t done=
#if (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC) || (defined PTHREAD_TIMEDJOIN)
        THREAD_WAIT( &s->thread, wait_secs );
#else
        THREAD_WAIT( &s->thread, &s->done_signal_, &s->done_lock_, &s->status, wait_secs );
#endif
    if (!done)
        return 0;   // timeout: pushes none, leaves 'L2' alive

    // Thread is DONE/ERROR_ST/CANCELLED; all ours now

    STACK_GROW( L, 1 );

    switch( s->status ) {
        case DONE: {
            uint_t n= lua_gettop(L2);   // whole L2 stack
            luaG_inter_move( L2,L, n );
            ret= n;
        } break;

        case ERROR_ST:
            lua_pushnil(L);
            luaG_inter_move( L2,L, 2 );     // error message at [-2], stack trace at [-1]
            ret= 3;
            break;

        case CANCELLED:
            ret= 0;
            break;

        default:
            fprintf( stderr, "Status: %d\n", s->status );
            ASSERT_L( FALSE ); ret= 0;
    }
    lua_close(L2);

    return ret;
}
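
/*
* Lua-side usage sketch; a lane that ran to completion gives its return
* values, an errored lane gives nil + error + stack table:
*
*       local a,b= thread_join( h )         -- wait forever
*       local a,b= thread_join( h, 0.1 )    -- nothing pushed if not done in 0.1 s
*
* Note that a completed join closes the lane's state, so the results can be
* fetched only once.
*/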

/*---=== Timer support ===---
*/

/*
* Push a timer gateway Linda object; only one deep userdata is
* created for this, each lane will get its own proxy.
*
* Note: this needs to be done on the C side; Lua wouldn't even be able
*       to see when we've been initialized for the very first time (with
*       us, they will be).
*/
static
void push_timer_gateway( lua_State *L ) {

    /* No need to lock; 'static' is just fine
     */
    static DEEP_PRELUDE *p;     // = NULL

    STACK_CHECK(L)
    if (!p) {
        // Create the Linda (only on first time)
        //
        // proxy_ud= deep_userdata( idfunc )
        //
        lua_pushcfunction( L, luaG_deep_userdata );
        lua_pushcfunction( L, LG_linda_id );
        lua_call( L, 1 /*args*/, 1 /*retvals*/ );

        ASSERT_L( lua_isuserdata(L,-1) );

        // Proxy userdata contents are only a 'DEEP_PRELUDE*' pointer
        //
        p= * (DEEP_PRELUDE**) lua_touserdata( L, -1 );
        ASSERT_L(p && p->refcount==1 && p->deep);

        // [-1]: proxy for accessing the Linda

    } else {
        /* Push a proxy based on the deep userdata we stored.
         */
        luaG_push_proxy( L, LG_linda_id, p );
    }
    STACK_END(L,1)
}

/*
* secs= now_secs()
*
* Returns the current time, as seconds (millisecond resolution).
*/
LUAG_FUNC( now_secs )
{
    lua_pushnumber( L, now_secs() );
    return 1;
}

/*
* wakeup_at_secs= wakeup_conv( date_tbl )
*/
LUAG_FUNC( wakeup_conv )
{
    int year, month, day, hour, min, sec, isdst;
    struct tm tm= {0};
        //
        // .year (four digits)
        // .month (1..12)
        // .day (1..31)
        // .hour (0..23)
        // .min (0..59)
        // .sec (0..61)
        // .yday (day of the year)
        // .isdst (daylight saving on/off)

    STACK_CHECK(L)
    lua_getfield( L, 1, "year" );  year= lua_tointeger(L,-1);  lua_pop(L,1);
    lua_getfield( L, 1, "month" ); month= lua_tointeger(L,-1); lua_pop(L,1);
    lua_getfield( L, 1, "day" );   day= lua_tointeger(L,-1);   lua_pop(L,1);
    lua_getfield( L, 1, "hour" );  hour= lua_tointeger(L,-1);  lua_pop(L,1);
    lua_getfield( L, 1, "min" );   min= lua_tointeger(L,-1);   lua_pop(L,1);
    lua_getfield( L, 1, "sec" );   sec= lua_tointeger(L,-1);   lua_pop(L,1);

    // If the Lua table has '.isdst' we trust that. If it does not, we'll let
    // 'mktime' decide on whether the time is within DST or not (value -1).
    //
    lua_getfield( L, 1, "isdst" );
    isdst= lua_isboolean(L,-1) ? lua_toboolean(L,-1) : -1;
    lua_pop(L,1);
    STACK_END(L,0)

    tm.tm_year= year-1900;
    tm.tm_mon= month-1;     // 0..11
    tm.tm_mday= day;        // 1..31
    tm.tm_hour= hour;       // 0..23
    tm.tm_min= min;         // 0..59
    tm.tm_sec= sec;         // 0..60
    tm.tm_isdst= isdst;     // 0/1/negative

    lua_pushnumber( L, (double) mktime( &tm ) );    // ms=0
    return 1;
}
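
/*
* Lua-side usage sketch (field values are placeholders; '.isdst' is optional,
* see above):
*
*       local t= wakeup_conv{ year=2010, month=10, day=1,
*                             hour=3, min=22, sec=32 }  -- epoch seconds
*/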

/*---=== Module linkage ===---
*/

#define REG_FUNC( name ) \
    lua_pushcfunction( L, LG_##name ); \
    lua_setglobal( L, #name )

#define REG_FUNC2( name, val ) \
    lua_pushcfunction( L, val ); \
    lua_setglobal( L, #name )

#define REG_STR2( name, val ) \
    lua_pushstring( L, val ); \
    lua_setglobal( L, #name )

#define REG_INT2( name, val ) \
    lua_pushinteger( L, val ); \
    lua_setglobal( L, #name )


int
#if (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC)
__declspec(dllexport)
#endif
    luaopen_lanes( lua_State *L ) {
    const char *err;
    static volatile char been_here;     // =0

    // One time initializations:
    //
    if (!been_here) {
        been_here= TRUE;

#if (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC)
        now_secs();     // initialize 'now_secs()' internal offset
#endif

#if (defined PLATFORM_OSX) && (defined _UTILBINDTHREADTOCPU)
        chudInitialize();
#endif

        // Locks for 'tools.c' inc/dec counters
        //
        MUTEX_INIT( &deep_lock );
        MUTEX_INIT( &mtid_lock );

        // Serialize calls to 'require' from now on, also in the primary state
        //
        MUTEX_RECURSIVE_INIT( &require_cs );

        serialize_require( L );

        // Selfdestruct chain handling
        //
        MUTEX_INIT( &selfdestruct_cs );
        atexit( selfdestruct_atexit );

        //---
        // Linux needs SCHED_RR to change thread priorities, and that is only
        // allowed for sudo'ers. SCHED_OTHER (default) has no priorities.
        // SCHED_OTHER threads are always lower priority than SCHED_RR.
        //
        // ^-- those apply to the 2.6 kernel. If (wishful thinking) these
        //     constraints change in the future, non-sudo priorities can be
        //     enabled for Linux as well.
        //
#ifdef PLATFORM_LINUX
        sudo= geteuid()==0;     // are we root?

        // If lower priorities (-2..-1) are wanted, we need to lift the main
        // thread to SCHED_RR and 50 (medium) level. Otherwise, we're always
        // below the launched threads (even -2).
        //
    #ifdef LINUX_SCHED_RR
        if (sudo) {
            struct sched_param sp= {0}; sp.sched_priority= _PRIO_0;
            PT_CALL( pthread_setschedparam( pthread_self(), SCHED_RR, &sp) );
        }
    #endif
#endif
        err= init_keepers();
        if (err)
            luaL_error( L, "Unable to initialize: %s", err );
    }

    // Linda identity function
    //
    REG_FUNC( linda_id );

    // metatable for threads
    //
    lua_newtable( L );
    lua_pushcfunction( L, LG_thread_gc );
    lua_setfield( L, -2, "__gc" );

    lua_pushcclosure( L, LG_thread_new, 1 );    // metatable as closure param
    lua_setglobal( L, "thread_new" );

    REG_FUNC( thread_status );
    REG_FUNC( thread_join );
    REG_FUNC( thread_cancel );

    REG_STR2( _version, VERSION );
    REG_FUNC( _single );

    REG_FUNC2( _deep_userdata, luaG_deep_userdata );

    REG_FUNC( now_secs );
    REG_FUNC( wakeup_conv );

    push_timer_gateway(L);
    lua_setglobal( L, "timer_gateway" );

    REG_INT2( max_prio, THREAD_PRIO_MAX );

    lua_pushlightuserdata( L, CANCEL_ERROR );
    lua_setglobal( L, "cancel_error" );

    return 0;
}