Diffstat (limited to 'src/lanes.cpp')
-rw-r--r--    src/lanes.cpp    2142
1 file changed, 2142 insertions, 0 deletions
diff --git a/src/lanes.cpp b/src/lanes.cpp
new file mode 100644
index 0000000..deee90c
--- /dev/null
+++ b/src/lanes.cpp
@@ -0,0 +1,2142 @@
1/*
2 * LANES.C Copyright (c) 2007-08, Asko Kauppi
3 * Copyright (C) 2009-19, Benoit Germain
4 *
5 * Multithreading in Lua.
6 *
7 * History:
8 * See CHANGES
9 *
10 * Platforms (tested internally):
11 * OS X (10.5.7 PowerPC/Intel)
12 * Linux x86 (Ubuntu 8.04)
13 * Win32 (Windows XP Home SP2, Visual C++ 2005/2008 Express)
14 *
15 * Platforms (tested externally):
16 * Win32 (MSYS) by Ross Berteig.
17 *
18 * Platforms (testers appreciated):
19 * Win64 - should work???
20 * Linux x64 - should work
21 * FreeBSD - should work
22 * QNX - porting shouldn't be hard
23 * Sun Solaris - porting shouldn't be hard
24 *
25 * References:
26 * "Porting multithreaded applications from Win32 to Mac OS X":
27 * <http://developer.apple.com/macosx/multithreadedprogramming.html>
28 *
29 * Pthreads:
30 * <http://vergil.chemistry.gatech.edu/resources/programming/threads.html>
31 *
32 * MSDN: <http://msdn2.microsoft.com/en-us/library/ms686679.aspx>
33 *
34 * <http://ridiculousfish.com/blog/archives/2007/02/17/barrier>
35 *
36 * Defines:
37 * -DLINUX_SCHED_RR: all threads are lifted to SCHED_RR category, to
 38 * allow negative priorities [-3,-1] to be used. Even without this,
39 * using priorities will require 'sudo' privileges on Linux.
40 *
41 * -DUSE_PTHREAD_TIMEDJOIN: use 'pthread_timedjoin_np()' for waiting
42 * for threads with a timeout. This changes the thread cleanup
43 * mechanism slightly (cleans up at the join, not once the thread
44 * has finished). May or may not be a good idea to use it.
45 * Available only in selected operating systems (Linux).
46 *
47 * Bugs:
48 *
49 * To-do:
50 *
51 * Make waiting threads cancellable.
52 * ...
53 */
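/*
 * Illustrative build sketch (not part of the original sources): the defines
 * described above are meant to be passed on the compiler command line, and the
 * exact invocation depends on your toolchain; something along the lines of
 *
 *     c++ -O2 -DLINUX_SCHED_RR -DUSE_PTHREAD_TIMEDJOIN -c src/lanes.cpp
 */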
54
55/*
56===============================================================================
57
58Copyright (C) 2007-10 Asko Kauppi <akauppi@gmail.com>
59 2011-19 Benoit Germain <bnt.germain@gmail.com>
60
61Permission is hereby granted, free of charge, to any person obtaining a copy
62of this software and associated documentation files (the "Software"), to deal
63in the Software without restriction, including without limitation the rights
64to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
65copies of the Software, and to permit persons to whom the Software is
66furnished to do so, subject to the following conditions:
67
68The above copyright notice and this permission notice shall be included in
69all copies or substantial portions of the Software.
70
71THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
72IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
73FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
74AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
75LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
76OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
77THE SOFTWARE.
78
79===============================================================================
80*/
81
82#include <string.h>
83#include <stdio.h>
84#include <stdlib.h>
85#include <ctype.h>
86#include <assert.h>
87
88#include "lanes.h"
89#include "threading.h"
90#include "compat.h"
91#include "tools.h"
92#include "state.h"
93#include "universe.h"
94#include "keeper.h"
95#include "lanes_private.h"
96
97#if !(defined( PLATFORM_XBOX) || defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC))
98# include <sys/time.h>
99#endif
100
101/* geteuid() */
102#ifdef PLATFORM_LINUX
103# include <unistd.h>
104# include <sys/types.h>
105#endif
106
107/* Do you want full call stacks, or just the line where the error happened?
108*
109* TBD: The full stack feature does not seem to work (try 'make error').
110*/
111#define ERROR_FULL_STACK 1 // must be either 0 or 1 as we do some index arithmetic with it!
112
113// intern the debug name in the specified lua state so that the pointer remains valid when the lane's state is closed
114static void securize_debug_threadname( lua_State* L, Lane* s)
115{
116 STACK_CHECK( L, 0);
117 STACK_GROW( L, 3);
118 lua_getiuservalue( L, 1, 1);
119 lua_newtable( L);
120 // Lua 5.1 can't do 's->debug_name = lua_pushstring( L, s->debug_name);'
121 lua_pushstring( L, s->debug_name);
122 s->debug_name = lua_tostring( L, -1);
123 lua_rawset( L, -3);
124 lua_pop( L, 1);
125 STACK_END( L, 0);
126}
127
128#if ERROR_FULL_STACK
129static int lane_error( lua_State* L);
130// crc64/we of string "STACKTRACE_REGKEY" generated at http://www.nitrxgen.net/hashgen/
131static DECLARE_CONST_UNIQUE_KEY( STACKTRACE_REGKEY, 0x534af7d3226a429f);
132#endif // ERROR_FULL_STACK
133
134/*
135* registry[FINALIZER_REGKEY] is either nil (no finalizers) or a table
136* of functions that Lanes will call after the executing 'pcall' has ended.
137*
138* We're NOT using the GC system for finalizers mainly because providing the
139* error (and maybe stack trace) parameters to the finalizer functions would
140* anyway complicate that approach.
141*/
142// crc64/we of string "FINALIZER_REGKEY" generated at http://www.nitrxgen.net/hashgen/
143static DECLARE_CONST_UNIQUE_KEY( FINALIZER_REGKEY, 0x188fccb8bf348e09);
144
145struct s_Linda;
146
147/*
148* Push a table stored in registry onto Lua stack.
149*
150* If there is no existing table, create one if 'create' is TRUE.
151*
152* Returns: TRUE if a table was pushed
153* FALSE if no table found, not created, and nothing pushed
154*/
155static bool_t push_registry_table( lua_State* L, UniqueKey key, bool_t create)
156{
157 STACK_GROW( L, 3);
158 STACK_CHECK( L, 0);
159
160 REGISTRY_GET( L, key); // ?
161 if( lua_isnil( L, -1)) // nil?
162 {
163 lua_pop( L, 1); //
164
165 if( !create)
166 {
167 return FALSE;
168 }
169
170 lua_newtable( L); // t
171 REGISTRY_SET( L, key, lua_pushvalue( L, -2));
172 }
173 STACK_END( L, 1);
174 return TRUE; // table pushed
175}
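// Illustrative sketch, not part of the original module: a hypothetical helper showing
// how push_registry_table()'s lookup/create pattern is meant to be used (here with
// FINALIZER_REGKEY, mirroring what LG_set_finalizer does further down). It assumes the
// function to append sits at stack slot 1, and is kept under #if 0 so it never builds.
#if 0
static void example_append_finalizer( lua_State* L)                    // f
{
    // fetch (or create) registry[FINALIZER_REGKEY], then append the function to it
    if( push_registry_table( L, FINALIZER_REGKEY, TRUE))               // f {finalizers}
    {
        lua_pushvalue( L, 1);                                           // f {finalizers} f
        lua_rawseti( L, -2, (lua_Integer) lua_rawlen( L, -2) + 1);      // f {finalizers}
        lua_pop( L, 1);                                                 // f
    }
}
#endif // illustrative only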
176
177#if HAVE_LANE_TRACKING()
178
179// The chain is ended by '(Lane*)(-1)', not NULL:
180// 'tracking_first -> ... -> ... -> (-1)'
181#define TRACKING_END ((Lane *)(-1))
182
183/*
 184 * Add the lane to the tracking chain, so that it can be listed by the threads()
 185 * introspection function for as long as it exists.
186 */
187static void tracking_add( Lane* s)
188{
189
190 MUTEX_LOCK( &s->U->tracking_cs);
191 {
192 assert( s->tracking_next == NULL);
193
194 s->tracking_next = s->U->tracking_first;
195 s->U->tracking_first = s;
196 }
197 MUTEX_UNLOCK( &s->U->tracking_cs);
198}
199
200/*
201 * A free-running lane has ended; remove it from tracking chain
202 */
203static bool_t tracking_remove( Lane* s)
204{
205 bool_t found = FALSE;
206 MUTEX_LOCK( &s->U->tracking_cs);
207 {
208 // Make sure (within the MUTEX) that we actually are in the chain
209 // still (at process exit they will remove us from chain and then
210 // cancel/kill).
211 //
212 if( s->tracking_next != NULL)
213 {
214 Lane** ref = (Lane**) &s->U->tracking_first;
215
216 while( *ref != TRACKING_END)
217 {
218 if( *ref == s)
219 {
220 *ref = s->tracking_next;
221 s->tracking_next = NULL;
222 found = TRUE;
223 break;
224 }
225 ref = (Lane**) &((*ref)->tracking_next);
226 }
227 assert( found);
228 }
229 }
230 MUTEX_UNLOCK( &s->U->tracking_cs);
231 return found;
232}
233
234#endif // HAVE_LANE_TRACKING()
235
236//---
237// low-level cleanup
238
239static void lane_cleanup( Lane* s)
240{
241 // Clean up after a (finished) thread
242 //
243#if THREADWAIT_METHOD == THREADWAIT_CONDVAR
244 SIGNAL_FREE( &s->done_signal);
245 MUTEX_FREE( &s->done_lock);
246#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
247
248#if HAVE_LANE_TRACKING()
249 if( s->U->tracking_first != NULL)
250 {
251 // Lane was cleaned up, no need to handle at process termination
252 tracking_remove( s);
253 }
254#endif // HAVE_LANE_TRACKING()
255
256 {
257 AllocatorDefinition* const allocD = &s->U->internal_allocator;
258 (void) allocD->allocF(allocD->allocUD, s, sizeof(Lane), 0);
259 }
260}
261
262/*
263 * ###############################################################################################
264 * ########################################## Finalizer ##########################################
265 * ###############################################################################################
266 */
267
268//---
269// void= finalizer( finalizer_func )
270//
271// finalizer_func( [err, stack_tbl] )
272//
273// Add a function that will be called when exiting the lane, either via
274// normal return or an error.
275//
276LUAG_FUNC( set_finalizer)
277{
278 luaL_argcheck( L, lua_isfunction( L, 1), 1, "finalizer should be a function");
279 luaL_argcheck( L, lua_gettop( L) == 1, 1, "too many arguments");
280 // Get the current finalizer table (if any)
281 push_registry_table( L, FINALIZER_REGKEY, TRUE /*do create if none*/); // finalizer {finalisers}
282 STACK_GROW( L, 2);
283 lua_pushinteger( L, lua_rawlen( L, -1) + 1); // finalizer {finalisers} idx
284 lua_pushvalue( L, 1); // finalizer {finalisers} idx finalizer
285 lua_rawset( L, -3); // finalizer {finalisers}
286 lua_pop( L, 2); //
287 return 0;
288}
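// Usage sketch from the Lua side (illustrative, relying only on the behaviour documented
// above): inside a lane body, 'set_finalizer' is available as a global (installed in
// lane_main() below), and the registered function receives the error value and optional
// stack table only when the lane body failed.
//
//     set_finalizer( function( err, stack_tbl)
//         if err then print( "lane failed:", tostring( err)) end
//     end)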
289
290
291//---
292// Run finalizers - if any - with the given parameters
293//
294// If 'rc' is nonzero, the error message and stack trace (the latter only when ERROR_FULL_STACK == 1) are available as:
295// [-1]: stack trace (table)
296// [-2]: error message (any type)
297//
298// Returns:
299// 0 if finalizers were run without error (or there were none)
300// LUA_ERRxxx return code if any of the finalizers failed
301//
302// TBD: should we add stack trace on failing finalizer, wouldn't be hard..
303//
304static void push_stack_trace( lua_State* L, int rc_, int stk_base_);
305
306static int run_finalizers( lua_State* L, int lua_rc)
307{
308 int finalizers_index;
309 int n;
310 int err_handler_index = 0;
311 int rc = LUA_OK; // ...
312 if( !push_registry_table( L, FINALIZER_REGKEY, FALSE)) // ... finalizers?
313 {
314 return 0; // no finalizers
315 }
316
317 STACK_GROW( L, 5);
318
319 finalizers_index = lua_gettop( L);
320
321#if ERROR_FULL_STACK
322 lua_pushcfunction( L, lane_error); // ... finalizers lane_error
323 err_handler_index = lua_gettop( L);
324#endif // ERROR_FULL_STACK
325
326 for( n = (int) lua_rawlen( L, finalizers_index); n > 0; -- n)
327 {
328 int args = 0;
329 lua_pushinteger( L, n); // ... finalizers lane_error n
330 lua_rawget( L, finalizers_index); // ... finalizers lane_error finalizer
331 ASSERT_L( lua_isfunction( L, -1));
332 if( lua_rc != LUA_OK) // we have an error message and an optional stack trace at the bottom of the stack
333 {
334 ASSERT_L( finalizers_index == 2 || finalizers_index == 3);
335 //char const* err_msg = lua_tostring( L, 1);
336 lua_pushvalue( L, 1); // ... finalizers lane_error finalizer err_msg
337 // note we don't always have a stack trace for example when CANCEL_ERROR, or when we got an error that doesn't call our handler, such as LUA_ERRMEM
338 if( finalizers_index == 3)
339 {
340 lua_pushvalue( L, 2); // ... finalizers lane_error finalizer err_msg stack_trace
341 }
342 args = finalizers_index - 1;
343 }
344
345 // if no error from the main body, finalizer doesn't receive any argument, else it gets the error message and optional stack trace
346 rc = lua_pcall( L, args, 0, err_handler_index); // ... finalizers lane_error err_msg2?
347 if( rc != LUA_OK)
348 {
349 push_stack_trace( L, rc, lua_gettop( L));
350 // If one finalizer fails, don't run the others. Return this
351 // as the 'real' error, replacing what we could have had (or not)
352 // from the actual code.
353 break;
354 }
355 // no error, proceed to next finalizer // ... finalizers lane_error
356 }
357
358 if( rc != LUA_OK)
359 {
360 // ERROR_FULL_STACK accounts for the presence of lane_error on the stack
361 int nb_err_slots = lua_gettop( L) - finalizers_index - ERROR_FULL_STACK;
362 // a finalizer generated an error, this is what we leave of the stack
363 for( n = nb_err_slots; n > 0; -- n)
364 {
365 lua_replace( L, n);
366 }
367 // leave on the stack only the error and optional stack trace produced by the error in the finalizer
368 lua_settop( L, nb_err_slots);
369 }
370 else // no error from the finalizers, make sure only the original return values from the lane body remain on the stack
371 {
372 lua_settop( L, finalizers_index - 1);
373 }
374
375 return rc;
376}
377
378/*
379 * ###############################################################################################
380 * ########################################### Threads ###########################################
381 * ###############################################################################################
382 */
383
384//
385// Protects modifying the selfdestruct chain
386
387#define SELFDESTRUCT_END ((Lane*)(-1))
388//
389// The chain is ended by '(Lane*)(-1)', not NULL:
390// 'selfdestruct_first -> ... -> ... -> (-1)'
391
392/*
393 * Add the lane to selfdestruct chain; the ones still running at the end of the
394 * whole process will be cancelled.
395 */
396static void selfdestruct_add( Lane* s)
397{
398 MUTEX_LOCK( &s->U->selfdestruct_cs);
399 assert( s->selfdestruct_next == NULL);
400
401 s->selfdestruct_next = s->U->selfdestruct_first;
402 s->U->selfdestruct_first= s;
403 MUTEX_UNLOCK( &s->U->selfdestruct_cs);
404}
405
406/*
407 * A free-running lane has ended; remove it from selfdestruct chain
408 */
409static bool_t selfdestruct_remove( Lane* s)
410{
411 bool_t found = FALSE;
412 MUTEX_LOCK( &s->U->selfdestruct_cs);
413 {
414 // Make sure (within the MUTEX) that we actually are in the chain
415 // still (at process exit they will remove us from chain and then
416 // cancel/kill).
417 //
418 if( s->selfdestruct_next != NULL)
419 {
420 Lane** ref = (Lane**) &s->U->selfdestruct_first;
421
422 while( *ref != SELFDESTRUCT_END )
423 {
424 if( *ref == s)
425 {
426 *ref = s->selfdestruct_next;
427 s->selfdestruct_next = NULL;
428 // the terminal shutdown should wait until the lane is done with its lua_close()
429 ++ s->U->selfdestructing_count;
430 found = TRUE;
431 break;
432 }
433 ref = (Lane**) &((*ref)->selfdestruct_next);
434 }
435 assert( found);
436 }
437 }
438 MUTEX_UNLOCK( &s->U->selfdestruct_cs);
439 return found;
440}
441
442/*
443* Process end; cancel any still free-running threads
444*/
445static int selfdestruct_gc( lua_State* L)
446{
447 Universe* U = (Universe*) lua_touserdata( L, 1);
448
449 while( U->selfdestruct_first != SELFDESTRUCT_END) // true at most once!
450 {
451 // Signal _all_ still running threads to exit (including the timer thread)
452 //
453 MUTEX_LOCK( &U->selfdestruct_cs);
454 {
455 Lane* s = U->selfdestruct_first;
456 while( s != SELFDESTRUCT_END)
457 {
458 // attempt a regular unforced hard cancel with a small timeout
459 bool_t cancelled = THREAD_ISNULL( s->thread) || thread_cancel( L, s, CO_Hard, 0.0001, FALSE, 0.0);
460 // if we failed, and we know the thread is waiting on a linda
461 if( cancelled == FALSE && s->status == WAITING && s->waiting_on != NULL)
462 {
463 // signal the linda to wake up the thread so that it can react to the cancel query
464 // let us hope we never land here with a pointer on a linda that has been destroyed...
465 SIGNAL_T* waiting_on = s->waiting_on;
466 //s->waiting_on = NULL; // useful, or not?
467 SIGNAL_ALL( waiting_on);
468 }
469 s = s->selfdestruct_next;
470 }
471 }
472 MUTEX_UNLOCK( &U->selfdestruct_cs);
473
474 // When noticing their cancel, the lanes will remove themselves from
475 // the selfdestruct chain.
476
477 // TBD: Not sure if Windows (multi core) will require the timed approach,
 478 // or single Yield. I don't have a machine to test that (so leaving
479 // for timed approach). -- AKa 25-Oct-2008
480
481 // OS X 10.5 (Intel) needs more to avoid segfaults.
482 //
483 // "make test" is okay. 100's of "make require" are okay.
484 //
485 // Tested on MacBook Core Duo 2GHz and 10.5.5:
486 // -- AKa 25-Oct-2008
487 //
488 {
489 lua_Number const shutdown_timeout = lua_tonumber( L, lua_upvalueindex( 1));
490 double const t_until = now_secs() + shutdown_timeout;
491
492 while( U->selfdestruct_first != SELFDESTRUCT_END)
493 {
494 YIELD(); // give threads time to act on their cancel
495 {
 496 // count the number of cancelled threads that didn't have the time to act yet
497 int n = 0;
498 double t_now = 0.0;
499 MUTEX_LOCK( &U->selfdestruct_cs);
500 {
501 Lane* s = U->selfdestruct_first;
502 while( s != SELFDESTRUCT_END)
503 {
504 if( s->cancel_request == CANCEL_HARD)
505 ++ n;
506 s = s->selfdestruct_next;
507 }
508 }
509 MUTEX_UNLOCK( &U->selfdestruct_cs);
510 // if timeout elapsed, or we know all threads have acted, stop waiting
511 t_now = now_secs();
512 if( n == 0 || (t_now >= t_until))
513 {
514 DEBUGSPEW_CODE( fprintf( stderr, "%d uncancelled lane(s) remain after waiting %fs at process end.\n", n, shutdown_timeout - (t_until - t_now)));
515 break;
516 }
517 }
518 }
519 }
520
521 // If some lanes are currently cleaning after themselves, wait until they are done.
522 // They are no longer listed in the selfdestruct chain, but they still have to lua_close().
523 while( U->selfdestructing_count > 0)
524 {
525 YIELD();
526 }
527
528 //---
529 // Kill the still free running threads
530 //
531 if( U->selfdestruct_first != SELFDESTRUCT_END)
532 {
533 unsigned int n = 0;
534 // first thing we did was to raise the linda signals the threads were waiting on (if any)
535 // therefore, any well-behaved thread should be in CANCELLED state
536 // these are not running, and the state can be closed
537 MUTEX_LOCK( &U->selfdestruct_cs);
538 {
539 Lane* s = U->selfdestruct_first;
540 while( s != SELFDESTRUCT_END)
541 {
542 Lane* next_s = s->selfdestruct_next;
543 s->selfdestruct_next = NULL; // detach from selfdestruct chain
544 if( !THREAD_ISNULL( s->thread)) // can be NULL if previous 'soft' termination succeeded
545 {
546 THREAD_KILL( &s->thread);
547#if THREADAPI == THREADAPI_PTHREAD
548 // pthread: make sure the thread is really stopped!
549 THREAD_WAIT( &s->thread, -1, &s->done_signal, &s->done_lock, &s->status);
550#endif // THREADAPI == THREADAPI_PTHREAD
551 }
552 // NO lua_close() in this case because we don't know where execution of the state was interrupted
553 lane_cleanup( s);
554 s = next_s;
555 ++ n;
556 }
557 U->selfdestruct_first = SELFDESTRUCT_END;
558 }
559 MUTEX_UNLOCK( &U->selfdestruct_cs);
560
561 DEBUGSPEW_CODE( fprintf( stderr, "Killed %d lane(s) at process end.\n", n));
562 }
563 }
564
565 // If some lanes are currently cleaning after themselves, wait until they are done.
566 // They are no longer listed in the selfdestruct chain, but they still have to lua_close().
567 while( U->selfdestructing_count > 0)
568 {
569 YIELD();
570 }
571
572 // necessary so that calling free_deep_prelude doesn't crash because linda_id expects a linda lightuserdata at absolute slot 1
573 lua_settop( L, 0);
574 // no need to mutex-protect this as all threads in the universe are gone at that point
 575 if( U->timer_deep != NULL) // test in case some early internal error prevented Lanes from creating the deep timer
576 {
577 -- U->timer_deep->refcount; // should be 0 now
578 free_deep_prelude( L, (DeepPrelude*) U->timer_deep);
579 U->timer_deep = NULL;
580 }
581
582 close_keepers( U);
583
584 // remove the protected allocator, if any
585 cleanup_allocator_function( U, L);
586
587#if HAVE_LANE_TRACKING()
588 MUTEX_FREE( &U->tracking_cs);
589#endif // HAVE_LANE_TRACKING()
590 // Linked chains handling
591 MUTEX_FREE( &U->selfdestruct_cs);
592 MUTEX_FREE( &U->require_cs);
593 // Locks for 'tools.c' inc/dec counters
594 MUTEX_FREE( &U->deep_lock);
595 MUTEX_FREE( &U->mtid_lock);
596 // universe is no longer available (nor necessary)
597 // we need to do this in case some deep userdata objects were created before Lanes was initialized,
598 // as potentially they will be garbage collected after Lanes at application shutdown
599 universe_store( L, NULL);
600 return 0;
601}
602
603
604//---
605// = _single( [cores_uint=1] )
606//
607// Limits the process to use only 'cores' CPU cores. To be used for performance
608// testing on multicore devices. DEBUGGING ONLY!
609//
610LUAG_FUNC( set_singlethreaded)
611{
612 uint_t cores = luaG_optunsigned( L, 1, 1);
613 (void) cores; // prevent "unused" warning
614
615#ifdef PLATFORM_OSX
616#ifdef _UTILBINDTHREADTOCPU
617 if( cores > 1)
618 {
619 return luaL_error( L, "Limiting to N>1 cores not possible");
620 }
621 // requires 'chudInitialize()'
622 utilBindThreadToCPU(0); // # of CPU to run on (we cannot limit to 2..N CPUs?)
623 return 0;
624#else
625 return luaL_error( L, "Not available: compile with _UTILBINDTHREADTOCPU");
626#endif
627#else
628 return luaL_error( L, "not implemented");
629#endif
630}
631
632
633/*
634* str= lane_error( error_val|str )
635*
636* Called if there's an error in some lane; add call stack to error message
637* just like 'lua.c' normally does.
638*
639* ".. will be called with the error message and its return value will be the
640* message returned on the stack by lua_pcall."
641*
642* Note: Rather than modifying the error message itself, it would be better
643* to provide the call stack (as string) completely separated. This would
644* work great with non-string error values as well (current system does not).
645* (This is NOT possible with the Lua 5.1 'lua_pcall()'; we could of course
646* implement a Lanes-specific 'pcall' of our own that does this). TBD!!! :)
647* --AKa 22-Jan-2009
648*/
649#if ERROR_FULL_STACK
650
651// crc64/we of string "EXTENDED_STACKTRACE_REGKEY" generated at http://www.nitrxgen.net/hashgen/
652static DECLARE_CONST_UNIQUE_KEY( EXTENDED_STACKTRACE_REGKEY, 0x2357c69a7c92c936); // used as registry key
653
654LUAG_FUNC( set_error_reporting)
655{
656 bool_t equal;
657 luaL_checktype( L, 1, LUA_TSTRING);
658 lua_pushliteral( L, "extended");
659 equal = lua_rawequal( L, -1, 1);
660 lua_pop( L, 1);
661 if( equal)
662 {
663 goto done;
664 }
665 lua_pushliteral( L, "basic");
666 equal = !lua_rawequal( L, -1, 1);
667 lua_pop( L, 1);
668 if( equal)
669 {
670 return luaL_error( L, "unsupported error reporting model");
671 }
672done:
673 REGISTRY_SET( L, EXTENDED_STACKTRACE_REGKEY, lua_pushboolean( L, equal));
674 return 0;
675}
676
677static int lane_error( lua_State* L)
678{
679 lua_Debug ar;
680 int n;
681 bool_t extended;
682
683 // error message (any type)
684 STACK_CHECK_ABS( L, 1); // some_error
685
686 // Don't do stack survey for cancelled lanes.
687 //
688 if( equal_unique_key( L, 1, CANCEL_ERROR))
689 {
690 return 1; // just pass on
691 }
692
693 STACK_GROW( L, 3);
694 REGISTRY_GET( L, EXTENDED_STACKTRACE_REGKEY); // some_error basic|extended
695 extended = lua_toboolean( L, -1);
696 lua_pop( L, 1); // some_error
697
 698 // Place the stack trace at 'registry[STACKTRACE_REGKEY]' for the 'lua_pcall()'
699 // caller to fetch. This bypasses the Lua 5.1 limitation of only one
700 // return value from error handler to 'lua_pcall()' caller.
701
702 // It's adequate to push stack trace as a table. This gives the receiver
703 // of the stack best means to format it to their liking. Also, it allows
704 // us to add more stack info later, if needed.
705 //
706 // table of { "sourcefile.lua:<line>", ... }
707 //
708 lua_newtable( L); // some_error {}
709
710 // Best to start from level 1, but in some cases it might be a C function
711 // and we don't get '.currentline' for that. It's okay - just keep level
712 // and table index growing separate. --AKa 22-Jan-2009
713 //
714 for( n = 1; lua_getstack( L, n, &ar); ++ n)
715 {
716 lua_getinfo( L, extended ? "Sln" : "Sl", &ar);
717 if( extended)
718 {
719 lua_newtable( L); // some_error {} {}
720
721 lua_pushstring( L, ar.source); // some_error {} {} source
722 lua_setfield( L, -2, "source"); // some_error {} {}
723
724 lua_pushinteger( L, ar.currentline); // some_error {} {} currentline
725 lua_setfield( L, -2, "currentline"); // some_error {} {}
726
727 lua_pushstring( L, ar.name); // some_error {} {} name
728 lua_setfield( L, -2, "name"); // some_error {} {}
729
730 lua_pushstring( L, ar.namewhat); // some_error {} {} namewhat
731 lua_setfield( L, -2, "namewhat"); // some_error {} {}
732
733 lua_pushstring( L, ar.what); // some_error {} {} what
734 lua_setfield( L, -2, "what"); // some_error {} {}
735 }
736 else if( ar.currentline > 0)
737 {
738 lua_pushfstring( L, "%s:%d", ar.short_src, ar.currentline); // some_error {} "blah:blah"
739 }
740 else
741 {
742 lua_pushfstring( L, "%s:?", ar.short_src); // some_error {} "blah"
743 }
744 lua_rawseti( L, -2, (lua_Integer) n); // some_error {}
745 }
746
747 REGISTRY_SET( L, STACKTRACE_REGKEY, lua_insert( L, -2)); // some_error
748
749 STACK_END( L, 1);
750 return 1; // the untouched error value
751}
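// Shape of the stack trace produced above (illustrative examples, derived from the code
// in lane_error(); the file names are made up):
// * default ("basic") reporting: an array of "source:line" strings, e.g.
//       { "lanes_test.lua:42", "lanes_test.lua:12", "[C]:?" }
// * after set_error_reporting( "extended"): an array of tables, each carrying the
//   'source', 'currentline', 'name', 'namewhat' and 'what' fields filled from lua_getinfo().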
752#endif // ERROR_FULL_STACK
753
754static void push_stack_trace( lua_State* L, int rc_, int stk_base_)
755{
 756 // The Lua 5.1 error handler is limited to one return value, so lane_error stores the stack trace in the registry instead
757 switch( rc_)
758 {
759 case LUA_OK: // no error, body return values are on the stack
760 break;
761
762 case LUA_ERRRUN: // cancellation or a runtime error
763#if ERROR_FULL_STACK // when ERROR_FULL_STACK, we installed a handler
764 {
765 STACK_CHECK( L, 0);
766 // fetch the call stack table from the registry where the handler stored it
767 STACK_GROW( L, 1);
768 // yields nil if no stack was generated (in case of cancellation for example)
769 REGISTRY_GET( L, STACKTRACE_REGKEY); // err trace|nil
770 STACK_END( L, 1);
771
772 // For cancellation the error message is CANCEL_ERROR, and a stack trace isn't placed
773 // For other errors, the message can be whatever was thrown, and we should have a stack trace table
774 ASSERT_L( lua_type( L, 1 + stk_base_) == (equal_unique_key( L, stk_base_, CANCEL_ERROR) ? LUA_TNIL : LUA_TTABLE));
775 // Just leaving the stack trace table on the stack is enough to get it through to the master.
776 break;
777 }
778#endif // fall through if not ERROR_FULL_STACK
779
780 case LUA_ERRMEM: // memory allocation error (handler not called)
781 case LUA_ERRERR: // error while running the error handler (if any, for example an out-of-memory condition)
782 default:
783 // we should have a single value which is either a string (the error message) or CANCEL_ERROR
784 ASSERT_L( (lua_gettop( L) == stk_base_) && ((lua_type( L, stk_base_) == LUA_TSTRING) || equal_unique_key( L, stk_base_, CANCEL_ERROR)));
785 break;
786 }
787}
788
789LUAG_FUNC( set_debug_threadname)
790{
791 DECLARE_CONST_UNIQUE_KEY( hidden_regkey, LG_set_debug_threadname);
792 // C s_lane structure is a light userdata upvalue
793 Lane* s = (Lane*) lua_touserdata( L, lua_upvalueindex( 1));
794 luaL_checktype( L, -1, LUA_TSTRING); // "name"
795 lua_settop( L, 1);
796 STACK_CHECK_ABS( L, 1);
797 // store a hidden reference in the registry to make sure the string is kept around even if a lane decides to manually change the "decoda_name" global...
798 REGISTRY_SET( L, hidden_regkey, lua_pushvalue( L, -2));
799 STACK_MID( L, 1);
800 s->debug_name = lua_tostring( L, -1);
801 // keep a direct pointer on the string
802 THREAD_SETNAME( s->debug_name);
803 // to see VM name in Decoda debugger Virtual Machine window
804 lua_setglobal( L, "decoda_name"); //
805 STACK_END( L, 0);
806 return 0;
807}
808
809LUAG_FUNC( get_debug_threadname)
810{
811 Lane* const s = lua_toLane( L, 1);
812 luaL_argcheck( L, lua_gettop( L) == 1, 2, "too many arguments");
813 lua_pushstring( L, s->debug_name);
814 return 1;
815}
816
817LUAG_FUNC( set_thread_priority)
818{
819 int const prio = (int) luaL_checkinteger( L, 1);
820 // public Lanes API accepts a generic range -3/+3
821 // that will be remapped into the platform-specific scheduler priority scheme
822 // On some platforms, -3 is equivalent to -2 and +3 to +2
823 if( prio < THREAD_PRIO_MIN || prio > THREAD_PRIO_MAX)
824 {
825 return luaL_error( L, "priority out of range: %d..+%d (%d)", THREAD_PRIO_MIN, THREAD_PRIO_MAX, prio);
826 }
827 THREAD_SET_PRIORITY( prio);
828 return 0;
829}
830
831LUAG_FUNC( set_thread_affinity)
832{
833 lua_Integer affinity = luaL_checkinteger( L, 1);
834 if( affinity <= 0)
835 {
836 return luaL_error( L, "invalid affinity (%d)", affinity);
837 }
838 THREAD_SET_AFFINITY( (unsigned int) affinity);
839 return 0;
840}
841
842#if USE_DEBUG_SPEW()
843// can't use direct LUA_x errcode indexing because the sequence is not the same between Lua 5.1 and 5.2 :-(
844// LUA_ERRERR doesn't have the same value
845struct errcode_name
846{
847 int code;
848 char const* name;
849};
850
851static struct errcode_name s_errcodes[] =
852{
853 { LUA_OK, "LUA_OK"},
854 { LUA_YIELD, "LUA_YIELD"},
855 { LUA_ERRRUN, "LUA_ERRRUN"},
856 { LUA_ERRSYNTAX, "LUA_ERRSYNTAX"},
857 { LUA_ERRMEM, "LUA_ERRMEM"},
858 { LUA_ERRGCMM, "LUA_ERRGCMM"},
859 { LUA_ERRERR, "LUA_ERRERR"},
860};
861static char const* get_errcode_name( int _code)
862{
863 int i;
 864 for( i = 0; i < (int) (sizeof( s_errcodes) / sizeof( s_errcodes[0])); ++ i)
865 {
866 if( s_errcodes[i].code == _code)
867 {
868 return s_errcodes[i].name;
869 }
870 }
871 return "<NULL>";
872}
873#endif // USE_DEBUG_SPEW()
874
875#if THREADWAIT_METHOD == THREADWAIT_CONDVAR // implies THREADAPI == THREADAPI_PTHREAD
876static void thread_cleanup_handler( void* opaque)
877{
878 Lane* s= (Lane*) opaque;
879 MUTEX_LOCK( &s->done_lock);
880 s->status = CANCELLED;
881 SIGNAL_ONE( &s->done_signal); // wake up master (while 's->done_lock' is on)
882 MUTEX_UNLOCK( &s->done_lock);
883}
884#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
885
886static THREAD_RETURN_T THREAD_CALLCONV lane_main( void* vs)
887{
888 Lane* s = (Lane*) vs;
889 int rc, rc2;
890 lua_State* L = s->L;
891 // Called with the lane function and arguments on the stack
892 int const nargs = lua_gettop( L) - 1;
893 DEBUGSPEW_CODE( Universe* U = universe_get( L));
894 THREAD_MAKE_ASYNCH_CANCELLABLE();
895 THREAD_CLEANUP_PUSH( thread_cleanup_handler, s);
896 s->status = RUNNING; // PENDING -> RUNNING
897
898 // Tie "set_finalizer()" to the state
899 lua_pushcfunction( L, LG_set_finalizer);
900 populate_func_lookup_table( L, -1, "set_finalizer");
901 lua_setglobal( L, "set_finalizer");
902
903 // Tie "set_debug_threadname()" to the state
904 // But don't register it in the lookup database because of the s_lane pointer upvalue
905 lua_pushlightuserdata( L, s);
906 lua_pushcclosure( L, LG_set_debug_threadname, 1);
907 lua_setglobal( L, "set_debug_threadname");
908
909 // Tie "cancel_test()" to the state
910 lua_pushcfunction( L, LG_cancel_test);
911 populate_func_lookup_table( L, -1, "cancel_test");
912 lua_setglobal( L, "cancel_test");
913
914 // this could be done in lane_new before the lane body function is pushed on the stack to avoid unnecessary stack slot shifting around
915#if ERROR_FULL_STACK
916 // Tie "set_error_reporting()" to the state
917 lua_pushcfunction( L, LG_set_error_reporting);
918 populate_func_lookup_table( L, -1, "set_error_reporting");
919 lua_setglobal( L, "set_error_reporting");
920
921 STACK_GROW( L, 1);
922 lua_pushcfunction( L, lane_error); // func args handler
923 lua_insert( L, 1); // handler func args
924#endif // ERROR_FULL_STACK
925
926 rc = lua_pcall( L, nargs, LUA_MULTRET, ERROR_FULL_STACK); // retvals|err
927
928#if ERROR_FULL_STACK
929 lua_remove( L, 1); // retvals|error
930# endif // ERROR_FULL_STACK
931
932 // in case of error and if it exists, fetch stack trace from registry and push it
933 push_stack_trace( L, rc, 1); // retvals|error [trace]
934
935 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "Lane %p body: %s (%s)\n" INDENT_END, L, get_errcode_name( rc), equal_unique_key( L, 1, CANCEL_ERROR) ? "cancelled" : lua_typename( L, lua_type( L, 1))));
936 //STACK_DUMP(L);
937 // Call finalizers, if the script has set them up.
938 //
939 rc2 = run_finalizers( L, rc);
940 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "Lane %p finalizer: %s\n" INDENT_END, L, get_errcode_name( rc2)));
941 if( rc2 != LUA_OK) // Error within a finalizer!
942 {
943 // the finalizer generated an error, and left its own error message [and stack trace] on the stack
944 rc = rc2; // we're overruling the earlier script error or normal return
945 }
946 s->waiting_on = NULL; // just in case
947 if( selfdestruct_remove( s)) // check and remove (under lock!)
948 {
949 // We're a free-running thread and no-one's there to clean us up.
950 //
951 lua_close( s->L);
952
953 MUTEX_LOCK( &s->U->selfdestruct_cs);
954 // done with lua_close(), terminal shutdown sequence may proceed
955 -- s->U->selfdestructing_count;
956 MUTEX_UNLOCK( &s->U->selfdestruct_cs);
957
958 lane_cleanup( s); // s is freed at this point
959 }
960 else
961 {
962 // leave results (1..top) or error message + stack trace (1..2) on the stack - master will copy them
963
964 enum e_status st = (rc == 0) ? DONE : equal_unique_key( L, 1, CANCEL_ERROR) ? CANCELLED : ERROR_ST;
965
966 // Posix no PTHREAD_TIMEDJOIN:
967 // 'done_lock' protects the -> DONE|ERROR_ST|CANCELLED state change
968 //
969#if THREADWAIT_METHOD == THREADWAIT_CONDVAR
970 MUTEX_LOCK( &s->done_lock);
971 {
972#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
973 s->status = st;
974#if THREADWAIT_METHOD == THREADWAIT_CONDVAR
975 SIGNAL_ONE( &s->done_signal); // wake up master (while 's->done_lock' is on)
976 }
977 MUTEX_UNLOCK( &s->done_lock);
978#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
979 }
980 THREAD_CLEANUP_POP( FALSE);
981 return 0; // ignored
982}
983
984// --- If a client wants to transfer stuff of a given module from the current state to another Lane, the module must be required
985// with lanes.require, which will call the regular 'require', then populate the lookup database in the source lane
986// module = lanes.require( "modname")
987// upvalue[1]: _G.require
988LUAG_FUNC( require)
989{
990 char const* name = lua_tostring( L, 1);
991 int const nargs = lua_gettop( L);
992 DEBUGSPEW_CODE( Universe* U = universe_get( L));
993 STACK_CHECK( L, 0);
994 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lanes.require %s BEGIN\n" INDENT_END, name));
995 DEBUGSPEW_CODE( ++ U->debugspew_indent_depth);
996 lua_pushvalue( L, lua_upvalueindex(1)); // "name" require
997 lua_insert( L, 1); // require "name"
998 lua_call( L, nargs, 1); // module
999 populate_func_lookup_table( L, -1, name);
1000 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lanes.require %s END\n" INDENT_END, name));
1001 DEBUGSPEW_CODE( -- U->debugspew_indent_depth);
1002 STACK_END( L, 0);
1003 return 1;
1004}
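// Usage sketch (illustrative; assuming the function is exposed as 'lanes.require' like
// the comment above describes, and using 'lfs' purely as an example module name):
//
//     local lfs = lanes.require "lfs"  -- requires the module and makes its functions transferable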
1005
1006
1007// --- If a client wants to transfer stuff of a previously required module from the current state to another Lane, the module must be registered
1008// to populate the lookup database in the source lane (and in the destination too, of course)
1009// lanes.register( "modname", module)
1010LUAG_FUNC( register)
1011{
1012 char const* name = luaL_checkstring( L, 1);
1013 int const mod_type = lua_type( L, 2);
1014 // ignore extra parameters, just in case
1015 lua_settop( L, 2);
1016 luaL_argcheck( L, (mod_type == LUA_TTABLE) || (mod_type == LUA_TFUNCTION), 2, "unexpected module type");
1017 DEBUGSPEW_CODE( Universe* U = universe_get( L));
1018 STACK_CHECK( L, 0); // "name" mod_table
1019 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lanes.register %s BEGIN\n" INDENT_END, name));
1020 DEBUGSPEW_CODE( ++ U->debugspew_indent_depth);
1021 populate_func_lookup_table( L, -1, name);
1022 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lanes.register %s END\n" INDENT_END, name));
1023 DEBUGSPEW_CODE( -- U->debugspew_indent_depth);
1024 STACK_END( L, 0);
1025 return 0;
1026}
1027
1028// crc64/we of string "GCCB_KEY" generated at http://www.nitrxgen.net/hashgen/
1029static DECLARE_CONST_UNIQUE_KEY( GCCB_KEY, 0xcfb1f046ef074e88);
1030
1031//---
1032// lane_ud = lane_new( function
1033// , [libs_str]
1034// , [priority_int=0]
1035// , [globals_tbl]
1036// , [package_tbl]
1037// , [required_tbl]
1038// , [gc_cb_func]
1039// [, ... args ...])
1040//
1041// Upvalues: metatable to use for 'lane_ud'
1042//
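// Call-shape sketch (illustrative only; lane_new() is normally reached through the
// factories generated in lanes.lua rather than called directly, so the direct call below
// is an assumption made purely to spell out the 7 fixed arguments before the lane arguments):
//
//     lane_ud = lane_new( body_func, "base,table", 0, nil, nil, nil, nil, "arg1", 2)
//     --        function / libs_str / priority / globals / package / required / gc_cb / args...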
1043LUAG_FUNC( lane_new)
1044{
1045 lua_State* L2;
1046 Lane* s;
1047 Lane** ud;
1048
1049 char const* libs_str = lua_tostring( L, 2);
1050 bool_t const have_priority = !lua_isnoneornil( L, 3);
1051 int const priority = have_priority ? (int) lua_tointeger( L, 3) : THREAD_PRIO_DEFAULT;
1052 uint_t const globals_idx = lua_isnoneornil( L, 4) ? 0 : 4;
1053 uint_t const package_idx = lua_isnoneornil( L, 5) ? 0 : 5;
1054 uint_t const required_idx = lua_isnoneornil( L, 6) ? 0 : 6;
1055 uint_t const gc_cb_idx = lua_isnoneornil( L, 7) ? 0 : 7;
1056
1057#define FIXED_ARGS 7
1058 int const nargs = lua_gettop(L) - FIXED_ARGS;
1059 Universe* const U = universe_get( L);
1060 ASSERT_L( nargs >= 0);
1061
1062 // public Lanes API accepts a generic range -3/+3
1063 // that will be remapped into the platform-specific scheduler priority scheme
1064 // On some platforms, -3 is equivalent to -2 and +3 to +2
1065 if( have_priority && (priority < THREAD_PRIO_MIN || priority > THREAD_PRIO_MAX))
1066 {
1067 return luaL_error( L, "Priority out of range: %d..+%d (%d)", THREAD_PRIO_MIN, THREAD_PRIO_MAX, priority);
1068 }
1069
1070 /* --- Create and prepare the sub state --- */
1071 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: setup\n" INDENT_END));
1072 DEBUGSPEW_CODE( ++ U->debugspew_indent_depth);
1073
1074 // populate with selected libraries at the same time
1075 L2 = luaG_newstate( U, L, libs_str); // L // L2
1076
1077 STACK_GROW( L2, nargs + 3); //
1078 STACK_CHECK( L2, 0);
1079
1080 STACK_GROW( L, 3); // func libs priority globals package required gc_cb [... args ...]
1081 STACK_CHECK( L, 0);
1082
1083 // give a default "Lua" name to the thread to see VM name in Decoda debugger
1084 lua_pushfstring( L2, "Lane #%p", L2); // "..."
1085 lua_setglobal( L2, "decoda_name"); //
1086 ASSERT_L( lua_gettop( L2) == 0);
1087
1088 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: update 'package'\n" INDENT_END));
1089 // package
1090 if( package_idx != 0)
1091 {
 1092 // when copying with mode eLM_LaneBody, should raise an error in case of problem, not leave it on the stack
1093 (void) luaG_inter_copy_package( U, L, L2, package_idx, eLM_LaneBody);
1094 }
1095
 1096 // modules to require in the target lane *before* the function is transferred!
1097
1098 if( required_idx != 0)
1099 {
1100 int nbRequired = 1;
1101 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: require 'required' list\n" INDENT_END));
1102 DEBUGSPEW_CODE( ++ U->debugspew_indent_depth);
1103 // should not happen, was checked in lanes.lua before calling lane_new()
1104 if( lua_type( L, required_idx) != LUA_TTABLE)
1105 {
1106 return luaL_error( L, "expected required module list as a table, got %s", luaL_typename( L, required_idx));
1107 }
1108
1109 lua_pushnil( L); // func libs priority globals package required gc_cb [... args ...] nil
1110 while( lua_next( L, required_idx) != 0) // func libs priority globals package required gc_cb [... args ...] n "modname"
1111 {
1112 if( lua_type( L, -1) != LUA_TSTRING || lua_type( L, -2) != LUA_TNUMBER || lua_tonumber( L, -2) != nbRequired)
1113 {
1114 return luaL_error( L, "required module list should be a list of strings");
1115 }
1116 else
1117 {
1118 // require the module in the target state, and populate the lookup table there too
1119 size_t len;
1120 char const* name = lua_tolstring( L, -1, &len);
1121 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: require '%s'\n" INDENT_END, name));
1122
1123 // require the module in the target lane
1124 lua_getglobal( L2, "require"); // require()?
1125 if( lua_isnil( L2, -1))
1126 {
1127 lua_pop( L2, 1); //
1128 luaL_error( L, "cannot pre-require modules without loading 'package' library first");
1129 }
1130 else
1131 {
1132 lua_pushlstring( L2, name, len); // require() name
1133 if( lua_pcall( L2, 1, 1, 0) != LUA_OK) // ret/errcode
1134 {
1135 // propagate error to main state if any
1136 luaG_inter_move( U, L2, L, 1, eLM_LaneBody); // func libs priority globals package required gc_cb [... args ...] n "modname" error
1137 return lua_error( L);
1138 }
1139 // after requiring the module, register the functions it exported in our name<->function database
1140 populate_func_lookup_table( L2, -1, name);
1141 lua_pop( L2, 1); //
1142 }
1143 }
1144 lua_pop( L, 1); // func libs priority globals package required gc_cb [... args ...] n
1145 ++ nbRequired;
1146 } // func libs priority globals package required gc_cb [... args ...]
1147 DEBUGSPEW_CODE( -- U->debugspew_indent_depth);
1148 }
1149 STACK_MID( L, 0);
1150 STACK_MID( L2, 0); //
1151
1152 // Appending the specified globals to the global environment
1153 // *after* stdlibs have been loaded and modules required, in case we transfer references to native functions they exposed...
1154 //
1155 if( globals_idx != 0)
1156 {
1157 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: transfer globals\n" INDENT_END));
1158 if( !lua_istable( L, globals_idx))
1159 {
1160 return luaL_error( L, "Expected table, got %s", luaL_typename( L, globals_idx));
1161 }
1162
1163 DEBUGSPEW_CODE( ++ U->debugspew_indent_depth);
1164 lua_pushnil( L); // func libs priority globals package required gc_cb [... args ...] nil
1165 // Lua 5.2 wants us to push the globals table on the stack
1166 lua_pushglobaltable( L2); // _G
1167 while( lua_next( L, globals_idx)) // func libs priority globals package required gc_cb [... args ...] k v
1168 {
1169 luaG_inter_copy( U, L, L2, 2, eLM_LaneBody); // _G k v
1170 // assign it in L2's globals table
1171 lua_rawset( L2, -3); // _G
1172 lua_pop( L, 1); // func libs priority globals package required gc_cb [... args ...] k
1173 } // func libs priority globals package required gc_cb [... args ...]
1174 lua_pop( L2, 1); //
1175
1176 DEBUGSPEW_CODE( -- U->debugspew_indent_depth);
1177 }
1178 STACK_MID( L, 0);
1179 STACK_MID( L2, 0);
1180
1181 // Lane main function
1182 if( lua_type( L, 1) == LUA_TFUNCTION)
1183 {
1184 int res;
1185 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: transfer lane body\n" INDENT_END));
1186 DEBUGSPEW_CODE( ++ U->debugspew_indent_depth);
1187 lua_pushvalue( L, 1); // func libs priority globals package required gc_cb [... args ...] func
1188 res = luaG_inter_move( U, L, L2, 1, eLM_LaneBody); // func libs priority globals package required gc_cb [... args ...] // func
1189 DEBUGSPEW_CODE( -- U->debugspew_indent_depth);
1190 if( res != 0)
1191 {
1192 return luaL_error( L, "tried to copy unsupported types");
1193 }
1194 }
1195 else if( lua_type( L, 1) == LUA_TSTRING)
1196 {
1197 // compile the string
1198 if( luaL_loadstring( L2, lua_tostring( L, 1)) != 0) // func
1199 {
1200 return luaL_error( L, "error when parsing lane function code");
1201 }
1202 }
1203 STACK_MID( L, 0);
1204 STACK_MID( L2, 1);
1205 ASSERT_L( lua_isfunction( L2, 1));
1206
1207 // revive arguments
1208 if( nargs > 0)
1209 {
1210 int res;
1211 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: transfer lane arguments\n" INDENT_END));
1212 DEBUGSPEW_CODE( ++ U->debugspew_indent_depth);
1213 res = luaG_inter_move( U, L, L2, nargs, eLM_LaneBody); // func libs priority globals package required gc_cb // func [... args ...]
1214 DEBUGSPEW_CODE( -- U->debugspew_indent_depth);
1215 if( res != 0)
1216 {
1217 return luaL_error( L, "tried to copy unsupported types");
1218 }
1219 }
1220 STACK_END( L, -nargs);
1221 ASSERT_L( lua_gettop( L) == FIXED_ARGS);
1222 STACK_CHECK( L, 0);
1223 STACK_MID( L2, 1 + nargs);
1224
1225 // 's' is allocated from heap, not Lua, since its life span may surpass the handle's (if free running thread)
1226 //
1227 // a Lane full userdata needs a single uservalue
1228 ud = (Lane**) lua_newuserdatauv( L, sizeof( Lane*), 1); // func libs priority globals package required gc_cb lane
1229 {
1230 AllocatorDefinition* const allocD = &U->internal_allocator;
1231 s = *ud = (Lane*) allocD->allocF(allocD->allocUD, NULL, 0, sizeof(Lane));
1232 }
1233 if( s == NULL)
1234 {
1235 return luaL_error( L, "could not create lane: out of memory");
1236 }
1237
1238 s->L = L2;
1239 s->U = U;
1240 s->status = PENDING;
1241 s->waiting_on = NULL;
1242 s->debug_name = "<unnamed>";
1243 s->cancel_request = CANCEL_NONE;
1244
1245#if THREADWAIT_METHOD == THREADWAIT_CONDVAR
1246 MUTEX_INIT( &s->done_lock);
1247 SIGNAL_INIT( &s->done_signal);
1248#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
1249 s->mstatus = NORMAL;
1250 s->selfdestruct_next = NULL;
1251#if HAVE_LANE_TRACKING()
1252 s->tracking_next = NULL;
1253 if( s->U->tracking_first)
1254 {
1255 tracking_add( s);
1256 }
1257#endif // HAVE_LANE_TRACKING()
1258
1259 // Set metatable for the userdata
1260 //
1261 lua_pushvalue( L, lua_upvalueindex( 1)); // func libs priority globals package required gc_cb lane mt
1262 lua_setmetatable( L, -2); // func libs priority globals package required gc_cb lane
1263 STACK_MID( L, 1);
1264
1265 // Create uservalue for the userdata
1266 // (this is where lane body return values will be stored when the handle is indexed by a numeric key)
 1267 lua_newtable( L); // func libs priority globals package required gc_cb lane uv
1268
1269 // Store the gc_cb callback in the uservalue
1270 if( gc_cb_idx > 0)
1271 {
1272 push_unique_key( L, GCCB_KEY); // func libs priority globals package required gc_cb lane uv k
1273 lua_pushvalue( L, gc_cb_idx); // func libs priority globals package required gc_cb lane uv k gc_cb
1274 lua_rawset( L, -3); // func libs priority globals package required gc_cb lane uv
1275 }
1276
1277 lua_setiuservalue( L, -2, 1); // func libs priority globals package required gc_cb lane
1278
1279 // Store 's' in the lane's registry, for 'cancel_test()' (we do cancel tests at pending send/receive).
1280 REGISTRY_SET( L2, CANCEL_TEST_KEY, lua_pushlightuserdata( L2, s)); // func [... args ...]
1281
1282 STACK_END( L, 1);
1283 STACK_END( L2, 1 + nargs);
1284
1285 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: launching thread\n" INDENT_END));
1286 THREAD_CREATE( &s->thread, lane_main, s, priority);
1287
1288 DEBUGSPEW_CODE( -- U->debugspew_indent_depth);
1289 return 1;
1290}
1291
1292
1293//---
1294// = thread_gc( lane_ud )
1295//
1296// Cleanup for a thread userdata. If the thread is still executing, leave it
1297// alive as a free-running thread (will clean up itself).
1298//
1299// * Why NOT cancel/kill a loose thread:
1300//
1301// At least the timer system uses a free-running thread, so they do have their use;
1302// and cancelling/killing threads at gc time is not very nice either
1303// (it would easily cause waits during the gc cycle, which we don't want).
1304//
1305LUAG_FUNC( thread_gc)
1306{
1307 bool_t have_gc_cb = FALSE;
1308 Lane* s = lua_toLane( L, 1); // ud
1309
 1310 // is there a gc callback?
1311 lua_getiuservalue( L, 1, 1); // ud uservalue
1312 push_unique_key( L, GCCB_KEY); // ud uservalue __gc
1313 lua_rawget( L, -2); // ud uservalue gc_cb|nil
1314 if( !lua_isnil( L, -1))
1315 {
1316 lua_remove( L, -2); // ud gc_cb|nil
1317 lua_pushstring( L, s->debug_name); // ud gc_cb name
1318 have_gc_cb = TRUE;
1319 }
1320 else
1321 {
1322 lua_pop( L, 2); // ud
1323 }
1324
1325 // We can read 's->status' without locks, but not wait for it
1326 // test KILLED state first, as it doesn't need to enter the selfdestruct chain
1327 if( s->mstatus == KILLED)
1328 {
1329 // Make sure a kill has proceeded, before cleaning up the data structure.
1330 //
1331 // NO lua_close() in this case because we don't know where execution of the state was interrupted
1332 DEBUGSPEW_CODE( fprintf( stderr, "** Joining with a killed thread (needs testing) **"));
1333 // make sure the thread is no longer running, just like thread_join()
1334 if(! THREAD_ISNULL( s->thread))
1335 {
1336 THREAD_WAIT( &s->thread, -1, &s->done_signal, &s->done_lock, &s->status);
1337 }
1338 if( s->status >= DONE && s->L)
1339 {
1340 // we know the thread was killed while the Lua VM was not doing anything: we should be able to close it without crashing
1341 // now, thread_cancel() will not forcefully kill a lane with s->status >= DONE, so I am not sure it can ever happen
1342 lua_close( s->L);
1343 s->L = 0;
1344 // just in case, but s will be freed soon so...
1345 s->debug_name = "<gc>";
1346 }
1347 DEBUGSPEW_CODE( fprintf( stderr, "** Joined ok **"));
1348 }
1349 else if( s->status < DONE)
1350 {
1351 // still running: will have to be cleaned up later
1352 selfdestruct_add( s);
1353 assert( s->selfdestruct_next);
1354 if( have_gc_cb)
1355 {
1356 lua_pushliteral( L, "selfdestruct"); // ud gc_cb name status
1357 lua_call( L, 2, 0); // ud
1358 }
1359 return 0;
1360 }
1361 else if( s->L)
1362 {
1363 // no longer accessing the Lua VM: we can close right now
1364 lua_close( s->L);
1365 s->L = 0;
1366 // just in case, but s will be freed soon so...
1367 s->debug_name = "<gc>";
1368 }
1369
1370 // Clean up after a (finished) thread
1371 lane_cleanup( s);
1372
1373 // do this after lane cleanup in case the callback triggers an error
1374 if( have_gc_cb)
1375 {
1376 lua_pushliteral( L, "closed"); // ud gc_cb name status
1377 lua_call( L, 2, 0); // ud
1378 }
1379 return 0;
1380}
1381
1382//---
1383// str= thread_status( lane )
1384//
1385// Returns: "pending" not started yet
1386// -> "running" started, doing its work..
1387// <-> "waiting" blocked in a receive()
1388// -> "done" finished, results are there
1389// / "error" finished at an error, error value is there
1390// / "cancelled" execution cancelled by M (state gone)
1391//
1392static char const * thread_status_string( Lane* s)
1393{
1394 enum e_status st = s->status; // read just once (volatile)
1395 char const* str =
1396 (s->mstatus == KILLED) ? "killed" : // new to v3.3.0!
1397 (st == PENDING) ? "pending" :
1398 (st == RUNNING) ? "running" : // like in 'co.status()'
1399 (st == WAITING) ? "waiting" :
1400 (st == DONE) ? "done" :
1401 (st == ERROR_ST) ? "error" :
1402 (st == CANCELLED) ? "cancelled" : NULL;
1403 return str;
1404}
1405
1406int push_thread_status( lua_State* L, Lane* s)
1407{
1408 char const* const str = thread_status_string( s);
1409 ASSERT_L( str);
1410
1411 lua_pushstring( L, str);
1412 return 1;
1413}
1414
1415
1416//---
1417// [...] | [nil, err_any, stack_tbl]= thread_join( lane_ud [, wait_secs=-1] )
1418//
1419// timeout: returns nil
1420// done: returns return values (0..N)
1421// error: returns nil + error value [+ stack table]
1422// cancelled: returns nil
1423//
1424LUAG_FUNC( thread_join)
1425{
1426 Lane* const s = lua_toLane( L, 1);
1427 double wait_secs = luaL_optnumber( L, 2, -1.0);
1428 lua_State* L2 = s->L;
1429 int ret;
1430 bool_t done = THREAD_ISNULL( s->thread) || THREAD_WAIT( &s->thread, wait_secs, &s->done_signal, &s->done_lock, &s->status);
1431 if( !done || !L2)
1432 {
1433 STACK_GROW( L, 2);
1434 lua_pushnil( L);
1435 lua_pushliteral( L, "timeout");
1436 return 2;
1437 }
1438
1439 STACK_CHECK( L, 0);
1440 // Thread is DONE/ERROR_ST/CANCELLED; all ours now
1441
1442 if( s->mstatus == KILLED) // OS thread was killed if thread_cancel was forced
1443 {
1444 // in that case, even if the thread was killed while DONE/ERROR_ST/CANCELLED, ignore regular return values
1445 STACK_GROW( L, 2);
1446 lua_pushnil( L);
1447 lua_pushliteral( L, "killed");
1448 ret = 2;
1449 }
1450 else
1451 {
1452 Universe* U = universe_get( L);
 1453 // debug_name is a pointer to a string possibly interned in the lane's state, which no longer exists when that state is closed
1454 // so store it in the userdata uservalue at a key that can't possibly collide
1455 securize_debug_threadname( L, s);
1456 switch( s->status)
1457 {
1458 case DONE:
1459 {
1460 uint_t n = lua_gettop( L2); // whole L2 stack
1461 if( (n > 0) && (luaG_inter_move( U, L2, L, n, eLM_LaneBody) != 0))
1462 {
1463 return luaL_error( L, "tried to copy unsupported types");
1464 }
1465 ret = n;
1466 }
1467 break;
1468
1469 case ERROR_ST:
1470 {
1471 int const n = lua_gettop( L2);
1472 STACK_GROW( L, 3);
1473 lua_pushnil( L);
1474 // even when ERROR_FULL_STACK, if the error is not LUA_ERRRUN, the handler wasn't called, and we only have 1 error message on the stack ...
1475 if( luaG_inter_move( U, L2, L, n, eLM_LaneBody) != 0) // nil "err" [trace]
1476 {
1477 return luaL_error( L, "tried to copy unsupported types: %s", lua_tostring( L, -n));
1478 }
1479 ret = 1 + n;
1480 }
1481 break;
1482
1483 case CANCELLED:
1484 ret = 0;
1485 break;
1486
1487 default:
1488 DEBUGSPEW_CODE( fprintf( stderr, "Status: %d\n", s->status));
1489 ASSERT_L( FALSE);
1490 ret = 0;
1491 }
1492 lua_close( L2);
1493 }
1494 s->L = 0;
1495 STACK_END( L, ret);
1496 return ret;
1497}
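// Result shapes (illustrative summary of the switch above, as seen from Lua through a
// lane handle 'h'; 'join' is one of the two methods resolved by thread_index() below):
//
//     local a, b = h:join( 1.0)          -- DONE: the lane body's return values
//     local _, err, trace = h:join()     -- ERROR_ST: nil, error value [, stack trace table]
//     -- timeout -> nil, "timeout" ; forcefully killed -> nil, "killed" ; cancelled -> no values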
1498
1499
1500//---
1501// thread_index( ud, key) -> value
1502//
1503// If key is found in the environment, return it
1504// If key is numeric, wait until the thread returns and populate the environment with the return values
1505// If the return values signal an error, propagate it
1506// If key is "status" return the thread status
1507// Else raise an error
1508LUAG_FUNC( thread_index)
1509{
1510 int const UD = 1;
1511 int const KEY = 2;
1512 int const USR = 3;
1513 Lane* const s = lua_toLane( L, UD);
1514 ASSERT_L( lua_gettop( L) == 2);
1515
1516 STACK_GROW( L, 8); // up to 8 positions are needed in case of error propagation
1517
1518 // If key is numeric, wait until the thread returns and populate the environment with the return values
1519 if( lua_type( L, KEY) == LUA_TNUMBER)
1520 {
1521 // first, check that we don't already have an environment that holds the requested value
1522 {
1523 // If key is found in the uservalue, return it
1524 lua_getiuservalue( L, UD, 1);
1525 lua_pushvalue( L, KEY);
1526 lua_rawget( L, USR);
1527 if( !lua_isnil( L, -1))
1528 {
1529 return 1;
1530 }
1531 lua_pop( L, 1);
1532 }
1533 {
1534 // check if we already fetched the values from the thread or not
1535 bool_t fetched;
1536 lua_Integer key = lua_tointeger( L, KEY);
1537 lua_pushinteger( L, 0);
1538 lua_rawget( L, USR);
1539 fetched = !lua_isnil( L, -1);
1540 lua_pop( L, 1); // back to our 2 args + uservalue on the stack
1541 if( !fetched)
1542 {
1543 lua_pushinteger( L, 0);
1544 lua_pushboolean( L, 1);
1545 lua_rawset( L, USR);
1546 // wait until thread has completed
1547 lua_pushcfunction( L, LG_thread_join);
1548 lua_pushvalue( L, UD);
1549 lua_call( L, 1, LUA_MULTRET); // all return values are on the stack, at slots 4+
1550 switch( s->status)
1551 {
1552 default:
1553 if( s->mstatus != KILLED)
1554 {
1555 // this is an internal error, we probably never get here
1556 lua_settop( L, 0);
1557 lua_pushliteral( L, "Unexpected status: ");
1558 lua_pushstring( L, thread_status_string( s));
1559 lua_concat( L, 2);
1560 lua_error( L);
1561 break;
1562 }
1563 // fall through if we are killed, as we got nil, "killed" on the stack
1564
1565 case DONE: // got regular return values
1566 {
1567 int i, nvalues = lua_gettop( L) - 3;
1568 for( i = nvalues; i > 0; -- i)
1569 {
1570 // pop the last element of the stack, to store it in the uservalue at its proper index
1571 lua_rawseti( L, USR, i);
1572 }
1573 }
1574 break;
1575
1576 case ERROR_ST: // got 3 values: nil, errstring, callstack table
1577 // me[-2] could carry the stack table, but even
1578 // me[-1] is rather unnecessary (and undocumented);
1579 // use ':join()' instead. --AKa 22-Jan-2009
1580 ASSERT_L( lua_isnil( L, 4) && !lua_isnil( L, 5) && lua_istable( L, 6));
1581 // store errstring at key -1
1582 lua_pushnumber( L, -1);
1583 lua_pushvalue( L, 5);
1584 lua_rawset( L, USR);
1585 break;
1586
1587 case CANCELLED:
1588 // do nothing
1589 break;
1590 }
1591 }
1592 lua_settop( L, 3); // UD KEY ENV
1593 if( key != -1)
1594 {
1595 lua_pushnumber( L, -1); // UD KEY ENV -1
1596 lua_rawget( L, USR); // UD KEY ENV "error"
1597 if( !lua_isnil( L, -1)) // an error was stored
1598 {
1599 // Note: Lua 5.1 interpreter is not prepared to show
1600 // non-string errors, so we use 'tostring()' here
1601 // to get meaningful output. --AKa 22-Jan-2009
1602 //
1603 // Also, the stack dump we get is no good; it only
1604 // lists our internal Lanes functions. There seems
1605 // to be no way to switch it off, though.
1606 //
1607 // Level 3 should show the line where 'h[x]' was read
1608 // but this only seems to work for string messages
1609 // (Lua 5.1.4). No idea, why. --AKa 22-Jan-2009
1610 lua_getmetatable( L, UD); // UD KEY ENV "error" mt
1611 lua_getfield( L, -1, "cached_error"); // UD KEY ENV "error" mt error()
1612 lua_getfield( L, -2, "cached_tostring"); // UD KEY ENV "error" mt error() tostring()
1613 lua_pushvalue( L, 4); // UD KEY ENV "error" mt error() tostring() "error"
1614 lua_call( L, 1, 1); // tostring( errstring) -- just in case // UD KEY ENV "error" mt error() "error"
1615 lua_pushinteger( L, 3); // UD KEY ENV "error" mt error() "error" 3
1616 lua_call( L, 2, 0); // error( tostring( errstring), 3) // UD KEY ENV "error" mt
1617 }
1618 else
1619 {
1620 lua_pop( L, 1); // back to our 3 arguments on the stack
1621 }
1622 }
1623 lua_rawgeti( L, USR, (int)key);
1624 }
1625 return 1;
1626 }
1627 if( lua_type( L, KEY) == LUA_TSTRING)
1628 {
1629 char const * const keystr = lua_tostring( L, KEY);
1630 lua_settop( L, 2); // keep only our original arguments on the stack
1631 if( strcmp( keystr, "status") == 0)
1632 {
1633 return push_thread_status( L, s); // push the string representing the status
1634 }
1635 // return UD.metatable[key]
1636 lua_getmetatable( L, UD); // UD KEY mt
1637 lua_replace( L, -3); // mt KEY
1638 lua_rawget( L, -2); // mt value
1639		// only "cancel", "join" and "get_debug_threadname" are registered as functions; any other string will raise an error
1640 if( lua_iscfunction( L, -1))
1641 {
1642 return 1;
1643 }
1644 return luaL_error( L, "can't index a lane with '%s'", keystr);
1645 }
1646 // unknown key
1647 lua_getmetatable( L, UD);
1648 lua_getfield( L, -1, "cached_error");
1649 lua_pushliteral( L, "Unknown key: ");
1650 lua_pushvalue( L, KEY);
1651 lua_concat( L, 2);
1652 lua_call( L, 1, 0); // error( "Unknown key: " .. key) -> doesn't return
1653 return 0;
1654}
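// Lua-side behaviour sketch (illustrative only; relies on the public API exposed by lanes.lua):
//
//   local h = lanes.gen( function() return 10, 20 end)()
//   print( h[1], h[2])  --> 10  20   -- numeric indexing waits for the lane and caches its results
//   print( h.status)    --> "done"   -- string keys: "status", "join", "cancel", "get_debug_threadname"
//
// If the lane body raised an error, reading h[1] re-raises that error in the caller (see above).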
1655
1656#if HAVE_LANE_TRACKING()
1657//---
1658// threads() -> {}|nil
1659//
1660// Return a list of all known lanes
1661LUAG_FUNC( threads)
1662{
1663 int const top = lua_gettop( L);
1664 Universe* U = universe_get( L);
1665
1666 // List _all_ still running threads
1667 //
1668 MUTEX_LOCK( &U->tracking_cs);
1669 if( U->tracking_first && U->tracking_first != TRACKING_END)
1670 {
1671 Lane* s = U->tracking_first;
1672 int index = 0;
1673 lua_newtable( L); // {}
1674 while( s != TRACKING_END)
1675 {
1676 // insert a { name, status } tuple, so that several lanes with the same name can't clobber each other
1677 lua_newtable( L); // {} {}
1678 lua_pushstring( L, s->debug_name); // {} {} "name"
1679 lua_setfield( L, -2, "name"); // {} {}
1680 push_thread_status( L, s); // {} {} "status"
1681 lua_setfield( L, -2, "status"); // {} {}
1682 lua_rawseti( L, -2, ++ index); // {}
1683 s = s->tracking_next;
1684 }
1685 }
1686 MUTEX_UNLOCK( &U->tracking_cs);
1687 return lua_gettop( L) - top; // 0 or 1
1688}
1689#endif // HAVE_LANE_TRACKING()
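// Usage sketch (Lua side, illustrative): threads() is only registered when Lanes was
// configured with track_lanes enabled, and it returns no value when nothing is tracked.
//
//   for _, t in ipairs( lanes.threads() or {}) do
//       print( t.name, t.status)
//   end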
1690
1691/*
1692 * ###############################################################################################
1693 * ######################################## Timer support ########################################
1694 * ###############################################################################################
1695 */
1696
1697/*
1698* secs= now_secs()
1699*
1700* Returns the current time, as seconds (millisecond resolution).
1701*/
1702LUAG_FUNC( now_secs )
1703{
1704 lua_pushnumber( L, now_secs() );
1705 return 1;
1706}
1707
1708/*
1709* wakeup_at_secs= wakeup_conv( date_tbl )
1710*/
1711LUAG_FUNC( wakeup_conv )
1712{
1713 int year, month, day, hour, min, sec, isdst;
1714 struct tm t;
1715 memset( &t, 0, sizeof( t));
1716 //
1717 // .year (four digits)
1718 // .month (1..12)
1719 // .day (1..31)
1720 // .hour (0..23)
1721 // .min (0..59)
1722 // .sec (0..61)
1723 // .yday (day of the year)
1724 // .isdst (daylight saving on/off)
1725
1726 STACK_CHECK( L, 0);
1727 lua_getfield( L, 1, "year" ); year= (int)lua_tointeger(L,-1); lua_pop(L,1);
1728 lua_getfield( L, 1, "month" ); month= (int)lua_tointeger(L,-1); lua_pop(L,1);
1729 lua_getfield( L, 1, "day" ); day= (int)lua_tointeger(L,-1); lua_pop(L,1);
1730 lua_getfield( L, 1, "hour" ); hour= (int)lua_tointeger(L,-1); lua_pop(L,1);
1731 lua_getfield( L, 1, "min" ); min= (int)lua_tointeger(L,-1); lua_pop(L,1);
1732 lua_getfield( L, 1, "sec" ); sec= (int)lua_tointeger(L,-1); lua_pop(L,1);
1733
1734	// If the Lua table has '.isdst', we trust that. If it does not, we'll let
1735	// 'mktime' decide whether the time falls within DST (value -1).
1736 //
1737 lua_getfield( L, 1, "isdst" );
1738 isdst= lua_isboolean(L,-1) ? lua_toboolean(L,-1) : -1;
1739 lua_pop(L,1);
1740 STACK_END( L, 0);
1741
1742 t.tm_year= year-1900;
1743 t.tm_mon= month-1; // 0..11
1744 t.tm_mday= day; // 1..31
1745 t.tm_hour= hour; // 0..23
1746 t.tm_min= min; // 0..59
1747 t.tm_sec= sec; // 0..60
1748 t.tm_isdst= isdst; // 0/1/negative
1749
1750 lua_pushnumber( L, (double) mktime( &t)); // ms=0
1751 return 1;
1752}
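// Usage sketch (Lua side, illustrative): convert an os.date()-style table into an
// absolute wakeup time in seconds, e.g. to feed a timer deadline:
//
//   local core = require "lanes.core"
//   local t = os.date( "*t")            -- current local time, as a table
//   t.hour, t.min, t.sec = 23, 59, 0    -- tonight at 23:59:00
//   local wakeup_at = core.wakeup_conv( t)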
1753
1754/*
1755 * ###############################################################################################
1756 * ######################################## Module linkage #######################################
1757 * ###############################################################################################
1758 */
1759
1760extern int LG_linda( lua_State* L);
1761static const struct luaL_Reg lanes_functions [] = {
1762 {"linda", LG_linda},
1763 {"now_secs", LG_now_secs},
1764 {"wakeup_conv", LG_wakeup_conv},
1765 {"set_thread_priority", LG_set_thread_priority},
1766 {"set_thread_affinity", LG_set_thread_affinity},
1767 {"nameof", luaG_nameof},
1768 {"register", LG_register},
1769 {"set_singlethreaded", LG_set_singlethreaded},
1770 {NULL, NULL}
1771};
1772
1773/*
1774 * One-time initializations
1775 * (process-wide, performed exactly once; does not touch any Lua state)
1776 * called from LG_configure() while holding the initialization lock
1777 */
1778static void init_once_LOCKED( void)
1779{
1780#if (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC)
1781 now_secs(); // initialize 'now_secs()' internal offset
1782#endif
1783
1784#if (defined PLATFORM_OSX) && (defined _UTILBINDTHREADTOCPU)
1785 chudInitialize();
1786#endif
1787
1788 //---
1789 // Linux needs SCHED_RR to change thread priorities, and that is only
1790 // allowed for sudo'ers. SCHED_OTHER (default) has no priorities.
1791 // SCHED_OTHER threads are always lower priority than SCHED_RR.
1792 //
1793	// ^-- those constraints apply to the 2.6 kernel. If they are ever
1794	// relaxed in the future (wishful thinking), non-sudo priorities
1795	// could be enabled on Linux as well.
1796 //
1797#ifdef PLATFORM_LINUX
1798	sudo = (geteuid() == 0); // are we running as root?
1799
1800 // If lower priorities (-2..-1) are wanted, we need to lift the main
1801 // thread to SCHED_RR and 50 (medium) level. Otherwise, we're always below
1802 // the launched threads (even -2).
1803 //
1804#ifdef LINUX_SCHED_RR
1805 if( sudo)
1806 {
1807 struct sched_param sp;
1808 sp.sched_priority = _PRIO_0;
1809 PT_CALL( pthread_setschedparam( pthread_self(), SCHED_RR, &sp));
1810 }
1811#endif // LINUX_SCHED_RR
1812#endif // PLATFORM_LINUX
1813}
1814
1815static volatile long s_initCount = 0;
1816
1817// upvalue 1: module name
1818// upvalue 2: module table
1819// param 1: settings table
1820LUAG_FUNC( configure)
1821{
1822 Universe* U = universe_get( L);
1823 bool_t const from_master_state = (U == NULL);
1824 char const* name = luaL_checkstring( L, lua_upvalueindex( 1));
1825 _ASSERT_L( L, lua_type( L, 1) == LUA_TTABLE);
1826
1827 /*
1828 ** Making one-time initializations.
1829 **
1830 ** When the host application is single-threaded (and all threading happens via Lanes)
1831 ** there is no problem. But if the host is multithreaded, we need to lock around the
1832 ** initializations.
1833 */
1834#if THREADAPI == THREADAPI_WINDOWS
1835 {
1836 static volatile int /*bool*/ go_ahead; // = 0
1837 if( InterlockedCompareExchange( &s_initCount, 1, 0) == 0)
1838 {
1839 init_once_LOCKED();
1840 go_ahead = 1; // let others pass
1841 }
1842 else
1843 {
1844 while( !go_ahead) { Sleep(1); } // changes threads
1845 }
1846 }
1847#else // THREADAPI == THREADAPI_PTHREAD
1848 if( s_initCount == 0)
1849 {
1850 static pthread_mutex_t my_lock = PTHREAD_MUTEX_INITIALIZER;
1851 pthread_mutex_lock( &my_lock);
1852 {
1853 // Recheck now that we're within the lock
1854 //
1855 if( s_initCount == 0)
1856 {
1857 init_once_LOCKED();
1858 s_initCount = 1;
1859 }
1860 }
1861 pthread_mutex_unlock( &my_lock);
1862 }
1863#endif // THREADAPI == THREADAPI_PTHREAD
1864
1865 STACK_GROW( L, 4);
1866 STACK_CHECK_ABS( L, 1); // settings
1867
1868 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: lanes.configure() BEGIN\n" INDENT_END, L));
1869 DEBUGSPEW_CODE( if( U) ++ U->debugspew_indent_depth);
1870
1871 if( U == NULL)
1872 {
1873 U = universe_create( L); // settings universe
1874 DEBUGSPEW_CODE( ++ U->debugspew_indent_depth);
1875 lua_newtable( L); // settings universe mt
1876 lua_getfield( L, 1, "shutdown_timeout"); // settings universe mt shutdown_timeout
1877 lua_pushcclosure( L, selfdestruct_gc, 1); // settings universe mt selfdestruct_gc
1878 lua_setfield( L, -2, "__gc"); // settings universe mt
1879 lua_setmetatable( L, -2); // settings universe
1880 lua_pop( L, 1); // settings
1881 lua_getfield( L, 1, "verbose_errors"); // settings verbose_errors
1882 U->verboseErrors = lua_toboolean( L, -1);
1883 lua_pop( L, 1); // settings
1884 lua_getfield( L, 1, "demote_full_userdata"); // settings demote_full_userdata
1885 U->demoteFullUserdata = lua_toboolean( L, -1);
1886 lua_pop( L, 1); // settings
1887#if HAVE_LANE_TRACKING()
1888 MUTEX_INIT( &U->tracking_cs);
1889 lua_getfield( L, 1, "track_lanes"); // settings track_lanes
1890 U->tracking_first = lua_toboolean( L, -1) ? TRACKING_END : NULL;
1891 lua_pop( L, 1); // settings
1892#endif // HAVE_LANE_TRACKING()
1893 // Linked chains handling
1894 MUTEX_INIT( &U->selfdestruct_cs);
1895 MUTEX_RECURSIVE_INIT( &U->require_cs);
1896 // Locks for 'tools.c' inc/dec counters
1897 MUTEX_INIT( &U->deep_lock);
1898 MUTEX_INIT( &U->mtid_lock);
1899 U->selfdestruct_first = SELFDESTRUCT_END;
1900 initialize_allocator_function( U, L);
1901 initialize_on_state_create( U, L);
1902 init_keepers( U, L);
1903 STACK_MID( L, 1);
1904
1905 // Initialize 'timer_deep'; a common Linda object shared by all states
1906 lua_pushcfunction( L, LG_linda); // settings lanes.linda
1907 lua_pushliteral( L, "lanes-timer"); // settings lanes.linda "lanes-timer"
1908 lua_call( L, 1, 1); // settings linda
1909 STACK_MID( L, 2);
1910
1911		// The proxy userdata's contents are just a 'DeepPrelude*' pointer
1912 U->timer_deep = *(DeepPrelude**) lua_touserdata( L, -1);
1913 // increment refcount so that this linda remains alive as long as the universe exists.
1914 ++ U->timer_deep->refcount;
1915 lua_pop( L, 1); // settings
1916 }
1917 STACK_MID( L, 1);
1918
1919 // Serialize calls to 'require' from now on, also in the primary state
1920 serialize_require( DEBUGSPEW_PARAM_COMMA( U) L);
1921
1922 // Retrieve main module interface table
1923 lua_pushvalue( L, lua_upvalueindex( 2)); // settings M
1924 // remove configure() (this function) from the module interface
1925 lua_pushnil( L); // settings M nil
1926 lua_setfield( L, -2, "configure"); // settings M
1927 // add functions to the module's table
1928 luaG_registerlibfuncs( L, lanes_functions);
1929#if HAVE_LANE_TRACKING()
1930 // register core.threads() only if settings say it should be available
1931 if( U->tracking_first != NULL)
1932 {
1933 lua_pushcfunction( L, LG_threads); // settings M LG_threads()
1934 lua_setfield( L, -2, "threads"); // settings M
1935 }
1936#endif // HAVE_LANE_TRACKING()
1937 STACK_MID( L, 2);
1938
1939 {
1940 char const* errmsg;
1941 errmsg = push_deep_proxy( U, L, (DeepPrelude*) U->timer_deep, 0, eLM_LaneBody); // settings M timer_deep
1942 if( errmsg != NULL)
1943 {
1944			return luaL_error( L, "%s", errmsg); // don't pass errmsg as a format string
1945 }
1946 lua_setfield( L, -2, "timer_gateway"); // settings M
1947 }
1948 STACK_MID( L, 2);
1949
1950 // prepare the metatable for threads
1951	// contains keys: { __gc, __index, __metatable, cached_error, cached_tostring, cancel, join, get_debug_threadname }
1952 //
1953 if( luaL_newmetatable( L, "Lane")) // settings M mt
1954 {
1955 lua_pushcfunction( L, LG_thread_gc); // settings M mt LG_thread_gc
1956 lua_setfield( L, -2, "__gc"); // settings M mt
1957 lua_pushcfunction( L, LG_thread_index); // settings M mt LG_thread_index
1958 lua_setfield( L, -2, "__index"); // settings M mt
1959 lua_getglobal( L, "error"); // settings M mt error
1960 ASSERT_L( lua_isfunction( L, -1));
1961 lua_setfield( L, -2, "cached_error"); // settings M mt
1962 lua_getglobal( L, "tostring"); // settings M mt tostring
1963 ASSERT_L( lua_isfunction( L, -1));
1964 lua_setfield( L, -2, "cached_tostring"); // settings M mt
1965 lua_pushcfunction( L, LG_thread_join); // settings M mt LG_thread_join
1966 lua_setfield( L, -2, "join"); // settings M mt
1967 lua_pushcfunction( L, LG_get_debug_threadname); // settings M mt LG_get_debug_threadname
1968 lua_setfield( L, -2, "get_debug_threadname"); // settings M mt
1969 lua_pushcfunction( L, LG_thread_cancel); // settings M mt LG_thread_cancel
1970 lua_setfield( L, -2, "cancel"); // settings M mt
1971 lua_pushliteral( L, "Lane"); // settings M mt "Lane"
1972 lua_setfield( L, -2, "__metatable"); // settings M mt
1973 }
1974
1975 lua_pushcclosure( L, LG_lane_new, 1); // settings M lane_new
1976 lua_setfield( L, -2, "lane_new"); // settings M
1977
1978 // we can't register 'lanes.require' normally because we want to create an upvalued closure
1979 lua_getglobal( L, "require"); // settings M require
1980 lua_pushcclosure( L, LG_require, 1); // settings M lanes.require
1981 lua_setfield( L, -2, "require"); // settings M
1982
1983 lua_pushfstring(
1984 L, "%d.%d.%d"
1985 , LANES_VERSION_MAJOR, LANES_VERSION_MINOR, LANES_VERSION_PATCH
1986 ); // settings M VERSION
1987 lua_setfield( L, -2, "version"); // settings M
1988
1989 lua_pushinteger(L, THREAD_PRIO_MAX); // settings M THREAD_PRIO_MAX
1990 lua_setfield( L, -2, "max_prio"); // settings M
1991
1992 push_unique_key( L, CANCEL_ERROR); // settings M CANCEL_ERROR
1993 lua_setfield( L, -2, "cancel_error"); // settings M
1994
1995 STACK_MID( L, 2); // reference stack contains only the function argument 'settings'
1996 // we'll need this every time we transfer some C function from/to this state
1997 REGISTRY_SET( L, LOOKUP_REGKEY, lua_newtable( L));
1998 STACK_MID( L, 2);
1999
2000 // register all native functions found in that module in the transferable functions database
2001 // we process it before _G because we don't want to find the module when scanning _G (this would generate longer names)
2002 // for example in package.loaded["lanes.core"].*
2003 populate_func_lookup_table( L, -1, name);
2004 STACK_MID( L, 2);
2005
2006 // record all existing C/JIT-fast functions
2007	// Lua 5.2 no longer has LUA_GLOBALSINDEX: we must push the globals table on the stack ourselves
2008 if( from_master_state)
2009 {
2010 // don't do this when called during the initialization of a new lane,
2011 // because we will do it after on_state_create() is called,
2012 // and we don't want to skip _G because of caching in case globals are created then
2013 lua_pushglobaltable( L); // settings M _G
2014 populate_func_lookup_table( L, -1, NULL);
2015 lua_pop( L, 1); // settings M
2016 }
2017 lua_pop( L, 1); // settings
2018
2019 // set _R[CONFIG_REGKEY] = settings
2020 REGISTRY_SET( L, CONFIG_REGKEY, lua_pushvalue( L, -2)); // -2 because CONFIG_REGKEY is pushed before the value itself
2021 STACK_END( L, 1);
2022 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: lanes.configure() END\n" INDENT_END, L));
2023 DEBUGSPEW_CODE( -- U->debugspew_indent_depth);
2024 // Return the settings table
2025 return 1;
2026}
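// Bootstrap sketch (illustrative): a Lua-side loader is expected to call this closure once
// with a settings table; the keys shown are the ones read above, the values are examples only.
//
//   local core = require "lanes.core"
//   local settings = { verbose_errors = false, track_lanes = false, shutdown_timeout = 0.25 }
//   settings = core.configure and core.configure( settings) or core.settings
//
// Later require "lanes.core" calls find the settings in the registry (CONFIG_REGKEY) and
// run configure() automatically (see luaopen_lanes_core below).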
2027
2028#if defined PLATFORM_WIN32 && !defined NDEBUG
2029#include <signal.h>
2030#include <conio.h>
2031
2032void signal_handler( int signal)
2033{
2034 if( signal == SIGABRT)
2035 {
2036 _cprintf( "caught abnormal termination!");
2037 abort();
2038 }
2039}
2040
2041// helper to get correct callstacks when a Win32 process crashes on 64-bit Windows
2042// don't forget to toggle Debug/Exceptions/Win32 in Visual Studio too!
2043static volatile long s_ecoc_initCount = 0;
2044static volatile int s_ecoc_go_ahead = 0;
2045static void EnableCrashingOnCrashes( void)
2046{
2047 if( InterlockedCompareExchange( &s_ecoc_initCount, 1, 0) == 0)
2048 {
2049 typedef BOOL (WINAPI* tGetPolicy)( LPDWORD lpFlags);
2050 typedef BOOL (WINAPI* tSetPolicy)( DWORD dwFlags);
2051 const DWORD EXCEPTION_SWALLOWING = 0x1;
2052
2053 HMODULE kernel32 = LoadLibraryA("kernel32.dll");
2054 tGetPolicy pGetPolicy = (tGetPolicy)GetProcAddress(kernel32, "GetProcessUserModeExceptionPolicy");
2055 tSetPolicy pSetPolicy = (tSetPolicy)GetProcAddress(kernel32, "SetProcessUserModeExceptionPolicy");
2056 if( pGetPolicy && pSetPolicy)
2057 {
2058 DWORD dwFlags;
2059 if( pGetPolicy( &dwFlags))
2060 {
2061 // Turn off the filter
2062 pSetPolicy( dwFlags & ~EXCEPTION_SWALLOWING);
2063 }
2064 }
2065 //typedef void (* SignalHandlerPointer)( int);
2066 /*SignalHandlerPointer previousHandler =*/ signal( SIGABRT, signal_handler);
2067
2068 s_ecoc_go_ahead = 1; // let others pass
2069 }
2070 else
2071 {
2072 while( !s_ecoc_go_ahead) { Sleep(1); } // changes threads
2073 }
2074}
2075#endif // PLATFORM_WIN32
2076
2077LANES_API int luaopen_lanes_core( lua_State* L)
2078{
2079#if defined PLATFORM_WIN32 && !defined NDEBUG
2080 EnableCrashingOnCrashes();
2081#endif // defined PLATFORM_WIN32 && !defined NDEBUG
2082
2083 STACK_GROW( L, 4);
2084 STACK_CHECK( L, 0);
2085
2086 // Prevent PUC-Lua/LuaJIT mismatch. Hopefully this works for MoonJIT too
2087 lua_getglobal( L, "jit"); // {jit?}
2088#if LUAJIT_FLAVOR() == 0
2089 if (!lua_isnil( L, -1))
2090 return luaL_error( L, "Lanes is built for PUC-Lua, don't run from LuaJIT");
2091#else
2092 if (lua_isnil( L, -1))
2093 return luaL_error( L, "Lanes is built for LuaJIT, don't run from PUC-Lua");
2094#endif
2095 lua_pop( L, 1); //
2096
2097 // Create main module interface table
2098 // we only have 1 closure, which must be called to configure Lanes
2099 lua_newtable( L); // M
2100 lua_pushvalue( L, 1); // M "lanes.core"
2101 lua_pushvalue( L, -2); // M "lanes.core" M
2102 lua_pushcclosure( L, LG_configure, 2); // M LG_configure()
2103 REGISTRY_GET( L, CONFIG_REGKEY); // M LG_configure() settings
2104 if( !lua_isnil( L, -1)) // this is not the first require "lanes.core": call configure() immediately
2105 {
2106 lua_pushvalue( L, -1); // M LG_configure() settings settings
2107 lua_setfield( L, -4, "settings"); // M LG_configure() settings
2108 lua_call( L, 1, 0); // M
2109 }
2110 else
2111 {
2112 // will do nothing on first invocation, as we haven't stored settings in the registry yet
2113 lua_setfield( L, -3, "settings"); // M LG_configure()
2114 lua_setfield( L, -2, "configure"); // M
2115 }
2116
2117 STACK_END( L, 1);
2118 return 1;
2119}
2120
2121static int default_luaopen_lanes( lua_State* L)
2122{
2123 int rc = luaL_loadfile( L, "lanes.lua") || lua_pcall( L, 0, 1, 0);
2124 if( rc != LUA_OK)
2125 {
2126 return luaL_error( L, "failed to initialize embedded Lanes");
2127 }
2128 return 1;
2129}
2130
2131// call this instead of luaopen_lanes_core() when embedding Lua and Lanes in a custom application
2132LANES_API void luaopen_lanes_embedded( lua_State* L, lua_CFunction _luaopen_lanes)
2133{
2134 STACK_CHECK( L, 0);
2135 // pre-require lanes.core so that when lanes.lua calls require "lanes.core" it finds it is already loaded
2136 luaL_requiref( L, "lanes.core", luaopen_lanes_core, 0); // ... lanes.core
2137 lua_pop( L, 1); // ...
2138 STACK_MID( L, 0);
2139 // call user-provided function that runs the chunk "lanes.lua" from wherever they stored it
2140 luaL_requiref( L, "lanes", _luaopen_lanes ? _luaopen_lanes : default_luaopen_lanes, 0); // ... lanes
2141 STACK_END( L, 1);
2142}
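// Embedding sketch (illustrative, not part of this file): a host application that bundles
// lanes.lua at a custom location can initialize Lanes as below; the path and the helper
// name are placeholders.
//
//   static int load_bundled_lanes_lua( lua_State* L)
//   {
//       if( luaL_loadfile( L, "scripts/lanes.lua") || lua_pcall( L, 0, 1, 0))
//       {
//           return lua_error( L); // propagate the load/runtime error message
//       }
//       return 1; // the lanes module table
//   }
//
//   // after luaL_openlibs( L):
//   luaopen_lanes_embedded( L, load_bundled_lanes_lua); // leaves the 'lanes' module on the stack
//   lua_pop( L, 1);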