author      Benoit Germain <bnt.germain@gmail.com>    2019-04-19 14:58:25 +0200
committer   Benoit Germain <bnt.germain@gmail.com>    2019-04-19 14:58:25 +0200
commit      be1e9d37d9809ee55f26d811208fa64ea9b3785a (patch)
tree        1b0122ed7e7f501c64814cdbbf03e5941dbab51c /src
parent      52934d46eed850c23a9b21125be73e987f34e772 (diff)
lane:cancel internal code refactorization
Diffstat (limited to 'src')
-rw-r--r--   src/lanes.c   151
-rw-r--r--   src/linda.c     4
2 files changed, 74 insertions, 81 deletions
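
The commit splits the old monolithic thread_cancel() into two helpers, thread_cancel_soft() and thread_cancel_hard(), and turns thread_cancel() itself into a thin dispatcher. In outline (condensed from the diff below; comments, braces around single statements and the force-kill/THREAD_KILL details are elided):

    static cancel_result thread_cancel( lua_State* L, Lane* s, double secs_, bool_t force_, double waitkill_timeout_)
    {
        if( s->mstatus == KILLED)
            return CR_Killed;       // lane was already force-killed: nothing more to do
        if( s->status >= DONE)
            return CR_Cancelled;    // lane already finished: report success
        if( secs_ < 0.0)            // negative timeout requests a soft cancel
            return thread_cancel_soft( L, s, force_);                        // set CANCEL_SOFT, optionally wake pending lindas
        return thread_cancel_hard( L, s, secs_, force_, waitkill_timeout_);  // set CANCEL_HARD, wait, force-kill on timeout
    }

Behaviour is meant to stay the same; the soft and hard cancel paths simply become separate functions instead of nested branches of one large if/else.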
diff --git a/src/lanes.c b/src/lanes.c
index 90da9bf..abd4171 100644
--- a/src/lanes.c
+++ b/src/lanes.c
@@ -169,20 +169,6 @@ static DECLARE_CONST_UNIQUE_KEY( FINALIZER_REGKEY, 0x188fccb8bf348e09);
 
 struct s_Linda;
 
-#if 1
-# define DEBUG_SIGNAL( msg, signal_ref ) /* */
-#else
-# define DEBUG_SIGNAL( msg, signal_ref ) \
-    { int i; unsigned char *ptr; char buf[999]; \
-      sprintf( buf, ">>> " msg ": %p\t", (signal_ref) ); \
-      ptr= (unsigned char *)signal_ref; \
-      for( i=0; i<sizeof(*signal_ref); i++ ) { \
-        sprintf( strchr(buf,'\0'), "%02x %c ", ptr[i], ptr[i] ); \
-      } \
-      fprintf( stderr, "%s\n", buf ); \
-    }
-#endif
-
 /*
  * Push a table stored in registry onto Lua stack.
  *
@@ -442,90 +428,97 @@ typedef enum
     CR_Killed
 } cancel_result;
 
-static cancel_result thread_cancel( lua_State* L, Lane* s, double secs, bool_t force, double waitkill_timeout_)
+static cancel_result thread_cancel_soft( lua_State* L, Lane* s, bool_t wake_lindas_)
 {
-    cancel_result result;
-
-    // remember that lanes are not transferable: only one thread can cancel a lane, so no multithreading issue here
-    // We can read 's->status' without locks, but not wait for it (if Posix no PTHREAD_TIMEDJOIN)
-    if( s->mstatus == KILLED)
+    s->cancel_request = CANCEL_SOFT;    // it's now signaled to stop
+    // negative timeout: we don't want to truly abort the lane, we just want it to react to cancel_test() on its own
+    if( wake_lindas_) // wake the thread so that execution returns from any pending linda operation if desired
     {
-        result = CR_Killed;
+        SIGNAL_T *waiting_on = s->waiting_on;
+        if( s->status == WAITING && waiting_on != NULL)
+        {
+            SIGNAL_ALL( waiting_on);
+        }
     }
-    else if( s->status < DONE)
+    // say we succeeded though
+    return CR_Cancelled;
+}
+
+static cancel_result thread_cancel_hard( lua_State* L, Lane* s, double secs_, bool_t force_, double waitkill_timeout_)
+{
+    cancel_result result;
+
+    s->cancel_request = CANCEL_HARD;    // it's now signaled to stop
     {
-        // signal the linda the wake up the thread so that it can react to the cancel query
-        // let us hope we never land here with a pointer on a linda that has been destroyed...
-        if( secs < 0.0)
+        SIGNAL_T *waiting_on = s->waiting_on;
+        if( s->status == WAITING && waiting_on != NULL)
         {
-            s->cancel_request = CANCEL_SOFT;    // it's now signaled to stop
-            // negative timeout: we don't want to truly abort the lane, we just want it to react to cancel_test() on its own
-            if( force) // wake the thread so that execution returns from any pending linda operation if desired
-            {
-                SIGNAL_T *waiting_on = s->waiting_on;
-                if( s->status == WAITING && waiting_on != NULL)
-                {
-                    SIGNAL_ALL( waiting_on);
-                }
-            }
-            // say we succeeded though
-            result = CR_Cancelled;
+            SIGNAL_ALL( waiting_on);
         }
-        else
-        {
-            s->cancel_request = CANCEL_HARD;    // it's now signaled to stop
-            {
-                SIGNAL_T *waiting_on = s->waiting_on;
-                if( s->status == WAITING && waiting_on != NULL)
-                {
-                    SIGNAL_ALL( waiting_on);
-                }
-            }
+    }
 
-            result = THREAD_WAIT( &s->thread, secs, &s->done_signal, &s->done_lock, &s->status) ? CR_Cancelled : CR_Timeout;
+    result = THREAD_WAIT( &s->thread, secs_, &s->done_signal, &s->done_lock, &s->status) ? CR_Cancelled : CR_Timeout;
 
-            if( (result == CR_Timeout) && force)
-            {
-                // Killing is asynchronous; we _will_ wait for it to be done at
-                // GC, to make sure the data structure can be released (alternative
-                // would be use of "cancellation cleanup handlers" that at least
-                // PThread seems to have).
-                //
-                THREAD_KILL( &s->thread);
+    if( (result == CR_Timeout) && force_)
+    {
+        // Killing is asynchronous; we _will_ wait for it to be done at
+        // GC, to make sure the data structure can be released (alternative
+        // would be use of "cancellation cleanup handlers" that at least
+        // PThread seems to have).
+        //
+        THREAD_KILL( &s->thread);
 #if THREADAPI == THREADAPI_PTHREAD
-                // pthread: make sure the thread is really stopped!
-                // note that this may block forever if the lane doesn't call a cancellation point and pthread doesn't honor PTHREAD_CANCEL_ASYNCHRONOUS
-                result = THREAD_WAIT( &s->thread, waitkill_timeout_, &s->done_signal, &s->done_lock, &s->status);
-                if( result == CR_Timeout)
-                {
-                    return luaL_error( L, "force-killed lane failed to terminate within %f second%s", waitkill_timeout_, waitkill_timeout_ > 1 ? "s" : "");
-                }
+        // pthread: make sure the thread is really stopped!
+        // note that this may block forever if the lane doesn't call a cancellation point and pthread doesn't honor PTHREAD_CANCEL_ASYNCHRONOUS
+        result = THREAD_WAIT( &s->thread, waitkill_timeout_, &s->done_signal, &s->done_lock, &s->status);
+        if( result == CR_Timeout)
+        {
+            return luaL_error( L, "force-killed lane failed to terminate within %f second%s", waitkill_timeout_, waitkill_timeout_ > 1 ? "s" : "");
+        }
 #else
-                (void) waitkill_timeout_; // unused
-                (void) L; // unused
+        (void) waitkill_timeout_; // unused
+        (void) L; // unused
 #endif // THREADAPI == THREADAPI_PTHREAD
-                s->mstatus = KILLED;     // mark 'gc' to wait for it
-                // note that s->status value must remain to whatever it was at the time of the kill
-                // because we need to know if we can lua_close() the Lua State or not.
-                result = CR_Killed;
-            }
-        }
-    }
-    else
+        s->mstatus = KILLED;     // mark 'gc' to wait for it
+        // note that s->status value must remain to whatever it was at the time of the kill
+        // because we need to know if we can lua_close() the Lua State or not.
+        result = CR_Killed;
+    }
+    return result;
+}
+
+static cancel_result thread_cancel( lua_State* L, Lane* s, double secs_, bool_t force_, double waitkill_timeout_)
+{
+    // remember that lanes are not transferable: only one thread can cancel a lane, so no multithreading issue here
+    // We can read 's->status' without locks, but not wait for it (if Posix no PTHREAD_TIMEDJOIN)
+    if( s->mstatus == KILLED)
+    {
+        return CR_Killed;
+    }
+
+    if( s->status >= DONE)
     {
         // say "ok" by default, including when lane is already done
-        result = CR_Cancelled;
+        return CR_Cancelled;
     }
-    return result;
+
+    // signal the linda the wake up the thread so that it can react to the cancel query
+    // let us hope we never land here with a pointer on a linda that has been destroyed...
+    if( secs_ < 0.0)
+    {
+        return thread_cancel_soft( L, s, force_);
+    }
+
+    return thread_cancel_hard( L, s, secs_, force_, waitkill_timeout_);
 }
 
 //
 // Protects modifying the selfdestruct chain
 
 #define SELFDESTRUCT_END ((Lane*)(-1))
 //
 // The chain is ended by '(Lane*)(-1)', not NULL:
 //      'selfdestruct_first -> ... -> ... -> (-1)'
 
 /*
  * Add the lane to selfdestruct chain; the ones still running at the end of the
diff --git a/src/linda.c b/src/linda.c
index 69f41b6..150649d 100644
--- a/src/linda.c
+++ b/src/linda.c
@@ -245,7 +245,7 @@ LUAG_FUNC( linda_send)
     {
         case CANCEL_SOFT:
         // if user wants to soft-cancel, the call returns lanes.cancel_error
-        push_unique_key( L, CANCEL_ERROR);
+        push_unique_key( L, CANCEL_ERROR);
         return 1;
 
         case CANCEL_HARD:
@@ -400,7 +400,7 @@ LUAG_FUNC( linda_receive)
     {
         case CANCEL_SOFT:
         // if user wants to soft-cancel, the call returns CANCEL_ERROR
-        push_unique_key( L, CANCEL_ERROR);
+        push_unique_key( L, CANCEL_ERROR);
         return 1;
 
         case CANCEL_HARD: