// Cancel this task.
//
// wait_until_finished - if the task is already running, block until it has
//     finished instead of returning immediately
// finished (out)      - whether the task has completed by the time we return
//     (either cancelled by this call, or already cancelled/finished before it)
// return              - whether THIS call performed the cancellation, i.e.
//     won the READY -> CANCELLED transition
bool task::cancel(bool wait_until_finished, /*out*/ bool *finished /*= nullptr*/)
{
    task_state READY_STATE = TASK_STATE_READY;
    task *current_tsk = get_current_task();
    bool finish = false;
    bool succ = false;

    if (current_tsk != this) {
        // only a READY task can be cancelled; on CAS failure READY_STATE is
        // overwritten with the state actually observed
        if (_state.compare_exchange_strong(
                READY_STATE, TASK_STATE_CANCELLED, std::memory_order_relaxed)) {
            succ = true;
            finish = true;
        } else {
            task_state old_state = READY_STATE;
            if (old_state == TASK_STATE_CANCELLED) {
                succ = false; // this cancellation fails
                finish = true;
            } else if (old_state == TASK_STATE_FINISHED) {
                succ = false;
                finish = true;
            } else if (wait_until_finished) {
                // task is currently running; flag it and block until it completes
                _wait_for_cancel = true;
                bool r = wait_on_cancel();
                dassert(
                    r,
                    "wait failed, it is only possible when task runs for more than 0x0fffffff ms");
                succ = false;
                finish = true;
            } else {
                // running and caller does not want to wait
                succ = false;
                finish = false;
            }
        }
    } else {
        // task cancel itself
        // for timer task, we should set _wait_for_cancel flag to
        // prevent timer task from enqueueing again
        _wait_for_cancel = true;
    }

    if (current_tsk != nullptr) {
        current_tsk->spec().on_task_cancel_post.execute(current_tsk, this, succ);
    }

    if (succ) {
        spec().on_task_cancelled.execute(this);
        signal_waiters();

        // we call clear_callback only cancelling succeed.
        // otherwise, task will successfully exececuted and clear_callback will be called
        // in "exec_internal".
        clear_non_trivial_on_task_end();
    }

    if (finished)
        *finished = finish;

    return succ;
}
// // return - whether the task has completed (not necessarily cancelled though) // bool task::cancel(bool wait_until_finished, /*out*/ bool* cancel_success /*= nullptr*/) { task_state READY_STATE = TASK_STATE_READY; task *current_tsk = task::get_current_task(); bool ret = true; bool succ = false; if (current_tsk == this) { /*dwarn( "task %s (id=%016llx) cannot cancel itself", spec().name, id() );*/ if (cancel_success) *cancel_success = false; return false; } if (_state.compare_exchange_strong(READY_STATE, TASK_STATE_CANCELLED)) { succ = true; } else { task_state old_state = _state.load(); if ((old_state == TASK_STATE_CANCELLED) || (old_state == TASK_STATE_FINISHED)) { } else if (wait_until_finished) { _wait_for_cancel = true; bool r = wait(TIME_MS_MAX, true); dassert(r, "wait failed, it is only possible when task runs for more than 0x0fffffff ms"); } else { ret = false; } } if (current_tsk != nullptr) { current_tsk->spec().on_task_cancel_post.execute(current_tsk, this, succ); } if (succ) { spec().on_task_cancelled.execute(this); signal_waiters(); } if (cancel_success) *cancel_success = succ; return ret; }
// Run the task body on the current worker thread, driving the state machine
// READY -> RUNNING -> {FINISHED | READY (retry) | CANCELLED}, firing the
// per-spec lifecycle hooks, and finally dropping the reference taken at
// enqueue time.
void task::exec_internal()
{
    task_state READY_STATE = TASK_STATE_READY;
    task_state RUNNING_STATE = TASK_STATE_RUNNING;
    bool notify_if_necessary = true;

    // only a READY task may run; a concurrent cancel() that already moved the
    // state to CANCELLED makes this CAS fail and we skip straight to cleanup
    if (_state.compare_exchange_strong(
            READY_STATE, TASK_STATE_RUNNING, std::memory_order_relaxed)) {
        dassert(tls_dsn.magic == 0xdeadbeef, "thread is not inited with task::set_tls_dsn_context");

        // save/restore the thread-local current task so nested execution works
        task *parent_task = tls_dsn.current_task;
        tls_dsn.current_task = this;

        _spec->on_task_begin.execute(this);
        exec();

        // after exec(), one shot tasks are still in "running".
        // other tasks may call "set_retry" to reset tasks to "ready",
        // like timers and rpc_response_tasks
        if (_state.compare_exchange_strong(RUNNING_STATE,
                                           TASK_STATE_FINISHED,
                                           std::memory_order_release,
                                           std::memory_order_relaxed)) {
            _spec->on_task_end.execute(this);
            clear_non_trivial_on_task_end();
        } else {
            // state is READY again (set_retry was called during exec())
            if (!_wait_for_cancel) {
                // for retried tasks such as timer or rpc_response_task
                notify_if_necessary = false;
                _spec->on_task_end.execute(this);

                if (ERR_OK == _error)
                    enqueue();
            } else {
                // for cancelled
                // NOTE: READY_STATE still holds TASK_STATE_READY here, since the
                // first CAS above succeeded and therefore did not overwrite it
                if (_state.compare_exchange_strong(READY_STATE,
                                                   TASK_STATE_CANCELLED,
                                                   std::memory_order_release,
                                                   std::memory_order_relaxed)) {
                    _spec->on_task_cancelled.execute(this);
                }

                // always call on_task_end()
                _spec->on_task_end.execute(this);

                // for timer task, we must call reset_callback after cancelled, because we don't
                // reset callback after exec()
                clear_non_trivial_on_task_end();
            }
        }

        tls_dsn.current_task = parent_task;
    }

    if (notify_if_necessary) {
        if (signal_waiters()) {
            spec().on_task_wait_notified.execute(this);
        }
    }

    if (!_spec->allow_inline && !_is_null) {
        lock_checker::check_dangling_lock();
    }

    this->release_ref(); // added in enqueue(pool)
}
// // return - whether this cancel succeed // bool task::cancel(bool wait_until_finished, /*out*/ bool* finished /*= nullptr*/) { task_state READY_STATE = TASK_STATE_READY; task *current_tsk = task::get_current_task(); bool finish = false; bool succ = false; if (current_tsk == this) { /*dwarn( "task %s (id=%016llx) cannot cancel itself", spec().name(), id() );*/ // make sure timers are cancelled _wait_for_cancel = true; if (finished) *finished = false; return false; } if (_state.compare_exchange_strong(READY_STATE, TASK_STATE_CANCELLED, std::memory_order_relaxed)) { succ = true; finish = true; } else { task_state old_state = READY_STATE; if (old_state == TASK_STATE_CANCELLED) { succ = false; // this cancellation fails finish = true; } else if (old_state == TASK_STATE_FINISHED) { succ = false; finish = true; } else if (wait_until_finished) { _wait_for_cancel = true; bool r = wait(TIME_MS_MAX, true); dassert(r, "wait failed, it is only possible when task runs for more than 0x0fffffff ms"); succ = false; finish = true; } else { succ = false; finish = false; } } if (current_tsk != nullptr) { current_tsk->spec().on_task_cancel_post.execute(current_tsk, this, succ); } if (succ) { // // TODO: pros and cons of executing on_cancel here // or in exec_internal // if (_on_cancel) { _on_cancel(_context); } spec().on_task_cancelled.execute(this); signal_waiters(); } if (finished) *finished = finish; return succ; }