void lock(volatile lock_t *v) {
    pid_t mypid = (current_process == NULL) ? -1 : current_process->pid;
    pid_t owner;
    // Try to install our pid as the lock owner; while someone else holds
    // the lock, donate the CPU to the owner instead of spinning.
    while ((owner = __sync_val_compare_and_swap(v, 0, mypid)) != 0) {
        kassert(mypid != owner); // re-acquiring our own lock would deadlock
        yield_to(owner);
    }
}
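/*
 * Hedged, self-contained sketch (not from the source) of the same
 * owner-pid lock pattern with the GCC __sync builtins: 0 means free,
 * otherwise the word holds the owner's pid. sched_yield() stands in
 * for the kernel's directed yield_to(owner), which plain userspace
 * code cannot express; lock_sketch/unlock_sketch are illustrative
 * names, not the original API.
 */
#include <sched.h>      /* sched_yield */
#include <sys/types.h>  /* pid_t */
#include <unistd.h>     /* getpid */

typedef volatile pid_t lock_t_sketch;

static void lock_sketch(lock_t_sketch *v) {
    pid_t mypid = getpid();
    while (__sync_val_compare_and_swap(v, 0, mypid) != 0)
        sched_yield();  /* undirected yield; the owner runs eventually */
}

static void unlock_sketch(lock_t_sketch *v) {
    __sync_lock_release(v);  /* writes 0 with release semantics */
}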
void action_script::update() {
    action::update();
    auto& timer = global_timer_base::instance();
    if (!_yielded || _yielded->done()) {
        // Nothing pending (or the last yielded object finished): resume the
        // script coroutine with the current time and frame delta, and adopt
        // whatever it yields next.
        yield_to(std::get<1>(
            _coroutine.resume<ptr>(timer.current_time(), timer.recent_delta())));
    } else {
        // Still waiting on the yielded object; keep ticking it.
        _yielded->update();
    }
}
void fiber_control::worker_init(size_t workerid) {
    /*
     * This is the "root" stack for each worker.
     * When there are active user threads associated with this worker,
     * it will switch directly between the fibers.
     * But when the worker has no other fiber to run, it will return to
     * this stack and wait on a condition variable.
     */
    // create a root context
    create_tls_ptr();
    // set up the tls structure
    tls* t = get_tls_ptr();
    t->prev_fiber = NULL;
    t->cur_fiber = NULL;
    t->garbage = NULL;
    t->workerid = workerid;
    t->parent = this;

    schedule[workerid].active_lock.lock();
    while (!stop_workers) {
        // get a fiber to run
        fiber* next_fib = t->parent->active_queue_remove(workerid);
        if (next_fib != NULL) {
            // if there is a fiber, yield to it
            schedule[workerid].active_lock.unlock();
            yield_to(next_fib);
            distributed_control* dc = distributed_control::get_instance();
            if (dc) dc->flush_soon();
            schedule[workerid].active_lock.lock();
        } else {
            // if there is no fiber, wait
            schedule[workerid].waiting = true;
            schedule[workerid].active_cond.wait(schedule[workerid].active_lock);
            schedule[workerid].waiting = false;
        }
    }
    schedule[workerid].active_lock.unlock();
}
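/*
 * Hedged, self-contained sketch (not from the source) of the park/wake
 * handshake the worker loop above relies on, using std::mutex and
 * std::condition_variable in place of the snippet's active_lock and
 * active_cond. The names here (Worker, enqueue, dequeue_or_park) are
 * illustrative stand-ins, not the original API.
 */
#include <condition_variable>
#include <deque>
#include <mutex>

struct Fiber;  /* opaque user-level thread, as in the snippet */

struct Worker {
    std::mutex              active_lock;
    std::condition_variable active_cond;
    std::deque<Fiber*>      queue;
    bool                    waiting = false;

    /* Producer side: hand a fiber to the worker, wake it if parked. */
    void enqueue(Fiber* f) {
        bool was_waiting;
        {
            std::lock_guard<std::mutex> g(active_lock);
            queue.push_back(f);
            was_waiting = waiting;
        }
        if (was_waiting) active_cond.notify_one();
    }

    /* Consumer side: mirrors the wait branch of worker_init above. */
    Fiber* dequeue_or_park(bool& stop) {
        std::unique_lock<std::mutex> g(active_lock);
        while (queue.empty() && !stop) {
            waiting = true;
            active_cond.wait(g);
            waiting = false;
        }
        if (queue.empty()) return nullptr;
        Fiber* f = queue.front();
        queue.pop_front();
        return f;
    }
};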
void ata_yield_fn() {
    // Yield back to the recorded process, if there is one, then clear
    // it so the next call is a no-op.
    if (last_pid)
        yield_to(last_pid);
    last_pid = 0;
}
void action_script::on_start() {
    action::on_start();
    auto& timer = global_timer_base::instance();
    yield_to(std::get<1>(
        _coroutine.resume<ptr>(timer.current_time(), timer.recent_delta())));
}
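/*
 * Hedged sketch (assumptions throughout, not the original API): how the
 * two action_script snippets above presumably fit together. resume<ptr>()
 * is assumed to return a tuple whose second element is the next yielded
 * awaitable (or null when the script is finished), and yield_to() is
 * assumed to simply store it so update() can poll done() on later frames.
 * The types below (awaitable, ptr) are illustrative stand-ins.
 */
#include <memory>

struct awaitable {
    virtual ~awaitable() = default;
    virtual bool done() const = 0;  /* polled by update() above */
    virtual void update() = 0;      /* ticked while not done */
};
using ptr = std::shared_ptr<awaitable>;

struct action_script_sketch {
    ptr _yielded;  /* mirrors _yielded in the snippets */

    /* Assumed body of yield_to: adopt the coroutine's latest yield. */
    void yield_to(ptr next) { _yielded = std::move(next); }
};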