Example #1
int fiber_sleep(uint32_t seconds, uint32_t useconds)
{
    if(!fiber_loop) {
        fiber_do_real_sleep(seconds, useconds);
        return FIBER_SUCCESS;
    }

    //this code should really use ev_timer_init(), but ev_timer_init() produces compiler warnings.
    ev_timer timer_event = {};
    ev_set_cb(&timer_event, &timer_trigger);
    const double sleep_time = seconds + useconds * 0.000001;
    timer_event.at = sleep_time;
    timer_event.repeat = 0;

    fiber_spinlock_lock(&fiber_loop_spinlock);

    fiber_manager_t* const manager = fiber_manager_get();
    fiber_t* const this_fiber = manager->current_fiber;

    timer_event.data = this_fiber;

    ev_timer_start(fiber_loop, &timer_event);

    this_fiber->state = FIBER_STATE_WAITING;
    manager->spinlock_to_unlock = &fiber_loop_spinlock;//deferred: the manager unlocks this on our behalf once this fiber has switched away

    fiber_manager_yield(manager);

    return FIBER_SUCCESS;
}
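When no event loop exists yet, fiber_sleep() falls back to fiber_do_real_sleep(), which simply blocks the calling kernel thread. A minimal sketch of such a fallback, assuming plain POSIX nanosleep() (the library's actual implementation may differ):

#include <errno.h>
#include <stdint.h>
#include <time.h>

static void fiber_do_real_sleep(uint32_t seconds, uint32_t useconds)
{
    //block the whole kernel thread; only reasonable before the event loop is created
    struct timespec ts;
    ts.tv_sec = seconds;
    ts.tv_nsec = (long)useconds * 1000;
    while(nanosleep(&ts, &ts) == -1 && errno == EINTR) {
        //interrupted by a signal - retry with the time remaining in ts
    }
}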
Example #2
int fiber_rwlock_wrlock(fiber_rwlock_t* rwlock)
{
    assert(rwlock);

    fiber_rwlock_state_t current_state;
    while(1) {
        const uint64_t snapshot = rwlock->state.blob;
        current_state.blob = snapshot;
        if(current_state.blob != 0) {
            current_state.state.waiting_writers += 1;
            if(__sync_bool_compare_and_swap(&rwlock->state.blob, snapshot, current_state.blob)) {
                //the lock is held or other fibers are already waiting - queue up behind them and wait
                fiber_manager_wait_in_mpsc_queue(fiber_manager_get(), &rwlock->write_waiters);
                break;
            }
        } else {
            current_state.state.write_locked = 1;
            if(__sync_bool_compare_and_swap(&rwlock->state.blob, snapshot, current_state.blob)) {
                //the CAS succeeded - this fiber now holds the write lock
                break;
            }
        }
    }
    return FIBER_SUCCESS;
}
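The lock state is kept in a single 64-bit blob so that readers, writers, and waiter counts can all be updated with one compare-and-swap. The layout below is only inferred from the fields used in these examples; the real fiber_rwlock_state_t definition lives in the library's headers.

#include <stdint.h>

typedef union {
    uint64_t blob;                //entire state, swapped atomically via CAS
    struct {
        uint16_t reader_count;    //fibers currently holding the read lock
        uint16_t waiting_readers; //fibers parked in read_waiters
        uint16_t waiting_writers; //fibers parked in write_waiters
        uint16_t write_locked;    //non-zero while a writer holds the lock
    } state;
} fiber_rwlock_state_t;

Packing everything into one word is what lets fiber_rwlock_wrlock() either register as a waiter or take the lock in a single atomic step.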
Example #3
void fiber_manager_yield(fiber_manager_t* manager)
{
    assert(fiber_manager_state == FIBER_MANAGER_STATE_STARTED);
    assert(manager);
    if(wsd_work_stealing_deque_size(manager->schedule_from) == 0) {
        wsd_work_stealing_deque_t* const temp = manager->schedule_from;
        manager->schedule_from = manager->store_to;
        manager->store_to = temp;
    }

    do {
        manager->yield_count += 1;
        //occasionally steal some work from threads with more load
        if((manager->yield_count & 1023) == 0) {
            fiber_load_balance(manager);
        }

        if(wsd_work_stealing_deque_size(manager->schedule_from) > 0) {
            fiber_t* const new_fiber = (fiber_t*)wsd_work_stealing_deque_pop_bottom(manager->schedule_from);
            if(new_fiber != WSD_EMPTY && new_fiber != WSD_ABORT) {
                fiber_t* const old_fiber = manager->current_fiber;
                if(old_fiber->state == FIBER_STATE_RUNNING) {
                    old_fiber->state = FIBER_STATE_READY;
                    manager->to_schedule = old_fiber;/* must schedule it *after* fiber_swap_context, else another thread can start executing an invalid context */
                }
                manager->current_fiber = new_fiber;
                new_fiber->state = FIBER_STATE_RUNNING;
                write_barrier();
                fiber_swap_context(&old_fiber->context, &new_fiber->context);

                fiber_manager_do_maintenance();
            }
        }
    } while((manager = fiber_manager_get()) && FIBER_STATE_WAITING == manager->current_fiber->state && fiber_load_balance(manager));
}
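write_barrier() makes the new fiber's RUNNING state and the manager bookkeeping visible to other threads before the context switch; load_load_barrier() (used in Example #11) is its read-side counterpart. One possible definition on GCC-compatible compilers, assuming a full fence is acceptable; the library may use lighter, architecture-specific barriers:

#define write_barrier()     __sync_synchronize()
#define load_load_barrier() __sync_synchronize()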
Example #4
int fiber_wait_for_event(int fd, uint32_t events)
{
    ev_io fd_event = {};
    int poll_events = 0;
    if(events & FIBER_POLL_IN) {
        poll_events |= EV_READ;
    }
    if(events & FIBER_POLL_OUT) {
        poll_events |= EV_WRITE;
    }
    //this code should really use ev_io_init(), but ev_io_init() produces compiler warnings.
    ev_set_cb(&fd_event, &fd_ready);
    ev_io_set(&fd_event, fd, poll_events);

    fiber_spinlock_lock(&fiber_loop_spinlock);

    fiber_manager_t* const manager = fiber_manager_get();
    manager->event_wait_count += 1;
    fiber_t* const this_fiber = manager->current_fiber;

    fd_event.data = this_fiber;
    ev_io_start(fiber_loop, &fd_event);

    this_fiber->state = FIBER_STATE_WAITING;
    manager->spinlock_to_unlock = &fiber_loop_spinlock;//deferred: unlocked for us once this fiber has switched away

    fiber_manager_yield(manager);

    return FIBER_SUCCESS;
}
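A hypothetical wrapper showing the typical use of fiber_wait_for_event(): park only the calling fiber until the descriptor is readable, then perform the read. The helper name is illustrative, and the library's public header is assumed to be included for fiber_wait_for_event() and FIBER_POLL_IN:

#include <unistd.h>

ssize_t fiber_read_when_ready(int fd, void* buf, size_t len)
{
    //suspends this fiber only; the kernel thread keeps running other fibers
    fiber_wait_for_event(fd, FIBER_POLL_IN);
    return read(fd, buf, len);
}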
Example #5
static void timer_trigger(struct ev_loop* loop, ev_timer* watcher, int revents)
{
    ev_timer_stop(loop, watcher);
    fiber_manager_t* const manager = fiber_manager_get();
    fiber_t* const the_fiber = watcher->data;
    the_fiber->state = FIBER_STATE_READY;
    fiber_manager_schedule(manager, the_fiber);
    ++num_events_triggered;
}
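fd_ready(), the ev_io callback registered in Example #4, is not part of this listing; presumably it mirrors timer_trigger(). A sketch under that assumption:

static void fd_ready(struct ev_loop* loop, ev_io* watcher, int revents)
{
    ev_io_stop(loop, watcher);
    fiber_manager_t* const manager = fiber_manager_get();
    fiber_t* const the_fiber = watcher->data;
    the_fiber->state = FIBER_STATE_READY;
    fiber_manager_schedule(manager, the_fiber);
    ++num_events_triggered;
}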
Example #6
int fiber_rwlock_rdunlock(fiber_rwlock_t* rwlock)
{
    assert(rwlock);

    fiber_rwlock_state_t current_state;
    while(1) {
        const uint64_t snapshot = rwlock->state.blob;
        current_state.blob = snapshot;
        assert(current_state.state.reader_count > 0);
        assert(!current_state.state.write_locked);
        current_state.state.reader_count -= 1;
        if(!current_state.state.reader_count) {
            //if we're the last reader then we're responsible for waking up any waiters

            if(current_state.state.waiting_writers) {
                //no fiber will acquire the lock while waiting_writers != 0
                current_state.state.write_locked = 1;
                current_state.state.waiting_writers -= 1;
                if(__sync_bool_compare_and_swap(&rwlock->state.blob, snapshot, current_state.blob)) {
                    fiber_manager_wake_from_mpsc_queue(fiber_manager_get(), &rwlock->write_waiters, 1);
                    break;
                }
                continue;
            }
            if(current_state.state.waiting_readers) {
                //no fiber will acquire the lock while waiting_readers != 0
                current_state.state.reader_count = current_state.state.waiting_readers;
                current_state.state.waiting_readers = 0;
                if(__sync_bool_compare_and_swap(&rwlock->state.blob, snapshot, current_state.blob)) {
                    fiber_manager_wake_from_mpsc_queue(fiber_manager_get(), &rwlock->read_waiters, current_state.state.reader_count);
                    break;
                }
                continue;
            }
        }
        if(__sync_bool_compare_and_swap(&rwlock->state.blob, snapshot, current_state.blob)) {
            break;
        }
    }
    return FIBER_SUCCESS;
}
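A hypothetical usage sketch of the read/write lock. It assumes the usual companion calls (fiber_rwlock_init(), fiber_rwlock_rdlock(), fiber_rwlock_wrunlock()) exist alongside the two functions shown in this listing, and that the lock is initialized once at startup:

static fiber_rwlock_t cache_lock; //assumed to be set up via fiber_rwlock_init(&cache_lock)
static int cached_value;

int read_cached(void)
{
    fiber_rwlock_rdlock(&cache_lock);   //many readers may hold the lock concurrently
    const int value = cached_value;
    fiber_rwlock_rdunlock(&cache_lock); //the last reader out wakes any waiting writer
    return value;
}

void update_cached(int value)
{
    fiber_rwlock_wrlock(&cache_lock);   //exclusive access
    cached_value = value;
    fiber_rwlock_wrunlock(&cache_lock);
}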
Example #7
int fiber_manager_set_total_kernel_threads(size_t num_threads)
{
    if(fiber_manager_get_state() != FIBER_MANAGER_STATE_NONE) {
        errno = EINVAL;
        return FIBER_ERROR;
    }

    fiber_mananger_thread_queues = calloc(2 * num_threads, sizeof(*fiber_mananger_thread_queues));
    assert(fiber_mananger_thread_queues);
    fiber_manager_threads = calloc(num_threads, sizeof(*fiber_manager_threads));
    assert(fiber_manager_threads);
    fiber_manager_num_threads = num_threads;
    fiber_managers = calloc(num_threads, sizeof(*fiber_managers));
    assert(fiber_managers);

    fiber_manager_t* const main_manager = fiber_manager_get();
    fiber_mananger_thread_queues[0] = main_manager->queue_one;
    fiber_mananger_thread_queues[1] = main_manager->queue_two;
    fiber_managers[0] = main_manager;

    fiber_manager_state = FIBER_MANAGER_STATE_STARTED;

    size_t i;
    for(i = 1; i < num_threads; ++i) {
        fiber_manager_t* const new_manager = fiber_manager_create();
        assert(new_manager);
        fiber_mananger_thread_queues[2 * i] = new_manager->queue_one;
        fiber_mananger_thread_queues[2 * i + 1] = new_manager->queue_two;
        new_manager->id = i;
        fiber_managers[i] = new_manager;
    }

    pthread_create_function pthread_create_func = (pthread_create_function)fiber_load_symbol("pthread_create");
    assert(pthread_create_func);

    for(i = 1; i < num_threads; ++i) {
        if(pthread_create_func(&fiber_manager_threads[i], NULL, &fiber_manager_thread_func, fiber_managers[i])) {
            assert(0 && "failed to create kernel thread");
            fiber_manager_state = FIBER_MANAGER_STATE_ERROR;
            abort();
            return FIBER_ERROR;
        }
    }

    return FIBER_SUCCESS;
}
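A hypothetical startup sequence: the kernel-thread count has to be fixed once, before any fibers run, so sizing it to the number of online cores at the top of main() is a natural choice. sysconf() is standard POSIX; the rest of the application setup is elided:

#include <unistd.h>

int main(void)
{
    long cores = sysconf(_SC_NPROCESSORS_ONLN);
    if(cores < 1) {
        cores = 1; //fall back to a single scheduler thread
    }
    fiber_manager_set_total_kernel_threads((size_t)cores);
    //...create fibers and run the application...
    return 0;
}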
Example #8
void fiber_manager_do_maintenance()
{
    fiber_manager_t* const manager = fiber_manager_get();
    if(manager->to_schedule) {
        assert(manager->to_schedule->state == FIBER_STATE_READY);
        wsd_work_stealing_deque_push_bottom(manager->store_to, manager->to_schedule);
        manager->to_schedule = NULL;
    }

    if(manager->mpsc_to_push.fifo) {
        mpsc_fifo_push(manager->mpsc_to_push.fifo, manager->mpsc_to_push.node);
        memset(&manager->mpsc_to_push, 0, sizeof(manager->mpsc_to_push));
    }

    if(manager->mutex_to_unlock) {
        fiber_mutex_t* const to_unlock = manager->mutex_to_unlock;
        manager->mutex_to_unlock = NULL;
        fiber_mutex_unlock_internal(to_unlock);
    }
}
Example #9
static void* fiber_manager_thread_func(void* param)
{
    /* set the thread local, then start running fibers */
#ifdef USE_COMPILER_THREAD_LOCAL
    fiber_the_manager = (fiber_manager_t*)param;
#else
    if(!pthread_setspecific_func) {
        pthread_setspecific_func = (pthread_setspecific_function)fiber_load_symbol("pthread_setspecific");
    }
    const int ret = pthread_setspecific_func(fiber_manager_key, param);
    if(ret) {
        assert(0 && "pthread_setspecific() failed!");
        abort();
    }
#endif

    while(1) {
        /* always re-fetch the manager with fiber_manager_get(): this thread's fiber can migrate to a different kernel thread across a yield, so a cached pointer may go stale */
        fiber_manager_yield(fiber_manager_get());
    }
    return NULL;
}
Example #10
int fiber_poll_events()
{
    if(!fiber_loop) {
        return FIBER_EVENT_NOTINIT;
    }

    if(!fiber_spinlock_trylock(&fiber_loop_spinlock)) {
        return FIBER_EVENT_TRYAGAIN;
    }

    if(!fiber_loop) {
        fiber_spinlock_unlock(&fiber_loop_spinlock);
        return FIBER_EVENT_NOTINIT;
    }

    fiber_manager_get()->poll_count += 1;
    num_events_triggered = 0;
    ev_run(fiber_loop, EVRUN_NOWAIT);
    const int local_copy = num_events_triggered;
    fiber_spinlock_unlock(&fiber_loop_spinlock);

    return local_copy;
}
Example #11
size_t fiber_poll_events_blocking(uint32_t seconds, uint32_t useconds)
{
    if(!fiber_loop) {
        fiber_do_real_sleep(seconds, useconds);
        return 0;
    }

    load_load_barrier();//ensure fiber_loop is read before active_threads: init writes
                        //active_threads and then fiber_loop, so reading in the reverse
                        //order guarantees we see a valid active_threads count

    //only the final thread is allowed to perform a blocking poll - this prevents a
    //blocking poller from locking out other threads trying to register for events
    const int local_count = __sync_sub_and_fetch(&active_threads, 1);
    assert(local_count >= 0);
    if(local_count > 0) {
        fiber_do_real_sleep(seconds, useconds);
        __sync_add_and_fetch(&active_threads, 1);
        return 0;
    }

    fiber_spinlock_lock(&fiber_loop_spinlock);

    if(!fiber_loop) {
        __sync_add_and_fetch(&active_threads, 1);
        fiber_spinlock_unlock(&fiber_loop_spinlock);
        return 0;
    }

    num_events_triggered = 0;
    fiber_manager_get()->poll_count += 1;
    ev_run(fiber_loop, EVRUN_ONCE);
    const int local_copy = num_events_triggered;
    fiber_spinlock_unlock(&fiber_loop_spinlock);

    __sync_add_and_fetch(&active_threads, 1);
    return local_copy;
}
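A hypothetical idle loop combining the two polling entry points: drain ready events without blocking first, and only fall back to a bounded blocking poll when nothing was available. The helper name and the 10 millisecond timeout are illustrative only:

static void poll_for_events(void)
{
    const int rc = fiber_poll_events();
    if(rc == FIBER_EVENT_NOTINIT || rc == FIBER_EVENT_TRYAGAIN || rc == 0) {
        //nothing was ready immediately - block for at most 10 milliseconds
        fiber_poll_events_blocking(0, 10000);
    }
}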