/* Example #1 */
/* Yield the calling fiber: pick a runnable fiber from this manager's deque and
   context-switch to it. If no work is available the call returns without
   switching, unless the current fiber is WAITING, in which case it keeps
   load-balancing until it finds something to run. */
void fiber_manager_yield(fiber_manager_t* manager)
{
    assert(fiber_manager_state == FIBER_MANAGER_STATE_STARTED);
    assert(manager);
    /* The active deque is empty: swap it with store_to so fibers queued there
       become schedulable. */
    if(wsd_work_stealing_deque_size(manager->schedule_from) == 0) {
        wsd_work_stealing_deque_t* const temp = manager->schedule_from;
        manager->schedule_from = manager->store_to;
        manager->store_to = temp;
    }

    do {
        manager->yield_count += 1;
        //occasionally steal some work from threads with more load
        if((manager->yield_count & 1023) == 0) {
            fiber_load_balance(manager);
        }

        if(wsd_work_stealing_deque_size(manager->schedule_from) > 0) {
            /* pop_bottom is the owner-side operation; it may still report
               WSD_EMPTY/WSD_ABORT if thieves raced us for the last item. */
            fiber_t* const new_fiber = (fiber_t*)wsd_work_stealing_deque_pop_bottom(manager->schedule_from);
            if(new_fiber != WSD_EMPTY && new_fiber != WSD_ABORT) {
                fiber_t* const old_fiber = manager->current_fiber;
                if(old_fiber->state == FIBER_STATE_RUNNING) {
                    old_fiber->state = FIBER_STATE_READY;
                    manager->to_schedule = old_fiber;/* must schedule it *after* fiber_swap_context, else another thread can start executing an invalid context */
                }
                manager->current_fiber = new_fiber;
                new_fiber->state = FIBER_STATE_RUNNING;
                /* publish the state changes before the context switch makes
                   new_fiber's stack live */
                write_barrier();
                fiber_swap_context(&old_fiber->context, &new_fiber->context);

                /* We resume here on a later switch back; maintenance handles
                   the deferred to_schedule work, among other things. */
                fiber_manager_do_maintenance();
            }
        }
        /* Re-fetch the manager: after fiber_swap_context this code may be
           running on a different thread than the one it started on. Keep
           looping while the current fiber is still WAITING and load balancing
           can make progress. */
    } while((manager = fiber_manager_get()) && FIBER_STATE_WAITING == manager->current_fiber->state && fiber_load_balance(manager));
}
/* Return the next runnable fiber from this scheduler's deque, or NULL when
   none is available. Fibers caught mid-save (SAVING_STATE_TO_WAIT) are not
   runnable yet; they are parked on the store side for a later pass. */
fiber_t* fiber_scheduler_next(fiber_scheduler_t* sched)
{
    fiber_scheduler_wsd_t* const self = (fiber_scheduler_wsd_t*)sched;
    assert(self);

    /* Active deque drained: flip it with the store deque so previously
       deferred fibers become eligible. */
    if(wsd_work_stealing_deque_size(self->schedule_from) == 0) {
        wsd_work_stealing_deque_t* const swapped = self->store_to;
        self->store_to = self->schedule_from;
        self->schedule_from = swapped;
    }

    for(;;) {
        if(!(wsd_work_stealing_deque_size(self->schedule_from) > 0)) {
            break;
        }
        fiber_t* const candidate = (fiber_t*)wsd_work_stealing_deque_pop_bottom(self->schedule_from);
        /* pop_bottom can race with thieves and yield a sentinel; retry. */
        if(candidate == WSD_EMPTY || candidate == WSD_ABORT) {
            continue;
        }
        if(candidate->state != FIBER_STATE_SAVING_STATE_TO_WAIT) {
            return candidate;
        }
        /* Still saving its context elsewhere — defer it to the store side. */
        wsd_work_stealing_deque_push_bottom(self->store_to, candidate);
    }
    return NULL;
}
/* Example #3 */
/* Test driver for the work-stealing deque: exercises the backing circular
   array, single-threaded LIFO push/pop, and a multi-threaded producer/stealer
   run whose per-item counts and sum are verified at the end.
   NOTE(review): relies on globals declared elsewhere in this file —
   wsd_d2, results, total, run_func_count, done, run_func, NUM_THREADS,
   SHARED_COUNT. */
int main(int argc, char* argv[])
{
    /* Circular array: create(8) apparently takes a log2 size (8 -> 256
       slots) — the assert below encodes that; confirm against the API. */
    wsd_circular_array_t* wsd_a = wsd_circular_array_create(8);
    test_assert(wsd_a);
    test_assert(wsd_circular_array_size(wsd_a) == 256);
    wsd_circular_array_put(wsd_a, 1, (void*)1);
    test_assert((void*)1 == wsd_circular_array_get(wsd_a, 1));
    wsd_circular_array_destroy(wsd_a);

    /* Single-threaded sanity check: push 0..999, then pop_bottom must
       return them in LIFO order (999 down to 0). */
    wsd_work_stealing_deque_t* wsd_d = wsd_work_stealing_deque_create();
    int i;
    for(i = 0; i < 1000; ++i) {
        wsd_work_stealing_deque_push_bottom(wsd_d, (void*)(intptr_t)i);
    }
    for(i = 1000; i > 0; --i) {
        void* item = wsd_work_stealing_deque_pop_bottom(wsd_d);
        test_assert((intptr_t)item == i-1);
    }
    wsd_work_stealing_deque_destroy(wsd_d);

    /* Multi-threaded run: this thread is the owner (index 0); threads
       1..NUM_THREADS-1 steal via run_func. */
    wsd_d2 = wsd_work_stealing_deque_create();
    pthread_t reader[NUM_THREADS];
    for(i = 1; i < NUM_THREADS; ++i) {
        pthread_create(&reader[i], NULL, &run_func, (void*)(intptr_t)i);
    }

    /* Owner pushes every item, and pops one of every 8 itself so both
       owner-pop and thief-steal paths are exercised concurrently. */
    for(i = 0; i < SHARED_COUNT; ++i) {
        wsd_work_stealing_deque_push_bottom(wsd_d2, (void*)(intptr_t)i);
        if((i & 7) == 0) {
            void* val = wsd_work_stealing_deque_pop_bottom(wsd_d2);
            if(val != WSD_EMPTY && val != WSD_ABORT) {
                __sync_add_and_fetch(&results[0][(intptr_t)val], 1);
                __sync_add_and_fetch(&total, (intptr_t)val);
                ++run_func_count[0];
            }
        }
    }
    /* Drain whatever the thieves haven't taken. WSD_ABORT only means a
       race lost, so keep going until the deque is truly WSD_EMPTY. */
    void* val = 0;
    do {
        val = wsd_work_stealing_deque_pop_bottom(wsd_d2);
        if(val != WSD_EMPTY && val != WSD_ABORT) {
            __sync_add_and_fetch(&results[0][(intptr_t)val], 1);
            __sync_add_and_fetch(&total, (intptr_t)val);
            ++run_func_count[0];
        }
    } while(val != WSD_EMPTY);

    /* Signal the stealer threads to exit, then reap them.
       NOTE(review): done is a plain store with no barrier here — presumably
       declared volatile/atomic where it's defined; verify. */
    done = 1;
    for(i = 1; i < NUM_THREADS; ++i) {
        pthread_join(reader[i], NULL);
    }

    /* Every item 0..SHARED_COUNT-1 must have been consumed exactly once
       across all threads, and the grand total must match the arithmetic
       series sum. */
    uint64_t expected_total = 0;
    for(i = 0; i < SHARED_COUNT; ++i) {
        int sum = 0;
        int j;
        for(j = 0; j < NUM_THREADS; ++j) {
            sum += results[j][i];
        }
        test_assert(sum == 1);
        expected_total += i;
    }
    test_assert(total == expected_total);
    /* Each thread (including the owner) must have processed at least one
       item, proving stealing actually happened. */
    for(i = 0; i < NUM_THREADS; ++i) {
        test_assert(run_func_count[i] > 0);
    }
    return 0;
}