Example #1
void jl_safepoint_wait_gc(void)
{
#ifdef JULIA_ENABLE_THREADING
    // The thread should have set this already
    assert(jl_get_ptls_states()->gc_state != 0);
    // Use a normal volatile load in the loop for speed until the GC finishes.
    // Then use an acquire load to make sure the GC result is visible on this thread.
    while (jl_gc_running || jl_atomic_load_acquire(&jl_gc_running)) {
        jl_cpu_pause(); // yield?
    }
#else
    assert(0 && "No one should wait for the GC on another thread");
#endif
}
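The loop condition loads the same flag twice on purpose: the first, plain load keeps the spin cheap, and thanks to short-circuiting the acquire load only executes once the flag reads clear, so the collector's writes become visible before this thread continues. Below is a minimal sketch of the same idiom in portable C11 atomics, with a hypothetical gc_running flag standing in for jl_gc_running:

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical flag standing in for jl_gc_running. */
static atomic_bool gc_running;

static void wait_for_gc(void)
{
    /* Cheap relaxed loads while the flag is set; the short-circuit means
     * the acquire load only runs once the flag reads false, ordering the
     * collector's writes before anything this thread does next. */
    while (atomic_load_explicit(&gc_running, memory_order_relaxed) ||
           atomic_load_explicit(&gc_running, memory_order_acquire)) {
        /* jl_cpu_pause() would go here to ease contention while spinning */
    }
}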
Example #2
int ti_threadgroup_join(ti_threadgroup_t *tg, int16_t ext_tid)
{
    int i;

    // flip this thread's private sense to announce arrival at the barrier
    tg->thread_sense[tg->tid_map[ext_tid]]->sense
        = !tg->thread_sense[tg->tid_map[ext_tid]]->sense;
    if (tg->tid_map[ext_tid] == 0) {
        // thread 0 waits until every other thread has flipped its sense
        for (i = 1;  i < tg->num_threads;  ++i) {
            while (tg->thread_sense[i]->sense == tg->group_sense)
                jl_cpu_pause();
        }
    }

    return 0;
}
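Example #2 is the join half of a sense-reversing barrier: every thread flips its private sense to announce arrival, and thread 0 spins until all workers have flipped theirs; the matching fork half in Example #3 below releases the workers by publishing a new group sense. The following is a minimal self-contained sketch of the same pattern in C11 atomics (all names hypothetical, pause/sleep logic omitted):

#include <stdatomic.h>
#include <stdbool.h>

#define NTHREADS 4

static atomic_bool group_sense = true;      /* published by thread 0 in fork */
static atomic_bool thread_sense[NTHREADS];  /* per-thread sense, all start false */

/* Thread 0 releases the workers by making the group sense match the
 * current per-thread sense; workers spin until that happens. */
static void barrier_fork(int tid)
{
    bool s = atomic_load_explicit(&thread_sense[tid], memory_order_relaxed);
    if (tid == 0)
        atomic_store_explicit(&group_sense, s, memory_order_release);
    else
        while (atomic_load_explicit(&group_sense, memory_order_acquire) != s)
            ;  /* jl_cpu_pause() / sleep would go here */
}

/* Every thread announces arrival by flipping its own sense; thread 0
 * then waits until all workers have flipped theirs. */
static void barrier_join(int tid)
{
    bool s = !atomic_load_explicit(&thread_sense[tid], memory_order_relaxed);
    atomic_store_explicit(&thread_sense[tid], s, memory_order_release);
    if (tid == 0)
        for (int i = 1; i < NTHREADS; i++)
            while (atomic_load_explicit(&thread_sense[i], memory_order_acquire) != s)
                ;  /* jl_cpu_pause() would go here */
}

Because each round only flips the sense rather than resetting a counter, the barrier can be reused immediately without any extra synchronization between rounds.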
Example #3
int ti_threadgroup_fork(ti_threadgroup_t *tg, int16_t ext_tid, void **bcast_val)
{
    if (tg->tid_map[ext_tid] == 0) {
        tg->envelope = bcast_val ? *bcast_val : NULL;
        // synchronize `tg->envelope` and `tg->group_sense`
        jl_atomic_store_release(&tg->group_sense, tg->thread_sense[0]->sense);

        // if it's possible that threads are sleeping, signal them
        if (tg->sleep_threshold) {
            uv_mutex_lock(&tg->alarm_lock);
            uv_cond_broadcast(&tg->alarm);
            uv_mutex_unlock(&tg->alarm_lock);
        }
    }
    else {
        // spin up to threshold ns (count sheep), then sleep
        uint64_t spin_ns;
        uint64_t spin_start = 0;
        // synchronize `tg->envelope` and `tg->group_sense`
        while (jl_atomic_load_acquire(&tg->group_sense) !=
               tg->thread_sense[tg->tid_map[ext_tid]]->sense) {
            if (tg->sleep_threshold) {
                if (!spin_start) {
                    // Lazily initialize spin_start since uv_hrtime is expensive
                    spin_start = uv_hrtime();
                    continue;
                }
                spin_ns = uv_hrtime() - spin_start;
                // In case uv_hrtime is not monotonic, we'll sleep earlier
                if (spin_ns >= tg->sleep_threshold) {
                    uv_mutex_lock(&tg->alarm_lock);
                    if (tg->group_sense !=
                        tg->thread_sense[tg->tid_map[ext_tid]]->sense) {
                        uv_cond_wait(&tg->alarm, &tg->alarm_lock);
                    }
                    uv_mutex_unlock(&tg->alarm_lock);
                    spin_start = 0;
                    continue;
                }
            }
            jl_cpu_pause();
        }
        if (bcast_val)
            *bcast_val = tg->envelope;
    }

    return 0;
}
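The worker branch above shows the spin-then-sleep pattern: spin for up to sleep_threshold nanoseconds, then re-check the condition while holding the alarm lock before waiting on the condition variable, so a broadcast issued between the unlocked check and the wait cannot be lost. Here is a minimal sketch of the same pattern using POSIX threads and a hypothetical released flag:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

/* Hypothetical shared state standing in for the threadgroup fields. */
static pthread_mutex_t alarm_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  alarm_cv   = PTHREAD_COND_INITIALIZER;
static _Atomic bool    released;

static uint64_t now_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

/* Spin for up to threshold_ns, then block on the condition variable.
 * The flag is rechecked under the mutex so a broadcast issued between
 * the unlocked check and the wait cannot be lost. */
static void wait_released(uint64_t threshold_ns)
{
    uint64_t start = now_ns();
    while (!released) {
        if (now_ns() - start >= threshold_ns) {
            pthread_mutex_lock(&alarm_lock);
            while (!released)
                pthread_cond_wait(&alarm_cv, &alarm_lock);
            pthread_mutex_unlock(&alarm_lock);
            return;
        }
        /* a CPU pause hint (jl_cpu_pause) keeps the spin polite */
    }
}

/* The waking side mirrors Example #3: set the flag first, then
 * broadcast while holding the same lock. */
static void release_all(void)
{
    released = true;
    pthread_mutex_lock(&alarm_lock);
    pthread_cond_broadcast(&alarm_cv);
    pthread_mutex_unlock(&alarm_lock);
}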
Example #4
File: jlapi.c Project: iglpdc/julia
JL_DLLEXPORT void (jl_cpu_pause)(void)
{
    jl_cpu_pause();
}
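The parentheses around the name are the usual way to define a real, exported function for something that is otherwise provided as a function-like macro or inline, as jl_cpu_pause typically is in Julia's headers: a function-like macro only expands when its name is immediately followed by '(', so '(jl_cpu_pause)' escapes expansion while the plain call in the body still picks up the inline form. A minimal sketch with a hypothetical macro:

#include <stdio.h>

/* Hypothetical function-like macro standing in for the inline
 * definition in the headers. */
#define my_pause() fputs("inline pause\n", stdout)

/* The parentheses keep the macro from expanding here, so this defines
 * a real, linkable function of the same name... */
void (my_pause)(void)
{
    my_pause();  /* ...whose body still expands to the inline form */
}

int main(void)
{
    (my_pause)();   /* forces a call to the exported function */
    my_pause();     /* expands the macro directly */
    return 0;
}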
Example #5
File: partr.c Project: KDr2/julia
JL_DLLEXPORT jl_task_t *jl_task_get_next(jl_value_t *getsticky)
{
    jl_ptls_t ptls = jl_get_ptls_states();
    // spin briefly before blocking when the workqueue is empty
    size_t spin_count = 0;
    jl_task_t *task;

    while (1) {
        jl_gc_safepoint();
        task = get_next_task(getsticky);
        if (task)
            return task;

        if (!_threadedregion) {
            spin_count = 0;
            if (ptls->tid == 0) {
                if (jl_run_once(jl_global_event_loop()) == 0) {
                    task = get_next_task(getsticky);
                    if (task)
                        return task;
#ifdef _OS_WINDOWS_
                    Sleep(INFINITE);
#else
                    pause();
#endif
                }
            }
            else {
                int sleepnow = 0;
                uv_mutex_lock(&sleep_lock);
                if (!_threadedregion) {
                    sleepnow = 1;
                }
                else {
                    uv_mutex_unlock(&sleep_lock);
                }
                if (sleepnow) {
                    int8_t gc_state = jl_gc_safe_enter(ptls);
                    uv_cond_wait(&sleep_alarm, &sleep_lock);
                    uv_mutex_unlock(&sleep_lock);
                    jl_gc_safe_leave(ptls, gc_state);
                }
            }
        }
        else {
            if (++spin_count > 1000 && jl_atomic_load(&jl_uv_n_waiters) == 0 && jl_mutex_trylock(&jl_uv_mutex)) {
                task = get_next_task(getsticky);
                if (task) {
                    JL_UV_UNLOCK();
                    return task;
                }
                uv_loop_t *loop = jl_global_event_loop();
                loop->stop_flag = 0;
                uv_run(loop, UV_RUN_ONCE);
                JL_UV_UNLOCK();
            }
            else {
                jl_cpu_pause();
            }
        }
    }
}