Example #1
    unsigned rand_impl(unsigned limit, sched_type t)
    {
        unsigned result = 0;
        size_t const size = stree_.size();
        if (stree_depth_ == size)
        {
            stree_node n = {limit, 0, t};
            stree_.push_back(n);
        }
        else
        {
            RL_VERIFY(size);
            stree_node& n = stree_[stree_depth_];

            // If this assert fires, your test is probably non-deterministic.
            // Check whether the test uses functions like ::rand(), static
            // variables, or object addresses (e.g. for hashing).
            // Replace ::rand() with rl::rand() and eliminate the static variables.
            RL_VERIFY(n.type_ == t);

            RL_VERIFY(n.count_ == limit);
            RL_VERIFY(n.index_ < n.count_);
            result = n.index_;
        }
        stree_depth_ += 1;
        return result;
    }
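
The comment above doubles as a usage rule: any randomness inside a test must flow through the scheduler's rand_impl() so that every iteration can be recorded in the decision tree and replayed deterministically. A minimal sketch of the test side, assuming Relacy's documented rl::test_suite/rl::simulate interface and the public rl::rand() wrapper (the test itself is invented for illustration):

    #include <relacy/relacy_std.hpp>

    struct rand_test : rl::test_suite<rand_test, 2>
    {
        void thread(unsigned /*thread_index*/)
        {
            // Scheduler-controlled choice: recorded in the decision tree,
            // so the branch is reproducible on replay. ::rand() would not be.
            if (rl::rand(2) == 0)
            {
                // branch A
            }
            else
            {
                // branch B
            }
        }
    };

    int main()
    {
        rl::simulate<rand_test>();
    }
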
Example #2
    void yield_priority(unsigned yield)
    {
        RL_VERIFY(yield);

        thread_info_t& t = *this->thread_;
        thread_id_t const& running_thread_count = this->running_threads_count;

        for (thread_id_t i = 0; i != thread_count; ++i)
        {
            thread_info_t& y = this->threads_[i];
            RL_VERIFY(0 == y.yield_priority_[t.index_]);

            if (t.index_ != i
                && y.yield_sched_count_[t.index_] < yield
                && y.state_ != thread_state_finished)
            {
                y.yield_priority_[t.index_] = yield;
                y.total_yield_priority_ += yield;
                this->block_thread(t.index_, false);
            }
            y.yield_sched_count_[t.index_] = 0;
        }

        if (0 == running_thread_count)
            purge_blocked_threads();
    }
Example #3
    bool unblock_thread(thread_id_t th)
    {
        RL_VERIFY(th < thread_count);
        thread_info_t& t = threads_[th];
        RL_VERIFY(t.state_ == thread_state_blocked);
        if (--t.block_count_) // still held by other outstanding block requests
            return false;

        running_threads.push_back(th);
        running_threads_count += 1;
        t.state_ = thread_state_running;
        return true;
    }
Example #4
    RL_INLINE
    void store(T v, memory_order mo, debug_info_param info)
    {
        RL_VERIFY(mo_acquire != mo);
        RL_VERIFY(mo_acq_rel != mo);

        switch (mo)
        {
        case mo_relaxed: return store_impl<mo_relaxed, &thread_info_base::atomic_store_relaxed>(v, info);
        case mo_release: return store_impl<mo_release, &thread_info_base::atomic_store_release>(v, info);
        case mo_seq_cst: return store_impl< mo_seq_cst, &thread_info_base::atomic_store_seq_cst>(v, info);
        default: break;
        }

        RL_VERIFY(false);
    }
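
This store() is what a test reaches when it writes through an rl::atomic<> proxy. A minimal calling-side sketch, assuming Relacy's documented $-proxy syntax (the test and its names are invented for illustration):

    #include <relacy/relacy_std.hpp>

    struct store_test : rl::test_suite<store_test, 2>
    {
        rl::atomic<int> flag;

        void before() { flag($).store(0, rl::mo_relaxed); }

        void thread(unsigned thread_index)
        {
            if (thread_index == 0)
                flag($).store(1, rl::mo_release); // dispatches to store_impl<mo_release, ...>
            else
                flag($).load(rl::mo_acquire);     // see the matching load() in Example #18
        }
    };

    int main()
    {
        rl::simulate<store_test>();
    }

Note that mo_acquire and mo_acq_rel are rejected by the RL_VERIFYs at the top of store(), matching the C++ rule that a plain store cannot carry acquire semantics.
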
Example #5
 thread_id_t create_thread()
 {
     RL_VERIFY(dynamic_thread_count_);
     thread_info_t* th = dynamic_threads_[--dynamic_thread_count_];
     unblock_thread(th->index_);
     return th->index_;
 }
Example #6
    thread_finish_result thread_finished()
    {
        RL_VERIFY(thread_->state_ == thread_state_running);
        block_thread(thread_->index_, false);
        thread_->state_ = thread_state_finished;
        finished_thread_count_ += 1;
        self().thread_finished_impl();
retry:
        if (finished_thread_count_ == thread_count)
        {
            return thread_finish_result_last;
        }
        else if (is_deadlock())
        {
            if (dynamic_thread_count_)
            {
                while (dynamic_thread_count_)
                {
                    thread_info_t* th = dynamic_threads_[--dynamic_thread_count_];
                    unblock_thread(th->index_);
                }
                goto retry;
            }
            return thread_finish_result_deadlock;
        }
        else
        {
            return thread_finish_result_normal;
        }
    }
Example #7
void context::exec_log(debug_info_param info, event_t const& ev)
{
    RL_VERIFY(collecting_history());
    disable_alloc_ += 1;
    history_.exec_log(threadx_ ? threadx_->index_ : -1, info, ev, params_.output_history);
    disable_alloc_ -= 1;
}
Example #8
 ~generic_atomic()
 {
     context& c = ctx();
     RL_VERIFY(false == c.invariant_executing);
     sign_.check($);
     c.atomic_dtor(impl_);
 }
Example #9
inline int rl_sem_getvalue(rl_sem_t* sema, int* value, debug_info_param info)
{
    RL_VERIFY(value);
    if (value)
        value[0] = sema->get_value(info);
    return 0;
}
Example #10
inline int rl_pthread_join(rl_pthread_t th, void** res, debug_info_param info)
{
    RL_VERIFY(th && res);
    res[0] = 0; //!!!
    th->wait(false, false, info);
    return 0;
}
Example #11
void* memory_mgr::alloc(size_t size)
{
    void* pp = 0;
    // Look for a cached free block of exactly this size.
    for (size_t i = 0; i != alloc_cache_.size(); ++i)
    {
        if (alloc_cache_[i].first == size)
        {
            if (alloc_cache_[i].second.size())
            {
                pp = alloc_cache_[i].second.top();
                alloc_cache_[i].second.pop();
            }
            break;
        }
    }
    if (0 == pp)
        pp = (::malloc)(size + alignment); // extra room for the size header

    if (pp)
    {
        RL_VERIFY(alignment >= sizeof(void*));
        *(size_t*)pp = size;             // stash the block size in the header
        void* p = (char*)pp + alignment; // hand out the aligned payload
        allocs_.insert(std::make_pair(p, size));
        return p;
    }
    else
    {
        throw std::bad_alloc();
    }
}
Example #12
inline int rl_pthread_create(rl_pthread_t* th, rl_pthread_attr_t* attr, void* (*func) (void*), void* arg, debug_info_param info)
{
    (void)attr;
    (void)info;//!!!
    RL_VERIFY(th && func);
    th[0] = ctx().create_thread(func, arg);
    return 0;
}
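
This wrapper is normally reached through Relacy's POSIX emulation layer, which remaps pthread_create/pthread_join inside a test and supplies the trailing debug_info argument. A sketch under stated assumptions: I am assuming the <relacy/pthread.h> remapping header and that the suite's bookkeeping for dynamically created threads follows Relacy's own rules (elided here); the worker and test names are invented.

    #include <relacy/pthread.h> // assumption: Relacy's POSIX remapping header

    static void* worker(void* arg)
    {
        return arg; // runs as a Relacy-scheduled dynamic thread
    }

    struct pthread_test : rl::test_suite<pthread_test, 1>
    {
        void thread(unsigned)
        {
            pthread_t th;
            pthread_create(&th, 0, worker, 0); // -> rl_pthread_create
            void* res;
            pthread_join(th, &res);            // -> rl_pthread_join; res is zeroed (note the //!!!)
        }
    };
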
Example #13
inline int rl_sem_post(rl_sem_t* sema, debug_info_param info)
{
    unsigned prev_count = 0;
    bool result = sema->post(1, prev_count, info);
    RL_VERIFY(result);
    (void)result;
    return 0;
}
Example #14
    bool iteration_end_impl()
    {
        RL_VERIFY(stree_depth_ == stree_.size());

        for (size_t i = stree_.size(); i != 0; --i)
        {
            stree_node& n = stree_[i - 1];
            if (n.index_ != n.count_ - 1)
            {
                stree_.resize(i);
                n.index_ += 1;
                RL_VERIFY(n.index_ < n.count_);
                return false;
            }
        }
        return true;
    }
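
iteration_end_impl() advances the saved decision tree exactly like a mixed-radix odometer: the deepest node that can still be incremented is bumped, everything below it is discarded for re-exploration, and the search ends only when every node sits at its last index. The same increment rule on a plain vector, as a standalone sketch (names invented for illustration):

    #include <vector>
    #include <cstddef>

    struct node { unsigned index, count; };

    // Mirrors iteration_end_impl(): returns true when the whole
    // decision space has been enumerated.
    bool advance(std::vector<node>& tree)
    {
        for (std::size_t i = tree.size(); i != 0; --i)
        {
            node& n = tree[i - 1];
            if (n.index != n.count - 1)
            {
                tree.resize(i); // drop deeper decisions; they will be re-made
                n.index += 1;   // try the next alternative at this depth
                return false;   // another iteration to run
            }
        }
        return true; // odometer rolled over: exhaustive search complete
    }
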
Example #15
    thread_id_t schedule(unpark_reason& reason, unsigned yield)
    {
        thread_id_t const th = self().schedule_impl(reason, yield);

        RL_VERIFY(threads_[th].state_ == thread_state_running);
        thread_ = &threads_[th];

        return th;
    }
Example #16
inline unsigned long rl_WaitForMultipleObjectsEx(unsigned long count, rl_HANDLE* objects, int wait_all, unsigned long timeout, int alertable, debug_info_param info)
{
    (void)alertable; //!!!
    //!!! support WAIT_IO_COMPLETION
    RL_VERIFY(false == alertable && "Alertable wait is not supported in WaitForMultipleObjects() yet");

    bool try_wait = (timeout == 0);
    bool is_timed = (timeout != rl_INFINITE);
    win_waitable_object** obj = reinterpret_cast<win_waitable_object**>(objects);
    size_t signaled = 0;
    sema_wakeup_reason reason = wait_for_multiple_objects(signaled, count, obj, !!wait_all, try_wait, is_timed, info);
    if (reason == sema_wakeup_reason_success)
        return rl_WAIT_OBJECT_0 + (int)signaled;
    else if (reason == sema_wakeup_reason_timeout)
        return rl_WAIT_TIMEOUT;
    RL_VERIFY(false);
    return rl_WAIT_FAILED;
}
Example #17
inline unsigned long rl_WaitForSingleObjectEx(rl_HANDLE obj, unsigned long timeout, int alertable, debug_info_param info)
{
    (void)alertable; //!!! not yet supported - support it!
    //!!! support WAIT_IO_COMPLETION
    RL_VERIFY(false == alertable && "Alertable wait is not supported in WaitForSingleObject() yet");

    bool try_wait = (timeout == 0);
    bool is_timed = (timeout != rl_INFINITE);
    sema_wakeup_reason reason = static_cast<win_waitable_object*>(obj)->wait(try_wait, is_timed, info);
    if (reason == sema_wakeup_reason_success)
        return rl_WAIT_OBJECT_0;
    else if (reason == sema_wakeup_reason_timeout)
        return rl_WAIT_TIMEOUT;
    else if (reason == sema_wakeup_reason_failed)
        return rl_WAIT_TIMEOUT; // zero-timeout try-wait that did not acquire: report timeout
    RL_VERIFY(false);
    return rl_WAIT_FAILED;
}
Example #18
    RL_INLINE
    T load(memory_order mo, debug_info_param info) const
    {
        RL_VERIFY(mo_release != mo);
        RL_VERIFY(mo_acq_rel != mo);

        switch (mo)
        {
        case mo_relaxed: return load_impl<mo_relaxed, &thread_info_base::atomic_load_relaxed>(info);
        case mo_consume: return load_impl<mo_consume, &thread_info_base::atomic_load_acquire>(info);
        case mo_acquire: return load_impl<mo_acquire, &thread_info_base::atomic_load_acquire>(info);
        case mo_seq_cst: return load_impl<mo_seq_cst, &thread_info_base::atomic_load_seq_cst>(info);
        default: break;
        }

        RL_VERIFY(false);
        return T();
    }
Example #19
    bool park_current_thread(bool is_timed, bool allow_spurious_wakeup)
    {
        if (is_timed)
        {
            timed_threads_[timed_thread_count_++] = thread_;
            RL_VERIFY(timed_thread_count_ <= thread_count);
        }

        if (allow_spurious_wakeup)
        {
            spurious_threads_[spurious_thread_count_++] = thread_;
            RL_VERIFY(spurious_thread_count_ <= thread_count);
        }

        block_thread(thread_->index_, true);

        return !is_deadlock();
    }
Example #20
 std::pair<rl::memory_order, rl::memory_order> order()
 {
     switch (index)
     {
     default: RL_VERIFY(false); // deliberately falls through to the case 0 pair
     case 0: return std::make_pair(rl::mo_relaxed, rl::mo_relaxed);
     case 1: return std::make_pair(rl::mo_release, rl::mo_relaxed);
     case 2: return std::make_pair(rl::mo_seq_cst, rl::mo_relaxed);
     case 3: return std::make_pair(rl::mo_relaxed, rl::mo_acquire);
     case 4: return std::make_pair(rl::mo_relaxed, rl::mo_seq_cst);
     }
 }
Example #21
    void block_thread(thread_id_t th, bool yield)
    {
        RL_VERIFY(th < thread_count);
        thread_info_t& t = threads_[th];
        RL_VERIFY(t.state_ != thread_state_finished);
        if (t.block_count_++) // already blocked; just deepen the nesting count
            return;

        for (thread_id_t i = 0; i != running_threads_count; ++i)
        {
            if (running_threads[i] == th)
            {
                running_threads.erase(running_threads.begin() + i);
                running_threads_count -= 1;
                t.state_ = thread_state_blocked;
                self().on_thread_block(th, yield);
                return;
            }
        }
        RL_VERIFY(false);
    }
Example #22
 ~generic_mutex_data_impl()
 {
     context& c = ctx();
     RL_VERIFY(false == c.invariant_executing);
     if (exclusive_owner_ != state_free
         || exclusive_waitset_
         || shared_waitset_)
     {
         debug_info info = $;
         RL_HIST(event_t) {this, event_t::type_destroying_owned_mutex} RL_HIST_END();
         RL_ASSERT_IMPL(false, test_result_destroying_owned_mutex, "", $);
     }
 }
Example #23
inline int rl_sem_wait(rl_sem_t* sema, debug_info_param info)
{
    sema_wakeup_reason reason = sema->wait(false, false, info);
    if (reason == sema_wakeup_reason_success)
        return 0;
    if (reason == sema_wakeup_reason_spurious)
    {
        set_errno(RL_EINTR);
        return -1;
    }
    RL_VERIFY(false);
    return -1;
}
Example #24
inline char const* format(memory_order mo)
{
    switch (mo)
    {
    case mo_relaxed: return "relaxed";
    case mo_consume: return "consume";
    case mo_acquire: return "acquire";
    case mo_release: return "release";
    case mo_acq_rel: return "acq_rel";
    case mo_seq_cst: return "seq_cst";
    }
    RL_VERIFY(!"invalid value of memory order");
    throw std::logic_error("invalid value of memory order");
}
Example #25
 generic_mutex_data_impl(bool is_rw, bool is_exclusive_recursive, bool is_shared_recursive, bool failing_try_lock)
     : is_rw_(is_rw)
     , is_exclusive_recursive_(is_exclusive_recursive)
     , is_shared_recursive_(is_shared_recursive)
     , failing_try_lock_(failing_try_lock)
     , exclusive_owner_(state_free)
     , exclusive_recursion_count_(0)
     , shared_lock_count_(0)
     , try_lock_failed_()
 {
     context& c = ctx();
     (void)c;
     RL_VERIFY(false == c.invariant_executing);
     foreach<thread_count>(shared_owner_, &assign_zero);
 }
Example #26
    RL_INLINE
    bool compare_exchange(bool_t<spurious_failures>, T& cmp, T xchg, memory_order mo, debug_info_param info)
    {
        switch (mo)
        {
        case mo_relaxed: return compare_swap_impl<spurious_failures, mo_relaxed, &thread_info_base::atomic_rmw_relaxed, mo_relaxed, &thread_info_base::atomic_load_relaxed_rmw>(cmp, xchg, info);
        case mo_consume: return compare_swap_impl<spurious_failures, mo_consume, &thread_info_base::atomic_rmw_acquire, mo_consume, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);
        case mo_acquire: return compare_swap_impl<spurious_failures, mo_acquire, &thread_info_base::atomic_rmw_acquire, mo_acquire, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);
        case mo_release: return compare_swap_impl<spurious_failures, mo_release, &thread_info_base::atomic_rmw_release, mo_relaxed, &thread_info_base::atomic_load_relaxed_rmw>(cmp, xchg, info);
        case mo_acq_rel: return compare_swap_impl<spurious_failures, mo_acq_rel, &thread_info_base::atomic_rmw_acq_rel, mo_acquire, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);
        case mo_seq_cst: return compare_swap_impl<spurious_failures, mo_seq_cst, &thread_info_base::atomic_rmw_seq_cst, mo_seq_cst, &thread_info_base::atomic_load_seq_cst_rmw>(cmp, xchg, info);
        }

        RL_VERIFY(false);
        return false;
    }
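
As with load and store, tests reach this dispatch through the $-proxy. A minimal sketch, assuming Relacy exposes the C++11-style single-order compare_exchange_weak on the proxy (the test is invented; the weak form is what exercises the spurious_failures path):

    #include <relacy/relacy_std.hpp>

    struct cas_test : rl::test_suite<cas_test, 2>
    {
        rl::atomic<int> x;

        void before() { x($).store(0, rl::mo_relaxed); }

        void thread(unsigned)
        {
            int expected = 0;
            // On success this is an acq_rel RMW; on failure it degrades to the
            // weaker load order chosen by the mapping above (acquire here).
            x($).compare_exchange_weak(expected, 1, rl::mo_acq_rel);
        }
    };

    int main()
    {
        rl::simulate<cas_test>();
    }
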
Example #27
    RL_INLINE
    T rmw(rmw_type_t<type>, Y op, memory_order mo, debug_info_param info)
    {
        switch (mo)
        {
        case mo_relaxed: return rmw_impl<Y, mo_relaxed, &thread_info_base::atomic_rmw_relaxed>(rmw_type_t<type>(), op, info);
        case mo_consume: return rmw_impl<Y, mo_consume, &thread_info_base::atomic_rmw_acquire>(rmw_type_t<type>(), op, info);
        case mo_acquire: return rmw_impl<Y, mo_acquire, &thread_info_base::atomic_rmw_acquire>(rmw_type_t<type>(), op, info);
        case mo_release: return rmw_impl<Y, mo_release, &thread_info_base::atomic_rmw_release>(rmw_type_t<type>(), op, info);
        case mo_acq_rel: return rmw_impl<Y, mo_acq_rel, &thread_info_base::atomic_rmw_acq_rel>(rmw_type_t<type>(), op, info);
        case mo_seq_cst: return rmw_impl<Y, mo_seq_cst, &thread_info_base::atomic_rmw_seq_cst>(rmw_type_t<type>(), op, info);
        }

        RL_VERIFY(false);
        return T();
    }
Example #28
int rl_int_futex_impl(context& c,
                      atomic<int>* uaddr,
                      int op,
                      int val,
                      struct timespec const* timeout,
                      atomic<int>* uaddr2,
                      int val3,
                      debug_info_param info)
{
    (void)uaddr2;
    (void)val3;
    if (op == RL_FUTEX_WAIT)
    {
        c.sched();
        c.atomic_thread_fence_seq_cst();
        int v0;
        {
            preemption_disabler pd(c);
            v0 = uaddr->load(memory_order_acquire, info);
        }
        if (v0 != val)
            return RL_EWOULDBLOCK;
        unpark_reason reason = uaddr->wait(c, timeout != 0, true, info);
        if (reason == unpark_reason_normal)
            return 0;
        else if (reason == unpark_reason_timeout)
            return RL_ETIMEDOUT;
        else if (reason == unpark_reason_spurious)
            return RL_EINTR;
        RL_VERIFY(false);
        return RL_EINVAL;
    }
    else if (op == RL_FUTEX_WAKE)
    {
        if (val <= 0)
            return 0;

        c.sched();
        c.atomic_thread_fence_seq_cst();
        return uaddr->wake(c, val, info);
    }
    else
    {
        return RL_EINVAL;
    }
}
Example #29
    generic_atomic()
    {
        context& c = ctx();
        RL_VERIFY(false == c.invariant_executing);
        impl_ = c.atomic_ctor(this);
        initialized_ = false;
        value_ = T();
        already_failed_ = false;

        if (val(strong_init))
        {
            unsigned const index = c.threadx_->atomic_init(impl_);
            last_index_ = index;
            initialized_ = true;
            history_[index] = T();
            value_ = T();
        }
    }
Example #30
inline int rl_sem_trywait(rl_sem_t* sema, debug_info_param info)
{
    sema_wakeup_reason reason = sema->wait(true, false, info);
    if (sema_wakeup_reason_success == reason)
        return 0;
    if (sema_wakeup_reason_failed == reason)
    {
        set_errno(RL_EAGAIN);
        return -1;
    }
    if (sema_wakeup_reason_spurious == reason)
    {
        set_errno(RL_EINTR);
        return -1;
    }
    RL_VERIFY(false);
    return -1;
}
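
Taken together, the sem_* wrappers in Examples #9, #13, #23, and #30 translate the simulated semaphore's sema_wakeup_reason into POSIX conventions: 0 on success, -1 with errno set to RL_EINTR (spurious wakeup) or RL_EAGAIN (failed try-wait). A calling-side sketch, assuming Relacy's POSIX emulation remaps the sem_* functions to these wrappers (the test is invented; a real caller would retry on EINTR):

    #include <relacy/pthread.h> // assumption: Relacy's POSIX remapping header

    struct sem_test : rl::test_suite<sem_test, 2>
    {
        sem_t sem; // remapped to rl_sem_t

        void before() { sem_init(&sem, 0, 0); }
        void after()  { sem_destroy(&sem); }

        void thread(unsigned thread_index)
        {
            if (thread_index == 0)
            {
                sem_post(&sem); // rl_sem_post: its RL_VERIFY demands success
            }
            else if (sem_wait(&sem) == -1)
            {
                // errno == EINTR here models a spurious wakeup
            }
        }
    };

    int main()
    {
        rl::simulate<sem_test>();
    }
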