bool enqueue_node(typename super::mailbox_element* node,
                   int next_state = ready) {
     CPPA_REQUIRE(node->marked == false);
     if (this->m_mailbox._push_back(node)) {
         for (;;) {
             int state = m_state.load();
             switch (state) {
                 case blocked: {
                     if (m_state.compare_exchange_weak(state, next_state)) {
                         CPPA_REQUIRE(this->m_scheduler != nullptr);
                         if (next_state == ready) {
                             this->m_scheduler->enqueue(this);
                         }
                         return true;
                     }
                     break;
                 }
                 case about_to_block: {
                     if (m_state.compare_exchange_weak(state, ready)) {
                         return false;
                     }
                     break;
                 }
                 default: return false;
             }
         }
     }
     return false;
 }
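A minimal, self-contained reduction of the state-transition pattern above (the enum values and try_wake are hypothetical stand-ins for the actor framework's types): spin until the state can be moved from blocked to ready, and give up once another thread owns the transition.

#include <atomic>

enum state_t { ready, blocked, about_to_block, done };
std::atomic<int> m_state{blocked};

bool try_wake() {
    for (;;) {
        int state = m_state.load();
        if (state != blocked && state != about_to_block)
            return false; // another thread already owns the transition
        // on failure, compare_exchange_weak reloads the current value
        // into state, so the loop simply retries
        if (m_state.compare_exchange_weak(state, ready))
            return true;
    }
}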
Example #2
 // atomically sets m_stack back and enqueues all elements to the cache
 bool fetch_new_data(pointer end_ptr) {
     CPPA_REQUIRE(m_head == nullptr);
     CPPA_REQUIRE(!end_ptr || end_ptr == stack_empty_dummy());
     pointer e = m_stack.load();
     // must not be called on a closed queue
     CPPA_REQUIRE(e != nullptr);
     // it's enough to check this once, since only the owner is allowed
     // to close the queue and only the owner is allowed to call this
     // member function
     while (e != end_ptr) {
         if (m_stack.compare_exchange_weak(e, end_ptr)) {
             if (is_dummy(e)) {
                 // only use-case for this is closing a queue
                 CPPA_REQUIRE(end_ptr == nullptr);
                 return false;
             }
             while (e) {
                 CPPA_REQUIRE(!is_dummy(e));
                 auto next = e->next;
                 e->next = m_head;
                 m_head = e;
                 e = next;
             }
             return true;
         }
         // next iteration
     }
     return false;
 }
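The CAS loop above exists so that an arbitrary end_ptr can be installed; when the goal is simply to take everything, a single exchange suffices. A minimal sketch of the "swap the whole stack out, then reverse it into the cache" idea (node and the member names are hypothetical):

#include <atomic>

struct node { int value; node* next; };
std::atomic<node*> m_stack{nullptr};
node* m_head = nullptr; // owner-only cache, FIFO order

bool fetch_all() {
    node* e = m_stack.exchange(nullptr); // atomically take the whole LIFO chain
    while (e) { // reverse it so the cache pops in FIFO order
        node* next = e->next;
        e->next = m_head;
        m_head = e;
        e = next;
    }
    return m_head != nullptr;
}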
Example #3
    void chain_pending_nodes(node* first, node* last)
    {
        last->next = to_be_deleted;

        while (!to_be_deleted.compare_exchange_weak(
                   last->next, first));
    }
Example #4
File: main.cpp Project: CCJY/coliru
my_type const& instantiate() {
    static std::aligned_storage<
      sizeof(my_type), alignof(my_type)
    >::type storage;
    static std::atomic<int> flag;

    while (flag < 2) {
        // all threads spin until the object is properly initialized
        int expected = 0;
        if (flag.compare_exchange_weak(expected, 1)) {
            // only one thread succeeds at the compare_exchange.
            try {
                ::new (&storage) my_type;
            } catch(...) {
                // Initialization failed. Let another thread try.
                flag = 0;
                throw;
            }
            // Success!
            std::atexit([] {
                reinterpret_cast<my_type&>(storage).~my_type();
            }); 
            flag = 2;
        }
    }

    return reinterpret_cast<my_type&>(storage);
}
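For comparison: since C++11, block-scope statics are initialized exactly once in a thread-safe way ("magic statics"), so the hand-rolled spin flag above can usually be replaced by the sketch below. If the constructor throws, initialization counts as not done and is retried on the next call, matching the flag = 0 path above.

my_type const& instantiate() {
    static my_type instance; // thread-safe one-time construction since C++11
    return instance;
}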
Example #5
 // atomically sets stack_ back and enqueues all elements to the cache
 bool fetch_new_data(pointer end_ptr) {
   CAF_ASSERT(end_ptr == nullptr || end_ptr == stack_empty_dummy());
   pointer e = stack_.load();
   // must not be called on a closed queue
   CAF_ASSERT(e != nullptr);
   // fetching data while blocked is an error
   CAF_ASSERT(e != reader_blocked_dummy());
   // it's enough to check this once, since only the owner is allowed
   // to close the queue and only the owner is allowed to call this
   // member function
   while (e != end_ptr) {
     if (stack_.compare_exchange_weak(e, end_ptr)) {
       // fetching data while blocked is an error
       CAF_ASSERT(e != reader_blocked_dummy());
       if (is_dummy(e)) {
         // only use-case for this is closing a queue
         CAF_ASSERT(end_ptr == nullptr);
         return false;
       }
       while (e) {
         CAF_ASSERT(! is_dummy(e));
         auto next = e->next;
         e->next = head_;
         head_ = e;
         e = next;
       }
       return true;
     }
     // next iteration
   }
   return false;
 }
Example #6
 // Concurrent writes to the same cache element can result in invalid cache
 // elements, causing pointer addresses to be unavailable in the cache even
 // though they should be, i.e. false cache misses. While this can cause a
 // slow-down, the cost of keeping the cache thread-local or atomic is
 // much higher (yes, this was measured).
 void push(const void* P) {
     unsigned acquiredVal = mostRecent;
     // on failure, compare_exchange_weak reloads the current value into
     // acquiredVal, so no explicit re-read is needed before retrying
     while(!mostRecent.compare_exchange_weak(acquiredVal, (acquiredVal+1)%lines.size()))
         ;
     lines[acquiredVal] = P;
 }
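If lines.size() is a power of two, the CAS loop can be replaced by a single fetch_add and the unsigned counter allowed to wrap. A sketch under that assumption, with the slots made relaxed-atomic so the racing writes stay well-defined (on common platforms a relaxed store compiles to a plain store):

#include <array>
#include <atomic>

std::atomic<unsigned> mostRecent{0};
std::array<std::atomic<const void*>, 64> lines{};

void push(const void* P) {
    // wrap-around stays consistent because 64 divides 2^32
    unsigned slot = mostRecent.fetch_add(1, std::memory_order_relaxed) % lines.size();
    lines[slot].store(P, std::memory_order_relaxed);
}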
Example #7
 std::shared_ptr<T> pop()
 {
     node* old_head=head.load();
     while(old_head &&
           !head.compare_exchange_weak(old_head,old_head->next));
     return old_head ? old_head->data : std::shared_ptr<T>();
 }
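Note that this pop never frees the node it unlinks: deleting it immediately could race with another thread that has already loaded old_head and is about to read old_head->next. Safe reclamation needs a deferred scheme such as the to_be_deleted chain in Examples #3 and #26 or the SMR (hazard-pointer) approach in Example #8.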
Example #8
File: smr.cpp Project: arssivka/2rc
 int pop(unsigned index)
 {
     node* n = 0;
     for (;;)
     {
         n = smr_acquire(index, head_);
         if (0 == n)
             break;
         node* next = n->next_.load(rl::memory_order_relaxed);
         if (head_.compare_exchange_weak(n, next, rl::memory_order_acquire))
             break;
         smr_release(index);
     }
     smr_release(index);
     if (n)
     {
         int data = n->VAR(data_);
         smr_defer(index, n);
         return data;
     }
     else
     {
         return 0;
     }
 }
Example #9
    void push(T const& data)
    {
        node* const new_node = new node(data);
        new_node->next = head.load();

        while (!head.compare_exchange_weak(new_node->next, new_node));
    }
Example #10
    void clearTag(std::atomic<Node::Ptr> &loc, std::memory_order mo) {
        // We want to just xadd(-1) the thing, but C++ doesn't let us
        // because of the level of obstruction^Wabstraction that
        // tagged_ptr adds.
        //
        // Or maybe what we want to do is to align Node on 256 boundaries
        // so that we can do a one byte write to clear the locked flag.
        // That is *especially* not a thing in the C++ memory model.
#if CLEAR_RMW
        // This is probably undefined
        auto &intloc = reinterpret_cast<std::atomic<uintptr_t> &>(loc);
        intloc.fetch_and(~Node::Ptr::kTagBits, mo);
#elif CLEAR_BYTE_WRITE
        // This is certainly undefined, and only works on little endian
        // C++ really does not have any story for mixed-size atomics
        // and mixed-size atomics are pretty funky in practice.
        // Linux does do this on some platforms, though.
        auto &byteloc = reinterpret_cast<std::atomic<uint8_t> &>(loc);
        byteloc.store(0, mo);
#else
        Node::Ptr state(nullptr, 1);
        while (!loc.compare_exchange_weak(state, Node::Ptr(state, 0),
                                          mo, std::memory_order_relaxed)) {
        }
#endif
    }
Example #11
 void push(const T& data)
 {
     node *const new_node = new node(data);
     new_node->next = head.load();
     // loop to guarantee that new_node->next is correct
     while (!head.compare_exchange_weak(new_node->next, new_node));
 }
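Combining this push with the pop from Example #7 yields a minimal Treiber-stack skeleton. This is a sketch only; node reclamation is deliberately left out (see the note under Example #7):

#include <atomic>
#include <memory>

template <typename T>
class lock_free_stack {
    struct node {
        std::shared_ptr<T> data;
        node* next;
        node(T const& d) : data(std::make_shared<T>(d)), next(nullptr) {}
    };
    std::atomic<node*> head{nullptr};
public:
    void push(T const& data) {
        node* const new_node = new node(data);
        new_node->next = head.load();
        while (!head.compare_exchange_weak(new_node->next, new_node))
            ;
    }
    std::shared_ptr<T> pop() {
        node* old_head = head.load();
        while (old_head && !head.compare_exchange_weak(old_head, old_head->next))
            ;
        return old_head ? old_head->data : std::shared_ptr<T>();
    }
};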
Example #12
void CondVar::Wait()
{
    unsigned dwWaitingForSignal = m_dwWaitingForSignal.load(std::memory_order_seq_cst);
    m_dwWaitingForSignal.store(dwWaitingForSignal + 1, std::memory_order_seq_cst);
    RL_ASSERT(m_lMutex.load(std::memory_order_seq_cst) < 0);

    int lMutex = m_lMutex.load(std::memory_order_seq_cst);
    for (;;)
    {
        unsigned dwWaitingToOwn = lMutex & 0x7FFFFFFFu;
        RL_ASSERT(dwWaitingToOwn >= dwWaitingForSignal);
        if (dwWaitingToOwn == dwWaitingForSignal)
        {
            if (m_lMutex.compare_exchange_weak(lMutex, dwWaitingToOwn + 1, std::memory_order_seq_cst))
                break;
        }
        else
        {
            SetEvent(m_xhEvtEnter);
            break;
        }
    }

    WaitForSingleObject(m_xhSemRelease, INFINITE);
    WaitForSingleObject(m_xhEvtEnter, INFINITE);

    RL_ASSERT(m_lMutex.load(std::memory_order_seq_cst) < 0);
}
Example #13
 void push(T* what) {
     T* e = m_stack.load();
     for (;;) {
         what->next = e;
         if (!e) {
             lock_type lock(m_mtx);
             if (m_stack.compare_exchange_weak(e, what)) {
                 m_cv.notify_one();
                 return;
             }
         }
         // compare_exchange_weak stores the
         // new value to e if the operation fails
         else if (m_stack.compare_exchange_weak(e, what)) return;
     }
 }
Example #14
File: 3.cpp Project: AmesianX/lbd
void append (int val) {     // append an element to the list
  Node* oldHead = list_head;
  Node* newNode = new Node {val,oldHead};

  // what follows is equivalent to: list_head = newNode, but in a thread-safe way:
  while (!list_head.compare_exchange_weak(oldHead,newNode))
    newNode->next = oldHead;
}
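Passing newNode->next itself as the expected value, as in Examples #9 and #11, folds this re-assignment into the CAS call: on failure, compare_exchange_weak writes the freshly observed head straight into newNode->next.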
Example #15
// Register trace consumer
void trace::registerConsumer(trace::TraceConsumer *Consumer) {
  TraceConsumerListNode *Node = new TraceConsumerListNode {Consumer, nullptr};
  do {
    Node->Next = consumers.load(std::memory_order_relaxed);
  } while(!consumers.compare_exchange_weak(Node->Next, Node,
                                           std::memory_order_release,
                                           std::memory_order_relaxed));
}
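The release/relaxed ordering pair here is deliberate: release on success publishes the fully constructed Node to any thread that performs an acquire load of consumers, while relaxed on failure is sufficient because the reloaded pointer is only fed back into the next loop iteration.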
Example #16
 uint32_t fetchWrapIncrement(std::atomic<uint32_t> &shared){
     uint32_t oldValue = shared.load();
     uint32_t newValue;
     do {
         newValue = (oldValue+1)%nElems;
     } while (!shared.compare_exchange_weak(oldValue, newValue));
     return oldValue;
 }
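A minimal usage sketch, assuming nElems is a constant visible at the call site (it is not defined in the snippet above):

#include <atomic>
#include <cstdint>

constexpr uint32_t nElems = 16;       // assumed by fetchWrapIncrement
std::atomic<uint32_t> ringIndex{0};
// claims the current index and advances it, wrapping at nElems:
// uint32_t slot = fetchWrapIncrement(ringIndex);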
Example #17
 void lock() {
     while (1) {
         while (m_lock == to_underlying(lock_state::LOCKED));
         long old = to_underlying(lock_state::UNLOCKED);
         if (m_lock.compare_exchange_weak(old, to_underlying(lock_state::LOCKED),
                 // acquire (not release) on success: taking the lock must
                 // order the critical section after the CAS
                 std::memory_order_acquire, std::memory_order_relaxed))
             return;
     }
 }
Example #18
File: main.cpp Project: CCJY/coliru
void update_cur_best(std::atomic<int>& best, int a, int b) {
  if (a < b) {
    a = b;
  }
  
  auto cur_best = best.load(std::memory_order_relaxed);
  while (cur_best < a && !best.compare_exchange_weak(cur_best, a))
    ;
}
Example #19
 int compare_exchange_state(int expected, int new_value) {
     int e = expected;
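     // compare_exchange_weak may fail spuriously, i.e. with the value
     // unchanged; retry while e still equals expected, otherwise hand
     // the genuinely conflicting value back to the caller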
     do {
         if (m_state.compare_exchange_weak(e, new_value)) {
             return new_value;
         }
     }
     while (e == expected);
     return e;
 }
Example #20
 int try_lock() {
     if (value())
         return -1;
     long old = to_underlying(lock_state::UNLOCKED);
     if (m_lock.compare_exchange_weak(old, to_underlying(lock_state::LOCKED),
                 // acquire on success: this CAS takes the lock
                 std::memory_order_acquire, std::memory_order_relaxed))
         return 0;
     else
         return -1;
 }
Example #21
 void read_unlock() {
     long oldval, newval;
     while (1) {
         oldval = value();
         newval = oldval - 2;
         if (m_lock.compare_exchange_weak(oldval, newval,
                 std::memory_order_release, std::memory_order_relaxed))
             break;
     }
 }
Example #22
 bool Pop() {
   int val = val_.load(std::memory_order_relaxed);
   for (;;) {
     VERIFY_GE(val, 0);
     VERIFY_LE(val, kQueueSize);
     if (val == 0) return false;
     if (val_.compare_exchange_weak(val, val - 1, std::memory_order_relaxed))
       return true;
   }
 }
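A hypothetical Push() counterpart, mirroring the same relaxed CAS loop in the other direction (VERIFY_* and kQueueSize are assumed from the surrounding context):

 bool Push() {
   int val = val_.load(std::memory_order_relaxed);
   for (;;) {
     VERIFY_GE(val, 0);
     VERIFY_LE(val, kQueueSize);
     if (val == kQueueSize) return false; // queue already full
     if (val_.compare_exchange_weak(val, val + 1, std::memory_order_relaxed))
       return true;
   }
 }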
Example #23
 void read_lock() {
     long oldval, newval;
     while (1) {
         // lower bit is 1 when there's a write lock
         while ((oldval = value()) == to_underlying(lock_state::LOCKED));
         newval = oldval + 2;
         if (m_lock.compare_exchange_weak(oldval, newval,
                 // acquire on success: this CAS takes the read lock
                 std::memory_order_acquire, std::memory_order_relaxed))
             break;
     }
 }
Example #24
void CondVar::Enter()
{
   int lMutex = m_lMutex.load(std::memory_order_seq_cst);
   for (;;)
   {
     if( lMutex >= 0 )
     {
         if (m_lMutex.compare_exchange_weak(lMutex, lMutex | 0x80000000u, std::memory_order_seq_cst))
            break;
     }
     else
     {
        if (false == m_lMutex.compare_exchange_weak(lMutex, lMutex + 1, std::memory_order_seq_cst))
            continue;
        WaitForSingleObject(m_xhEvtEnter, INFINITE);
        RL_ASSERT(m_lMutex.load(std::memory_order_seq_cst) < 0);
        break;
     }
   }
}
Example #25
 pc_region* acquire()
 {
     pc_sys_anchor cmp (head.load(rl::memory_order_relaxed));
     pc_sys_anchor xchg;
     do
     {
         xchg.refcnt = cmp.refcnt + 2;
         xchg.region = cmp.region;
     }
     while (false == head.compare_exchange_weak(cmp, xchg, rl::memory_order_acquire));
     return cmp.region;
 }
Example #26
    void chain_pending_nodes(node* first, node* last)
    {
        // replace the next pointer from the last node with 
        // the current to_be_deleted pointer
        last->next = to_be_deleted;
        // store the first node in the chain as the new to_be_deleted pointer
        // have to use compare_exchange_weak in a loop here in order to ensure
        // that you don't leak any nodes that have been added by another thread
        while (!to_be_deleted.compare_exchange_weak(
            last->next,first));

    }
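A self-contained sketch of the same splice idiom with a hypothetical node type:

#include <atomic>

struct node { node* next; };
std::atomic<node*> to_be_deleted{nullptr};

void splice(node* first, node* last) {
    last->next = to_be_deleted.load();
    // on failure, the CAS reloads the current head into last->next, so
    // nodes published concurrently by other threads are never lost
    while (!to_be_deleted.compare_exchange_weak(last->next, first))
        ;
}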
Example #27
 static void dispose(std::atomic<T*>& ptr) {
   for (;;) {
     auto p = ptr.load();
     if (p == nullptr) {
       return;
     } else if (ptr.compare_exchange_weak(p, nullptr)) {
       // the successful CAS has already stored nullptr; assigning again
       // could clobber a value installed concurrently by another thread
       p->dispose();
       return;
     }
   }
 }
Example #28
File: smr.cpp Project: arssivka/2rc
 void push(unsigned index, int data)
 {
     node* n = new node ();
     n->VAR(data_) = data;
     node* next = head_.load(std::memory_order_relaxed);
     for (;;)
     {
         n->next_.store(next, rl::memory_order_relaxed);
         if (head_.compare_exchange_weak(next, n, rl::memory_order_release))
             break;
     }
 }
Example #29
	void set_new_tail(counted_node_ptr &old_tail, counted_node_ptr const &new_tail) {
		node * const current_tail_ptr = old_tail.get();
		while (!tail.compare_exchange_weak(old_tail, new_tail) && old_tail.get() == current_tail_ptr) {}

		if (old_tail.get() == current_tail_ptr) {
			free_external_counter(old_tail);
			return;
		}
		
		if (!current_tail_ptr->release_ref())
			deallocate_node(current_tail_ptr);
	}
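The loop ends either because the CAS succeeded or because old_tail now refers to a different node, meaning another thread installed the new tail first. In the first case this thread owns the external count and frees it; in the second it only drops its own reference, deallocating the node if it was the last holder.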
Example #30
 void wait_for_data() {
     if (!m_head) {
         T* e = m_stack.load();
         while (e == m_dummy) {
             if (m_stack.compare_exchange_weak(e, 0)) e = 0;
         }
         if (!e) {
             lock_type lock(m_mtx);
             while (!(m_stack.load())) m_cv.wait(lock);
         }
         consume_stack();
     }
 }