// Ticket-lock acquire ("bad" variant): atomically claim the next
// ticket, then spin until the owner counter reaches it.
//
// Edges: take -> check and check -> post.  NOTE(review): unlike
// mutex2_lock, this constrains *every* execution of check against all
// post-lock code (and keeps take -> check) -- presumably that
// over-constraining is why it is labeled _bad; confirm against the
// RMC examples.
void mutex2_lock_bad(mutex2_t *lock) {
    XEDGE(take, check);
    XEDGE(check, post);

    // Claim our place in line.
    const int my_ticket = L(take, lock->next.fetch_add(1));

    // Spin until it is our turn.
    for (;;) {
        if (L(check, lock->owner) == my_ticket) break;
    }
}
// Spinlock acquire ("bad" variant): CAS locked from 0 to 1 until we
// win.  Edge: the successful trylock must execute before everything
// after the call (implicit "post" label).
void mutex_lock_bad(mutex_t *lock) {
    XEDGE(trylock, post);

    for (;;) {
        // compare_exchange_weak overwrites its expected argument on
        // failure (and may fail spuriously), so reset it every trip.
        int expected = 0;
        if (L(trylock, lock->locked.compare_exchange_weak(expected, 1)))
            break;
    }
}
// Fast-path lock acquire for the tagged-pointer queue lock.
// tail_ == (nullptr, 0) means "unlocked, no waiters"; try to CAS it to
// (nullptr, 1) ("locked, no waiters").  On failure someone holds the
// lock (or is queued), so take the slow path with the observed value
// (the failed CAS wrote it into `unlocked`).
void lock() {
    // The lock acquisition must execute before the critical section.
    XEDGE(lock, out);
    Node::Ptr unlocked(nullptr, 0);
    if (!L(lock, tail_.compare_exchange_strong(unlocked,
                                               Node::Ptr(nullptr, 1)))) {
        // LS presumably attaches the "lock" label to the whole
        // slow-path call -- TODO confirm macro semantics.
        LS(lock, slowpathLock(unlocked));
    }
    LPOST(out);
}
// Spinlock acquire: CAS locked from 0 to 1 until we win.  Only the
// final, successful trylock is ordered before the code after the loop
// (edge to the explicit loop_out label).
void mutex_lock(mutex_t *lock) {
    XEDGE(trylock, loop_out);

    for (;;) {
        // compare_exchange_weak overwrites its expected argument on
        // failure (and may fail spuriously), so reset it every trip.
        int expected = 0;
        if (L(trylock, lock->locked.compare_exchange_weak(expected, 1)))
            break;
    }
    LPOST(loop_out);
}
// Pop the front node of a Michael-Scott queue (epoch-managed version).
// Returns an empty optional<T> when the queue is empty.  Reclamation
// of the dequeued node is deferred via guard.unlinked(), so reading
// through `head`/`next` while pinned is presumably safe even under
// concurrent dequeues -- confirm against the epoch system's contract.
rmc_noinline
optional<T> MSQueue<T>::dequeue() {
    auto guard = Epoch::pin();

    // Core message passing: reading the data out of the node comes
    // after getting the pointer to it.
    XEDGE(get_next, node_use);
    // Make sure we see at least head's init
    XEDGE(get_head, get_next);
    // Need to make sure anything visible through the next pointer
    // stays visible when it gets republished at the head or tail
    VEDGE(get_next, dequeue);

    lf_ptr<MSQueueNode> head, next;

    for (;;) {
        head = L(get_head, this->head_);
        next = L(get_next, head->next_);

        // Consistency check: if head_ moved since we read it, the
        // next pointer we read may be stale -- retry.
        if (head != this->head_) continue;

        // Is the queue empty?
        if (next == nullptr) {
            return optional<T> {};
        } else {
            // OK, now we try to actually read the thing out.
            if (L(dequeue, this->head_.compare_exchange_weak(head, next))) {
                break;
            }
        }
    }
    LPOST(node_use);

    // OK, everything set up.
    // next contains the value we are reading
    // head can be freed (deferred until all pinned readers are done)
    guard.unlinked(head);

    optional<T> ret(std::move(next->data_));
    next->data_ = optional<T> {}; // destroy the object
    return ret;
}
// Ticket-lock acquire: claim the next ticket, spin until the owner
// counter reaches it.  Only the final, successful owner check is
// ordered before the code after the loop.
void mutex2_lock(mutex2_t *lock) {
    //XEDGE(take, check); // I /think/ maybe we don't need this depending.
    XEDGE(check, loop_out);

    // Claim our place in line.
    const int my_ticket = L(take, lock->next.fetch_add(1));

    // Spin until it is our turn.
    for (;;) {
        if (L(check, lock->owner) == my_ticket) break;
    }
    LPOST(loop_out);
}
// Writer-side lock acquire for a combined reader/writer lock word.
// Phase 1 sets the writer bit; phase 2 waits for readers to drain.
void lock() {
    // Every labeled "acquire" action executes before the critical
    // section that follows the call.
    XEDGE(acquire, out);

    uintptr_t locked;
    // Phase 1: CAS the writer bit in once no other writer holds it.
    // On success, `locked` holds the pre-CAS value (readers may still
    // be counted in it).
    for (;;) {
        locked = locked_;
        if (!writeLocked(locked)) {
            if (L(acquire, locked_.compare_exchange_weak(
                      locked, locked|kWriterLocked))) {
                break;
            }
        }
    }
    // Phase 2: spin until the reader count drains.  The re-read is
    // also labeled "acquire", so it too is ordered before "out".
    while (readLocked(locked)) {
        locked = L(acquire, locked_);
    }
    LPOST(out);
}
// Release the mutex by swapping 0 into locked.
// pre -> unlock (visibility): the critical section's writes must be
// visible before the lock is seen released.
// unlock -> post (execution): the release executes before whatever
// follows the call.
void drf_mutex_unlock(mutex_t *lock) {
    VEDGE(pre, unlock);
    XEDGE(unlock, post); // ew
    // Old value discarded; exchange (an RMW) is used rather than a
    // plain store -- presumably deliberate for the RMC model; confirm.
    L(unlock, lock->locked.exchange(0));
}
// Acquire the mutex: atomically swap 1 into locked until the old value
// that comes back is 0 -- i.e. until we are the thread that locked it.
void drf_mutex_lock(mutex_t *lock) {
    // The successful lock attempt executes before the critical section.
    XEDGE(trylock, post);

    for (;;) {
        if (L(trylock, lock->locked.exchange(1)) != 1) break;
    }
}
// Perform a lookup in an RCU-protected widgetlist with a traditional // post edge. // Must be done in an Epoch read-side critical section. widget *widget_find(widgetlist *list, unsigned key) noexcept { XEDGE(find, post); return L(find, widget_find_fine(list, key)); }
// Append an already-initialized node at the tail (generation-counter
// version: the _gen CAS variants detect stale pointers to nodes that
// were freed and reused, so the tail/next consistency check below is
// required for correctness, not just an optimization).
rmc_noinline
void MSQueue<T>::enqueueNode(lf_ptr<MSQueueNode> node) {
    // Clear the node's next pointer, keeping its gen count. XXX: ok?
    node->next_ = node->next_.load().update(nullptr);

    // We publish the node in two ways:
    //  * at enqueue, which links it in as the next_ pointer
    //    of the list tail
    //  * at enqueue_swing, which links it in as
    //    the new tail_ of the queue
    // Node initialization needs to be visible before a node
    // publication is.
    VEDGE(node_init, enqueue);
    VEDGE(node_init, enqueue_swing);
    // Make sure to see node init, etc
    // This we should get to use a data-dep on!
    // Pretty sure we don't need XEDGE(get_tail, catchup_swing)...
    XEDGE(get_tail, get_next);
    // Make sure the contents of the next node stay visible
    // to anything that finds it through tail_, when we swing
    VEDGE(get_next, catchup_swing);
    // enqueue needs to be visible before enqueue_swing so that if
    // head != tail in dequeue, it always manages to have seen
    // head->next
    VEDGE(enqueue, enqueue_swing);
    // Make sure the read out of next executes before the verification
    // that it read from a sensible place:
    XEDGE(get_next, verify_tail);

    // Marker for node initialization. Everything before the
    // enqueue_node call is "init".
    LPRE(node_init);

    NodePtr tail, next;

    for (;;) {
        tail = L(get_tail, this->tail_);
        next = L(get_next, tail->next_);

        // Check that tail and next are consistent: In this gen
        // counter version, this is important for correctness: if,
        // after we read the tail, it gets removed from this queue,
        // freed, and added to some other queue, we need to make sure
        // that we don't try to append to that queue instead.
        if (tail != L(verify_tail, this->tail_)) continue;

        // was tail /actually/ the last node?
        if (next == nullptr) {
            // if so, try to write it in. (nb. this overwrites next)
            // XXX: does weak actually help us here?
            if (L(enqueue, tail->next_.compare_exchange_weak_gen(next, node))) {
                // we did it! return
                break;
            }
        } else {
            // nope. try to swing the tail further down the list and
            // try again
            L(catchup_swing,
              this->tail_.compare_exchange_strong_gen(tail, next));
        }
    }

    // Try to swing the tail_ to point to what we inserted; failure is
    // fine -- someone else already swung it past us.
    L(enqueue_swing,
      this->tail_.compare_exchange_strong_gen(tail, node));
}
// Pop the front node (generation-counter version).  Returns an empty
// optional<T> when the queue is empty.  Dequeued nodes go back on the
// freelist, so the payload must be copied out *before* the dequeuing
// CAS -- afterwards the node may be reused at any moment.
rmc_noinline
optional<T> MSQueue<T>::dequeue() {
    // Core message passing: reading the data out of the node comes
    // after getting the pointer to it.
    XEDGE(get_next, node_use);
    // Make sure we see at least head's init
    XEDGE(get_head, get_next);
    // XXX: another part of maintaining the awful head != tail ->
    // next != null invariant that causes like a billion constraints.
    // Think about it a bit more to make sure this is right.
    // This is awful, so many barriers.
    XEDGE(get_head, get_tail);
    // If we see an updated tail (so that head != tail), make sure that
    // we see update to head->next.
    XEDGE(get_tail, get_next);
    // Need to make sure anything visible through the next pointer
    // stays visible when it gets republished at the head or tail
    VEDGE(get_next, catchup_swing);
    VEDGE(get_next, dequeue);
    // Make sure the read out of next executes before the verification
    // that it read from a sensible place:
    XEDGE(get_next, verify_head);

    NodePtr head, tail, next;
    universal data;

    for (;;) {
        head = L(get_head, this->head_);
        tail = L(get_tail, this->tail_); // XXX: really?
        next = L(get_next, head->next_);

        // Consistency check: if head_ moved since we read it, the
        // next pointer we read may be stale -- retry.
        if (head != L(verify_head, this->head_)) continue;

        // Check if the queue *might* be empty
        // XXX: is it necessary to have the empty check under this
        if (head == tail) {
            // Ok, so, the queue might be empty, but it also might
            // be that the tail pointer has just fallen behind.
            // If the next pointer is null, then it is actually empty
            if (next == nullptr) {
                return optional<T> {};
            } else {
                // not empty: tail falling behind; since it is super
                // not ok for the head to advance past the tail,
                // try advancing the tail
                // XXX weak v strong?
                L(catchup_swing,
                  this->tail_.compare_exchange_strong_gen(tail, next));
            }
        } else {
            // OK, now we try to actually read the thing out.
            assert_ne(next.ptr(), nullptr);

            // We need to read the data out of the node
            // *before* we try to dequeue it or else it could get
            // reused before we read it out.
            data = L(node_use, next->data_);
            if (L(dequeue,
                  this->head_.compare_exchange_weak_gen(head, next))) {
                break;
            }
        }
    }
    LPOST(node_use);

    // OK, everything set up.
    // head can be freed
    MSQueueNode::freelist.unlinked(head);

    optional<T> ret(data.extract<T>());
    return ret;
}
// Append an already-initialized node at the tail (epoch-managed
// version: no gen counters, so the tail/next consistency check is
// only an optimization -- pinned nodes can't be reused under us).
rmc_noinline
void MSQueue<T>::enqueue_node(lf_ptr<MSQueueNode> node) {
    auto guard = Epoch::pin();

    // We publish the node in two ways:
    //  * at enqueue, which links it in as the next_ pointer
    //    of the list tail
    //  * at enqueue_swing, which links it in as
    //    the new tail_ of the queue
    // Node initialization needs to be visible before a node
    // publication is.
    VEDGE(node_init, enqueue);
    VEDGE(node_init, enqueue_swing);
    // Make sure to see node init, etc
    // This we should get to use a data-dep on!
    // Pretty sure we don't need XEDGE(get_tail, catchup_swing)...
    XEDGE(get_tail, get_next);
    // Make sure the contents of the next node stay visible
    // to anything that finds it through tail_, when we swing
    VEDGE(get_next, catchup_swing);
    // enqueue needs to be visible before enqueue_swing so that if
    // head != tail in dequeue, it always manages to have seen
    // head->next
    // XXX: do we still need this??
    VEDGE(enqueue, enqueue_swing);

    // Marker for node initialization. Everything before the
    // enqueue_node call is "init".
    LPRE(node_init);

    lf_ptr<MSQueueNode> tail, next;

    for (;;) {
        tail = L(get_tail, this->tail_);
        next = L(get_next, tail->next_);

        // Check that tail and next are consistent:
        // If we are using an epoch/gc based approach
        // (which we had better be, since we don't have gen counters),
        // this is purely an optimization.
        // XXX: constraint? I think it doesn't matter here, where it is
        // purely an optimization
        if (tail != this->tail_) continue;

        // was tail /actually/ the last node?
        if (next == nullptr) {
            // if so, try to write it in. (nb. this overwrites next)
            // XXX: does weak actually help us here?
            if (L(enqueue, tail->next_.compare_exchange_weak(next, node))) {
                // we did it! return
                break;
            }
        } else {
            // nope. try to swing the tail further down the list and
            // try again
            L(catchup_swing,
              this->tail_.compare_exchange_strong(tail, next));
        }
    }

    // Try to swing the tail_ to point to what we inserted; failure is
    // fine -- someone else already swung it past us.
    L(enqueue_swing,
      this->tail_.compare_exchange_strong(tail, node));
}
// Slow-path lock acquisition for the tagged-pointer queue lock.
// Called from lock() after the fast-path CAS on tail_ fails; oldTail
// is the tail value that CAS observed.  We enqueue a stack-allocated
// Node, wait for our predecessor to mark us ready, take the lock, and
// hand the queue head to our successor (if any) on the way out.
void slowpathLock(Node::Ptr oldTail) {
    // makes sure that init of me.next is prior to tail_link in
    // other thread
    VEDGE(node_init, enqueue);
    // init of me needs to be done before publishing it to
    // previous thread also
    VEDGE(node_init, tail_link);
    // Can't write self into previous node until previous node inited
    XEDGE(enqueue, tail_link);

    // Our queue node lives on this stack frame; its construction is
    // labeled as the "node_init" action.
    LS(node_init, Node me);

    Node::Ptr curTail;
    bool newThreads;

    // Step one, put ourselves at the back of the queue
    for (;;) {
        // Keep the current tag; we are just appending, not locking.
        Node::Ptr newTail = Node::Ptr(&me, oldTail.tag());

        // Enqueue ourselves...
        if (L(enqueue, tail_.compare_exchange_strong(oldTail, newTail)))
            break;

        // OK, maybe the whole thing is just unlocked now?
        // (the failed CAS above refreshed oldTail)
        if (oldTail == Node::Ptr(nullptr, 0)) {
            // If so, try the top level lock
            if (tail_.compare_exchange_strong(oldTail, Node::Ptr(nullptr, 1)))
                goto out;
        }

        delay();
    }

    // Need to make sure not to compete for the lock before the
    // right time. This makes sure the ordering doesn't get messed
    // up.
    XEDGE(ready_wait, lock);

    // Step two: OK, there is an actual queue, so link up with the old
    // tail and wait until we are at the head of the queue
    if (oldTail.ptr()) {
        // * Writing into the oldTail is safe because threads can't
        //   leave unless there is no thread after them or they have
        //   marked the next ready
        L(tail_link, oldTail->next = &me);

        while (!L(ready_wait, me.ready)) delay();
    }

    // Step three: wait until the lock is freed
    // We don't need a constraint from this load; "lock" serves
    // to handle this just fine: lock can't succeed until we've
    // read an unlocked tail_.
    while ((curTail = tail_).tag()) {
        delay();
    }

    // Our lock acquisition needs to be finished before we give the
    // next thread a chance to try to acquire the lock or it could
    // compete with us for it, causing trouble.
    VEDGE(lock, signal_next);

    // Step four: take the lock
    for (;;) {
        assert_eq(curTail.tag(), 0);
        assert_ne(curTail.ptr(), nullptr);

        newThreads = curTail.ptr() != &me;

        // If there aren't any waiters after us, the queue is
        // empty. Otherwise, keep the old tail.
        // NOTE(review): relies on Node::Ptr converting to Node* in
        // the ternary -- confirm the tagged-pointer conversion.
        Node *newTailP = newThreads ? curTail : nullptr;
        Node::Ptr newTail = Node::Ptr(newTailP, 1);

        if (L(lock, tail_.compare_exchange_strong(curTail, newTail)))
            break;
    }

    // Step five: now that we have the lock, if any threads came
    // in after us, indicate to the next one that it is at the
    // head of the queue
    if (newThreads) {
        // Next thread might not have written itself in, yet,
        // so we have to wait.
        Node *next;
        XEDGE(load_next, signal_next);
        while (!L(load_next, next = me.next)) delay();
        L(signal_next, next->ready = true);
    }
    //printf("full slowpath out\n");

out:
    //printf("made it out of slowpath!\n");
    return;
}