Example #1
/*
 * Allocate a new node.
 * Pop from the freeNodes list if possible; otherwise, if alloc is set and
 * the node count is still below maxNodes, allocate a fresh node.
 */
stpcNode* _newNode(stpcProxy* proxy, bool alloc) {
	stpcNode* node = NULL;
	sequencedPtr oldFree, newFree;
	
	oldFree.ival = atomic_load_explicit(&proxy->freeHead.ival, memory_order_acquire);
	while (oldFree.ptr != atomic_load_explicit(&proxy->freeTail, memory_order_relaxed)) {
		newFree.ptr = oldFree.ptr->next;
		newFree.sequence = oldFree.sequence + 1;
        if (atomic_compare_exchange_strong_explicit(&proxy->freeHead.ival, &oldFree.ival, newFree.ival, memory_order_acq_rel, memory_order_acquire)) {
            node = oldFree.ptr;
            stpcGetLocalStats(proxy)->reuse++;
            break;
        }
	}

	if (node == NULL && alloc) {
		int oldNum = atomic_load_explicit(&proxy->numNodes, memory_order_relaxed);
		while (oldNum < proxy->maxNodes && !atomic_compare_exchange_strong_explicit(&proxy->numNodes, &oldNum, oldNum + 1, memory_order_relaxed, memory_order_relaxed));
		if (oldNum >= proxy->maxNodes)
			return NULL;
		node = proxy->allocMem(sizeof(stpcNode));
		if (node == NULL)
			return NULL;
	}

	// Guard against dereferencing NULL when the free list is empty and alloc is false.
	if (node == NULL)
		return NULL;

	memset(node, 0, sizeof(stpcNode));

	return node;
}
Example #2
void
exchange (atomic_int *i)
{
  int r;

  atomic_compare_exchange_strong_explicit (i, &r, 0, memory_order_seq_cst, memory_order_release); /* { dg-warning "invalid failure memory" } */
  atomic_compare_exchange_strong_explicit (i, &r, 0, memory_order_seq_cst, memory_order_acq_rel); /* { dg-warning "invalid failure memory" } */
  atomic_compare_exchange_strong_explicit (i, &r, 0, memory_order_relaxed, memory_order_consume); /* { dg-warning "failure memory model cannot be stronger" } */

  atomic_compare_exchange_weak_explicit (i, &r, 0, memory_order_seq_cst, memory_order_release); /* { dg-warning "invalid failure memory" } */
  atomic_compare_exchange_weak_explicit (i, &r, 0, memory_order_seq_cst, memory_order_acq_rel); /* { dg-warning "invalid failure memory" } */
  atomic_compare_exchange_weak_explicit (i, &r, 0, memory_order_relaxed, memory_order_consume); /* { dg-warning "failure memory model cannot be stronger" } */
}
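The test above deliberately passes failure orderings that C11 forbids: the failure argument to a compare-exchange must not be memory_order_release or memory_order_acq_rel, and must be no stronger than the success argument; the dg-warning comments mark the diagnostics GCC is expected to emit. For contrast, here is a minimal sketch of well-formed variants of the same calls (hypothetical code, not part of the GCC test suite):

#include <stdatomic.h>

void
exchange_valid (atomic_int *i)
{
  int r = 0;

  /* Hypothetical sketch: a seq_cst success order may pair with seq_cst or acquire on failure. */
  atomic_compare_exchange_strong_explicit (i, &r, 0, memory_order_seq_cst, memory_order_seq_cst);
  atomic_compare_exchange_strong_explicit (i, &r, 0, memory_order_seq_cst, memory_order_acquire);

  /* A relaxed success order pairs with a relaxed failure order. */
  atomic_compare_exchange_weak_explicit (i, &r, 0, memory_order_relaxed, memory_order_relaxed);
}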
Example #3
void enqueue(queue_t *q, unsigned int val)
{
	std::string str1("enqueue"); //ANNOTATION
	function_call(str1, INVOCATION, val); //ANNOTATION
	
	int success = 0;
	unsigned int node;
	pointer tail;
	pointer next;
	pointer tmp;

	node = new_node();
	store_32(&q->nodes[node].value, val);
	tmp = atomic_load_explicit(&q->nodes[node].next, memory_order_seq_cst);
	set_ptr(&tmp, 0); // NULL
	atomic_store_explicit(&q->nodes[node].next, tmp, memory_order_seq_cst);

	while (!success) {
		tail = atomic_load_explicit(&q->tail, memory_order_seq_cst);
		next = atomic_load_explicit(&q->nodes[get_ptr(tail)].next, memory_order_seq_cst);
		if (tail == atomic_load_explicit(&q->tail, memory_order_seq_cst)) {

			/* Check for uninitialized 'next' */
			MODEL_ASSERT(get_ptr(next) != POISON_IDX);

			if (get_ptr(next) == 0) { // == NULL
				pointer value = MAKE_POINTER(node, get_count(next) + 1);
				success = atomic_compare_exchange_strong_explicit(&q->nodes[get_ptr(tail)].next,
						&next, value, memory_order_seq_cst, memory_order_seq_cst);
			}
			if (!success) {
				unsigned int ptr = get_ptr(atomic_load_explicit(&q->nodes[get_ptr(tail)].next, memory_order_seq_cst));
				pointer value = MAKE_POINTER(ptr,
						get_count(tail) + 1);
				atomic_compare_exchange_strong_explicit(&q->tail,
						&tail, value,
						memory_order_seq_cst, memory_order_seq_cst);
				thrd_yield();
			}
		}
	}
	atomic_compare_exchange_strong_explicit(&q->tail,
			&tail,
			MAKE_POINTER(node, get_count(tail) + 1),
			memory_order_seq_cst, memory_order_seq_cst);

	function_call(str1, RESPONSE); //ANNOTATION
}
Example #4
void _queueNode(stpcProxy* proxy, stpcNode* newNode) {
	sequencedPtr oldTail;
	sequencedPtr newTail;
	long attempts = 0;

    newNode->count = GUARD_BIT + 2 * REFERENCE;
	
	/*
	 * monkey through the trees queuing trick
	 */
	
	newTail.ptr = newNode;
	newTail.sequence = 0;
	oldTail.ival = atomic_load_explicit(&proxy->tail.ival, memory_order_consume);
	do {
		attempts++;
	}
	while (!atomic_compare_exchange_strong_explicit(&proxy->tail.ival, &oldTail.ival, newTail.ival, memory_order_acq_rel, memory_order_acquire));
    
	atomic_store_explicit(&oldTail.ptr->next, newNode, memory_order_relaxed);
	// update old node's reference count by number of acquired references, clear guard bit, and drop ref acquired from tail pointer
	_dropProxyNodeReference(proxy, oldTail.ptr, (oldTail.sequence - GUARD_BIT));
		    
    stats_t *stats = stpcGetLocalStats(proxy);
    stats->tries++;                         // _addNode invocations
    stats->attempts += attempts;            // tail enqueue attempts
}
Example #5
/* Test for consistency on sizes 1, 2, 4, 8, 16 and 32.  */
int
main ()
{
  test_struct c;

  atomic_store_explicit (&a, zero, memory_order_relaxed);
  if (memcmp (&a, &zero, size))
    abort ();

  c = atomic_exchange_explicit (&a, ones, memory_order_seq_cst);
  if (memcmp (&c, &zero, size))
    abort ();
  if (memcmp (&a, &ones, size))
    abort ();

  b = atomic_load_explicit (&a, memory_order_relaxed);
  if (memcmp (&b, &ones, size))
    abort ();

  if (!atomic_compare_exchange_strong_explicit (&a, &b, zero, memory_order_seq_cst, memory_order_acquire))
    abort ();
  if (memcmp (&a, &zero, size))
    abort ();

  if (atomic_compare_exchange_weak_explicit (&a, &b, ones, memory_order_seq_cst, memory_order_acquire))
    abort ();
  if (memcmp (&b, &zero, size))
    abort ();

  return 0;
}
Example #6
static noreturn void thread_entry(cloudabi_tid_t tid, void *data) {
  // Set up TLS space.
  char tls_space[tls_size()];
  char *tls_start = tls_addr(tls_space);
  memcpy(tls_start, __pt_tls_vaddr_abs, __pt_tls_filesz);
  memset(tls_start + __pt_tls_filesz, '\0',
         __pt_tls_memsz_aligned - __pt_tls_filesz);
  tls_replace(tls_space);

  // Fix up some of the variables stored in TLS.
  pthread_t handle = data;
  __pthread_self_object = handle;
  __pthread_thread_id = tid;
  __safestack_unsafe_stack_ptr = (void *)__rounddown(
      (uintptr_t)handle->unsafe_stack + handle->unsafe_stacksize,
      PTHREAD_UNSAFE_STACK_ALIGNMENT);

  // Initialize the once object used for joining. This is also done
  // by the parent, but it may be the case that we call pthread_exit()
  // before the parent has a chance to initialize it.
  cloudabi_lock_t expected = CLOUDABI_LOCK_BOGUS;
  atomic_compare_exchange_strong_explicit(
      &handle->join, &expected, tid | CLOUDABI_LOCK_WRLOCKED,
      memory_order_acquire, memory_order_relaxed);

  // Free thread startup information and invoke user-supplied start routine.
  void *(*start_routine)(void *) = handle->start_routine;
  void *argument = handle->argument;
  pthread_exit(start_routine(argument));
}
Example #7
int take(Deque *q) {
	std::string str1("pop_back"); //ANNOTATION
	function_call(str1, INVOCATION); //ANNOTATION

	size_t b = atomic_load_explicit(&q->bottom, memory_order_seq_cst) - 1;
	Array *a = (Array *) atomic_load_explicit(&q->array, memory_order_seq_cst);
	atomic_store_explicit(&q->bottom, b, memory_order_seq_cst); //relaxed
	atomic_thread_fence(memory_order_seq_cst);
	size_t t = atomic_load_explicit(&q->top, memory_order_seq_cst);
	int x;
	if (t <= b) {
		/* Non-empty queue. */
		x = atomic_load_explicit(&a->buffer[b % atomic_load_explicit(&a->size,memory_order_seq_cst)], memory_order_seq_cst);
		if (t == b) {
			/* Single last element in queue. */
			if (!atomic_compare_exchange_strong_explicit(&q->top, &t, t + 1, memory_order_seq_cst, memory_order_seq_cst))
				/* Failed race. */
				x = EMPTY;
			atomic_store_explicit(&q->bottom, b + 1, memory_order_seq_cst); //relaxed
		}
	} else { /* Empty queue. */
		x = EMPTY;
		atomic_store_explicit(&q->bottom, b + 1, memory_order_seq_cst); // relaxed
	}
	//if(x == EMPTY)
		//function_call(str1, RESPONSE, (uint64_t) NULL); //ANNOTATION
	//else
		function_call(str1, RESPONSE, (uint64_t) x); //ANNOTATION
	return x;
}
Example #8
bool dequeue(queue_t *q, unsigned int *retVal)
{
	std::string str1("dequeue"); //ANNOTATION
	function_call(str1, INVOCATION); //ANNOTATION

	int success = 0;
	pointer head;
	pointer tail;
	pointer next;

	while (!success) {
		head = atomic_load_explicit(&q->head, memory_order_seq_cst);
		tail = atomic_load_explicit(&q->tail, memory_order_seq_cst);
		next = atomic_load_explicit(&q->nodes[get_ptr(head)].next, memory_order_seq_cst);
		if (atomic_load_explicit(&q->head, memory_order_seq_cst) == head) {
			if (get_ptr(head) == get_ptr(tail)) {

				/* Check for uninitialized 'next' */
				MODEL_ASSERT(get_ptr(next) != POISON_IDX);

				if (get_ptr(next) == 0) { // NULL
					function_call(str1, RESPONSE); //ANNOTATION
					return false; // NULL
				}
				atomic_compare_exchange_strong_explicit(&q->tail,
						&tail,
						MAKE_POINTER(get_ptr(next), get_count(tail) + 1),
						memory_order_seq_cst, memory_order_seq_cst);
				thrd_yield();
			} else {
				*retVal = load_32(&q->nodes[get_ptr(next)].value);
				success = atomic_compare_exchange_strong_explicit(&q->head,
						&head,
						MAKE_POINTER(get_ptr(next), get_count(head) + 1),
						memory_order_seq_cst, memory_order_seq_cst);
				if (!success)
					thrd_yield();
			}
		}
	}
	reclaim(get_ptr(head));

	function_call(str1, RESPONSE, *retVal); //ANNOTATION

	return true;
}
Example #9
static _Bool
SetNextSegment(
    _In_ SystemPipeSegment_t*       Segment,
    _In_ SystemPipeSegment_t*       Next)
{
    SystemPipeSegment_t* Expected = NULL;
    return atomic_compare_exchange_strong_explicit(&Segment->Link, &Expected, Next,
        memory_order_release, memory_order_relaxed);
}
Example #10
static inline _Bool
SwapSystemPipeHead(
    _In_ SystemPipe_t*              Pipe,
    _In_ SystemPipeSegment_t**      Segment,
    _In_ SystemPipeSegment_t*       Next)
{
    assert(Pipe->Configuration & PIPE_MULTIPLE_CONSUMERS);
    return atomic_compare_exchange_strong_explicit(&Pipe->ConsumerState.Head, 
        Segment, Next, memory_order_release, memory_order_acquire);
}
Example #11
static inline _Bool
SwapSystemPipeTail(
    _In_ SystemPipe_t*              Pipe,
    _In_ SystemPipeSegment_t**      Segment,
    _In_ SystemPipeSegment_t*       Next)
{
    assert((Pipe->Configuration & PIPE_MPMC) != 0);
    return atomic_compare_exchange_strong_explicit(&Pipe->ProducerState.Tail, 
        Segment, Next, memory_order_release, memory_order_relaxed);
}
Example #12
TEST(stdatomic, atomic_compare_exchange) {
  atomic_int i;
  int expected;

  atomic_store(&i, 123);
  expected = 123;
  ASSERT_TRUE(atomic_compare_exchange_strong(&i, &expected, 456));
  ASSERT_FALSE(atomic_compare_exchange_strong(&i, &expected, 456));
  ASSERT_EQ(456, expected);

  atomic_store(&i, 123);
  expected = 123;
  ASSERT_TRUE(atomic_compare_exchange_strong_explicit(&i, &expected, 456, memory_order_relaxed,
          memory_order_relaxed));
  ASSERT_FALSE(atomic_compare_exchange_strong_explicit(&i, &expected, 456, memory_order_relaxed,
          memory_order_relaxed));
  ASSERT_EQ(456, expected);

  atomic_store(&i, 123);
  expected = 123;
  int iter_count = 0;
  do {
    ++iter_count;
    ASSERT_LT(iter_count, 100);  // Arbitrary limit on spurious compare_exchange failures.
    ASSERT_EQ(expected, 123);
  } while(!atomic_compare_exchange_weak(&i, &expected, 456));
  ASSERT_FALSE(atomic_compare_exchange_weak(&i, &expected, 456));
  ASSERT_EQ(456, expected);

  atomic_store(&i, 123);
  expected = 123;
  iter_count = 0;
  do {
    ++iter_count;
    ASSERT_LT(iter_count, 100);
    ASSERT_EQ(expected, 123);
  } while(!atomic_compare_exchange_weak_explicit(&i, &expected, 456, memory_order_relaxed,
          memory_order_relaxed));
  ASSERT_FALSE(atomic_compare_exchange_weak_explicit(&i, &expected, 456, memory_order_relaxed,
          memory_order_relaxed));
  ASSERT_EQ(456, expected);
}
Example #13
int pthread_mutex_destroy(pthread_mutex_t* mutex_interface) {
    pthread_mutex_internal_t* mutex = __get_internal_mutex(mutex_interface);
    uint16_t old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
    // Store 0xffff to make the mutex unusable. Although POSIX standard says it is undefined
    // behavior to destroy a locked mutex, we prefer not to change mutex->state in that situation.
    if (MUTEX_STATE_BITS_IS_UNLOCKED(old_state) &&
        atomic_compare_exchange_strong_explicit(&mutex->state, &old_state, 0xffff,
                                                memory_order_relaxed, memory_order_relaxed)) {
      return 0;
    }
    return EBUSY;
}
Example #14
static inline __always_inline int __pthread_normal_mutex_trylock(pthread_mutex_internal_t* mutex,
                                                                 uint16_t shared) {
    const uint16_t unlocked           = shared | MUTEX_STATE_BITS_UNLOCKED;
    const uint16_t locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;

    uint16_t old_state = unlocked;
    if (__predict_true(atomic_compare_exchange_strong_explicit(&mutex->state, &old_state,
                         locked_uncontended, memory_order_acquire, memory_order_relaxed))) {
        return 0;
    }
    return EBUSY;
}
Example #15
	/**
	 *  Unset thread specific value if present
	 *
	 *  @return true on success
	 */
	bool erase() {
		auto currThread = current_thread();

		for (size_t i = 0; i < N; i++) {
			if (atomic_load_explicit(&threads[i], memory_order_acquire) == currThread) {
				values[i] = {};
				thread_t nullThread = nullptr;
				// Failure ordering may not be memory_order_acq_rel; acquire is the strongest allowed here.
				return atomic_compare_exchange_strong_explicit(&threads[i], &currThread,
					nullThread, memory_order_acq_rel, memory_order_acquire);
			}
		}

		return false;
	}
Example #16
stpcNode* stpcGetProxyNodeReference(stpcProxy* proxy) {
	sequencedPtr oldTail;
	sequencedPtr newTail;
	
	oldTail.ival = atomic_load_explicit(&proxy->tail.ival, memory_order_relaxed);
	do {
		newTail.sequence = oldTail.sequence + REFERENCE;
		newTail.ptr = oldTail.ptr;
	}
	while (!atomic_compare_exchange_strong_explicit(&proxy->tail.ival, &oldTail.ival, newTail.ival, memory_order_relaxed, memory_order_relaxed));
	
	return oldTail.ptr;
	
}
Example #17
void pony_asio_event_subscribe(asio_event_t* ev)
{
  if((ev == NULL) ||
    (ev->flags == ASIO_DISPOSABLE) ||
    (ev->flags == ASIO_DESTROYED))
    return;

  asio_backend_t* b = ponyint_asio_get_backend();

  if(ev->noisy)
    ponyint_asio_noisy_add();

  struct epoll_event ep;
  ep.data.ptr = ev;
  ep.events = EPOLLRDHUP | EPOLLET;

  if(ev->flags & ASIO_READ)
    ep.events |= EPOLLIN;

  if(ev->flags & ASIO_WRITE)
    ep.events |= EPOLLOUT;

  if(ev->flags & ASIO_TIMER)
  {
    ev->fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK);
    timer_set_nsec(ev->fd, ev->nsec);
    ep.events |= EPOLLIN;
  }

  if(ev->flags & ASIO_SIGNAL)
  {
    int sig = (int)ev->nsec;
    asio_event_t* prev = NULL;

    if((sig < MAX_SIGNAL) &&
      atomic_compare_exchange_strong_explicit(&b->sighandlers[sig], &prev, ev,
      memory_order_release, memory_order_relaxed))
    {
      signal(sig, signal_handler);
      ev->fd = eventfd(0, EFD_NONBLOCK);
      ep.events |= EPOLLIN;
    } else {
      return;
    }
  }

  epoll_ctl(b->epfd, EPOLL_CTL_ADD, ev->fd, &ep);
}
Example #18
bool ponyint_messageq_markempty(messageq_t* q)
{
  pony_msg_t* tail = q->tail;
  pony_msg_t* head = atomic_load_explicit(&q->head, memory_order_relaxed);

  if(((uintptr_t)head & 1) != 0)
    return true;

  if(head != tail)
    return false;

  head = (pony_msg_t*)((uintptr_t)head | 1);

  return atomic_compare_exchange_strong_explicit(&q->head, &tail, head,
    memory_order_release, memory_order_relaxed);
}
Example #19
void pony_asio_event_unsubscribe(asio_event_t* ev)
{
  if((ev == NULL) ||
    (ev->flags == ASIO_DISPOSABLE) ||
    (ev->flags == ASIO_DESTROYED))
    return;

  asio_backend_t* b = ponyint_asio_get_backend();

  if(ev->noisy)
  {
    ponyint_asio_noisy_remove();
    ev->noisy = false;
  }

  epoll_ctl(b->epfd, EPOLL_CTL_DEL, ev->fd, NULL);

  if(ev->flags & ASIO_TIMER)
  {
    if(ev->fd != -1)
    {
      close(ev->fd);
      ev->fd = -1;
    }
  }

  if(ev->flags & ASIO_SIGNAL)
  {
    int sig = (int)ev->nsec;
    asio_event_t* prev = ev;

    if((sig < MAX_SIGNAL) &&
      atomic_compare_exchange_strong_explicit(&b->sighandlers[sig], &prev, NULL,
      memory_order_release, memory_order_relaxed))
    {
      signal(sig, SIG_DFL);
      close(ev->fd);
      ev->fd = -1;
    }
  }

  ev->flags = ASIO_DISPOSABLE;
  send_request(ev, ASIO_DISPOSABLE);
}
Example #20
bool ponyint_messageq_markempty(messageq_t* q)
{
  pony_msg_t* tail = q->tail;
  pony_msg_t* head = atomic_load_explicit(&q->head, memory_order_relaxed);

  if(((uintptr_t)head & 1) != 0)
    return true;

  if(head != tail)
    return false;

  head = (pony_msg_t*)((uintptr_t)head | 1);

#ifdef USE_VALGRIND
  ANNOTATE_HAPPENS_BEFORE(&q->head);
#endif
  return atomic_compare_exchange_strong_explicit(&q->head, &tail, head,
    memory_order_release, memory_order_relaxed);
}
Example #21
int pthread_mutex_trylock(pthread_mutex_t* mutex_interface) {
    pthread_mutex_internal_t* mutex = __get_internal_mutex(mutex_interface);

    uint16_t old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
    uint16_t mtype  = (old_state & MUTEX_TYPE_MASK);
    uint16_t shared = (old_state & MUTEX_SHARED_MASK);

    const uint16_t unlocked           = mtype | shared | MUTEX_STATE_BITS_UNLOCKED;
    const uint16_t locked_uncontended = mtype | shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;

    // Handle common case first.
    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
        return __pthread_normal_mutex_trylock(mutex, shared);
    }

    // Do we already own this recursive or error-check mutex?
    pid_t tid = __get_thread()->tid;
    if (tid == atomic_load_explicit(&mutex->owner_tid, memory_order_relaxed)) {
        if (mtype == MUTEX_TYPE_BITS_ERRORCHECK) {
            return EBUSY;
        }
        return __recursive_increment(mutex, old_state);
    }

    // Same as pthread_mutex_lock, except that we don't want to wait, and
    // the only operation that can succeed is a single compare_exchange to acquire the
    // lock if it is released / not owned by anyone. No need for a complex loop.
    // If exchanged successfully, an acquire fence is required to make
    // all memory accesses made by other threads visible to the current CPU.
    old_state = unlocked;
    if (__predict_true(atomic_compare_exchange_strong_explicit(&mutex->state, &old_state,
                                                               locked_uncontended,
                                                               memory_order_acquire,
                                                               memory_order_relaxed))) {
        atomic_store_explicit(&mutex->owner_tid, tid, memory_order_relaxed);
        return 0;
    }
    return EBUSY;
}
Example #22
	/**
	 *  Set or overwrite thread specific value
	 *
	 *  @param value  value to store
	 *
	 *  @return true on success
	 */
	bool set(T value) {
		auto currThread = current_thread();
		T *ptr = nullptr;

		// Find previous value if any
		for (size_t i = 0; ptr == nullptr && i < N; i++)
			if (atomic_load_explicit(&threads[i], memory_order_acquire) == currThread)
				ptr = &values[i];

		// Find null value if any
		for (size_t i = 0; ptr == nullptr && i < N; i++) {
			thread_t nullThread = nullptr;
			// Failure ordering may not be memory_order_acq_rel; acquire is the strongest allowed here.
			if (atomic_compare_exchange_strong_explicit(&threads[i], &nullThread, currThread,
				memory_order_acq_rel, memory_order_acquire))
				ptr = &values[i];
		}

		// Insert if we can
		if (ptr) *ptr = value;

		return ptr != nullptr;
	}
Example #23
/**
 * Update speed meter.
 * @param[in] speed
 * @param[in] count
 */
void spdm_update(struct speed_meter *speed, uint64_t count)
{
    uint64_t curr_time = zclock(false);
    uint64_t last_update = atomic_load_explicit(&speed->last_update, memory_order_acquire);

    if (curr_time - last_update >= SEC2USEC(1)) {
        if (atomic_compare_exchange_strong_explicit(&speed->last_update, &last_update, curr_time, memory_order_release,
                                                    memory_order_relaxed)) {
            size_t i = atomic_load_explicit(&speed->i, memory_order_acquire);
            uint64_t speed_aux = atomic_load_explicit(&speed->speed_aux, memory_order_acquire);
            atomic_store_explicit(&speed->backlog[i].speed, speed_aux, memory_order_release);
            atomic_fetch_sub_explicit(&speed->speed_aux, speed_aux, memory_order_release);
            atomic_store_explicit(&speed->backlog[i].timestamp, last_update, memory_order_release);
            i++;
            if (SPEED_METER_BACKLOG == i) {
                i = 0;
            }
            atomic_store_explicit(&speed->i, i, memory_order_release);
        }
    }

    atomic_fetch_add_explicit(&speed->speed_aux, count, memory_order_release);
}
Example #24
void BBinder::attachObject(
    const void* objectID, void* object, void* cleanupCookie,
    object_cleanup_func func)
{
    Extras* e = reinterpret_cast<Extras*>(
                    atomic_load_explicit(&mExtras, std::memory_order_acquire));

    if (!e) {
        e = new Extras;
        uintptr_t expected = 0;
        if (!atomic_compare_exchange_strong_explicit(
                                        &mExtras, &expected,
                                        reinterpret_cast<uintptr_t>(e),
                                        std::memory_order_release,
                                        std::memory_order_acquire)) {
            delete e;
            e = reinterpret_cast<Extras*>(expected);  // Filled in by CAS
        }
        if (e == 0) return; // out of memory
    }

    AutoMutex _l(e->mLock);
    e->mObjects.attach(objectID, object, cleanupCookie, func);
}
Example #25
int steal(Deque *q) {
	std::string str1("pop_front"); //ANNOTATION
	function_call(str1, INVOCATION); //ANNOTATION
	size_t t = atomic_load_explicit(&q->top, memory_order_seq_cst); //acquire
	atomic_thread_fence(memory_order_seq_cst);
	size_t b = atomic_load_explicit(&q->bottom, memory_order_seq_cst); //acquire
	int x = EMPTY;
	if (t < b) {
		/* Non-empty queue. */
		Array *a = (Array *) atomic_load_explicit(&q->array, memory_order_seq_cst);
		x = atomic_load_explicit(&a->buffer[t % atomic_load_explicit(&a->size, memory_order_seq_cst)], memory_order_seq_cst);
		if (!atomic_compare_exchange_strong_explicit(&q->top, &t, t + 1, memory_order_seq_cst, memory_order_seq_cst))
		{
			function_call(str1, RESPONSE, (uint64_t) ABORT); //ANNOTATION
			/* Failed race. */
			return ABORT;
		}
	}
	//if(x == EMPTY)
		//function_call(str1, RESPONSE, (uint64_t) NULL); //ANNOTATION
	//else
		function_call(str1, RESPONSE, (uint64_t) x); //ANNOTATION
	return x;
}
Example #26
static int __pthread_mutex_lock_with_timeout(pthread_mutex_internal_t* mutex,
                                             bool use_realtime_clock,
                                             const timespec* abs_timeout_or_null) {
    uint16_t old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
    uint16_t mtype = (old_state & MUTEX_TYPE_MASK);
    uint16_t shared = (old_state & MUTEX_SHARED_MASK);

    // Handle common case first.
    if ( __predict_true(mtype == MUTEX_TYPE_BITS_NORMAL) ) {
        return __pthread_normal_mutex_lock(mutex, shared, use_realtime_clock, abs_timeout_or_null);
    }

    // Do we already own this recursive or error-check mutex?
    pid_t tid = __get_thread()->tid;
    if (tid == atomic_load_explicit(&mutex->owner_tid, memory_order_relaxed)) {
        if (mtype == MUTEX_TYPE_BITS_ERRORCHECK) {
            return EDEADLK;
        }
        return __recursive_increment(mutex, old_state);
    }

    const uint16_t unlocked           = mtype | shared | MUTEX_STATE_BITS_UNLOCKED;
    const uint16_t locked_uncontended = mtype | shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
    const uint16_t locked_contended   = mtype | shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;

    // First, if the mutex is unlocked, try to quickly acquire it.
    // In the optimistic case where this works, set the state to locked_uncontended.
    if (old_state == unlocked) {
        // If exchanged successfully, an acquire fence is required to make
        // all memory accesses made by other threads visible to the current CPU.
        if (__predict_true(atomic_compare_exchange_strong_explicit(&mutex->state, &old_state,
                             locked_uncontended, memory_order_acquire, memory_order_relaxed))) {
            atomic_store_explicit(&mutex->owner_tid, tid, memory_order_relaxed);
            return 0;
        }
    }

    ScopedTrace trace("Contending for pthread mutex");

    while (true) {
        if (old_state == unlocked) {
            // NOTE: We put the state to locked_contended since we _know_ there
            // is contention when we are in this loop. This ensures all waiters
            // will be unlocked.

            // If exchanged successfully, an acquire fence is required to make
            // all memory accesses made by other threads visible to the current CPU.
            if (__predict_true(atomic_compare_exchange_weak_explicit(&mutex->state,
                                                                     &old_state, locked_contended,
                                                                     memory_order_acquire,
                                                                     memory_order_relaxed))) {
                atomic_store_explicit(&mutex->owner_tid, tid, memory_order_relaxed);
                return 0;
            }
            continue;
        } else if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(old_state)) {
            // We should set it to locked_contended before going to sleep. This can make
            // sure waiters will be woken up eventually.

            int new_state = MUTEX_STATE_BITS_FLIP_CONTENTION(old_state);
            if (__predict_false(!atomic_compare_exchange_weak_explicit(&mutex->state,
                                                                       &old_state, new_state,
                                                                       memory_order_relaxed,
                                                                       memory_order_relaxed))) {
                continue;
            }
            old_state = new_state;
        }

        int result = check_timespec(abs_timeout_or_null, true);
        if (result != 0) {
            return result;
        }
        // We are in locked_contended state, sleep until someone wakes us up.
        if (__recursive_or_errorcheck_mutex_wait(mutex, shared, old_state, use_realtime_clock,
                                                 abs_timeout_or_null) == -ETIMEDOUT) {
            return ETIMEDOUT;
        }
        old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
    }
}
Example #27
PONY_API void pony_asio_event_unsubscribe(asio_event_t* ev)
{
  if((ev == NULL) ||
    (ev->flags == ASIO_DISPOSABLE) ||
    (ev->flags == ASIO_DESTROYED))
  {
    pony_assert(0);
    return;
  }

  asio_backend_t* b = ponyint_asio_get_backend();
  pony_assert(b != NULL);

  if(ev->noisy)
  {
    uint64_t old_count = ponyint_asio_noisy_remove();
    // tell scheduler threads that asio has no noisy actors
    // if the old_count was 1
    if (old_count == 1)
    {
      ponyint_sched_unnoisy_asio(SPECIAL_THREADID_EPOLL);

      // maybe wake up a scheduler thread if they've all fallen asleep
      ponyint_sched_maybe_wakeup_if_all_asleep(-1);
    }

    ev->noisy = false;
  }

  epoll_ctl(b->epfd, EPOLL_CTL_DEL, ev->fd, NULL);

  if(ev->flags & ASIO_TIMER)
  {
    if(ev->fd != -1)
    {
      close(ev->fd);
      ev->fd = -1;
    }
  }

  if(ev->flags & ASIO_SIGNAL)
  {
    int sig = (int)ev->nsec;
    asio_event_t* prev = ev;

#ifdef USE_VALGRIND
    ANNOTATE_HAPPENS_BEFORE(&b->sighandlers[sig]);
#endif
    if((sig < MAX_SIGNAL) &&
      atomic_compare_exchange_strong_explicit(&b->sighandlers[sig], &prev, NULL,
      memory_order_release, memory_order_relaxed))
    {
      struct sigaction new_action;

#if !defined(USE_SCHEDULER_SCALING_PTHREADS)
      // Make sure we ignore signals related to scheduler sleeping/waking
      // as the default for those signals is termination
      if(sig == PONY_SCHED_SLEEP_WAKE_SIGNAL)
        new_action.sa_handler = empty_signal_handler;
      else
#endif
        new_action.sa_handler = SIG_DFL;

      sigemptyset (&new_action.sa_mask);

      // ask to restart interrupted syscalls to match `signal` behavior
      new_action.sa_flags = SA_RESTART;

      sigaction(sig, &new_action, NULL);

      close(ev->fd);
      ev->fd = -1;
    }
  }

  ev->flags = ASIO_DISPOSABLE;
  send_request(ev, ASIO_DISPOSABLE);
}
Example #28
PONY_API void pony_asio_event_subscribe(asio_event_t* ev)
{
  if((ev == NULL) ||
    (ev->flags == ASIO_DISPOSABLE) ||
    (ev->flags == ASIO_DESTROYED))
  {
    pony_assert(0);
    return;
  }

  asio_backend_t* b = ponyint_asio_get_backend();
  pony_assert(b != NULL);

  if(ev->noisy)
  {
    uint64_t old_count = ponyint_asio_noisy_add();
    // tell scheduler threads that asio has at least one noisy actor
    // if the old_count was 0
    if (old_count == 0)
      ponyint_sched_noisy_asio(SPECIAL_THREADID_EPOLL);
  }

  struct epoll_event ep;
  ep.data.ptr = ev;
  ep.events = EPOLLRDHUP;

  if(ev->flags & ASIO_READ)
    ep.events |= EPOLLIN;

  if(ev->flags & ASIO_WRITE)
    ep.events |= EPOLLOUT;

  if(ev->flags & ASIO_TIMER)
  {
    ev->fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK);
    timer_set_nsec(ev->fd, ev->nsec);
    ep.events |= EPOLLIN;
  }

  if(ev->flags & ASIO_SIGNAL)
  {
    int sig = (int)ev->nsec;
    asio_event_t* prev = NULL;

#ifdef USE_VALGRIND
    ANNOTATE_HAPPENS_BEFORE(&b->sighandlers[sig]);
#endif
    if((sig < MAX_SIGNAL) &&
      atomic_compare_exchange_strong_explicit(&b->sighandlers[sig], &prev, ev,
      memory_order_release, memory_order_relaxed))
    {
      struct sigaction new_action;
      new_action.sa_handler = signal_handler;
      sigemptyset (&new_action.sa_mask);

      // ask to restart interrupted syscalls to match `signal` behavior
      new_action.sa_flags = SA_RESTART;

      sigaction(sig, &new_action, NULL);

      ev->fd = eventfd(0, EFD_NONBLOCK);
      ep.events |= EPOLLIN;
    } else {
      return;
    }
  }

  if(ev->flags & ASIO_ONESHOT)
  {
    ep.events |= EPOLLONESHOT;
  } else {
    // Only use edge triggering if one shot isn't enabled.
    // This is because of how the runtime gets notifications
    // from epoll in this ASIO thread and then notifies the
    // appropriate actor to read/write as necessary.
    // Specifically, it seems there's an edge case/race condition
    // with edge triggering where if there is already data waiting
    // on the socket, then epoll might not be triggering immediately
    // when an edge triggered epoll request is made.

    ep.events |= EPOLLET;
  }

  epoll_ctl(b->epfd, EPOLL_CTL_ADD, ev->fd, &ep);
}