Example No. 1
void structAtomicLoad() {
  struct foo f = __c11_atomic_load(&bigAtomic, 5); // expected-error {{atomic load requires runtime support that is not available for this target}}
  struct bar b;
  __atomic_load(&smallThing, &b, 5);

  __atomic_load(&bigThing, &f, 5);
}
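The types and globals used above (struct foo, struct bar, bigAtomic, smallThing, bigThing) are evidently declared elsewhere in the clang test file, so the snippet is not self-contained. For contrast, here is a minimal sketch, not taken from that test, of the lock-free case that __c11_atomic_load accepts without any runtime library support; the counter variable is illustrative, and the named __ATOMIC_SEQ_CST constant is the same ordering as the raw literal 5 used above.

// Minimal sketch (not part of the clang test): loading a lock-free
// _Atomic int needs no libatomic runtime support.
_Atomic int counter;

int read_counter(void) {
    return __c11_atomic_load(&counter, __ATOMIC_SEQ_CST); // 5 == __ATOMIC_SEQ_CST
}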
Example No. 2
IODataQueueEntry * IOSharedDataQueue::peek()
{
    IODataQueueEntry *entry      = 0;
    UInt32            headOffset;
    UInt32            tailOffset;

    if (!dataQueue) {
        return NULL;
    }

    // Read head and tail with acquire barrier
    // See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
    headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
    tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_ACQUIRE);

    if (headOffset != tailOffset) {
        IODataQueueEntry *  head        = 0;
        UInt32              headSize    = 0;
        // Note: this declaration shadows the headOffset loaded with the atomic
        // builtin above and re-reads dataQueue->head directly.
        UInt32              headOffset  = dataQueue->head;
        UInt32              queueSize   = getQueueSize();

        if (headOffset >= queueSize) {
            return NULL;
        }

        head         = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
        headSize     = head->size;

        // Check if there's enough room before the end of the queue for a header.
        // If there is room, check if there's enough room to hold the header and
        // the data.

        if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
            (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
            // No room for the header or the data, wrap to the beginning of the queue.
            // Note: we wrap even with the UINT32_MAX checks because we have to
            // support a queueSize of UINT32_MAX.
            entry = dataQueue->queue;
        } else {
            entry = head;
        }
    }

    return entry;
}
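In peek(), the consumer reads its own head index with a relaxed load and the producer-owned tail index with an acquire load; the acquire pairs with the producer's release store to tail, so the entry bytes written before that store are visible before they are inspected. Below is a minimal sketch of the same pair of loads using standard <stdatomic.h> names; the queue_header struct is a hypothetical stand-in for the shared IODataQueueMemory header, not the real layout.

#include <stdatomic.h>
#include <stdint.h>

// Hypothetical stand-in for the shared queue header; illustration only.
struct queue_header {
    _Atomic uint32_t head;   // advanced by the consumer
    _Atomic uint32_t tail;   // advanced by the producer
};

// Consumer side: head is our own index, so relaxed is enough; tail must be
// read with acquire so the producer's writes to the entry are visible.
static void read_offsets(struct queue_header *q,
                         uint32_t *headOffset, uint32_t *tailOffset)
{
    *headOffset = atomic_load_explicit(&q->head, memory_order_relaxed);
    *tailOffset = atomic_load_explicit(&q->tail, memory_order_acquire);
}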
Example No. 3
__host__ __device__
typename enable_if<
  sizeof(Integer64) == 8,
  Integer64
>::type
atomic_load(const Integer64 *x)
{
#if defined(__CUDA_ARCH__)
  // Device code has no dedicated atomic load, so emulate one by adding 0.
  return atomicAdd(const_cast<Integer64*>(x), Integer64(0));
#elif defined(__GNUC__)
  // GCC-compatible compilers (including clang, which also defines __GNUC__)
  // provide the __atomic builtins.
  return __atomic_load_n(x, __ATOMIC_SEQ_CST);
#elif defined(_MSC_VER)
  // MSVC: emulate the load with a 64-bit interlocked add of 0.
  return InterlockedExchangeAdd64(
      reinterpret_cast<volatile LONG64 *>(const_cast<Integer64 *>(x)),
      LONG64(0));
#elif defined(__clang__)
  // Only reached if __GNUC__ is somehow not defined; the builtin needs an
  // _Atomic-qualified pointer and an explicit memory order.
  return __c11_atomic_load((_Atomic(Integer64) *)x, __ATOMIC_SEQ_CST);
#else
#error "No atomic_load implementation."
#endif
}
Example No. 4
 long use_count() const // nothrow
 {
     return __c11_atomic_load( const_cast< atomic_int_least32_t* >( &use_count_ ), __ATOMIC_ACQUIRE );
 }
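The atomic_int_least32_t typedef and the enclosing class are not shown above; the const_cast is there only because the member function is const. A self-contained C sketch of the same acquire-load-of-a-refcount idiom follows (illustrative, not the original smart-pointer implementation; since C has no const member functions, the const lives on the parameter instead).

#include <stdint.h>

// Plausible definition of the counter type: an _Atomic-qualified
// int_least32_t (an assumption, mirroring the C++ excerpt above).
typedef _Atomic int_least32_t atomic_int_least32_t;

// Acquire-load the reference count; the cast drops const exactly as the
// const_cast above drops the const added by the const member function.
static long use_count(const atomic_int_least32_t *use_count_)
{
    return __c11_atomic_load((atomic_int_least32_t *)use_count_,
                             __ATOMIC_ACQUIRE);
}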
Example No. 5
Boolean IOSharedDataQueue::dequeue(void *data, UInt32 *dataSize)
{
    Boolean             retVal          = TRUE;
    IODataQueueEntry *  entry           = 0;
    UInt32              entrySize       = 0;
    UInt32              headOffset      = 0;
    UInt32              tailOffset      = 0;
    UInt32              newHeadOffset   = 0;

	if (!dataQueue || (data && !dataSize)) {
        return false;
    }

    // Read head and tail with acquire barrier
    // See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
    headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
    tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_ACQUIRE);

    if (headOffset != tailOffset) {
        IODataQueueEntry *  head        = 0;
        UInt32              headSize    = 0;
        UInt32              queueSize   = getQueueSize();

        if (headOffset > queueSize) {
            return false;
        }

        head         = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
        headSize     = head->size;

        // we wrapped around to the beginning, so read from there:
        // either there was not even room for the header
        if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
            // or there was room for the header, but not for the data
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
            (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
            // Note: we have to wrap to the beginning even with the UINT32_MAX checks
            // because we have to support a queueSize of UINT32_MAX.
            entry           = dataQueue->queue;
            entrySize       = entry->size;
            if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
                (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
                return false;
            }
            newHeadOffset   = entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
            // else it is at the end
        } else {
            entry           = head;
            entrySize       = entry->size;
            if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
                (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headOffset) ||
                (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE + headOffset > queueSize)) {
                return false;
            }
            newHeadOffset   = headOffset + entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
        }
	} else {
		// empty queue
		return false;
	}

	if (data) {
		if (entrySize > *dataSize) {
			// not enough space
			return false;
		}
		memcpy(data, &(entry->data), entrySize);
		*dataSize = entrySize;
	}

	__c11_atomic_store((_Atomic UInt32 *)&dataQueue->head, newHeadOffset, __ATOMIC_RELEASE);

	if (newHeadOffset == tailOffset) {
		//
		// If we are making the queue empty, then we need to make sure
		// that either the enqueuer notices, or we notice the enqueue
		// that raced with our making of the queue empty.
		//
		__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
	}
    
    return retVal;
}
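The chained UINT32_MAX comparisons in peek() and dequeue() are an overflow-safe way of evaluating offset + DATA_QUEUE_ENTRY_HEADER_SIZE + size <= queueSize without letting any 32-bit addition wrap. The same idiom in isolation, as a sketch; HEADER_SIZE is a stand-in constant, not the real DATA_QUEUE_ENTRY_HEADER_SIZE value.

#include <stdbool.h>
#include <stdint.h>

#define HEADER_SIZE 4u   // stand-in for DATA_QUEUE_ENTRY_HEADER_SIZE

// Returns true when a header plus `size` payload bytes starting at `offset`
// fit inside a queue of `queueSize` bytes, checking each addition against
// uint32_t overflow before performing it.
static bool entry_fits(uint32_t offset, uint32_t size, uint32_t queueSize)
{
    if (offset > UINT32_MAX - HEADER_SIZE) {
        return false;                     // offset + HEADER_SIZE would wrap
    }
    if (offset + HEADER_SIZE > queueSize) {
        return false;                     // no room for the header
    }
    if (offset + HEADER_SIZE > UINT32_MAX - size) {
        return false;                     // adding the payload would wrap
    }
    return offset + HEADER_SIZE + size <= queueSize;
}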
Example No. 6
Boolean IOSharedDataQueue::enqueue(void * data, UInt32 dataSize)
{
    UInt32             head;
    UInt32             tail;
    UInt32             newTail;
    const UInt32       entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
    IODataQueueEntry * entry;
    
    // Force a single read of head and tail
    // See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
    tail = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED);
    head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE);

    // Check for overflow of entrySize
    if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) {
        return false;
    }
    // Check for underflow of (getQueueSize() - tail)
    if (getQueueSize() < tail || getQueueSize() < head) {
        return false;
    }
    
    if ( tail >= head )
    {
        // Is there enough room at the end for the entry?
        if ((entrySize <= UINT32_MAX - tail) &&
            ((tail + entrySize) <= getQueueSize()) )
        {
            entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);
            
            entry->size = dataSize;
            memcpy(&entry->data, data, dataSize);
            
            // The tail can be out of bounds when the size of the new entry
            // exactly matches the available space at the end of the queue.
            // The tail can range from 0 to dataQueue->queueSize inclusive.
            
            newTail = tail + entrySize;
        }
        else if ( head > entrySize )     // Is there enough room at the beginning?
        {
            // Wrap around to the beginning, but do not allow the tail to catch
            // up to the head.
            
            dataQueue->queue->size = dataSize;
            
            // We need to make sure that there is enough room to set the size before
            // doing this. The user client checks for this and will look for the size
            // at the beginning if there isn't room for it at the end.
            
            if ( ( getQueueSize() - tail ) >= DATA_QUEUE_ENTRY_HEADER_SIZE )
            {
                ((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize;
            }
            
            memcpy(&dataQueue->queue->data, data, dataSize);
            newTail = entrySize;
        }
        else
        {
            return false;    // queue is full
        }
    }
    else
    {
        // Do not allow the tail to catch up to the head when the queue is full.
        // That's why the comparison uses a '>' rather than '>='.
        
        if ( (head - tail) > entrySize )
        {
            entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);
            
            entry->size = dataSize;
            memcpy(&entry->data, data, dataSize);
            newTail = tail + entrySize;
        }
        else
        {
            return false;    // queue is full
        }
    }

	// Publish the data we just enqueued
	__c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE);

	if (tail != head) {
		//
		// The memory barrier below pairs with the one in ::dequeue
		// so that either our store to the tail cannot be missed by
		// the next dequeue attempt, or we will observe the dequeuer
		// making the queue empty.
		//
		// Of course, if we already think the queue is empty,
		// there's no point paying this extra cost.
		//
		__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
		head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
	}

	if (tail == head) {
		// Send notification (via mach message) that data is now available.
		sendDataAvailableNotification();
	}
	return true;
}
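The seq_cst fences in enqueue() and dequeue() pair with each other: after publishing the new tail with a release store, the producer either has that store observed by the next dequeue, or it observes the dequeuer making the queue empty and sends the data-available notification itself. A condensed sketch of the producer-side sequence using <stdatomic.h>; the function and parameter names are illustrative, not the IOSharedDataQueue API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

// Producer side, condensed: store the new tail with release, then, if the
// queue did not already look empty, fence and re-read head with relaxed
// ordering. Returning true means the queue looked empty, so the caller
// should send the data-available notification.
static bool publish_tail(_Atomic uint32_t *tail, _Atomic uint32_t *head,
                         uint32_t oldTail, uint32_t newTail, uint32_t oldHead)
{
    atomic_store_explicit(tail, newTail, memory_order_release);

    uint32_t headNow = oldHead;
    if (oldTail != oldHead) {
        atomic_thread_fence(memory_order_seq_cst);
        headNow = atomic_load_explicit(head, memory_order_relaxed);
    }

    return oldTail == headNow;
}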
Example No. 7
// CHECK: void test13() {
// CHECK:   _Atomic(int) i;
// CHECK:   __c11_atomic_init(&i, 0);
// CHECK:   __c11_atomic_load(&i, 0);
// CHECK: }
void test13() {
  _Atomic(int) i;
  __c11_atomic_init(&i, 0);
  __c11_atomic_load(&i, 0);
}
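The // CHECK: lines are FileCheck patterns: the test feeds the translation unit back through clang (most likely via -ast-print) and verifies that the __c11_atomic_init and __c11_atomic_load calls are reproduced unchanged. A plausible driver line for this kind of test, shown only as an assumption rather than a quote from the actual file:

// RUN: %clang_cc1 -ast-print %s | FileCheck %s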