void structAtomicStore() {
  struct foo f = {0};
  __c11_atomic_store(&bigAtomic, f, 5); // expected-error {{atomic store requires runtime support that is not available for this target}}

  struct bar b = {0};
  __atomic_store(&smallThing, &b, 5);

  __atomic_store(&bigThing, &f, 5);
}
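The literal 5 in these calls is the value of __ATOMIC_SEQ_CST. The excerpt omits the declarations it relies on; a plausible reconstruction (assumed here, not part of the test) pairs a struct too large for a lock-free atomic store with a small, oddly sized one:

/* Assumed declarations for the snippet above (not shown in the excerpt). */
struct foo {
  int big[128];   /* far too large to be stored with a single atomic instruction */
};
struct bar {
  char c[3];      /* small, but not a power-of-two size */
};

struct bar smallThing;
struct foo bigThing;
_Atomic(struct foo) bigAtomic;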
Example #2
File: locks.c Project: aglab2/darwin-xnu
/*
 *	Routine: hw_lock_unlock
 *
 *	Unconditionally release lock, release preemption level.
 */
void
hw_lock_unlock(hw_lock_t lock)
{
	__c11_atomic_store((_Atomic uintptr_t *)&lock->lock_data, 0, memory_order_release_smp);
#if __arm__ || __arm64__
	// ARM tests are only for open-source exclusion
	set_event();
#endif	// __arm__ || __arm64__
#if	CONFIG_DTRACE
	LOCKSTAT_RECORD(LS_LCK_SPIN_UNLOCK_RELEASE, lock, 0);
#endif /* CONFIG_DTRACE */
	enable_preemption();
}
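A hypothetical caller sketch, assuming the classic xnu hw_lock_init/hw_lock_lock counterparts to the routine above (the names, signatures, and hw_lock_data_t type are assumptions about this tree, not shown in the excerpt):

/* Hypothetical caller: a counter protected by a hw_lock_t spinlock.
 * hw_lock_lock() is assumed to take the lock and disable preemption;
 * hw_lock_unlock() above does the release store, then enable_preemption(). */
static hw_lock_data_t counter_lock;
static uint64_t       protected_counter;

void
counter_init(void)
{
	hw_lock_init(&counter_lock);
}

void
counter_increment(void)
{
	hw_lock_lock(&counter_lock);
	protected_counter++;
	hw_lock_unlock(&counter_lock);
}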
Example #3
template<typename Integer64>
__host__ __device__
typename enable_if<
  sizeof(Integer64) == 8
>::type
atomic_store(Integer64 *x, Integer64 y)
{
#if defined(__CUDA_ARCH__)
  atomicExch(x, y);
#elif defined(__GNUC__)
  return __atomic_store_n(x, y, __ATOMIC_SEQ_CST);
#elif defined(_MSC_VER)
  InterlockedExchange64(x, y); 
#elif defined(__clang__)
  __c11_atomic_store((_Atomic Integer64 *)x, y, __ATOMIC_SEQ_CST);
#else
#error "No atomic_store_n implementation."
#endif
}
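Note that Clang defines __GNUC__ as well, so the __clang__ branch is effectively unreachable in practice. On a plain C11 toolchain the same operation is simply the standard atomic_store from <stdatomic.h>; a minimal, self-contained sketch (illustrative only, not part of the project):

/* Illustrative C11 equivalent of the sequentially consistent 64-bit store. */
#include <stdatomic.h>
#include <stdint.h>

static _Atomic int64_t shared_value;

void publish(int64_t v)
{
    atomic_store_explicit(&shared_value, v, memory_order_seq_cst);
}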
Example #4
Boolean IOSharedDataQueue::dequeue(void *data, UInt32 *dataSize)
{
    Boolean             retVal          = TRUE;
    IODataQueueEntry *  entry           = 0;
    UInt32              entrySize       = 0;
    UInt32              headOffset      = 0;
    UInt32              tailOffset      = 0;
    UInt32              newHeadOffset   = 0;

	if (!dataQueue || (data && !dataSize)) {
        return false;
    }

    // Read head and tail with acquire barrier
    // See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
    headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
    tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_ACQUIRE);

    if (headOffset != tailOffset) {
        IODataQueueEntry *  head        = 0;
        UInt32              headSize    = 0;
        UInt32              queueSize   = getQueueSize();

        if (headOffset > queueSize) {
            return false;
        }

        head         = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
        headSize     = head->size;

        // we wrapped around to beginning, so read from there
        // either there was not even room for the header
        if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
            // or there was room for the header, but not for the data
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
            (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
            // Note: we have to wrap to the beginning even with the UINT32_MAX checks
            // because we have to support a queueSize of UINT32_MAX.
            entry           = dataQueue->queue;
            entrySize       = entry->size;
            if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
                (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
                return false;
            }
            newHeadOffset   = entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
            // else it is at the end
        } else {
            entry           = head;
            entrySize       = entry->size;
            if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
                (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headOffset) ||
                (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE + headOffset > queueSize)) {
                return false;
            }
            newHeadOffset   = headOffset + entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
        }
	} else {
		// empty queue
		return false;
	}

	if (data) {
		if (entrySize > *dataSize) {
			// not enough space
			return false;
		}
		memcpy(data, &(entry->data), entrySize);
		*dataSize = entrySize;
	}

	__c11_atomic_store((_Atomic UInt32 *)&dataQueue->head, newHeadOffset, __ATOMIC_RELEASE);

	if (newHeadOffset == tailOffset) {
		//
		// If we are making the queue empty, then we need to make sure
		// that either the enqueuer notices, or we notice the enqueue
		// that raced with our making of the queue empty.
		//
		__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
	}
    
    return retVal;
}
Example #5
// CHECK:    define void @test4([[QUAD_T:%.*]]*
// CHECK:      [[TEMP:%.*]] = alloca [[QUAD_T:%.*]], align 8
// CHECK-NEXT: [[T0:%.*]] = bitcast [[QUAD_T]]* [[TEMP]] to i8*
// CHECK-NEXT: [[T1:%.*]] = bitcast [[QUAD_T]]* {{%.*}} to i8*
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T0]], i8* [[T1]], i64 32, i32 8, i1 false)
// CHECK-NEXT: [[T0:%.*]] = bitcast [[QUAD_T]]* [[TEMP]] to i8*
// CHECK-NEXT: call void @__atomic_store(i64 32, i8* bitcast ([[QUAD_T]]* @a_pointer_quad to i8*), i8* [[T0]], i32 5)
void test4(pointer_quad_t quad) {
  __c11_atomic_store(&a_pointer_quad, quad, memory_order_seq_cst);
}
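The globals this CodeGen test and the related CHECK examples below (Examples #7 through #10) store into are roughly the following; the declarations are reconstructed here as an assumption, since the aggregator crops them from the test file:

/* Assumed declarations for the CodeGen tests (not shown in the excerpts). */
typedef struct { void *a, *b; } pointer_pair_t;
typedef struct { void *a, *b, *c, *d; } pointer_quad_t;

_Atomic(_Bool)          a_bool;
_Atomic(float)          a_float;
_Atomic(void *)         a_pointer;
_Atomic(pointer_pair_t) a_pointer_pair;
_Atomic(pointer_quad_t) a_pointer_quad;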
Example #6
Boolean IOSharedDataQueue::enqueue(void * data, UInt32 dataSize)
{
    UInt32             head;
    UInt32             tail;
    UInt32             newTail;
    const UInt32       entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
    IODataQueueEntry * entry;
    
    // Force a single read of head and tail
    // See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
    tail = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED);
    head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE);

    // Check for overflow of entrySize
    if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) {
        return false;
    }
    // Check for underflow of (getQueueSize() - tail)
    if (getQueueSize() < tail || getQueueSize() < head) {
        return false;
    }
    
    if ( tail >= head )
    {
        // Is there enough room at the end for the entry?
        if ((entrySize <= UINT32_MAX - tail) &&
            ((tail + entrySize) <= getQueueSize()) )
        {
            entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);
            
            entry->size = dataSize;
            memcpy(&entry->data, data, dataSize);
            
            // The tail can be out of bound when the size of the new entry
            // exactly matches the available space at the end of the queue.
            // The tail can range from 0 to dataQueue->queueSize inclusive.
            
            newTail = tail + entrySize;
        }
        else if ( head > entrySize )     // Is there enough room at the beginning?
        {
            // Wrap around to the beginning, but do not allow the tail to catch
            // up to the head.
            
            dataQueue->queue->size = dataSize;
            
            // We need to make sure that there is enough room to set the size before
            // doing this. The user client checks for this and will look for the size
            // at the beginning if there isn't room for it at the end.
            
            if ( ( getQueueSize() - tail ) >= DATA_QUEUE_ENTRY_HEADER_SIZE )
            {
                ((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize;
            }
            
            memcpy(&dataQueue->queue->data, data, dataSize);
            newTail = entrySize;
        }
        else
        {
            return false;    // queue is full
        }
    }
    else
    {
        // Do not allow the tail to catch up to the head when the queue is full.
        // That's why the comparison uses a '>' rather than '>='.
        
        if ( (head - tail) > entrySize )
        {
            entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);
            
            entry->size = dataSize;
            memcpy(&entry->data, data, dataSize);
            newTail = tail + entrySize;
        }
        else
        {
            return false;    // queue is full
        }
    }

	// Publish the data we just enqueued
	__c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE);

	if (tail != head) {
		//
		// The memory barrier below pairs with the one in ::dequeue
		// so that either our store to the tail cannot be missed by
		// the next dequeue attempt, or we will observe the dequeuer
		// making the queue empty.
		//
		// Of course, if we already think the queue is empty,
		// there's no point paying this extra cost.
		//
		__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
		head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
	}

	if (tail == head) {
		// Send notification (via mach message) that data is now available.
		sendDataAvailableNotification();
	}
	return true;
}
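The enqueue/dequeue pair above implements a single-producer/single-consumer index protocol: a relaxed load of your own index, an acquire load of the other side's, a release store to publish, and a seq_cst fence before re-checking whenever the queue may have just become empty or non-empty. A stripped-down sketch of the same protocol in plain C11 <stdatomic.h> (a toy four-slot ring with free-running indices, illustrative only, not IOKit code):

#include <stdatomic.h>
#include <stdbool.h>

#define TOY_SLOTS 4u

static _Atomic unsigned toy_head;   /* next slot to dequeue */
static _Atomic unsigned toy_tail;   /* next slot to enqueue */
static int toy_ring[TOY_SLOTS];

bool toy_enqueue(int value)
{
    unsigned t = atomic_load_explicit(&toy_tail, memory_order_relaxed);
    unsigned h = atomic_load_explicit(&toy_head, memory_order_acquire);

    if (t - h == TOY_SLOTS)                  /* ring is full */
        return false;

    toy_ring[t % TOY_SLOTS] = value;
    /* Publish the entry: pairs with the acquire load of toy_tail in toy_dequeue. */
    atomic_store_explicit(&toy_tail, t + 1, memory_order_release);

    if (t != h) {
        /* Queue looked non-empty: fence and re-read head so that either the
         * consumer sees our new tail, or we see it drain the queue
         * (cf. the fence at the end of ::enqueue above). */
        atomic_thread_fence(memory_order_seq_cst);
        h = atomic_load_explicit(&toy_head, memory_order_relaxed);
    }
    if (t == h) {
        /* Queue was (or just became) empty: this is where ::enqueue sends
         * its data-available notification. */
    }
    return true;
}

bool toy_dequeue(int *out)
{
    unsigned h = atomic_load_explicit(&toy_head, memory_order_relaxed);
    unsigned t = atomic_load_explicit(&toy_tail, memory_order_acquire);

    if (h == t)                              /* ring is empty */
        return false;

    *out = toy_ring[h % TOY_SLOTS];
    /* Retire the entry: pairs with the acquire load of toy_head in toy_enqueue. */
    atomic_store_explicit(&toy_head, h + 1, memory_order_release);

    if (h + 1 == t) {
        /* We may have just emptied the ring: same fence as ::dequeue so a
         * racing enqueue cannot be missed by both sides. */
        atomic_thread_fence(memory_order_seq_cst);
    }
    return true;
}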
Example #7
// CHECK:    define void @test3(
// CHECK:      [[PAIR:%.*]] = alloca [[PAIR_T:%.*]], align 8
// CHECK-NEXT: [[TEMP:%.*]] = alloca [[PAIR_T]], align 8
// CHECK:      llvm.memcpy
// CHECK-NEXT: [[T0:%.*]] = bitcast [[PAIR_T]]* [[TEMP]] to i128*
// CHECK-NEXT: [[T1:%.*]] = load i128* [[T0]], align 16
// CHECK-NEXT: store atomic i128 [[T1]], i128* bitcast ([[PAIR_T]]* @a_pointer_pair to i128*) seq_cst, align 16
void test3(pointer_pair_t pair) {
  __c11_atomic_store(&a_pointer_pair, pair, memory_order_seq_cst);
}
Example #8
// CHECK:    define void @test2()
// CHECK:      [[TEMP:%.*]] = alloca i8*, align 8
// CHECK-NEXT: store i8* @a_bool, i8** [[TEMP]]
// CHECK-NEXT: [[T0:%.*]] = bitcast i8** [[TEMP]] to i64*
// CHECK-NEXT: [[T1:%.*]] = load i64* [[T0]], align 8
// CHECK-NEXT: store atomic i64 [[T1]], i64* bitcast (i8** @a_pointer to i64*) seq_cst, align 8
void test2() {
  __c11_atomic_store(&a_pointer, &a_bool, memory_order_seq_cst);
}
Example #9
// CHECK:    define void @test1()
// CHECK:      [[TEMP:%.*]] = alloca float, align 4
// CHECK-NEXT: store float 3.000000e+00, float* [[TEMP]]
// CHECK-NEXT: [[T0:%.*]] = bitcast float* [[TEMP]] to i32*
// CHECK-NEXT: [[T1:%.*]] = load i32* [[T0]], align 4
// CHECK-NEXT: store atomic i32 [[T1]], i32* bitcast (float* @a_float to i32*) seq_cst, align 4
void test1() {
  __c11_atomic_store(&a_float, 3, memory_order_seq_cst);
}
Example #10
// CHECK:    define void @test0()
// CHECK:      [[TEMP:%.*]] = alloca i8, align 1
// CHECK-NEXT: store i8 1, i8* [[TEMP]]
// CHECK-NEXT: [[T0:%.*]] = load i8* [[TEMP]], align 1
// CHECK-NEXT: store atomic i8 [[T0]], i8* @a_bool seq_cst, align 1
void test0() {
  __c11_atomic_store(&a_bool, 1, memory_order_seq_cst);
}
Example #11
void atomic_flag_clear(volatile atomic_flag *object) {
  return __c11_atomic_store(&(object)->_Value, 0, __ATOMIC_SEQ_CST);
}
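Finally, a minimal usage sketch (illustrative only, not from the excerpt) for a clear like the one above, written against the standard <stdatomic.h> interface it implements: a flag-based spinlock where the clear is the unlock.

#include <stdatomic.h>

static volatile atomic_flag guard = ATOMIC_FLAG_INIT;

void guard_lock(void)
{
    /* Spin until the previous value of the flag was clear. */
    while (atomic_flag_test_and_set(&guard)) {
        /* busy-wait */
    }
}

void guard_unlock(void)
{
    atomic_flag_clear(&guard);   /* ends up in the SEQ_CST store shown above */
}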