Example #1
void
test_or ()
{
  v = 0;
  count = 1;

  __atomic_or_fetch (&v, count, __ATOMIC_RELAXED);
  if (v != 1)
    abort ();

  count *= 2;
  __atomic_fetch_or (&v, count, __ATOMIC_CONSUME);
  if (v != 3)
    abort ();

  count *= 2;
  __atomic_or_fetch (&v, 4, __ATOMIC_ACQUIRE);
  if (v != 7)
    abort ();

  count *= 2;
  __atomic_fetch_or (&v, 8, __ATOMIC_RELEASE);
  if (v != 15)
    abort ();

  count *= 2;
  __atomic_or_fetch (&v, count, __ATOMIC_ACQ_REL);
  if (v != 31)
    abort ();

  count *= 2;
  __atomic_fetch_or (&v, count, __ATOMIC_SEQ_CST);
  if (v != 63)
    abort ();
}
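Examples #1 and #5 are written in the style of the GCC atomic-builtin testsuite and refer to file-scope variables v and count that are not shown. A minimal harness along the following lines (the declarations and main are assumptions, not part of the original tests) makes the snippets compile and run stand-alone:

/* Hypothetical harness for the testsuite-style snippets (assumed, not from GCC). */
#include <stdlib.h>

int v, count;

int
main ()
{
  test_or ();   /* Example #1; test_fetch_or () from Example #5 fits the same harness */
  return 0;
}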
Example #2
// Expand #1:
//   Wait if another thread already holds the expansion token; return immediately if this thread is the first.
int tree_simple_begin_expand(TreeBlock* parent, BlockOffset parent_offset, TreeBlock **child) {
  BlockBits expansion_before = __atomic_fetch_or(
      &parent->expansion,
      BIT(parent_offset),
      __ATOMIC_ACQ_REL);
  if (!TEST_BIT(expansion_before, parent_offset)) {
    *child = NULL;
    return EXPAND_STATUS_FIRST;  // we've got to do it
  }

  ChildInfo* cinfo = &parent->children[parent_offset];
  TreeBlock* c;

  // If the expansion has not been published yet, wait for it.
  if ((c = __atomic_load_n(&cinfo->child, __ATOMIC_ACQUIRE)) == NULL) {
    // We've got to wait.
    for (;;) {
      EventCountKey key = event_count_prepare(&cinfo->event_count);
      if ((c = __atomic_load_n(&cinfo->child, __ATOMIC_ACQUIRE)) != NULL) {
        event_count_cancel(&cinfo->event_count);
        break;
      }
      event_count_wait(&cinfo->event_count, key);
    }
  }

  *child = c;
  return EXPAND_STATUS_DONE;
}
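Examples #2, #7 and #11 rely on BIT/TEST_BIT helpers and EXPAND_STATUS_* constants that are not shown here. A plausible minimal definition, assumed for illustration rather than taken from the original project, would be:

/* Assumed helper macros and status codes for the expansion examples. */
#define BIT(n)          (1u << (n))
#define TEST_BIT(x, n)  (((x) >> (n)) & 1u)

enum {
  EXPAND_STATUS_FIRST,      /* this thread won the token and must do the expansion */
  EXPAND_STATUS_EXPANDING,  /* another thread holds the token and is still working */
  EXPAND_STATUS_DONE        /* the child block has already been published */
};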
Example #3
void ObjectPool::bitset(uint64_t *ptr, size_t pos, char val)
{
    if (val)
        __atomic_fetch_or(ptr, (uint64_t) 1 << pos, __ATOMIC_SEQ_CST);
    else
        __atomic_fetch_and(ptr, ~((uint64_t)1 << pos), __ATOMIC_SEQ_CST);
}
Example #4
void test_atomic_bool (_Atomic _Bool *a)
{
  enum { SEQ_CST = __ATOMIC_SEQ_CST };
  
  __atomic_fetch_add (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_fetch_add." } */
  __atomic_fetch_sub (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_fetch_sub." } */
  __atomic_fetch_and (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_fetch_and." } */
  __atomic_fetch_xor (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_fetch_xor." } */
  __atomic_fetch_or (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_fetch_or." } */
  __atomic_fetch_nand (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_fetch_nand." } */

  __atomic_add_fetch (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_add_fetch." } */
  __atomic_sub_fetch (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_sub_fetch." } */
  __atomic_and_fetch (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_and_fetch." } */
  __atomic_xor_fetch (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_xor_fetch." } */
  __atomic_or_fetch (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_or_fetch." } */
  __atomic_nand_fetch (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_nand_fetch." } */

  /* The following are valid and must be accepted.  */
  _Bool val = 0, ret = 0;
  __atomic_exchange (a, &val, &ret, SEQ_CST);
  __atomic_exchange_n (a, val, SEQ_CST);
  __atomic_compare_exchange (a, &val, &ret, !1, SEQ_CST, SEQ_CST);
  __atomic_compare_exchange_n (a, &val, ret, !1, SEQ_CST, SEQ_CST);
  __atomic_test_and_set (a, SEQ_CST);
  __atomic_clear (a, SEQ_CST);
}
Example #5
void
test_fetch_or ()
{
  v = 0;
  count = 1;

  if (__atomic_fetch_or (&v, count, __ATOMIC_RELAXED) !=  0) 
    abort ();

  count *= 2;
  if (__atomic_fetch_or (&v, 2, __ATOMIC_CONSUME) !=  1) 
    abort ();

  count *= 2;
  if (__atomic_fetch_or (&v, count, __ATOMIC_ACQUIRE) !=  3) 
    abort ();

  count *= 2;
  if (__atomic_fetch_or (&v, 8, __ATOMIC_RELEASE) !=  7) 
    abort ();

  count *= 2;
  if (__atomic_fetch_or (&v, count, __ATOMIC_ACQ_REL) !=  15) 
    abort ();

  count *= 2;
  if (__atomic_fetch_or (&v, count, __ATOMIC_SEQ_CST) !=  31) 
    abort ();
}
Example #6
uint64 host_atomic_or(uint64* value, const uint64 op)
{
#if defined(__GNUC__)
    return __atomic_fetch_or( value, op, __ATOMIC_RELAXED );
#else
    static Mutex mutex;   // shared across calls: a per-call mutex would not make the fallback atomic
    ScopedLock lock( &mutex );

    const uint64 old = *value;
    *value |= op;
    return old;
#endif
}
Example #7
// Expand #2 (non-blocking variant):
//   Return immediately if another thread already holds the token and is still expanding.
int tree_simple_begin_expand_nowait(TreeBlock* parent, BlockOffset parent_offset, TreeBlock **child) {
  BlockBits expansion_before = __atomic_fetch_or(
      &parent->expansion,
      BIT(parent_offset),
      __ATOMIC_ACQ_REL);

  if (!TEST_BIT(expansion_before, parent_offset)) {
    *child = NULL;
    return EXPAND_STATUS_FIRST;  // we've got to do it
  }

  ChildInfo* cinfo = &parent->children[parent_offset];

  // Don't wait: just report whether another thread has already published the child.
  if ((*child = __atomic_load_n(&cinfo->child, __ATOMIC_ACQUIRE)) == NULL) return EXPAND_STATUS_EXPANDING;
  else return EXPAND_STATUS_DONE;
}
Example #8
__host__ __device__
typename enable_if<
  sizeof(Integer64) == 8,
  Integer64
>::type
atomic_fetch_or(Integer64 *x, Integer64 y)
{
#if defined(__CUDA_ARCH__)
  return atomicOr(x, y);
#elif defined(__GNUC__)
  return __atomic_fetch_or(x, y, __ATOMIC_SEQ_CST);
#elif defined(_MSC_VER)
  return InterlockedOr64(x, y);
#elif defined(__clang__)
  return __c11_atomic_fetch_or(x, y, __ATOMIC_SEQ_CST);
#else
#error "No atomic_fetch_or implementation."
#endif
}
Example #9
static inline void arch_perfBtsCount(honggfuzz_t * hfuzz, fuzzer_t * fuzzer)
{
    struct perf_event_mmap_page *pem = (struct perf_event_mmap_page *)fuzzer->linux.perfMmapBuf;
    struct bts_branch {
        uint64_t from;
        uint64_t to;
        uint64_t misc;
    };

    struct bts_branch *br = (struct bts_branch *)fuzzer->linux.perfMmapAux;
    for (; br < ((struct bts_branch *)(fuzzer->linux.perfMmapAux + pem->aux_head)); br++) {
        /*
         * Kernel sometimes reports branches from the kernel (iret), we are not interested in that as it
         * makes the whole concept of unique branch counting less predictable
         */
        if (__builtin_expect(br->from > 0xFFFFFFFF00000000, false)
            || __builtin_expect(br->to > 0xFFFFFFFF00000000, false)) {
            LOG_D("Adding branch %#018" PRIx64 " - %#018" PRIx64, br->from, br->to);
            continue;
        }
        if (br->from >= hfuzz->linux.dynamicCutOffAddr || br->to >= hfuzz->linux.dynamicCutOffAddr) {
            continue;
        }

        register size_t pos = 0UL;
        if (br->to == 0ULL) {
            pos = br->from % (hfuzz->bbMapSz * 8);
        } else {
            pos = (br->from * br->to) % (hfuzz->bbMapSz * 8);
        }

        size_t byteOff = pos / 8;
        uint8_t bitSet = (uint8_t) (1 << (pos % 8));

        register uint8_t prev =
            __atomic_fetch_or(&(hfuzz->bbMap[byteOff]), bitSet, __ATOMIC_SEQ_CST);
        if (!(prev & bitSet)) {
            fuzzer->linux.hwCnts.newBBCnt++;
        }
    }
}
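The core of Example #9 is a set-bit-and-test pattern: __atomic_fetch_or returns the previous byte value, so the caller can tell whether the bit was newly set and count it as a new basic block. A stripped-down sketch of that idea, with illustrative names not taken from honggfuzz:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Atomically set bit `pos` in a byte map; return true iff it was previously clear. */
static inline bool bitmap_set_is_new(uint8_t *map, size_t pos)
{
    uint8_t mask = (uint8_t) (1u << (pos % 8));
    uint8_t prev = __atomic_fetch_or(&map[pos / 8], mask, __ATOMIC_SEQ_CST);
    return (prev & mask) == 0;
}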
Example #10
P_LIB_API puint
p_atomic_int_or (volatile puint	*atomic,
		 puint		val)
{
	return (puint) __atomic_fetch_or (atomic, val, __ATOMIC_SEQ_CST);
}
Example #11
BOOL cnn_data_fetch_set_evaluated_bit(CNNData* data, unsigned char bit) {
  unsigned char value_before = __atomic_fetch_or(&data->evaluated, BIT(bit), __ATOMIC_RELEASE);
  event_count_broadcast(&data->event_counts[bit]);
  return TEST_BIT(value_before, bit) ? TRUE : FALSE;
}
Example #12
short
short_fetch_ior_release (short *ptr, int value)
{
  return __atomic_fetch_or (ptr, value, __ATOMIC_RELEASE);
}
Example #13
char
char_fetch_ior_release (char *ptr, int value)
{
  return __atomic_fetch_or (ptr, value, __ATOMIC_RELEASE);
}
Example #14
__int128_t
quad_fetch_ior_release (__int128_t *ptr, __int128_t value)
{
  return __atomic_fetch_or (ptr, value, __ATOMIC_RELEASE);
}
Example #15
long
long_fetch_ior_release (long *ptr, long value)
{
  return __atomic_fetch_or (ptr, value, __ATOMIC_RELEASE);
}
Example #16
void
__gcov_ior_profiler_atomic (gcov_type *counters, gcov_type value)
{
  __atomic_fetch_or (&counters[0], value, __ATOMIC_RELAXED);
}
Example #17
int
atomic_fetch_or_RELAXED ()
{
  return __atomic_fetch_or (&v, 4096, __ATOMIC_RELAXED);
}
Example #18
static __int128_t
quad_fetch_or (__int128_t *ptr, __int128_t value)
{
  return __atomic_fetch_or (ptr, value, __ATOMIC_ACQUIRE);
}
Example #19
P_LIB_API psize
p_atomic_pointer_or (volatile void	*atomic,
		     psize		val)
{
	return (psize) __atomic_fetch_or ((volatile pssize *) atomic, val, __ATOMIC_SEQ_CST);
}
Example #20
int
atomic_fetch_or_ACQUIRE (int a)
{
  return __atomic_fetch_or (&v, a, __ATOMIC_ACQUIRE);
}
Example #21
long
atomic_fetch_or_RELAXED (long a)
{
  return __atomic_fetch_or (&v, a, __ATOMIC_RELAXED);
}