Example #1
/* Declarations assumed by this GCC-testsuite-style fragment. */
extern void abort (void);
int v, res, count;

void
test_sub()
{
  v = res = 20;
  count = 0;

  __atomic_sub_fetch (&v, count + 1, __ATOMIC_RELAXED);
  if (v != --res)
    abort ();

  __atomic_fetch_sub (&v, count + 1, __ATOMIC_CONSUME);
  if (v != --res)
    abort ();

  __atomic_sub_fetch (&v, 1, __ATOMIC_ACQUIRE);
  if (v != --res)
    abort ();

  __atomic_fetch_sub (&v, 1, __ATOMIC_RELEASE);
  if (v != --res)
    abort ();

  __atomic_sub_fetch (&v, count + 1, __ATOMIC_ACQ_REL);
  if (v != --res)
    abort ();

  __atomic_fetch_sub (&v, count + 1, __ATOMIC_SEQ_CST);
  if (v != --res)
    abort ();
}
Example #2
/**
 * Destroy a session and free its resources.
 * @param[in] sess Session to destroy.
 */
void session_destroy(struct zsession *sess)
{
    // update counters
    __atomic_sub_fetch(&zinst()->sessions_cnt, 1, __ATOMIC_RELAXED);
    if (0 == sess->client->id) {
        __atomic_sub_fetch(&zinst()->unauth_sessions_cnt, 1, __ATOMIC_RELAXED);
    }

    pthread_rwlock_destroy(&sess->lock_client);
    client_session_remove(sess->client, sess);
    client_release(sess->client);

    if(sess->nat) znat_destroy(sess->nat);

    free(sess);
}
Example #3
void test_atomic_bool (_Atomic _Bool *a)
{
  enum { SEQ_CST = __ATOMIC_SEQ_CST };
  
  __atomic_fetch_add (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_fetch_add." } */
  __atomic_fetch_sub (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_fetch_sub." } */
  __atomic_fetch_and (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_fetch_and." } */
  __atomic_fetch_xor (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_fetch_xor." } */
  __atomic_fetch_or (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_fetch_or." } */
  __atomic_fetch_nand (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_fetch_nand." } */

  __atomic_add_fetch (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_add_fetch." } */
  __atomic_sub_fetch (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_sub_fetch." } */
  __atomic_and_fetch (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_and_fetch." } */
  __atomic_xor_fetch (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_xor_fetch." } */
  __atomic_or_fetch (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_or_fetch." } */
  __atomic_nand_fetch (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_nand_fetch." } */

  /* The following are valid and must be accepted.  */
  _Bool val = 0, ret = 0;
  __atomic_exchange (a, &val, &ret, SEQ_CST);
  __atomic_exchange_n (a, val, SEQ_CST);
  __atomic_compare_exchange (a, &val, &ret, !1, SEQ_CST, SEQ_CST);
  __atomic_compare_exchange_n (a, &val, ret, !1, SEQ_CST, SEQ_CST);
  __atomic_test_and_set (a, SEQ_CST);
  __atomic_clear (a, SEQ_CST);
}
Example #4
static void barrier_wait(uint32_t *barrier)
{
	/* Signal arrival: the last thread to arrive drives the counter to zero. */
	uint32_t val = __atomic_sub_fetch(barrier, 1, __ATOMIC_RELAXED);
	/* Spin until every participant has arrived. */
	while (val != 0)
		val = __atomic_load_n(barrier, __ATOMIC_RELAXED);

	/* Full fence so all pre-barrier writes are visible after the barrier. */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
}
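A minimal usage sketch for the barrier above (assumed driver code, not from the original source; the thread count and names are illustrative):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NTHREADS 4

/* Arrival counter, initialized to the number of participating threads. */
static uint32_t barrier = NTHREADS;

static void *worker(void *arg)
{
	barrier_wait(&barrier);   /* returns once all NTHREADS threads arrive */
	printf("thread %ld passed the barrier\n", (long)(intptr_t)arg);
	return NULL;
}

int main(void)
{
	pthread_t threads[NTHREADS];
	for (long i = 0; i < NTHREADS; i++)
		pthread_create(&threads[i], NULL, worker, (void *)(intptr_t)i);
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(threads[i], NULL);
	return 0;
}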
Example #5
template <typename T>
inline T Atomic<T>::subAndFetch ( const T& val )
{
#ifdef HAVE_NEW_GCC_ATOMIC_OPS
   return __atomic_sub_fetch( &_value, val, __ATOMIC_ACQ_REL);
#else
   return __sync_sub_and_fetch( &_value, val );
#endif
}
Example #6
static INLINE int tcache_get(const struct timeval *const tv, struct tm *const tm)
{
	unsigned mode;
	mode = __atomic_load_n(&g_tcache_mode, __ATOMIC_RELAXED);
	if (0 == (mode & TCACHE_FLUID))
	{
		/* Enter a reader section: bump the reader count packed next to the flag bits. */
		mode = __atomic_fetch_add(&g_tcache_mode, 1, __ATOMIC_ACQUIRE);
		if (0 == (mode & TCACHE_FLUID))
		{
			if (g_tcache_tv.tv_sec == tv->tv_sec)
			{
				/* Cache hit: copy the cached tm and leave the reader section. */
				*tm = g_tcache_tm;
				__atomic_sub_fetch(&g_tcache_mode, 1, __ATOMIC_RELEASE);
				return !0;
			}
			/* Cached second is out of date: flag the cache as stale. */
			__atomic_or_fetch(&g_tcache_mode, TCACHE_STALE, __ATOMIC_RELAXED);
		}
		/* Leave the reader section without a hit. */
		__atomic_sub_fetch(&g_tcache_mode, 1, __ATOMIC_RELEASE);
	}
	return 0;
}
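The fetch_add/sub_fetch pair in Example #6 brackets the cached read as a reader section, with the reader count sharing a word with the flag bits. A minimal sketch of that enter/leave idiom in isolation (assumed names, not from the original source):

static unsigned g_guard;   /* low bits hold the number of active readers */

static void reader_enter(void)
{
	/* Register as a reader; acquire orders the reads that follow. */
	__atomic_fetch_add(&g_guard, 1, __ATOMIC_ACQUIRE);
}

static void reader_leave(void)
{
	/* Retire as a reader; release orders the accesses that came before. */
	__atomic_sub_fetch(&g_guard, 1, __ATOMIC_RELEASE);
}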
Example #7
static void radeon_update_memory_usage(struct radeon_bo *bo,
				       unsigned mem_type, int sign)
{
	struct radeon_device *rdev = bo->rdev;
	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

	switch (mem_type) {
	case TTM_PL_TT:
		if (sign > 0)
			__atomic_add_fetch(&rdev->gtt_usage.counter, size, __ATOMIC_RELAXED);
		else
			__atomic_sub_fetch(&rdev->gtt_usage.counter, size, __ATOMIC_RELAXED);
		break;
	case TTM_PL_VRAM:
		if (sign > 0)
			__atomic_add_fetch(&rdev->vram_usage.counter, size, __ATOMIC_RELAXED);
		else
			__atomic_sub_fetch(&rdev->vram_usage.counter, size, __ATOMIC_RELAXED);
		break;
	}
}
Example #8
        //  Atomic subtraction. Returns false if the counter drops to zero.
        inline bool sub (integer_t decrement)
        {
#if defined ZMQ_ATOMIC_COUNTER_WINDOWS
            LONG delta = - ((LONG) decrement);
            integer_t old = InterlockedExchangeAdd ((LONG*) &value, delta);
            return old - decrement != 0;
#elif defined ZMQ_ATOMIC_INTRINSIC
            integer_t nv = __atomic_sub_fetch(&value, decrement, __ATOMIC_ACQ_REL);
            return nv != 0;
#elif defined ZMQ_ATOMIC_COUNTER_ATOMIC_H
            int32_t delta = - ((int32_t) decrement);
            integer_t nv = atomic_add_32_nv (&value, delta);
            return nv != 0;
#elif defined ZMQ_ATOMIC_COUNTER_TILE
            int32_t delta = - ((int32_t) decrement);
            integer_t nv = arch_atomic_add (&value, delta);
            return nv != 0;
#elif defined ZMQ_ATOMIC_COUNTER_X86
            integer_t oldval = -decrement;
            volatile integer_t *val = &value;
            __asm__ volatile ("lock; xaddl %0,%1"
                : "=r" (oldval), "=m" (*val)
                : "0" (oldval), "m" (*val)
                : "cc", "memory");
            return oldval != decrement;
#elif defined ZMQ_ATOMIC_COUNTER_ARM
            integer_t old_value, flag, tmp;
            __asm__ volatile (
                "       dmb     sy\n\t"
                "1:     ldrex   %0, [%5]\n\t"
                "       sub     %2, %0, %4\n\t"
                "       strex   %1, %2, [%5]\n\t"
                "       teq     %1, #0\n\t"
                "       bne     1b\n\t"
                "       dmb     sy\n\t"
                : "=&r"(old_value), "=&r"(flag), "=&r"(tmp), "+Qo"(value)
                : "Ir"(decrement), "r"(&value)
                : "cc");
            return old_value - decrement != 0;
#elif defined ZMQ_ATOMIC_COUNTER_MUTEX
            sync.lock ();
            value -= decrement;
            bool result = value ? true : false;
            sync.unlock ();
            return result;
#else
#error atomic_counter is not implemented for this platform
#endif
        }
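The ZMQ_ATOMIC_INTRINSIC branch above is the __atomic_sub_fetch path. The same "drops to zero" test as a standalone C sketch (illustrative, not taken from libzmq):

#include <stdbool.h>
#include <stdint.h>

/* Returns false exactly when the decrement takes the counter to zero. */
static bool counter_sub(uint32_t *value, uint32_t decrement)
{
	return __atomic_sub_fetch(value, decrement, __ATOMIC_ACQ_REL) != 0;
}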
Example #9
bool Threading::Semaphore::WaitWithoutYield(const wxTimeSpan& timeout)
{
	// This method is the reason why there has to be a special Darwin
	// implementation of Semaphore. Note that semaphore_timedwait() is prone
	// to returning with KERN_ABORTED, which basically signifies that some
	// signal has woken it up. The best official "documentation" for
	// semaphore_timedwait() is the way it's used in Grand Central Dispatch,
	// which is open-source.

	// on x86 platforms, mach_absolute_time() returns nanoseconds
	// TODO(aktau): on iOS a scale value from mach_timebase_info will be necessary
	u64 const kOneThousand = 1000;
	u64 const kOneBillion = kOneThousand * kOneThousand * kOneThousand;
	u64 const delta = timeout.GetMilliseconds().GetValue() * (kOneThousand * kOneThousand);
	mach_timespec_t ts;
	kern_return_t kr = KERN_ABORTED;
	for (u64 now = mach_absolute_time(), deadline = now + delta;
		kr == KERN_ABORTED; now = mach_absolute_time()) {
		if (now > deadline) {
			// timed out by definition
			return false;
		}

		u64 timeleft = deadline - now;
		ts.tv_sec = timeleft / kOneBillion;
		ts.tv_nsec = timeleft % kOneBillion;

		// possible return values of semaphore_timedwait() (from XNU sources):
		// internal kernel val -> return value
		// THREAD_INTERRUPTED  -> KERN_ABORTED
		// THREAD_TIMED_OUT    -> KERN_OPERATION_TIMED_OUT
		// THREAD_AWAKENED     -> KERN_SUCCESS
		// THREAD_RESTART      -> KERN_TERMINATED
		// default             -> KERN_FAILURE
		kr = semaphore_timedwait(m_sema, ts);
	}

	if (kr == KERN_OPERATION_TIMED_OUT) {
		return false;
	}

	// while it's entirely possible to have KERN_FAILURE here, we should
	// probably assert so we can study and correct the actual error here
	// (the thread dying while someone is waiting for it).
	MACH_CHECK(kr);

	__atomic_sub_fetch(&m_counter, 1, __ATOMIC_SEQ_CST);
	return true;
}
Example #10
void __hlt_object_unref(const hlt_type_info* ti, void* obj, hlt_execution_context* ctx)
{
    if ( ! obj )
        return;

    __hlt_gchdr* hdr = (__hlt_gchdr*)obj;

#ifdef DEBUG
    if ( ! ti->gc ) {
        _dbg_mem_gc("! unref", ti, obj, 0, "", ctx);
        _internal_memory_error(obj, "__hlt_object_unref", "object not garbage collected", ti);
    }
#endif

#ifdef HLT_ATOMIC_REF_COUNTING
    int64_t new_ref_cnt = __atomic_sub_fetch(&hdr->ref_cnt, 1, __ATOMIC_SEQ_CST);
#else
    int64_t new_ref_cnt = --hdr->ref_cnt;
#endif

#ifdef DEBUG
    const char* aux = 0;

    if ( new_ref_cnt == 0 )
        aux = "dtor";

#if 0
    // This is now ok!
    if ( new_ref_cnt < 0 ) {
        _dbg_mem_gc("! unref", ti, obj, 0, aux, ctx);
        _internal_memory_error(obj, "__hlt_object_unref", "bad reference count", ti);
    }
#endif
#endif

#ifdef DEBUG
    ++__hlt_globals()->num_unrefs;
    _dbg_mem_gc("unref", ti, obj, 0, aux, ctx);
#endif

    if ( new_ref_cnt == 0 )
        __hlt_memory_nullbuffer_add(ctx->nullbuffer, ti, hdr, ctx);
}
Example #11
mmap_area_t* free_mmap_area(mmap_area_t* mm, mmap_area_t** pmma, proc_t* proc) {
    /* Remaining references after this caller drops its own. */
    uint64_t use_count = __atomic_sub_fetch(&mm->count, 1, __ATOMIC_SEQ_CST);
    switch (mm->mtype) {
    case program_data:
    case stack_data:
    case heap_data:
    case kernel_allocated_heap_data: {
        deallocate(mm->vastart, mm->vaend-mm->vastart, proc->pml4);
    } break;
    case nondealloc_map:
        break;
    }
    mmap_area_t* mmn = mm->next;
    *pmma = mm->next;
    if (use_count == 0) {
        free(mm);
    }
    return mmn;
}
Example #12
/**
 * Release a session reference; the session is destroyed when the
 * last reference is dropped.
 * @param[in] sess Session to release.
 */
void session_release(struct zsession *sess)
{
    if (0 == __atomic_sub_fetch(&sess->refcnt, 1, __ATOMIC_RELAXED)) {
        session_destroy(sess);
    }
}
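Example #12 is the release half of a reference-counting pair. A sketch of the matching acquire half (hypothetical: the excerpt does not show the original increment helper, so the name is an assumption; the refcnt field is taken from the code above):

/* Hypothetical counterpart to session_release(): take a reference. */
static inline void session_ref(struct zsession *sess)
{
    __atomic_add_fetch(&sess->refcnt, 1, __ATOMIC_RELAXED);
}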
Example #13
void Threading::Semaphore::WaitWithoutYield()
{
	pxAssertMsg(!wxThread::IsMain(), "Unyielding semaphore wait issued from the main/gui thread.  Please use Wait() instead.");
	MACH_CHECK(semaphore_wait(m_sema));
	__atomic_sub_fetch(&m_counter, 1, __ATOMIC_SEQ_CST);
}
Example #14
/* Global assumed by this GCC-testsuite-style fragment. */
int v;

int
atomic_sub_fetch_RELAXED ()
{
  return __atomic_sub_fetch (&v, 4096, __ATOMIC_RELAXED);
}