Example #1
static inline void
decrementcounter(isc_stats_t *stats, int counter) {
	isc_int32_t prev;

#ifdef ISC_RWLOCK_USEATOMIC
	isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_read);
#endif

#if ISC_STATS_USEMULTIFIELDS
	prev = isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].lo, -1);
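	/*
	 * If the lower 32-bit field wrapped past zero (its previous value
	 * was 0), carry the borrow into the higher field by decrementing
	 * it as well.
	 */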
	if (prev == 0)
		isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].hi,
				-1);
#elif defined(ISC_PLATFORM_HAVEXADDQ)
	UNUSED(prev);
	isc_atomic_xaddq((isc_int64_t *)&stats->counters[counter], -1);
#else
	UNUSED(prev);
	stats->counters[counter]--;
#endif

#ifdef ISC_RWLOCK_USEATOMIC
	isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_read);
#endif
}
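On platforms with a native 64-bit atomic add (the ISC_PLATFORM_HAVEXADDQ branch), the decrement above reduces to a single fetch-and-add of -1. Below is a minimal, self-contained sketch of that operation using standard C11 atomics; the demo_ names are illustrative and not part of the ISC API.

#include <stdatomic.h>
#include <stdint.h>

static _Atomic int64_t demo_counter;

static inline void
demo_decrement(void) {
	/* Comparable in effect to isc_atomic_xaddq(&counter, -1). */
	atomic_fetch_sub(&demo_counter, 1);
}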
Example #2
void
do_xaddq(isc_task_t *task, isc_event_t *ev) {
	counter_t *state = (counter_t *)ev->ev_arg;
	int i;

	for (i = 0 ; i < COUNTS_PER_ITERATION ; i++) {
		isc_atomic_xaddq(&counter_64, INCREMENT_64);
	}

	state->iteration++;
	if (state->iteration < ITERATIONS) {
		isc_task_send(task, &ev);
	} else {
		isc_event_free(&ev);
	}
}
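The driver above is from the ISC task-based atomics test: each task event performs COUNTS_PER_ITERATION additions with isc_atomic_xaddq, then reschedules itself until ITERATIONS passes have completed. A minimal sketch of the same stress test written against C11 atomics and POSIX threads is shown below; the thread count and constant values are assumptions chosen for illustration, not the values used by the ISC test suite.

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NTHREADS 4			/* illustrative values */
#define ITERATIONS 25
#define COUNTS_PER_ITERATION 1000
#define INCREMENT_64 INT64_C(0x0000000100000001)

static _Atomic int64_t counter_64;

static void *
worker(void *arg) {
	(void)arg;
	for (int it = 0; it < ITERATIONS; it++) {
		for (int i = 0; i < COUNTS_PER_ITERATION; i++) {
			/* One 64-bit atomic fetch-and-add per count. */
			atomic_fetch_add(&counter_64, INCREMENT_64);
		}
	}
	return NULL;
}

int
main(void) {
	pthread_t threads[NTHREADS];

	for (int i = 0; i < NTHREADS; i++)
		pthread_create(&threads[i], NULL, worker, NULL);
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(threads[i], NULL);

	/* Every increment must be visible exactly once in the total. */
	int64_t expected = (int64_t)NTHREADS * ITERATIONS *
			   COUNTS_PER_ITERATION * INCREMENT_64;
	assert(atomic_load(&counter_64) == expected);
	printf("counter_64 = %lld\n", (long long)atomic_load(&counter_64));
	return 0;
}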
Example #3
static inline void
incrementcounter(isc_stats_t *stats, int counter) {
	isc_int32_t prev;

#ifdef ISC_RWLOCK_USEATOMIC
	/*
	 * We use a "read" lock to prevent other threads from reading the
	 * counter while we are "writing" a counter field.  The write access
	 * itself is protected by the atomic operation.
	 */
	isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_read);
#endif

#if ISC_STATS_USEMULTIFIELDS
	prev = isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].lo, 1);
	/*
	 * If the lower 32-bit field overflows, increment the higher field.
	 * Note that it's *theoretically* possible for the lower field to
	 * overflow again before the higher field is incremented.  It doesn't
	 * matter, however, because we don't read the value until
	 * isc_stats_copy() is called, where the whole process is protected
	 * by the write (exclusive) lock.
	 */
	if (prev == (isc_int32_t)0xffffffff)
		isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].hi, 1);
#elif defined(ISC_PLATFORM_HAVEXADDQ)
	UNUSED(prev);
	isc_atomic_xaddq((isc_int64_t *)&stats->counters[counter], 1);
#else
	UNUSED(prev);
	stats->counters[counter]++;
#endif

#ifdef ISC_RWLOCK_USEATOMIC
	isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_read);
#endif
}
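The ISC_STATS_USEMULTIFIELDS branch splits each counter into 32-bit .lo and .hi halves so that platforms without a 64-bit atomic add can still maintain 64-bit counters. Below is a minimal sketch, assuming that two-field layout, of how a reader holding the exclusive (write) lock mentioned in the comment could reassemble the full value; the demo_ type and function names are illustrative, not the ISC API.

#include <stdint.h>

typedef struct {
	uint32_t hi;	/* carries the overflow from .lo */
	uint32_t lo;	/* incremented/decremented atomically */
} demo_counter32x2_t;

static inline uint64_t
demo_counter_read(const demo_counter32x2_t *c) {
	/* Valid only while updates are excluded, e.g. under the write lock. */
	return ((uint64_t)c->hi << 32) | (uint64_t)c->lo;
}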