Example #1
void
ck_barrier_centralized(struct ck_barrier_centralized *barrier,
    struct ck_barrier_centralized_state *state,
    unsigned int n_threads)
{
	unsigned int sense, value;

	/*
	 * Every execution context has a sense associated with it.
	 * This sense is reversed when the barrier is entered. Every
	 * thread will spin on the global sense until the last thread
	 * reverses it.
	 */
	sense = state->sense = ~state->sense;
	value = ck_pr_faa_uint(&barrier->value, 1);
	if (value == n_threads - 1) {
		ck_pr_store_uint(&barrier->value, 0);
		ck_pr_fence_memory();
		ck_pr_store_uint(&barrier->sense, sense);
		return;
	}

	ck_pr_fence_load();
	while (sense != ck_pr_load_uint(&barrier->sense))
		ck_pr_stall();

	ck_pr_fence_memory();
	return;
}
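
For context, here is a minimal usage sketch for the function above. It assumes the static initializers CK_BARRIER_CENTRALIZED_INITIALIZER and CK_BARRIER_CENTRALIZED_STATE_INITIALIZER from Concurrency Kit's ck_barrier.h; the worker() function and the n_threads plumbing are hypothetical.

#include <ck_barrier.h>
#include <stddef.h>

static ck_barrier_centralized_t barrier = CK_BARRIER_CENTRALIZED_INITIALIZER;
static unsigned int n_threads;

static void *
worker(void *arg)
{
	ck_barrier_centralized_state_t state =
	    CK_BARRIER_CENTRALIZED_STATE_INITIALIZER;

	(void)arg;

	/* ... phase 1 work ... */
	ck_barrier_centralized(&barrier, &state, n_threads);
	/* Every thread has finished phase 1 once we get here. */
	return NULL;
}
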
Example #2
hash_t *
hash_create (size_t size)
{
    hash_t *hash;

    debug_msg("hash_create size = %zd", size);

    hash = malloc(sizeof(*hash));
    if (hash == NULL)
    {
        debug_msg("hash malloc error in hash_create()");
        return NULL;
    }

    /*
     * Round size up to the nearest power of two. This lets bucket indices
     * be computed with a bitwise AND instead of a division-based modulus.
     * (The shift cascade below covers 32-bit sizes; a 64-bit size_t would
     * additionally need size |= size >> 32.)
     */
    size--;
    size |= size >> 1;
    size |= size >> 2;
    size |= size >> 4;
    size |= size >> 8;
    size |= size >> 16;
    size++;
    hash->size = size;

    debug_msg("hash->size is %zd", hash->size);

    hash->node = calloc(hash->size, sizeof (*hash->node));
    if (hash->node == NULL)
    {
        debug_msg("hash->node malloc error. freeing hash.");
        free(hash);
        return NULL;
    }

    hash->lock = calloc(hash->size, sizeof (*hash->lock));
    if (hash->lock == NULL)
    {
        debug_msg("hash->lock malloc error. freeing hash.");
        free(hash);
        return NULL;
    }
    /*
     * ck_rwlock_init just zeroes the lock and issues a memory fence.
     * calloc has already zeroed the array, so only the fence is needed.
     */
    ck_pr_fence_memory();

    return hash;
}
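
The point of the power-of-two rounding above is that a bucket index can then be computed with a bitwise AND instead of a modulus. A minimal sketch, assuming the hash_t above and a caller-supplied hash value h (the helper name is hypothetical):

#include <stddef.h>
#include <stdint.h>

/* With size a power of two, h % size equals h & (size - 1). */
static inline size_t
hash_bucket(const hash_t *hash, uint64_t h)
{
    return (size_t)(h & (hash->size - 1));
}
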
Example #3
int
main(void)
{
	int r = 0;

	/* Below serves as a marker. */
	ck_pr_sub_int(&r, 31337);

	/*
	 * This is a simple smoke test to help ensure that all fences either
	 * compile and execute or crash on the target. The fences below are
	 * generated according to the underlying memory model's ordering.
	 */
	ck_pr_fence_atomic();
	ck_pr_fence_atomic_store();
	ck_pr_fence_atomic_load();
	ck_pr_fence_store_atomic();
	ck_pr_fence_load_atomic();
	ck_pr_fence_load();
	ck_pr_fence_load_store();
	ck_pr_fence_store();
	ck_pr_fence_store_load();
	ck_pr_fence_memory();
	ck_pr_fence_release();
	ck_pr_fence_acquire();
	ck_pr_fence_acqrel();
	ck_pr_fence_lock();
	ck_pr_fence_unlock();

	/* Below serves as a marker. */
	ck_pr_sub_int(&r, 31337);

	/* The following are generated assuming RMO (relaxed memory order). */
	ck_pr_fence_strict_atomic();
	ck_pr_fence_strict_atomic_store();
	ck_pr_fence_strict_atomic_load();
	ck_pr_fence_strict_store_atomic();
	ck_pr_fence_strict_load_atomic();
	ck_pr_fence_strict_load();
	ck_pr_fence_strict_load_store();
	ck_pr_fence_strict_store();
	ck_pr_fence_strict_store_load();
	ck_pr_fence_strict_memory();
	ck_pr_fence_strict_release();
	ck_pr_fence_strict_acquire();
	ck_pr_fence_strict_acqrel();
	ck_pr_fence_strict_lock();
	ck_pr_fence_strict_unlock();
	return 0;
}
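
To make the store/load fences above concrete, here is the classic message-passing pattern they enable, a sketch built only from operations already used in this document: the producer orders its payload store before the flag store, and the consumer orders the flag load before the payload load.

#include <ck_pr.h>

static int message;
static unsigned int ready;

static void
producer(void)
{

	ck_pr_store_int(&message, 42);
	ck_pr_fence_store();	/* Payload is visible before the flag. */
	ck_pr_store_uint(&ready, 1);
	return;
}

static int
consumer(void)
{

	while (ck_pr_load_uint(&ready) == 0)
		ck_pr_stall();

	ck_pr_fence_load();	/* Flag is read before the payload. */
	return ck_pr_load_int(&message);
}
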
Example #4
static void
ck_barrier_combining_aux(struct ck_barrier_combining *barrier,
    struct ck_barrier_combining_group *tnode,
    unsigned int sense)
{

	/*
	 * If this is the last thread in the group, it moves on to the parent group.
	 * Otherwise, it spins on this group's sense.
	 */
	if (ck_pr_faa_uint(&tnode->count, 1) == tnode->k - 1) {
		/*
		 * We are the last thread to enter the barrier for the current
		 * group, so signal the parent group if one exists.
		 */
		if (tnode->parent != NULL)
			ck_barrier_combining_aux(barrier, tnode->parent, sense);

		/*
		 * Once the thread returns from its parent(s), it reinitializes the group's
		 * arrival count and signals other threads to continue by flipping the group
		 * sense. Order of these operations is not important since we assume a static
		 * number of threads are members of a barrier for the lifetime of the barrier.
		 * Since count is explicitly reinitialized, it is guaranteed that at any point
		 * tnode->count is equivalent to tnode->k if and only if that many threads
		 * are at the barrier.
		 */
		ck_pr_store_uint(&tnode->count, 0);
		ck_pr_fence_store();
		ck_pr_store_uint(&tnode->sense, ~tnode->sense);
	} else {
		ck_pr_fence_memory();
		while (sense != ck_pr_load_uint(&tnode->sense))
			ck_pr_stall();
	}

	return;
}
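
For completeness, a sketch of how such a combining tree is wired up before any thread enters the barrier. It assumes ck_barrier_combining_init() and ck_barrier_combining_group_init() as declared in Concurrency Kit's ck_barrier.h (consult the header for exact signatures); the one-root, one-leaf topology is just an illustration.

#include <ck_barrier.h>

static ck_barrier_combining_t barrier;
static ck_barrier_combining_group_t root_group, leaf_group;

static void
setup(unsigned int nthr)
{

	/* Install the root, then hang a group of nthr threads off it. */
	ck_barrier_combining_init(&barrier, &root_group);
	ck_barrier_combining_group_init(&barrier, &leaf_group, nthr);
	return;
}

Each thread then enters with its own thread-local state via ck_barrier_combining(&barrier, &leaf_group, &state).
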
Example #5
static void *
test_spmc(void *c)
{
	unsigned int observed = 0;
	unsigned long previous = 0;
	unsigned int seed = 0;
	int i, k, j, tid;
	struct context *context = c;
	ck_ring_buffer_t *buffer;

	buffer = context->buffer;
        if (aff_iterate(&a)) {
                perror("ERROR: Could not affine thread");
                exit(EXIT_FAILURE);
        }

	tid = ck_pr_faa_int(&eb, 1);
	ck_pr_fence_memory();
	while (ck_pr_load_int(&eb) != nthr - 1);

	for (i = 0; i < ITERATIONS; i++) {
		for (j = 0; j < size; j++) {
			struct entry *o;
			int spin;

			/* Keep trying until we encounter at least one node. */
			if (j & 1) {
				while (ck_ring_dequeue_spmc(&ring_spmc, buffer,
				    &o) == false);
			} else {
				while (ck_ring_trydequeue_spmc(&ring_spmc, buffer,
				    &o) == false);
			}

			observed++;
			if (o->value < 0
			    || o->value != o->tid
			    || o->magic != 0xdead
			    || (previous != 0 && previous >= o->value_long)) {
				ck_error("[0x%p] (%x) (%d, %d) >< (0, %d)\n",
					(void *)o, o->magic, o->tid, o->value, size);
			}

			o->magic = 0xbeef;
			o->value = -31337;
			o->tid = -31338;
			previous = o->value_long;

			if (ck_pr_faa_uint(&o->ref, 1) != 0) {
				ck_error("[%p] We dequeued twice.\n", (void *)o);
			}

			if ((i % 4) == 0) {
				spin = common_rand_r(&seed) % 16384;
				for (k = 0; k < spin; k++) {
					ck_pr_stall();
				}
			}

			free(o);
		}
	}

	fprintf(stderr, "[%d] Observed %u\n", tid, observed);
	return NULL;
}
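
For reference, the producer half that this consumer pairs with could look like the following sketch. It reuses the names above (ring_spmc, struct entry, ITERATIONS, size) but is hypothetical rather than the regression test's actual producer; ck_ring_enqueue_spmc() is the single-producer enqueue from ck_ring.h.

static void *
test_spmc_producer(void *c)
{
	struct context *context = c;
	ck_ring_buffer_t *buffer = context->buffer;
	unsigned long counter = 0;
	struct entry *entry;
	int i, j;

	for (i = 0; i < ITERATIONS; i++) {
		for (j = 0; j < size; j++) {
			/* Allocation failure handling is elided here. */
			entry = malloc(sizeof(*entry));
			entry->magic = 0xdead;
			entry->ref = 0;
			entry->tid = j;
			entry->value = j;
			entry->value_long = ++counter;

			/* Spin until a ring slot frees up. */
			while (ck_ring_enqueue_spmc(&ring_spmc, buffer,
			    entry) == false)
				ck_pr_stall();
		}
	}

	return NULL;
}
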
Example #6
void
ck_barrier_tournament(struct ck_barrier_tournament *barrier,
    struct ck_barrier_tournament_state *state)
{
	struct ck_barrier_tournament_round **rounds = ck_pr_load_ptr(&barrier->rounds);
	int round = 1;

	if (barrier->size == 1)
		return;

	for (;; ++round) {
		switch (rounds[state->vpid][round].role) {
		case CK_BARRIER_TOURNAMENT_BYE:
			break;
		case CK_BARRIER_TOURNAMENT_CHAMPION:
			/*
			 * The CK_BARRIER_TOURNAMENT_CHAMPION waits until it wins the tournament; it then
			 * sets the final flag before the wakeup phase of the barrier.
			 */
			while (ck_pr_load_uint(&rounds[state->vpid][round].flag) != state->sense)
				ck_pr_stall();

			ck_pr_store_uint(rounds[state->vpid][round].opponent, state->sense);
			goto wakeup;
		case CK_BARRIER_TOURNAMENT_DROPOUT:
			/* NOTREACHED */
			break;
		case CK_BARRIER_TOURNAMENT_LOSER:
			/*
			 * CK_BARRIER_TOURNAMENT_LOSERs set the flags of their opponents and wait until
			 * their opponents release them after the tournament is over.
			 */
			ck_pr_store_uint(rounds[state->vpid][round].opponent, state->sense);
			while (ck_pr_load_uint(&rounds[state->vpid][round].flag) != state->sense)
				ck_pr_stall();

			goto wakeup;
		case CK_BARRIER_TOURNAMENT_WINNER:
			/*
			 * CK_BARRIER_TOURNAMENT_WINNERs wait until their current opponent sets their flag; they then
			 * continue to the next round of the tournament.
			 */
			while (ck_pr_load_uint(&rounds[state->vpid][round].flag) != state->sense)
				ck_pr_stall();
			break;
		}
	}

wakeup:
	for (round -= 1 ;; --round) {
		switch (rounds[state->vpid][round].role) {
		case CK_BARRIER_TOURNAMENT_BYE:
			break;
		case CK_BARRIER_TOURNAMENT_CHAMPION:
			/* NOTREACHED */
			break;
		case CK_BARRIER_TOURNAMENT_DROPOUT:
			goto leave;
			break;
		case CK_BARRIER_TOURNAMENT_LOSER:
			/* NOTREACHED */
			break;
		case CK_BARRIER_TOURNAMENT_WINNER:
			/*
			 * Winners inform their old opponents the tournament is over
			 * by setting their flags.
			 */
			ck_pr_store_uint(rounds[state->vpid][round].opponent, state->sense);
			break;
		}
	}

leave:
	ck_pr_fence_memory();
	state->sense = ~state->sense;
	return;
}
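
Setup for the tournament barrier is the piece not shown above: the rounds matrix must be allocated and each thread must subscribe to obtain its vpid and sense. A minimal sketch, assuming ck_barrier_tournament_init(), ck_barrier_tournament_subscribe(), and ck_barrier_tournament_size() as declared in ck_barrier.h (verify the exact signatures there); error handling is elided.

#include <ck_barrier.h>
#include <stdlib.h>

static ck_barrier_tournament_t barrier;

static void
setup(unsigned int nthr)
{
	ck_barrier_tournament_round_t **rounds;
	unsigned int i, n_rounds;

	/* One row of rounds per thread. */
	n_rounds = ck_barrier_tournament_size(nthr);
	rounds = malloc(sizeof(*rounds) * nthr);
	for (i = 0; i < nthr; i++)
		rounds[i] = malloc(sizeof(**rounds) * n_rounds);

	ck_barrier_tournament_init(&barrier, rounds, nthr);
	return;
}

Each thread then calls ck_barrier_tournament_subscribe(&barrier, &state) once with its thread-local state before entering ck_barrier_tournament(&barrier, &state).
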