Example #1
CK_CC_INLINE static bool
ck_barrier_mcs_check_children(unsigned int *childnotready)
{

	if (ck_pr_load_uint(&childnotready[0]) != 0)
		return (false);
	if (ck_pr_load_uint(&childnotready[1]) != 0)
		return (false);
	if (ck_pr_load_uint(&childnotready[2]) != 0)
		return (false);
	if (ck_pr_load_uint(&childnotready[3]) != 0)
		return (false);

	return (true);
}
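For reference, a plausible shape of the per-node structure these MCS-barrier helpers operate on (field names inferred from the code in this section; the exact layout is an assumption, not the library's definition):

struct ck_barrier_mcs {
	unsigned int tid;
	unsigned int *children[2];	/* Pointers into each child's childnotready slots. */
	unsigned int childnotready[4];	/* Cleared by children as they arrive. */
	unsigned int havechild[4];	/* Reload values used to reinitialize childnotready. */
	unsigned int *parent;		/* Pointer into the parent's childnotready slot. */
	unsigned int parentsense;	/* Flipped by the parent during wakeup. */
	unsigned int sense;
};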
Example #2
static inline unsigned int
ck_rhs_map_bound_get(struct ck_rhs_map *m, unsigned long h)
{
	unsigned long offset = h & m->mask;
	unsigned int r = CK_RHS_WORD_MAX;

	if (m->read_mostly)
		r = ck_pr_load_uint(&m->probe_maximum);
	else {
		r = CK_RHS_LOAD(&m->entries.descs[offset].probe_bound);
		if (r == CK_RHS_WORD_MAX)
			r = ck_pr_load_uint(&m->probe_maximum);
	}
	return r;
}
Example #3
void
ck_barrier_mcs(struct ck_barrier_mcs *barrier,
               struct ck_barrier_mcs_state *state)
{

	/*
	 * Wait until all children have reached the barrier and are done waiting
	 * for their children.
	 */
	while (ck_barrier_mcs_check_children(barrier[state->vpid].childnotready) == false)
		ck_pr_stall();

	/* Reinitialize for next barrier. */
	ck_barrier_mcs_reinitialize_children(&barrier[state->vpid]);

	/* Inform the parent that this thread and its children have arrived at the barrier. */
	ck_pr_store_uint(barrier[state->vpid].parent, 0);

	/* Wait until parent indicates all threads have arrived at the barrier. */
	if (state->vpid != 0) {
		while (ck_pr_load_uint(&barrier[state->vpid].parentsense) != state->sense)
			ck_pr_stall();
	}

	/* Inform children of successful barrier. */
	ck_pr_store_uint(barrier[state->vpid].children[0], state->sense);
	ck_pr_store_uint(barrier[state->vpid].children[1], state->sense);
	state->sense = ~state->sense;
	return;
}
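ck_barrier_mcs_reinitialize_children is called above but not shown. A minimal sketch consistent with the four-slot check in Example #1, assuming a havechild array records which slots are occupied:

CK_CC_INLINE static void
ck_barrier_mcs_reinitialize_children(struct ck_barrier_mcs *node)
{

	ck_pr_store_uint(&node->childnotready[0], node->havechild[0]);
	ck_pr_store_uint(&node->childnotready[1], node->havechild[1]);
	ck_pr_store_uint(&node->childnotready[2], node->havechild[2]);
	ck_pr_store_uint(&node->childnotready[3], node->havechild[3]);
	return;
}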
Example #4
void
ck_barrier_dissemination(struct ck_barrier_dissemination *barrier,
			 struct ck_barrier_dissemination_state *state)
{
	unsigned int i;
	unsigned int size = barrier->size;

	for (i = 0; i < size; ++i) {
		/* Unblock current partner. */
		ck_pr_store_uint(barrier[state->tid].flags[state->parity][i].pflag, state->sense);

		/* Wait until some other thread unblocks this one. */
		while (ck_pr_load_uint(&barrier[state->tid].flags[state->parity][i].tflag) != state->sense)
			ck_pr_stall();
	}

	/*
	 * Dissemination barriers use two sets of flags to prevent race conditions
	 * between successive calls to the barrier. Parity indicates which set will
	 * be used for the next barrier. They also use a sense reversal technique 
	 * to avoid re-initialization of the flags for every two calls to the barrier.
	 */
	if (state->parity == 1)
		state->sense = ~state->sense;

	state->parity = 1 - state->parity;
	return;
}
Example #5
static CK_CC_INLINE void
rwlock_read_lock(rwlock_t *rw)
{

        for (;;) {
                while (ck_pr_load_uint(&rw->writer.value) != 0)
                        ck_pr_stall();

                ck_pr_inc_uint(&rw->readers);
                if (ck_pr_load_uint(&rw->writer.value) == 0)
                        break;
                ck_pr_dec_uint(&rw->readers);
        }

        return;
}
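The matching read-side unlock is not shown; under the same reader-counter scheme it reduces to a single atomic decrement. A minimal sketch:

static CK_CC_INLINE void
rwlock_read_unlock(rwlock_t *rw)
{

        ck_pr_dec_uint(&rw->readers);
        return;
}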
Example #6
void
ck_barrier_centralized(struct ck_barrier_centralized *barrier,
    struct ck_barrier_centralized_state *state,
    unsigned int n_threads)
{
	unsigned int sense, value;

	/*
	 * Every execution context has a sense associated with it.
	 * This sense is reversed when the barrier is entered. Every
	 * thread will spin on the global sense until the last thread
	 * reverses it.
	 */
	sense = state->sense = ~state->sense;
	value = ck_pr_faa_uint(&barrier->value, 1);
	if (value == n_threads - 1) {
		ck_pr_store_uint(&barrier->value, 0);
		ck_pr_fence_memory();
		ck_pr_store_uint(&barrier->sense, sense);
		return;
	}

	ck_pr_fence_load();
	while (sense != ck_pr_load_uint(&barrier->sense))
		ck_pr_stall();

	ck_pr_fence_memory();
	return;
}
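A minimal usage sketch: all threads share one barrier object while each keeps private sense state. The initializer macros and the worker wiring here are illustrative assumptions, not taken from this section:

static struct ck_barrier_centralized barrier =
    CK_BARRIER_CENTRALIZED_INITIALIZER;
static unsigned int n_threads;

static void *
worker(void *arg)
{
	struct ck_barrier_centralized_state state =
	    CK_BARRIER_CENTRALIZED_STATE_INITIALIZER;

	(void)arg;
	/* ... phase one work ... */
	ck_barrier_centralized(&barrier, &state, n_threads);
	/* ... phase two work, visible only after all threads arrive ... */
	return NULL;
}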
Example #7
static void *
test(void *c)
{
	struct context *context = c;
	struct entry *entry;
	ck_fifo_spsc_entry_t *fifo_entry;
	int i, j;

	if (aff_iterate(&a)) {
		perror("ERROR: Could not affine thread");
		exit(EXIT_FAILURE);
	}

#ifdef DEBUG
	fprintf(stderr, "%p %u: %u -> %u\n", fifo+context->tid, context->tid, context->previous, context->tid);
#endif

	if (context->tid == 0) {
		struct entry *entries;

		entries = malloc(sizeof(struct entry) * size);
		assert(entries != NULL);

		for (i = 0; i < size; i++) {
			entries[i].value = i;
			entries[i].tid = 0;

			fifo_entry = malloc(sizeof(ck_fifo_spsc_entry_t));
			ck_fifo_spsc_enqueue(fifo + context->tid, fifo_entry, entries + i);
		}
	}

	ck_pr_inc_uint(&barrier);
	while (ck_pr_load_uint(&barrier) < (unsigned int)nthr);

	for (i = 0; i < ITERATIONS; i++) {
		for (j = 0; j < size; j++) {
			while (ck_fifo_spsc_dequeue(fifo + context->previous, &entry) == false);
			if (context->previous != (unsigned int)entry->tid) {
				ck_error("T [%u:%p] %u != %u\n",
					context->tid, (void *)entry, entry->tid, context->previous);
			}

			if (entry->value != j) {
				ck_error("V [%u:%p] %u != %u\n",
					context->tid, (void *)entry, entry->value, j);
			}

			entry->tid = context->tid;
			fifo_entry = ck_fifo_spsc_recycle(fifo + context->tid);
			if (fifo_entry == NULL)
				fifo_entry = malloc(sizeof(ck_fifo_spsc_entry_t));

			ck_fifo_spsc_enqueue(fifo + context->tid, fifo_entry, entry);
		}
	}

	return (NULL);
}
Example #8
static void *
test(void *c)
{
	struct context *context = c;
	struct entry *entry;
	ck_hp_fifo_entry_t *fifo_entry;
	ck_hp_record_t record;
	int i, j;

	if (aff_iterate(&a)) {
		perror("ERROR: Could not affine thread");
		exit(EXIT_FAILURE);
	}

	ck_hp_register(&fifo_hp, &record, malloc(sizeof(void *) * 2));
	ck_pr_inc_uint(&barrier);
	while (ck_pr_load_uint(&barrier) < (unsigned int)nthr);

	for (i = 0; i < ITERATIONS; i++) {
		for (j = 0; j < size; j++) {
			fifo_entry = malloc(sizeof(ck_hp_fifo_entry_t));
			entry = malloc(sizeof(struct entry));
			entry->tid = context->tid;
			ck_hp_fifo_enqueue_mpmc(&record, &fifo, fifo_entry, entry);

			fifo_entry = ck_hp_fifo_dequeue_mpmc(&record, &fifo, &entry);
			if (fifo_entry == NULL) {
				fprintf(stderr, "ERROR [%u] Queue should never be empty.\n", context->tid);
				exit(EXIT_FAILURE);
			}

			if (entry->tid < 0 || entry->tid >= nthr) {
				fprintf(stderr, "ERROR [%u] Incorrect value in entry.\n", entry->tid);
				exit(EXIT_FAILURE);
			}

			ck_hp_free(&record, &fifo_entry->hazard, fifo_entry, fifo_entry);
		}
	}

	ck_pr_inc_uint(&e_barrier);
	while (ck_pr_load_uint(&e_barrier) < (unsigned int)nthr);

	return (NULL);
}
Example #9
static void *
writer_thread(void *unused)
{
	unsigned int i;
	unsigned int iteration = 0;

	(void)unused;

	for (;;) {
		iteration++;
		ck_epoch_write_begin(&epoch_wr);
		for (i = 1; i <= writer_max; i++) {
			if (ck_bag_put_spmc(&bag, (void *)(uintptr_t)i) == false) {
				perror("ck_bag_put_spmc");
				exit(EXIT_FAILURE);
			}

			if (ck_bag_member_spmc(&bag, (void *)(uintptr_t)i) == false) {
				fprintf(stderr, "ck_bag_put_spmc [%u]: %u\n", iteration, i);
				exit(EXIT_FAILURE);
			}
		}

		if (ck_pr_load_int(&leave) == 1)
			break;

		for (i = 1; i < writer_max; i++) {
			void *replace = (void *)(uintptr_t)i;
			if (ck_bag_set_spmc(&bag, (void *)(uintptr_t)i, replace) == false) {
				fprintf(stderr, "ERROR: set %ju != %ju",
						(uintmax_t)(uintptr_t)replace, (uintmax_t)i);
				exit(EXIT_FAILURE);
			}
		}

		for (i = writer_max; i > 0; i--) {
			if (ck_bag_member_spmc(&bag, (void *)(uintptr_t)i) == false) {
				fprintf(stderr, "ck_bag_member_spmc [%u]: %u\n", iteration, i);
				exit(EXIT_FAILURE);
			}

			if (ck_bag_remove_spmc(&bag, (void *)(uintptr_t)i) == false) {
				fprintf(stderr, "ck_bag_remove_spmc [%u]: %u\n", iteration, i);
				exit(EXIT_FAILURE);
			}
		}

		ck_epoch_write_end(&epoch_wr);
	}

	fprintf(stderr, "Writer %u iterations, %u writes per iteration.\n", iteration, writer_max);
	while (ck_pr_load_uint(&barrier) != NUM_READER_THREADS)
		ck_pr_stall();

	ck_pr_inc_uint(&barrier);
	return NULL;
}
Example #10
static void *
test(void *c)
{
#ifdef CK_F_FIFO_MPMC
	struct context *context = c;
	struct entry *entry;
	ck_fifo_mpmc_entry_t *fifo_entry, *garbage;
	int i, j;

	if (aff_iterate(&a)) {
		perror("ERROR: Could not affine thread");
		exit(EXIT_FAILURE);
	}

	ck_pr_inc_uint(&barrier);
	while (ck_pr_load_uint(&barrier) < (unsigned int)nthr);

	for (i = 0; i < ITERATIONS; i++) {
		for (j = 0; j < size; j++) {
			fifo_entry = malloc(sizeof(ck_fifo_mpmc_entry_t));
			entry = malloc(sizeof(struct entry));
			entry->tid = context->tid;
			ck_fifo_mpmc_enqueue(&fifo, fifo_entry, entry);
			if (ck_fifo_mpmc_dequeue(&fifo, &entry, &garbage) == false) {
				fprintf(stderr, "ERROR [%u] Queue should never be empty.\n", context->tid);
				exit(EXIT_FAILURE);
			}

			if (entry->tid < 0 || entry->tid >= nthr) {
				fprintf(stderr, "ERROR [%u] Incorrect value in entry.\n", entry->tid);
				exit(EXIT_FAILURE);
			}
		}
	}

	for (i = 0; i < ITERATIONS; i++) {
		for (j = 0; j < size; j++) {
			fifo_entry = malloc(sizeof(ck_fifo_mpmc_entry_t));
			entry = malloc(sizeof(struct entry));
			entry->tid = context->tid;
			while (ck_fifo_mpmc_tryenqueue(&fifo, fifo_entry, entry) == false)
				ck_pr_stall();

			while (ck_fifo_mpmc_trydequeue(&fifo, &entry, &garbage) == false)
				ck_pr_stall();

			if (entry->tid < 0 || entry->tid >= nthr) {
				fprintf(stderr, "ERROR [%u] Incorrect value in entry when using try interface.\n", entry->tid);
				exit(EXIT_FAILURE);
			}
		}
	}
#endif

	return (NULL);
}
Example #11
static CK_CC_INLINE void
rwlock_write_lock(rwlock_t *rw)
{

        ck_spinlock_fas_lock(&rw->writer);
        while (ck_pr_load_uint(&rw->readers) != 0)
                ck_pr_stall();

        return;
}
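The corresponding write-side unlock is again not shown; since writers serialize on the embedded spinlock, releasing it suffices. A sketch under that assumption:

static CK_CC_INLINE void
rwlock_write_unlock(rwlock_t *rw)
{

        ck_spinlock_fas_unlock(&rw->writer);
        return;
}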
Example #12
static void *
fairness(void *null)
{
	struct block *context = null;
	unsigned int i = context->tid;
	volatile int j;
	long int base;
	unsigned int core;
	CK_COHORT_INSTANCE(basic) *cohort;


	if (aff_iterate_core(&a, &core)) {
		perror("ERROR: Could not affine thread");
		exit(EXIT_FAILURE);
	}

	cohort = &((cohorts + (core / (int)(a.delta)) % n_cohorts)->cohort);

	while (ck_pr_load_uint(&ready) == 0);

	ck_pr_inc_uint(&barrier);
	while (ck_pr_load_uint(&barrier) != nthr);

	while (ck_pr_load_uint(&ready) != 0) {
		CK_COHORT_LOCK(basic, cohort, NULL, NULL);

		count[i].value++;
		if (critical) {
			base = common_lrand48() % critical;
			for (j = 0; j < base; j++);
		}

		CK_COHORT_UNLOCK(basic, cohort, NULL, NULL);
	}

	return NULL;
}
Example #13
static void *spin_and_count(void *ptr)
{
  struct counter_data *data = (struct counter_data*)ptr;
  ph_counter_block_t *block;
  uint32_t i;

  while (ck_pr_load_uint(&data->barrier) == 0);

  block = ph_counter_block_open(data->scope);

  for (i = 0; i < data->iters; i++) {
    ph_counter_block_add(block, data->slot, 1);
  }

  ph_counter_block_delref(block);

  return NULL;
}
Example #14
static void
ck_barrier_combining_aux(struct ck_barrier_combining *barrier,
    struct ck_barrier_combining_group *tnode,
    unsigned int sense)
{

	/*
	 * If this is the last thread in the group, it moves on to the parent group.
	 * Otherwise, it spins on this group's sense.
	 */
	if (ck_pr_faa_uint(&tnode->count, 1) == tnode->k - 1) {
		/*
		 * If this is the last thread entering the barrier for the
		 * current group, signal the parent group, if one exists.
		 */
		if (tnode->parent != NULL)
			ck_barrier_combining_aux(barrier, tnode->parent, sense);

		/*
		 * Once the thread returns from its parent(s), it reinitializes the group's
		 * arrival count and signals other threads to continue by flipping the group
		 * sense. Order of these operations is not important since we assume a static
		 * number of threads are members of a barrier for the lifetime of the barrier.
		 * Since count is explicitly reinitialized, it is guaranteed that at any point
		 * tnode->count is equivalent to tnode->k if and only if that many threads
		 * are at the barrier.
		 */
		ck_pr_store_uint(&tnode->count, 0);
		ck_pr_fence_store();
		ck_pr_store_uint(&tnode->sense, ~tnode->sense);
	} else {
		ck_pr_fence_memory();
		while (sense != ck_pr_load_uint(&tnode->sense))
			ck_pr_stall();
	}

	return;
}
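The public entry point that drives this helper is not shown. A sketch consistent with the sense-reversal pattern used by the other barriers in this section, assuming the per-thread state carries its group pointer and current sense:

void
ck_barrier_combining(struct ck_barrier_combining *barrier,
    struct ck_barrier_combining_state *state)
{

	ck_barrier_combining_aux(barrier, state->group, state->sense);

	/* Reverse the sense for the next barrier episode. */
	state->sense = ~state->sense;
	return;
}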
Example #15
static void *
thread_brlock(void *pun)
{
	uint64_t s_b, e_b, a, i;
	ck_brlock_reader_t r;
	uint64_t *value = pun;

	if (aff_iterate(&affinity) != 0) {
		perror("ERROR: Could not affine thread");
		exit(EXIT_FAILURE);
	}

	ck_brlock_read_register(&brlock, &r);
	ck_pr_inc_int(&barrier);
	while (ck_pr_load_int(&barrier) != threads)
		ck_pr_stall();

	for (i = 1, a = 0;; i++) {
		s_b = rdtsc();
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		e_b = rdtsc();

		a += (e_b - s_b) >> 4;

		if (ck_pr_load_uint(&flag) == 1)
			break;
	}

	ck_pr_inc_int(&barrier);
	while (ck_pr_load_int(&barrier) != threads * 2)
		ck_pr_stall();

	*value = (a / i);
	return NULL;
}
Example #16
static void *
thread(void *null)
{
	struct block *context = null;
	int i = ITERATE;
	unsigned int l;

	if (aff_iterate(&a)) {
		perror("ERROR: Could not affine thread");
		exit(EXIT_FAILURE);
	}

	if (context->tid == (unsigned int)nthr - 1)
		context->tid = sizeof(lock.readers) + 1;

	while (i--) {
		ck_bytelock_write_lock(&lock, context->tid);
		{
			l = ck_pr_load_uint(&locked);
			if (l != 0) {
				ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
			}

			ck_pr_inc_uint(&locked);
			ck_pr_inc_uint(&locked);
			ck_pr_inc_uint(&locked);
			ck_pr_inc_uint(&locked);
			ck_pr_inc_uint(&locked);
			ck_pr_inc_uint(&locked);
			ck_pr_inc_uint(&locked);
			ck_pr_inc_uint(&locked);

			l = ck_pr_load_uint(&locked);
			if (l != 8) {
				ck_error("ERROR [WR:%d]: %u != 2\n", __LINE__, l);
			}

			ck_pr_dec_uint(&locked);
			ck_pr_dec_uint(&locked);
			ck_pr_dec_uint(&locked);
			ck_pr_dec_uint(&locked);
			ck_pr_dec_uint(&locked);
			ck_pr_dec_uint(&locked);
			ck_pr_dec_uint(&locked);
			ck_pr_dec_uint(&locked);

			l = ck_pr_load_uint(&locked);
			if (l != 0) {
				ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
			}
		}
		ck_bytelock_write_unlock(&lock);

		ck_bytelock_read_lock(&lock, context->tid);
		{
			l = ck_pr_load_uint(&locked);
			if (l != 0) {
				ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
			}
		}
		ck_bytelock_read_unlock(&lock, context->tid);
	}

	return (NULL);
}
Example #17
static void *
thread_lock_rtm(void *pun)
{
	uint64_t s_b, e_b, a, i;
	uint64_t *value = pun;

	if (aff_iterate(&affinity) != 0) {
		perror("ERROR: Could not affine thread");
		exit(EXIT_FAILURE);
	}

	ck_pr_inc_int(&barrier);
	while (ck_pr_load_int(&barrier) != threads)
		ck_pr_stall();

	for (i = 1, a = 0;; i++) {
		s_b = rdtsc();
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		e_b = rdtsc();

		a += (e_b - s_b) >> 4;

		if (ck_pr_load_uint(&flag) == 1)
			break;
	}

	ck_pr_inc_int(&barrier);
	while (ck_pr_load_int(&barrier) != threads * 2)
		ck_pr_stall();

	*value = (a / i);
	return NULL;
}
Example #18
void
ck_barrier_tournament(struct ck_barrier_tournament *barrier,
                      struct ck_barrier_tournament_state *state)
{
	struct ck_barrier_tournament_round **rounds = ck_pr_load_ptr(&barrier->rounds);
	int round = 1;

	for (;; ++round) {
		switch (rounds[state->vpid][round].role) { /* MIGHT NEED TO USE CK_PR_LOAD*** */
		case CK_BARRIER_TOURNAMENT_BYE:
			break;
		case CK_BARRIER_TOURNAMENT_CHAMPION:
			/*
			 * The CK_BARRIER_TOURNAMENT_CHAMPION waits until it wins the tournament; it then
			 * sets the final flag before the wakeup phase of the barrier.
			 */
			while (ck_pr_load_uint(&rounds[state->vpid][round].flag) != state->sense)
				ck_pr_stall();

			ck_pr_store_uint(rounds[state->vpid][round].opponent, state->sense);
			goto wakeup;
		case CK_BARRIER_TOURNAMENT_DROPOUT:
			/* NOTREACHED */
			break;
		case CK_BARRIER_TOURNAMENT_LOSER:
			/*
			 * CK_BARRIER_TOURNAMENT_LOSERs set the flags of their opponents and wait until
			 * their opponents release them after the tournament is over.
			 */
			ck_pr_store_uint(rounds[state->vpid][round].opponent, state->sense);
			while (ck_pr_load_uint(&rounds[state->vpid][round].flag) != state->sense)
				ck_pr_stall();

			goto wakeup;
		case CK_BARRIER_TOURNAMENT_WINNER:
			/*
			 * CK_BARRIER_TOURNAMENT_WINNERs wait until their current opponent sets their flag; they then
			 * continue to the next round of the tournament.
			 */
			while (ck_pr_load_uint(&rounds[state->vpid][round].flag) != state->sense)
				ck_pr_stall();
			break;
		}
	}

wakeup:
	for (round -= 1 ;; --round) {
		switch (rounds[state->vpid][round].role) { /* MIGHT NEED TO USE CK_PR_LOAD*** */
		case CK_BARRIER_TOURNAMENT_BYE:
			break;
		case CK_BARRIER_TOURNAMENT_CHAMPION:
			/* NOTREACHED */
			break;
		case CK_BARRIER_TOURNAMENT_DROPOUT:
			goto leave;
		case CK_BARRIER_TOURNAMENT_LOSER:
			/* NOTREACHED */
			break;
		case CK_BARRIER_TOURNAMENT_WINNER:
			/* 
			 * Winners inform their old opponents the tournament is over
			 * by setting their flags.
			 */
			ck_pr_store_uint(rounds[state->vpid][round].opponent, state->sense);
			break;
		}
	}

leave:
	state->sense = ~state->sense;
	return;
}
Example #19
static void *
reader(void *arg)
{
	void *curr_ptr;
	intptr_t curr, prev, curr_max, prev_max;
	unsigned long long n_entries = 0, iterations = 0;
	ck_epoch_record_t epoch_record;
	ck_bag_iterator_t iterator;
	struct ck_bag_block *block = NULL;

	(void)arg;

	ck_epoch_register(&epoch_bag, &epoch_record);

	/*
	 * Check if entries within a block are sequential. Since ck_bag inserts
	 * newly occupied blocks at the beginning of the list, there is no ordering
	 * guarantee across the bag.
	 */
	for (;;) {
		ck_epoch_read_begin(&epoch_record);
		ck_bag_iterator_init(&iterator, &bag);
		curr_max = prev_max = prev = -1;
		block = NULL;

		while (ck_bag_next(&iterator, &curr_ptr) == true) {
			if (block != iterator.block) {
				prev = -1;
				curr = 0;
				prev_max = curr_max;
				curr_max = 0;
				block = iterator.block;
			}

			curr = (uintptr_t)(curr_ptr);
			if (curr < prev) {
				/* Ascending order within block violated */
				fprintf(stderr, "%p: %p: %ju\n", (void *)&epoch_record, (void *)iterator.block, (uintmax_t)curr);
				fprintf(stderr, "ERROR: %ju < %ju \n",
				    (uintmax_t)curr, (uintmax_t)prev);
				exit(EXIT_FAILURE);
			} else if (prev_max != -1 && curr > prev_max) {
				/* An entry in this block exceeds the previous block's maximum. */
				fprintf(stderr, "%p: %p: %ju\n", (void *)&epoch_record, (void *)iterator.block, (uintmax_t)curr);
				fprintf(stderr, "ERROR: %ju > prev_max: %ju\n",
				    (uintmax_t)curr, (uintmax_t)prev_max);
				exit(EXIT_FAILURE);
			}

			curr_max = curr;

			prev = curr;
			n_entries++;
		}

		ck_epoch_read_end(&epoch_record);

		iterations++;
		if (ck_pr_load_int(&leave) == 1)
			break;
	}

	fprintf(stderr, "Read %llu entries in %llu iterations.\n", n_entries, iterations);

	ck_pr_inc_uint(&barrier);
	while (ck_pr_load_uint(&barrier) != NUM_READER_THREADS + 1)
		ck_pr_stall();

	return NULL;
}
Example #20
int
main(int argc, char **argv)
{
	ck_hp_fifo_entry_t *stub;
	unsigned long element_count, i;
	pthread_t *thr;

	if (argc != 3) {
		ck_error("Usage: cktest <thread_count> <element_count>\n");
	}

	/* Get element count from argument. */
	element_count = atoi(argv[2]);

	/* Get thread count from argument. */
	thread_count = atoi(argv[1]);

	/* pthread handles. */
	thr = malloc(sizeof(pthread_t) * thread_count);

	/* Array of per-thread operation counts. */
	count = malloc(sizeof(unsigned long) * thread_count);

	/*
	 * Initialize global hazard-pointer safe memory reclamation to execute
	 * free() once a fifo_entry is safe to be deleted. The hazard-pointer
	 * scan routine is called once the thread-local reclamation list
	 * (plist) exceeds 100 entries.
	 */

	/* The FIFO queue needs two hazard pointers. */
	ck_hp_init(&fifo_hp, CK_HP_FIFO_SLOTS_COUNT, 100, destructor);

	/* The FIFO requires one stub entry on initialization. */
	stub = malloc(sizeof(ck_hp_fifo_entry_t));

	/* Behavior is undefined if stub is NULL. */
	if (stub == NULL)
		exit(EXIT_FAILURE);

	/* This is called once to initialize the fifo. */
	ck_hp_fifo_init(&fifo, stub);

	/* Create threads. */
	for (i = 0; i < thread_count; i++) {
		count[i] = (element_count + i) / thread_count;
		if (pthread_create(&thr[i], NULL, queue_50_50, (void *)&count[i]) != 0)
			exit(EXIT_FAILURE);
	}

	/* Start barrier. */
	ck_pr_inc_uint(&start_barrier);
	while (ck_pr_load_uint(&start_barrier) < thread_count + 1)
		ck_pr_stall();

	/* End barrier. */
	ck_pr_inc_uint(&end_barrier);
	while (ck_pr_load_uint(&end_barrier) < thread_count + 1)
		ck_pr_stall();

	/* Join threads. */
	for (i = 0; i < thread_count; i++)
		pthread_join(thr[i], NULL);

	return 0;
}
Example #21
int
fq_client_data_backlog(fq_client conn) {
  fq_conn_s *conn_s = conn;
  return ck_pr_load_uint(&conn_s->qlen);
}
/* Thread entry point: 50/50 enqueue/dequeue workload. */
static void *
queue_50_50(void *elements)
{
	struct entry *entry;
	ck_hp_fifo_entry_t *fifo_entry;
	ck_hp_record_t *record;
	void *slots;
	unsigned long j, element_count = *(unsigned long *)elements;
	unsigned int seed;

	record = malloc(sizeof(ck_hp_record_t));
	assert(record);

	slots = malloc(CK_HP_FIFO_SLOTS_SIZE);
	assert(slots);

	/* Fixed seed; a per-thread seed (e.g. pthread_self()) would vary the sequence. */
	seed = 1337;

	/*
	 * This subscribes the thread to the fifo_hp state using the
	 * thread-owned record. The FIFO queue needs two hazard pointers.
	 */
	ck_hp_register(&fifo_hp, record, slots);

	/* start barrier */
	ck_pr_inc_uint(&start_barrier);
	while (ck_pr_load_uint(&start_barrier) < thread_count + 1)
		ck_pr_stall();

	/* 50/50 enqueue-dequeue */
	for (j = 0; j < element_count; j++) {
		/* common_rand_r with thread-local state is thread-safe. */
		if (50 < (1 + (int)(100.0 * common_rand_r(&seed) / (RAND_MAX + 1.0)))) {
			/* This is the container for the enqueued data. */
			fifo_entry = malloc(sizeof(ck_hp_fifo_entry_t));
			if (fifo_entry == NULL)
				exit(EXIT_FAILURE);

			/* This is the data. */
			entry = malloc(sizeof(struct entry));
			if (entry != NULL)
				entry->value = j;

			/*
			 * Enqueue the value of the pointer entry into the FIFO
			 * queue using the container fifo_entry.
			 */
			ck_hp_fifo_enqueue_mpmc(record, &fifo, fifo_entry, entry);
		} else {
			/*
			 * ck_hp_fifo_dequeue_mpmc returns a pointer to the first
			 * unused node and stores the value of the first pointer
			 * in the FIFO queue in entry.
			 */
			fifo_entry = ck_hp_fifo_dequeue_mpmc(record, &fifo, &entry);
			if (fifo_entry != NULL) {
				/*
				 * Safely reclaim memory associated with fifo_entry.
				 * This inserts garbage into a local list; once the
				 * list (plist) reaches a length of 100, ck_hp_free
				 * will attempt to reclaim all references to objects
				 * on the list.
				 */
				ck_hp_free(record, &fifo_entry->hazard, fifo_entry, fifo_entry);
			}
		}
	}

	/* end barrier */
	ck_pr_inc_uint(&end_barrier);
	while (ck_pr_load_uint(&end_barrier) < thread_count + 1)
		ck_pr_stall();

	return NULL;
}