Example #1
void
ck_barrier_mcs(struct ck_barrier_mcs *barrier,
               struct ck_barrier_mcs_state *state)
{

	/*
	 * Wait until all children have reached the barrier and are done waiting
	 * for their children.
	 */
	while (ck_barrier_mcs_check_children(barrier[state->vpid].childnotready) == false)
		ck_pr_stall();

	/* Reinitialize for next barrier. */
	ck_barrier_mcs_reinitialize_children(&barrier[state->vpid]);

	/* Inform the parent that this thread and its children have arrived at the barrier. */
	ck_pr_store_uint(barrier[state->vpid].parent, 0);

	/* Wait until parent indicates all threads have arrived at the barrier. */
	if (state->vpid != 0) {
		while (ck_pr_load_uint(&barrier[state->vpid].parentsense) != state->sense)
			ck_pr_stall();
	}

	/* Inform children of successful barrier. */
	ck_pr_store_uint(barrier[state->vpid].children[0], state->sense);
	ck_pr_store_uint(barrier[state->vpid].children[1], state->sense);
	state->sense = ~state->sense;
	return;
}
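For reference, each participating thread must subscribe to the MCS barrier before its first rendezvous. A minimal setup sketch, assuming the ck_barrier_mcs_init and ck_barrier_mcs_subscribe interfaces from ck_barrier.h and a caller-supplied thread count nthr:

	ck_barrier_mcs_t *barrier;
	ck_barrier_mcs_state_t state;

	/* The barrier is an array with one slot per participating thread. */
	barrier = malloc(sizeof(ck_barrier_mcs_t) * nthr);
	if (barrier == NULL)
		exit(EXIT_FAILURE);

	ck_barrier_mcs_init(barrier, nthr);

	/* Each thread registers once, then calls ck_barrier_mcs() to rendezvous. */
	ck_barrier_mcs_subscribe(barrier, &state);
	ck_barrier_mcs(barrier, &state);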
Example #2
static void *
thread(CK_CC_UNUSED void *unused)
{
	ck_barrier_tournament_state_t state;
	int j, counter;
	int i = 0;

	aff_iterate(&a);
	ck_barrier_tournament_subscribe(&barrier, &state);

	ck_pr_inc_int(&barrier_wait);
	while (ck_pr_load_int(&barrier_wait) != nthr)
		ck_pr_stall();

	for (j = 0; j < ITERATE; j++) {
		i = j++ & (ENTRIES - 1);
		ck_pr_inc_int(&counters[i]);
		ck_barrier_tournament(&barrier, &state);
		counter = ck_pr_load_int(&counters[i]);
		if (counter != nthr * (j / ENTRIES + 1)) {
			fprintf(stderr, "FAILED [%d:%d]: %d != %d\n", i, j - 1, counter, nthr);
			exit(EXIT_FAILURE);
		}
	}

	ck_pr_inc_int(&barrier_wait);
	while (ck_pr_load_int(&barrier_wait) != nthr * 2)
		ck_pr_stall();

	return (NULL);
}
Example #3
static void *
test(void *c)
{
#ifdef CK_F_FIFO_MPMC
	struct context *context = c;
	struct entry *entry;
	ck_fifo_mpmc_entry_t *fifo_entry, *garbage;
	int i, j;

	if (aff_iterate(&a)) {
		perror("ERROR: Could not affine thread");
		exit(EXIT_FAILURE);
	}

	ck_pr_inc_uint(&barrier);
	while (ck_pr_load_uint(&barrier) < (unsigned int)nthr);

	for (i = 0; i < ITERATIONS; i++) {
		for (j = 0; j < size; j++) {
			fifo_entry = malloc(sizeof(ck_fifo_mpmc_entry_t));
			entry = malloc(sizeof(struct entry));
			entry->tid = context->tid;
			ck_fifo_mpmc_enqueue(&fifo, fifo_entry, entry);
			if (ck_fifo_mpmc_dequeue(&fifo, &entry, &garbage) == false) {
				fprintf(stderr, "ERROR [%u] Queue should never be empty.\n", context->tid);
				exit(EXIT_FAILURE);
			}

			if (entry->tid < 0 || entry->tid >= nthr) {
				fprintf(stderr, "ERROR [%u] Incorrect value in entry.\n", entry->tid);
				exit(EXIT_FAILURE);
			}
		}
	}

	for (i = 0; i < ITERATIONS; i++) {
		for (j = 0; j < size; j++) {
			fifo_entry = malloc(sizeof(ck_fifo_mpmc_entry_t));
			entry = malloc(sizeof(struct entry));
			entry->tid = context->tid;
			while (ck_fifo_mpmc_tryenqueue(&fifo, fifo_entry, entry) == false)
				ck_pr_stall();

			while (ck_fifo_mpmc_trydequeue(&fifo, &entry, &garbage) == false)
				ck_pr_stall();

			if (entry->tid < 0 || entry->tid >= nthr) {
				fprintf(stderr, "ERROR [%u] Incorrect value in entry when using try interface.\n", entry->tid);
				exit(EXIT_FAILURE);
			}
		}
	}
#endif

	return (NULL);
}
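Not shown above is the queue setup. A minimal sketch, assuming ck_fifo_mpmc_init and its requirement of one preallocated stub node:

	ck_fifo_mpmc_t fifo;
	ck_fifo_mpmc_entry_t *stub;

	/* The stub entry becomes the initial head/tail sentinel. */
	stub = malloc(sizeof(ck_fifo_mpmc_entry_t));
	if (stub == NULL)
		exit(EXIT_FAILURE);

	ck_fifo_mpmc_init(&fifo, stub);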
Example #4
static void *
thread(void *b)
{
    ck_barrier_mcs_t *barrier = b;
    ck_barrier_mcs_state_t state;
    int j, counter;
    int i = 0;

    aff_iterate(&a);

    ck_barrier_mcs_subscribe(barrier, &state);

    ck_pr_inc_int(&barrier_wait);
    while (ck_pr_load_int(&barrier_wait) != nthr)
        ck_pr_stall();

    for (j = 0; j < ITERATE; j++) {
        i = j++ & (ENTRIES - 1);
        ck_pr_inc_int(&counters[i]);
        ck_barrier_mcs(barrier, &state);
        counter = ck_pr_load_int(&counters[i]);
        if (counter != nthr * (j / ENTRIES + 1)) {
            ck_error("FAILED [%d:%d]: %d != %d\n", i, j - 1, counter, nthr);
        }
    }

    return (NULL);
}
Example #5
void
ck_barrier_dissemination(struct ck_barrier_dissemination *barrier,
			 struct ck_barrier_dissemination_state *state)
{
	unsigned int i;
	unsigned int size = barrier->size;

	for (i = 0; i < size; ++i) {
		/* Unblock current partner. */
		ck_pr_store_uint(barrier[state->tid].flags[state->parity][i].pflag, state->sense);

		/* Wait until some other thread unblocks this one. */
		while (ck_pr_load_uint(&barrier[state->tid].flags[state->parity][i].tflag) != state->sense)
			ck_pr_stall();
	}

	/*
	 * Dissemination barriers use two sets of flags to prevent race conditions
	 * between successive calls to the barrier. Parity indicates which set will
	 * be used for the next barrier. They also use a sense reversal technique 
	 * to avoid re-initialization of the flags for every two calls to the barrier.
	 */
	if (state->parity == 1)
		state->sense = ~state->sense;

	state->parity = 1 - state->parity;
	return;
}
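Setup requires a flag array per thread, sized by the number of rounds. A sketch, assuming the ck_barrier_dissemination_size, ck_barrier_dissemination_init and ck_barrier_dissemination_subscribe interfaces and a caller-supplied nthr:

	ck_barrier_dissemination_t *barrier;
	ck_barrier_dissemination_flag_t **flags;
	ck_barrier_dissemination_state_t state;
	unsigned int i, size;

	barrier = malloc(sizeof(ck_barrier_dissemination_t) * nthr);
	flags = malloc(sizeof(ck_barrier_dissemination_flag_t *) * nthr);

	/* size covers log2(nthr) rounds for both parity sets. */
	size = ck_barrier_dissemination_size(nthr);
	for (i = 0; i < nthr; i++)
		flags[i] = malloc(sizeof(ck_barrier_dissemination_flag_t) * size);

	ck_barrier_dissemination_init(barrier, flags, nthr);

	/* Per thread: */
	ck_barrier_dissemination_subscribe(barrier, &state);
	ck_barrier_dissemination(barrier, &state);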
Example #6
static void *
thread(void *group)
{
	ck_barrier_combining_state_t state = CK_BARRIER_COMBINING_STATE_INITIALIZER;
	int j, counter;
	int i = 0;

	aff_iterate(&a);

	ck_pr_inc_int(&barrier_wait);
	while (ck_pr_load_int(&barrier_wait) != (nthr * ngroups))
		ck_pr_stall();

	for (j = 0; j < ITERATE; j++) {
		i = j++ & (ENTRIES - 1);
		ck_pr_inc_int(&counters[i]);
		ck_barrier_combining(&barrier, group, &state);
		counter = ck_pr_load_int(&counters[i]);
		if (counter != nthr * ngroups * (j / ENTRIES + 1)) {
			fprintf(stderr, "FAILED [%d:%d]: %d != %d\n", i, j - 1, counter, nthr * ngroups);
			exit(EXIT_FAILURE);
		}
	}

	return (NULL);
}
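The tree of groups referenced by thread() must be built before the threads start. A setup sketch, assuming the ck_barrier_combining_init and ck_barrier_combining_group_init interfaces and nthr threads per group:

	ck_barrier_combining_t barrier;
	ck_barrier_combining_group_t *init_root, *group;

	init_root = malloc(sizeof(ck_barrier_combining_group_t));
	ck_barrier_combining_init(&barrier, init_root);

	/* One group per set of nthr cooperating threads; pass it to each thread. */
	group = malloc(sizeof(ck_barrier_combining_group_t));
	ck_barrier_combining_group_init(&barrier, group, nthr);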
Example #7
void
ck_barrier_centralized(struct ck_barrier_centralized *barrier,
    struct ck_barrier_centralized_state *state,
    unsigned int n_threads)
{
	unsigned int sense, value;

	/*
	 * Every execution context has a sense associated with it.
	 * This sense is reversed when the barrier is entered. Every
	 * thread will spin on the global sense until the last thread
	 * reverses it.
	 */
	sense = state->sense = ~state->sense;
	value = ck_pr_faa_uint(&barrier->value, 1);
	if (value == n_threads - 1) {
		ck_pr_store_uint(&barrier->value, 0);
		ck_pr_fence_memory();
		ck_pr_store_uint(&barrier->sense, sense);
		return;
	}

	ck_pr_fence_load();
	while (sense != ck_pr_load_uint(&barrier->sense))
		ck_pr_stall();

	ck_pr_fence_memory();
	return;
}
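Usage is the simplest of the barriers shown here, since all shared state lives in a single structure. A minimal sketch, assuming the static initializers exported by ck_barrier.h and a caller-supplied n_threads:

	static ck_barrier_centralized_t barrier = CK_BARRIER_CENTRALIZED_INITIALIZER;

	/* Per thread: the state tracks this thread's private sense. */
	ck_barrier_centralized_state_t state = CK_BARRIER_CENTRALIZED_STATE_INITIALIZER;
	ck_barrier_centralized(&barrier, &state, n_threads);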
Example #8
static void *
writer_thread(void *unused)
{
	unsigned int i;
	unsigned int iteration = 0;

	(void)unused;

	for (;;) {
		iteration++;
		ck_epoch_write_begin(&epoch_wr);
		for (i = 1; i <= writer_max; i++) {
			if (ck_bag_put_spmc(&bag, (void *)(uintptr_t)i) == false) {
				perror("ck_bag_put_spmc");
				exit(EXIT_FAILURE);
			}

			if (ck_bag_member_spmc(&bag, (void *)(uintptr_t)i) == false) {
				fprintf(stderr, "ck_bag_put_spmc [%u]: %u\n", iteration, i);
				exit(EXIT_FAILURE);
			}
		}

		if (ck_pr_load_int(&leave) == 1)
			break;

		for (i = 1; i < writer_max; i++) {
			void *replace = (void *)(uintptr_t)i;
			if (ck_bag_set_spmc(&bag, (void *)(uintptr_t)i, replace) == false) {
				fprintf(stderr, "ERROR: set %ju != %ju",
						(uintmax_t)(uintptr_t)replace, (uintmax_t)i);
				exit(EXIT_FAILURE);
			}
		}

		for (i = writer_max; i > 0; i--) {
			if (ck_bag_member_spmc(&bag, (void *)(uintptr_t)i) == false) {
				fprintf(stderr, "ck_bag_member_spmc [%u]: %u\n", iteration, i);
				exit(EXIT_FAILURE);
			}

			if (ck_bag_remove_spmc(&bag, (void *)(uintptr_t)i) == false) {
				fprintf(stderr, "ck_bag_remove_spmc [%u]: %u\n", iteration, i);
				exit(EXIT_FAILURE);
			}
		}

		ck_epoch_write_end(&epoch_wr);
	}

	fprintf(stderr, "Writer %u iterations, %u writes per iteration.\n", iteration, writer_max);
	while (ck_pr_load_uint(&barrier) != NUM_READER_THREADS)
		ck_pr_stall();

	ck_pr_inc_uint(&barrier);
	return NULL;
}
Example #9
static CK_CC_INLINE void
rwlock_write_lock(rwlock_t *rw)
{

        ck_spinlock_fas_lock(&rw->writer);
        while (ck_pr_load_uint(&rw->readers) != 0)
                ck_pr_stall();

        return;
}
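The matching release is a single unlock of the writer spinlock. A sketch, assuming the same rwlock_t layout used above (a ck_spinlock_fas_t writer plus an unsigned int readers counter):

static CK_CC_INLINE void
rwlock_write_unlock(rwlock_t *rw)
{

        ck_spinlock_fas_unlock(&rw->writer);
        return;
}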
Example #10
void
fq_client_publish(fq_client conn, fq_msg *msg) {
  fq_conn_s *conn_s = conn;
  ck_fifo_mpmc_entry_t *fifo_entry;
  while(conn_s->qlen > conn_s->qmaxlen) {
    if(conn_s->q_stall_time > 0) usleep(conn_s->q_stall_time);
    else ck_pr_stall();
  }
  fifo_entry = malloc(sizeof(ck_fifo_mpmc_entry_t));
  fq_msg_ref(msg);
  ck_fifo_mpmc_enqueue(&conn_s->q, fifo_entry, msg);
  ck_pr_inc_uint(&conn_s->qlen);
}
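fq's consumer loop is not shown here; the following is a hypothetical sketch of the matching drain path, assuming ck_fifo_mpmc_dequeue semantics (it stores the dequeued value and hands back the now-unused container node):

  fq_msg *msg;
  ck_fifo_mpmc_entry_t *garbage;

  while (ck_fifo_mpmc_dequeue(&conn_s->q, &msg, &garbage) == true) {
    ck_pr_dec_uint(&conn_s->qlen);
    /*
     * With multiple consumers, garbage must pass through safe memory
     * reclamation (epoch or hazard pointers) before it can be freed.
     */
    /* ... deliver msg, then drop the reference taken by fq_msg_ref() ... */
  }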
Example #11
static CK_CC_INLINE void
rwlock_read_lock(rwlock_t *rw)
{

        for (;;) {
                while (ck_pr_load_uint(&rw->writer.value) != 0)
                        ck_pr_stall();

                ck_pr_inc_uint(&rw->readers);
                if (ck_pr_load_uint(&rw->writer.value) == 0)
                        break;
                ck_pr_dec_uint(&rw->readers);
        }

        return;
}
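Readers leave by reversing their increment. A sketch under the same rwlock_t assumptions:

static CK_CC_INLINE void
rwlock_read_unlock(rwlock_t *rw)
{

        ck_pr_dec_uint(&rw->readers);
        return;
}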
Example #12
static void
ck_barrier_combining_aux(struct ck_barrier_combining *barrier,
    struct ck_barrier_combining_group *tnode,
    unsigned int sense)
{

	/*
	 * If this is the last thread in the group, it moves on to the parent group.
	 * Otherwise, it spins on this group's sense.
	 */
	if (ck_pr_faa_uint(&tnode->count, 1) == tnode->k - 1) {
		/*
		 * If we are the last thread entering the barrier for the
		 * current group, signal the parent group if one exists.
		 */
		if (tnode->parent != NULL)
			ck_barrier_combining_aux(barrier, tnode->parent, sense);

		/*
		 * Once the thread returns from its parent(s), it reinitializes the group's
		 * arrival count and signals other threads to continue by flipping the group
		 * sense. Order of these operations is not important since we assume a static
		 * number of threads are members of a barrier for the lifetime of the barrier.
		 * Since count is explicitly reinitialized, it is guaranteed that at any point
		 * tnode->count is equivalent to tnode->k if and only if that many threads
		 * are at the barrier.
		 */
		ck_pr_store_uint(&tnode->count, 0);
		ck_pr_fence_store();
		ck_pr_store_uint(&tnode->sense, ~tnode->sense);
	} else {
		ck_pr_fence_memory();
		while (sense != ck_pr_load_uint(&tnode->sense))
			ck_pr_stall();
	}

	return;
}
Example #13
/* function for thread */
static void *
queue_50_50(void *elements)
{
	struct entry *entry;
	ck_hp_fifo_entry_t *fifo_entry;
	ck_hp_record_t *record;
	void *slots;
	unsigned long j, element_count = *(unsigned long *)elements;
	unsigned int seed;

	record = malloc(sizeof(ck_hp_record_t));
	assert(record);
	
	slots = malloc(CK_HP_FIFO_SLOTS_SIZE);
	assert(slots);
	
	/* Different seed for each thread. */
	seed = 1337; /* (unsigned int)pthread_self(); */

	/*
	 * This subscribes the thread to the fifo_hp state using the
	 * thread-owned record. The FIFO queue needs two hazard pointers.
	 */
	ck_hp_register(&fifo_hp, record, slots);

	/* start barrier */
	ck_pr_inc_uint(&start_barrier);
	while (ck_pr_load_uint(&start_barrier) < thread_count + 1)
		ck_pr_stall();

	/* 50/50 enqueue-dequeue */
	for (j = 0; j < element_count; j++) {
		/* rand_r with thread-local state should be thread safe */
		if (50 < (1 + (int)(100.0 * common_rand_r(&seed) / (RAND_MAX + 1.0)))) {
			/* This is the container for the enqueued data. */
			fifo_entry = malloc(sizeof(ck_hp_fifo_entry_t));
			if (fifo_entry == NULL) {
				exit(EXIT_FAILURE);
			}

			/* This is the data. */
			entry = malloc(sizeof(struct entry));
			if (entry == NULL) {
				exit(EXIT_FAILURE);
			}
			entry->value = j;

			/*
			 * Enqueue the value of the pointer entry into the FIFO
			 * queue using the container fifo_entry.
			 */
			ck_hp_fifo_enqueue_mpmc(record, &fifo, fifo_entry, entry);
		} else {
			/*
			 * ck_hp_fifo_dequeue_mpmc returns a pointer to the first
			 * unused node and stores the value of the first pointer
			 * in the FIFO queue in entry.
			 */
			fifo_entry = ck_hp_fifo_dequeue_mpmc(record, &fifo, &entry);
			if (fifo_entry != NULL) {
				/*
				 * Safely reclaim memory associated with fifo_entry.
				 * This inserts garbage into a local list. Once the
				 * list (plist) reaches a length of 100, ck_hp_free
				 * will attempt to reclaim all references to objects
				 * on the list.
				 */
				ck_hp_free(record, &fifo_entry->hazard, fifo_entry, fifo_entry);
			}
		}
	}

	/* end barrier */
	ck_pr_inc_uint(&end_barrier);
	while (ck_pr_load_uint(&end_barrier) < thread_count + 1)
		ck_pr_stall();

	return NULL;
}
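The fifo_hp subscription above assumes the hazard-pointer state and the queue were initialized once at startup. A sketch, assuming ck_hp_init from ck_hp.h and ck_hp_fifo_init from ck_hp_fifo.h (the threshold of 100 mirrors the comment in the dequeue path):

	ck_hp_fifo_entry_t *stub;

	/* Two hazard pointers per thread (CK_HP_FIFO_SLOTS_COUNT); free() reclaims. */
	ck_hp_init(&fifo_hp, CK_HP_FIFO_SLOTS_COUNT, 100, free);

	/* The HP FIFO also requires a preallocated stub node. */
	stub = malloc(sizeof(ck_hp_fifo_entry_t));
	if (stub == NULL)
		exit(EXIT_FAILURE);

	ck_hp_fifo_init(&fifo, stub);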
Example #14
static void *
thread_brlock(void *pun)
{
	uint64_t s_b, e_b, a, i;
	ck_brlock_reader_t r;
	uint64_t *value = pun;

	if (aff_iterate(&affinity) != 0) {
		perror("ERROR: Could not affine thread");
		exit(EXIT_FAILURE);
	}

	ck_brlock_read_register(&brlock, &r);
	ck_pr_inc_int(&barrier);
	while (ck_pr_load_int(&barrier) != threads)
		ck_pr_stall();

	for (i = 1, a = 0;; i++) {
		s_b = rdtsc();
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		e_b = rdtsc();

		a += (e_b - s_b) >> 4;

		if (ck_pr_load_uint(&flag) == 1)
			break;
	}

	ck_pr_inc_int(&barrier);
	while (ck_pr_load_int(&barrier) != threads * 2)
		ck_pr_stall();

	*value = (a / i);
	return NULL;
}
Example #15
void
ck_barrier_tournament(struct ck_barrier_tournament *barrier,
                      struct ck_barrier_tournament_state *state)
{
	struct ck_barrier_tournament_round **rounds = ck_pr_load_ptr(&barrier->rounds);
	int round = 1;

	for (;; ++round) {
		switch (rounds[state->vpid][round].role) { // MIGHT NEED TO USE CK_PR_LOAD***
		case CK_BARRIER_TOURNAMENT_BYE:
			break;
		case CK_BARRIER_TOURNAMENT_CHAMPION:
			/*
			 * The CK_BARRIER_TOURNAMENT_CHAMPION waits until it wins the tournament; it then
			 * sets the final flag before the wakeup phase of the barrier.
			 */
			while (ck_pr_load_uint(&rounds[state->vpid][round].flag) != state->sense)
				ck_pr_stall();

			ck_pr_store_uint(rounds[state->vpid][round].opponent, state->sense);
			goto wakeup;
		case CK_BARRIER_TOURNAMENT_DROPOUT:
			/* NOTREACHED */
			break;
		case CK_BARRIER_TOURNAMENT_LOSER:
			/*
			 * CK_BARRIER_TOURNAMENT_LOSERs set the flags of their opponents and wait until
			 * their opponents release them after the tournament is over.
			 */
			ck_pr_store_uint(rounds[state->vpid][round].opponent, state->sense);
			while (ck_pr_load_uint(&rounds[state->vpid][round].flag) != state->sense)
				ck_pr_stall();

			goto wakeup;
		case CK_BARRIER_TOURNAMENT_WINNER:
			/*
			 * CK_BARRIER_TOURNAMENT_WINNERs wait until their current opponent sets their flag; they then
			 * continue to the next round of the tournament.
			 */
			while (ck_pr_load_uint(&rounds[state->vpid][round].flag) != state->sense)
				ck_pr_stall();
			break;
		}
	}

wakeup:
	for (round -= 1;; --round) {
		switch (rounds[state->vpid][round].role) { // MIGHT NEED TO USE CK_PR_LOAD***
		case CK_BARRIER_TOURNAMENT_BYE:
			break;
		case CK_BARRIER_TOURNAMENT_CHAMPION:
			/* NOTREACHED */
			break;
		case CK_BARRIER_TOURNAMENT_DROPOUT:
			goto leave;
			break;
		case CK_BARRIER_TOURNAMENT_LOSER:
			/* NOTREACHED */
			break;
		case CK_BARRIER_TOURNAMENT_WINNER:
			/* 
			 * Winners inform their old opponents the tournament is over
			 * by setting their flags.
			 */
			ck_pr_store_uint(rounds[state->vpid][round].opponent, state->sense);
			break;
		}
	}

leave:
	state->sense = ~state->sense;
	return;
}
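Tournament barrier setup allocates a matrix of rounds per thread. A sketch, assuming the ck_barrier_tournament_size, ck_barrier_tournament_init and ck_barrier_tournament_subscribe interfaces and a caller-supplied nthr:

	ck_barrier_tournament_t barrier;
	ck_barrier_tournament_round_t **rounds;
	ck_barrier_tournament_state_t state;
	unsigned int i, size;

	rounds = malloc(sizeof(ck_barrier_tournament_round_t *) * nthr);

	/* The number of rounds grows logarithmically with the thread count. */
	size = ck_barrier_tournament_size(nthr);
	for (i = 0; i < nthr; i++)
		rounds[i] = malloc(sizeof(ck_barrier_tournament_round_t) * size);

	ck_barrier_tournament_init(&barrier, rounds, nthr);

	/* Per thread: */
	ck_barrier_tournament_subscribe(&barrier, &state);
	ck_barrier_tournament(&barrier, &state);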
Example #16
static void *
thread_lock_rtm(void *pun)
{
	uint64_t s_b, e_b, a, i;
	uint64_t *value = pun;

	if (aff_iterate(&affinity) != 0) {
		perror("ERROR: Could not affine thread");
		exit(EXIT_FAILURE);
	}

	ck_pr_inc_int(&barrier);
	while (ck_pr_load_int(&barrier) != threads)
		ck_pr_stall();

	for (i = 1, a = 0;; i++) {
		s_b = rdtsc();
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		e_b = rdtsc();

		a += (e_b - s_b) >> 4;

		if (ck_pr_load_uint(&flag) == 1)
			break;
	}

	ck_pr_inc_int(&barrier);
	while (ck_pr_load_int(&barrier) != threads * 2)
		ck_pr_stall();

	*value = (a / i);
	return NULL;
}
Example #17
int
main(int argc, char *argv[])
{
	int i, r;
	unsigned long l;
	pthread_t *thread;
	ck_ring_buffer_t *buffer;

	if (argc != 4) {
		ck_error("Usage: validate <threads> <affinity delta> <size>\n");
	}

	a.request = 0;
	a.delta = atoi(argv[2]);

	nthr = atoi(argv[1]);
	assert(nthr >= 1);

	size = atoi(argv[3]);
	assert(size >= 4 && (size & (size - 1)) == 0);
	size -= 1;

	ring = malloc(sizeof(ck_ring_t) * nthr);
	assert(ring);

	_context = malloc(sizeof(*_context) * nthr);
	assert(_context);

	thread = malloc(sizeof(pthread_t) * nthr);
	assert(thread);

	fprintf(stderr, "SPSC test:");
	for (i = 0; i < nthr; i++) {
		_context[i].tid = i;
		if (i == 0) {
			_context[i].previous = nthr - 1;
			_context[i].next = i + 1;
		} else if (i == nthr - 1) {
			_context[i].next = 0;
			_context[i].previous = i - 1;
		} else {
			_context[i].next = i + 1;
			_context[i].previous = i - 1;
		}

		buffer = malloc(sizeof(ck_ring_buffer_t) * (size + 1));
		assert(buffer);
		memset(buffer, 0, sizeof(ck_ring_buffer_t) * (size + 1));
		_context[i].buffer = buffer;
		ck_ring_init(ring + i, size + 1);
		r = pthread_create(thread + i, NULL, test, _context + i);
		assert(r == 0);
	}

	for (i = 0; i < nthr; i++)
		pthread_join(thread[i], NULL);

	fprintf(stderr, " done\n");

	fprintf(stderr, "SPMC test:\n");
	buffer = malloc(sizeof(ck_ring_buffer_t) * (size + 1));
	assert(buffer);
	memset(buffer, 0, sizeof(void *) * (size + 1));
	ck_ring_init(&ring_spmc, size + 1);
	for (i = 0; i < nthr - 1; i++) {
		_context[i].buffer = buffer;
		r = pthread_create(thread + i, NULL, test_spmc, _context + i);
		assert(r == 0);
	}

	for (l = 0; l < (unsigned long)size * ITERATIONS * (nthr - 1) ; l++) {
		struct entry *entry = malloc(sizeof *entry);

		assert(entry != NULL);
		entry->value_long = l;
		entry->value = (int)l;
		entry->tid = (int)l;
		entry->magic = 0xdead;
		entry->ref = 0;

		/* Wait until queue is not full. */
		if (l & 1) {
			while (ck_ring_enqueue_spmc(&ring_spmc,
			    buffer,
			    entry) == false)
				ck_pr_stall();
		} else {
			unsigned int s;

			while (ck_ring_enqueue_spmc_size(&ring_spmc,
			    buffer, entry, &s) == false) {
				ck_pr_stall();
			}

			if ((int)s >= (size * ITERATIONS * (nthr - 1))) {
				ck_error("MPMC: Unexpected size of %u\n", s);
			}
		}
	}

	for (i = 0; i < nthr - 1; i++)
		pthread_join(thread[i], NULL);

	return (0);
}
Example #18
static void *
test_spmc(void *c)
{
	unsigned int observed = 0;
	unsigned long previous = 0;
	unsigned int seed = 0;
	int i, k, j, tid;
	struct context *context = c;
	ck_ring_buffer_t *buffer;

	buffer = context->buffer;
	if (aff_iterate(&a)) {
		perror("ERROR: Could not affine thread");
		exit(EXIT_FAILURE);
	}

	tid = ck_pr_faa_int(&eb, 1);
	ck_pr_fence_memory();
	while (ck_pr_load_int(&eb) != nthr - 1);

	for (i = 0; i < ITERATIONS; i++) {
		for (j = 0; j < size; j++) {
			struct entry *o;
			int spin;

			/* Keep trying until we encounter at least one node. */
			if (j & 1) {
				while (ck_ring_dequeue_spmc(&ring_spmc, buffer,
				    &o) == false);
			} else {
				while (ck_ring_trydequeue_spmc(&ring_spmc, buffer,
				    &o) == false);
			}

			observed++;
			if (o->value < 0
			    || o->value != o->tid
			    || o->magic != 0xdead
			    || (previous != 0 && previous >= o->value_long)) {
				ck_error("[0x%p] (%x) (%d, %d) >< (0, %d)\n",
					(void *)o, o->magic, o->tid, o->value, size);
			}

			o->magic = 0xbeef;
			o->value = -31337;
			o->tid = -31338;
			previous = o->value_long;

			if (ck_pr_faa_uint(&o->ref, 1) != 0) {
				ck_error("[%p] We dequeued twice.\n", (void *)o);
			}

			if ((i % 4) == 0) {
				spin = common_rand_r(&seed) % 16384;
				for (k = 0; k < spin; k++) {
					ck_pr_stall();
				}
			}

			free(o);
		}
	}

	fprintf(stderr, "[%d] Observed %u\n", tid, observed);
	return NULL;
}
Example #19
static void *
reader(void *arg)
{
	void *curr_ptr;
	intptr_t curr, prev, curr_max, prev_max;
	unsigned long long n_entries = 0, iterations = 0;
	ck_epoch_record_t epoch_record;
	ck_bag_iterator_t iterator;
	struct ck_bag_block *block = NULL;

	(void)arg;

	ck_epoch_register(&epoch_bag, &epoch_record);

	/*
	 * Check if entries within a block are sequential. Since ck_bag inserts
	 * newly occupied blocks at the beginning of the list, there is no ordering
	 * guarantee across the bag.
	 */
	for (;;) {
		ck_epoch_read_begin(&epoch_record);
		ck_bag_iterator_init(&iterator, &bag);
		curr_max = prev_max = prev = -1;
		block = NULL;

		while (ck_bag_next(&iterator, &curr_ptr) == true) {
			if (block != iterator.block) {
				prev = -1;
				curr = 0;
				prev_max = curr_max;
				curr_max = 0;
				block = iterator.block;
			}

			curr = (uintptr_t)(curr_ptr);
			if (curr < prev) {
				/* Ascending order within block violated */
				fprintf(stderr, "%p: %p: %ju\n", (void *)&epoch_record, (void *)iterator.block, (uintmax_t)curr);
				fprintf(stderr, "ERROR: %ju < %ju \n",
				    (uintmax_t)curr, (uintmax_t)prev);
				exit(EXIT_FAILURE);
			} else if (prev_max != -1 && curr > prev_max) {
				/* Max of prev block > max of current block */
				fprintf(stderr, "%p: %p: %ju\n", (void *)&epoch_record, (void *)iterator.block, (uintmax_t)curr);
				fprintf(stderr, "ERROR: %ju > prev_max: %ju\n",
				    (uintmax_t)curr, (uintmax_t)prev_max);
				exit(EXIT_FAILURE);
			}

			curr_max = curr;

			prev = curr;
			n_entries++;
		}

		ck_epoch_read_end(&epoch_record);

		iterations++;
		if (ck_pr_load_int(&leave) == 1)
			break;
	}

	fprintf(stderr, "Read %llu entries in %llu iterations.\n", n_entries, iterations);

	ck_pr_inc_uint(&barrier);
	while (ck_pr_load_uint(&barrier) != NUM_READER_THREADS + 1)
		ck_pr_stall();

	return NULL;
}
Example #20
static void *
test(void *c)
{
	struct context *context = c;
	struct entry *entry;
	ck_hp_fifo_entry_t *fifo_entry;
	ck_hp_record_t record;
	int i, j;

	if (aff_iterate(&a)) {
		perror("ERROR: Could not affine thread");
		exit(EXIT_FAILURE);
	}

	ck_hp_register(&fifo_hp, &record, malloc(sizeof(void *) * 2));
	ck_pr_inc_uint(&barrier);
	while (ck_pr_load_uint(&barrier) < (unsigned int)nthr);

	for (i = 0; i < ITERATIONS; i++) {
		for (j = 0; j < size; j++) {
			fifo_entry = malloc(sizeof(ck_hp_fifo_entry_t));
			entry = malloc(sizeof(struct entry));
			entry->tid = context->tid;
			ck_hp_fifo_enqueue_mpmc(&record, &fifo, fifo_entry, entry);

			fifo_entry = ck_hp_fifo_dequeue_mpmc(&record, &fifo, &entry);
			if (fifo_entry == NULL) {
				fprintf(stderr, "ERROR [%u] Queue should never be empty.\n", context->tid);
				exit(EXIT_FAILURE);
			}

			if (entry->tid < 0 || entry->tid >= nthr) {
				fprintf(stderr, "ERROR [%u] Incorrect value in entry.\n", entry->tid);
				exit(EXIT_FAILURE);
			}

			ck_hp_free(&record, &fifo_entry->hazard, fifo_entry, fifo_entry);
		}
	}

	for (i = 0; i < ITERATIONS; i++) {
		for (j = 0; j < size; j++) {
			fifo_entry = malloc(sizeof(ck_hp_fifo_entry_t));
			entry = malloc(sizeof(struct entry));
			entry->tid = context->tid;

			while (ck_hp_fifo_tryenqueue_mpmc(&record, &fifo, fifo_entry, entry) == false)
				ck_pr_stall();

			while (fifo_entry = ck_hp_fifo_trydequeue_mpmc(&record, &fifo, &entry), fifo_entry == NULL)
				ck_pr_stall();

			if (entry->tid < 0 || entry->tid >= nthr) {
				fprintf(stderr, "ERROR [%u] Incorrect value in entry.\n", entry->tid);
				exit(EXIT_FAILURE);
			}

			ck_hp_free(&record, &fifo_entry->hazard, fifo_entry, fifo_entry);
		}
	}

	ck_pr_inc_uint(&e_barrier);
	while (ck_pr_load_uint(&e_barrier) < (unsigned int)nthr);

	return (NULL);
}