Example no. 1
bool
ck_array_put(struct ck_array *array, void *value)
{
	struct _ck_array *target;
	unsigned int size;

	/*
	 * If no transaction copy has been necessary, attempt to do in-place
	 * modification of the array.
	 */
	if (array->transaction == NULL) {
		target = array->active;

		if (array->n_entries == target->length) {
			size = target->length << 1;

			target = array->allocator->realloc(target,
			    sizeof(struct _ck_array) + sizeof(void *) * array->n_entries,
			    sizeof(struct _ck_array) + sizeof(void *) * size,
			    true);

			if (target == NULL)
				return false;

			ck_pr_store_uint(&target->length, size);

			/* Serialize with respect to contents. */
			ck_pr_fence_store();
			ck_pr_store_ptr(&array->active, target);
		}

		target->values[array->n_entries++] = value;
		return true;
	}

	target = array->transaction;
	if (array->n_entries == target->length) {
		size = target->length << 1;

		target = array->allocator->realloc(target,
		    sizeof(struct _ck_array) + sizeof(void *) * array->n_entries,
		    sizeof(struct _ck_array) + sizeof(void *) * size,
		    true);

		if (target == NULL)
			return false;

		target->length = size;
		array->transaction = target;
	}

	target->values[array->n_entries++] = value;
	return true;
}
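
A minimal writer-side sketch for the routine above, assuming a libc-backed struct ck_malloc. The wrapper names are placeholders, and a real SPMC deployment would route the deferred reallocation and free through safe memory reclamation instead of plain libc calls.

#include <ck_array.h>
#include <stdbool.h>
#include <stdlib.h>

/* Placeholder allocator wrappers in the ck_malloc shape ck expects.
 * The trailing bool requests a deferred (reader-safe) operation; these toy
 * versions ignore it, which is only safe while no readers are concurrent. */
static void *my_malloc(size_t size) { return malloc(size); }
static void *my_realloc(void *p, size_t old_size, size_t new_size, bool defer)
{
	(void)old_size; (void)defer;
	return realloc(p, new_size);
}
static void my_free(void *p, size_t size, bool defer)
{
	(void)size; (void)defer;
	free(p);
}

static struct ck_malloc my_allocator = {
	.malloc = my_malloc,
	.realloc = my_realloc,
	.free = my_free
};

/* Single writer: stage a value, then make it visible to readers. */
static bool publish(ck_array_t *array, void *value)
{
	if (ck_array_put(array, value) == false)
		return false;

	return ck_array_commit(array);
}

/* Setup: ck_array_init(&array, CK_ARRAY_MODE_SPMC, &my_allocator, 8); */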
Example no. 2
bool
ck_array_remove(struct ck_array *array, void *value)
{
	struct _ck_array *target;
	unsigned int i;

	if (array->transaction != NULL) {
		target = array->transaction;

		for (i = 0; i < array->n_entries; i++) {
			if (target->values[i] == value) {
				target->values[i] = target->values[--array->n_entries];
				return true;
			}
		}

		return false;
	}

	target = array->active;

	for (i = 0; i < array->n_entries; i++) {
		if (target->values[i] == value)
			break;
	}

	if (i == array->n_entries)
		return false;

	/* If there are pending additions, immediately eliminate the operation. */
	if (target->n_committed != array->n_entries) {
		ck_pr_store_ptr(&target->values[i], target->values[--array->n_entries]);
		return true;
	}

	/*
	 * The assumption is that these allocations are small to begin with.
	 * If there is no immediate opportunity for transaction, allocate a
	 * transactional array which will be applied upon commit time.
	 */
	target = ck_array_create(array->allocator, array->n_entries);
	if (target == NULL)
		return false;

	memcpy(target->values, array->active->values, sizeof(void *) * array->n_entries);
	target->length = array->n_entries;
	target->n_committed = array->n_entries;
	target->values[i] = target->values[--array->n_entries];

	array->transaction = target;
	return true;
}
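
Removal follows the same two-phase discipline: deleting an entry that readers can already see forces the transactional copy built above, and the deletion only reaches readers at commit time. A short sketch, assuming the array from the previous example:

#include <ck_array.h>
#include <stdbool.h>

/* Returns false if the value is absent or the transaction copy could not
 * be allocated; on success the change is published immediately. */
static bool unpublish(ck_array_t *array, void *value)
{
	if (ck_array_remove(array, value) == false)
		return false;

	return ck_array_commit(array);
}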
Example no. 3
bool
ck_hs_reset_size(struct ck_hs *hs, unsigned long capacity)
{
	struct ck_hs_map *map, *previous;

	previous = hs->map;
	map = ck_hs_map_create(hs, capacity);
	if (map == NULL)
		return false;

	ck_pr_store_ptr(&hs->map, map);
	ck_hs_map_destroy(hs->m, previous, true);
	return true;
}
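
ck_hs_reset_size swaps in a freshly allocated, empty map of the requested capacity and hands the previous map to ck_hs_map_destroy with deferral requested, so it both empties the set and releases the old bucket array. A hedged initialization sketch with illustrative string-key callbacks (hs_hash and hs_compare are invented names):

#include <ck_hs.h>
#include <stdbool.h>
#include <string.h>

/* Illustrative callbacks for a string-keyed set. */
static unsigned long hs_hash(const void *object, unsigned long seed)
{
	const unsigned char *s = object;
	unsigned long h = seed;

	while (*s != '\0')
		h = h * 131 + *s++;

	return h;
}

static bool hs_compare(const void *a, const void *b)
{
	return strcmp(a, b) == 0;
}

/* Empty the set and return it to a small footprint after a burst of work.
 * Assumes hs was initialized along the lines of:
 *   ck_hs_init(&hs, CK_HS_MODE_OBJECT | CK_HS_MODE_SPMC,
 *       hs_hash, hs_compare, &my_allocator, 64, 0x9e3779b9UL);
 */
static bool shrink_and_clear(ck_hs_t *hs)
{
	return ck_hs_reset_size(hs, 16);
}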
Example no. 4
void
ck_barrier_tournament_init(struct ck_barrier_tournament *barrier,
			   struct ck_barrier_tournament_round **rounds,
			   unsigned int nthr)
{
	unsigned int i, k, size, twok, twokm1, imod2k;

	ck_pr_store_uint(&barrier->tid, 0);
	size = ck_barrier_tournament_size(nthr);

	for (i = 0; i < nthr; ++i) {
		/* The first role is always CK_BARRIER_TOURNAMENT_DROPOUT. */
		rounds[i][0].flag = 0;
		rounds[i][0].role = CK_BARRIER_TOURNAMENT_DROPOUT;
		for (k = 1, twok = 2, twokm1 = 1; k < size; ++k, twokm1 = twok, twok <<= 1) {
			rounds[i][k].flag = 0;

			imod2k = i & (twok - 1);
			if (imod2k == 0) {
				if ((i + twokm1 < nthr) && (twok < nthr))
					rounds[i][k].role = CK_BARRIER_TOURNAMENT_WINNER;
				else if (i + twokm1 >= nthr)
					rounds[i][k].role = CK_BARRIER_TOURNAMENT_BYE;
			}

			if (imod2k == twokm1)
				rounds[i][k].role = CK_BARRIER_TOURNAMENT_LOSER;
			else if ((i == 0) && (twok >= nthr))
				rounds[i][k].role = CK_BARRIER_TOURNAMENT_CHAMPION;

			if (rounds[i][k].role == CK_BARRIER_TOURNAMENT_LOSER)
				rounds[i][k].opponent = &rounds[i - twokm1][k].flag;
			else if (rounds[i][k].role == CK_BARRIER_TOURNAMENT_WINNER ||
				 rounds[i][k].role == CK_BARRIER_TOURNAMENT_CHAMPION)
				rounds[i][k].opponent = &rounds[i + twokm1][k].flag;
		}
	}

	ck_pr_store_ptr(&barrier->rounds, rounds);
	return;
}
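
The init routine expects one row of rounds per thread, each row holding ck_barrier_tournament_size(nthr) entries. A minimal allocation sketch under that assumption, with error handling kept deliberately thin:

#include <ck_barrier.h>
#include <stdlib.h>

static struct ck_barrier_tournament_round **
rounds_create(unsigned int nthr)
{
	unsigned int i, size = ck_barrier_tournament_size(nthr);
	struct ck_barrier_tournament_round **rounds;

	rounds = malloc(sizeof(*rounds) * nthr);
	if (rounds == NULL)
		return NULL;

	for (i = 0; i < nthr; i++) {
		rounds[i] = malloc(sizeof(**rounds) * size);
		if (rounds[i] == NULL)
			return NULL; /* earlier rows leak; acceptable for a sketch */
	}

	return rounds;
}

/* Usage: ck_barrier_tournament_init(&barrier, rounds_create(nthr), nthr); */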
Example no. 5
static void *ph_thread_boot(void *arg)
{
  struct ph_thread_boot_data data;
  ph_thread_t *me;
  void *retval;

  /* copy in the boot data from the stack of our creator */
  memcpy(&data, arg, sizeof(data));

  me = ph_thread_init_myself(true);

  /* this publishes that we're ready to run to
   * the thread that spawned us */
  ck_pr_store_ptr(data.thr, ck_pr_load_ptr(&me));
  ck_pr_fence_store();

  retval = data.func(data.arg);
  ck_epoch_barrier(&me->epoch_record);

  return retval;
}
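
The boot data lives on the creator's stack, so the new thread has to copy it out and publish its ph_thread_t before the creator may return. A hypothetical spawner illustrating that handshake; the structure fields simply mirror the uses of data.thr, data.func and data.arg above, and the real libphenom code may differ.

#include <pthread.h>
#include <ck_pr.h>
#include <phenom/thread.h> /* assumed header for ph_thread_t */

/* Hypothetical mirror of the boot data consumed by ph_thread_boot. */
struct ph_thread_boot_data {
  ph_thread_t **thr;
  void *(*func)(void *);
  void *arg;
};

static ph_thread_t *spawn(pthread_t *tid, void *(*func)(void *), void *arg)
{
  struct ph_thread_boot_data data;
  ph_thread_t *thr = NULL;

  data.thr = &thr;
  data.func = func;
  data.arg = arg;

  if (pthread_create(tid, NULL, ph_thread_boot, &data) != 0)
    return NULL;

  /* data is about to go out of scope: spin until the child has copied it
   * and published its thread record through data.thr. */
  while (ck_pr_load_ptr(&thr) == NULL)
    ck_pr_stall();

  return thr;
}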
Example no. 6
bool
ck_array_commit(ck_array_t *array)
{
	struct _ck_array *m = array->transaction;

	if (m != NULL) {
		struct _ck_array *p;

		m->n_committed = array->n_entries;
		ck_pr_fence_store();
		p = array->active;
		ck_pr_store_ptr(&array->active, m);
		array->allocator->free(p, sizeof(struct _ck_array) +
		    p->length * sizeof(void *), true);
		array->transaction = NULL;

		return true;
	}

	ck_pr_fence_store();
	ck_pr_store_uint(&array->active->n_committed, array->n_entries);
	return true;
}
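
Commit is what moves staged changes into the readers' view: a pending transaction array is published with ck_pr_store_ptr, otherwise only n_committed is advanced to expose in-place appends. On the reader side the committed snapshot is walked with CK_ARRAY_FOREACH; a short sketch:

#include <ck_array.h>

/* Readers only ever observe entries up to the committed count. */
static unsigned int count_entries(ck_array_t *array)
{
	ck_array_iterator_t iterator;
	unsigned int n = 0;
	void *entry;

	CK_ARRAY_FOREACH(array, &iterator, &entry) {
		n++;
	}

	return n;
}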
Example no. 7
/*
 * Replace prev_entry with new entry if exists, otherwise insert into bag.
 */
bool
ck_bag_set_spmc(struct ck_bag *bag, void *compare, void *update)
{
	struct ck_bag_block *cursor;
	uint16_t block_index;
	uint16_t n_entries_block = 0;

	cursor = bag->head;
	while (cursor != NULL) {
		n_entries_block = ck_bag_block_count(cursor);
		for (block_index = 0; block_index < n_entries_block; block_index++) {
			if (cursor->array[block_index] != compare)
				continue;

			ck_pr_store_ptr(&cursor->array[block_index], update);
			return true;
		}

		cursor = ck_bag_block_next(cursor->next.ptr);
	}

	return ck_bag_put_spmc(bag, update);
}
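
ck_bag_set_spmc behaves as a single-writer upsert: it swaps update in for compare if the bag still holds it and otherwise falls back to ck_bag_put_spmc. A thin hedged wrapper, assuming the bag was initialized elsewhere and that the replaced object is reclaimed through safe memory reclamation, since readers may still hold a reference to it:

#include <ck_bag.h>
#include <stdbool.h>

static bool upsert(struct ck_bag *bag, void *old_version, void *new_version)
{
	/* Replace-or-insert; either way the change is immediately visible. */
	return ck_bag_set_spmc(bag, old_version, new_version);
}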
Example no. 8
bool
ck_hs_grow(struct ck_hs *hs,
    unsigned long capacity)
{
	struct ck_hs_map *map, *update;
	void **bucket, *previous;
	unsigned long k, i, j, offset, probes;

restart:
	map = hs->map;

	if (map->capacity > capacity)
		return false;

	update = ck_hs_map_create(hs, capacity);
	if (update == NULL)
		return false;

	for (k = 0; k < map->capacity; k++) {
		unsigned long h;

		previous = map->entries[k];
		if (previous == CK_HS_EMPTY || previous == CK_HS_TOMBSTONE)
			continue;

#ifdef CK_HS_PP
		if (hs->mode & CK_HS_MODE_OBJECT)
			previous = (void *)((uintptr_t)previous & (((uintptr_t)1 << CK_MD_VMA_BITS) - 1));
#endif

		h = hs->hf(previous, hs->seed);
		offset = h & update->mask;
		i = probes = 0;

		for (;;) {
			bucket = (void *)((uintptr_t)&update->entries[offset] & ~(CK_MD_CACHELINE - 1));

			for (j = 0; j < CK_HS_PROBE_L1; j++) {
				void **cursor = bucket + ((j + offset) & (CK_HS_PROBE_L1 - 1));

				if (probes++ == update->probe_limit)
					break;

				if (CK_CC_LIKELY(*cursor == CK_HS_EMPTY)) {
					*cursor = map->entries[k];
					update->n_entries++;

					if (probes > update->probe_maximum)
						update->probe_maximum = probes;

					break;
				}
			}

			if (j < CK_HS_PROBE_L1)
				break;

			offset = ck_hs_map_probe_next(update, offset, h, i++, probes);
		}

		if (probes > update->probe_limit) {
			/*
			 * We have hit the probe limit, map needs to be even larger.
			 */
			ck_hs_map_destroy(hs->m, update, false);
			capacity <<= 1;
			goto restart;
		}
	}

	ck_pr_fence_store();
	ck_pr_store_ptr(&hs->map, update);
	ck_hs_map_destroy(hs->m, map, true);
	return true;
}
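
ck_hs_grow builds a larger map, rehashes every live entry into it (restarting with twice the capacity if the probe limit is hit), and only then publishes the new map behind a store fence; it returns false when the current map is already larger than the requested capacity, so it is cheap to call speculatively. A hedged bulk-load sketch reusing the hs_hash callback assumed in the earlier ck_hs example:

#include <ck_hs.h>
#include <stdbool.h>

static bool bulk_insert(ck_hs_t *hs, const char **keys, unsigned long n)
{
	unsigned long i;

	/* Pre-size once instead of growing repeatedly during the load. */
	ck_hs_grow(hs, n * 2);

	for (i = 0; i < n; i++) {
		unsigned long h = CK_HS_HASH(hs, hs_hash, keys[i]);

		if (ck_hs_put(hs, h, keys[i]) == false)
			return false;
	}

	return true;
}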
Example no. 9
static inline void
set_nodes(as_cluster* cluster, as_nodes* nodes)
{
	ck_pr_fence_store();
	ck_pr_store_ptr(&cluster->nodes, nodes);
}
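
set_nodes is the usual publication pattern: the store fence orders initialization of the node list before the pointer store that makes it reachable. A conservative reader-side counterpart, hedged in that the real Aerospike client pairs this with reference counting rather than a bare load:

static inline as_nodes *
get_nodes(as_cluster *cluster)
{
	as_nodes *nodes = ck_pr_load_ptr(&cluster->nodes);

	/* Pair the writer's store fence with a load fence before the node
	 * list's contents are examined. */
	ck_pr_fence_load();
	return nodes;
}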
Example no. 10
File: ck_rhs.c Project: RaHus/ck
bool
ck_rhs_grow(struct ck_rhs *hs,
    unsigned long capacity)
{
	struct ck_rhs_map *map, *update;
	void *previous, *prev_saved;
	unsigned long k, offset, probes;

restart:
	map = hs->map;
	if (map->capacity > capacity)
		return false;

	update = ck_rhs_map_create(hs, capacity);
	if (update == NULL)
		return false;

	for (k = 0; k < map->capacity; k++) {
		unsigned long h;

		prev_saved = previous = ck_rhs_entry(map, k);
		if (previous == CK_RHS_EMPTY)
			continue;

#ifdef CK_RHS_PP
		if (hs->mode & CK_RHS_MODE_OBJECT)
			previous = CK_RHS_VMA(previous);
#endif

		h = hs->hf(previous, hs->seed);
		offset = h & update->mask;
		probes = 0;

		for (;;) {
			void **cursor = ck_rhs_entry_addr(update, offset);

			if (probes++ == update->probe_limit) {
				/*
				 * We have hit the probe limit, map needs to be even larger.
				 */
				ck_rhs_map_destroy(hs->m, update, false);
				capacity <<= 1;
				goto restart;
			}

			if (CK_CC_LIKELY(*cursor == CK_RHS_EMPTY)) {
				*cursor = prev_saved;
				update->n_entries++;
				ck_rhs_set_probes(update, offset, probes);
				ck_rhs_map_bound_set(update, h, probes);
				break;
			} else if (ck_rhs_probes(update, offset) < probes) {
				void *tmp = prev_saved;
				unsigned int old_probes;
				prev_saved = previous = *cursor;
#ifdef CK_RHS_PP
				if (hs->mode & CK_RHS_MODE_OBJECT)
					previous = CK_RHS_VMA(previous);
#endif
				*cursor = tmp;
				ck_rhs_map_bound_set(update, h, probes);
				h = hs->hf(previous, hs->seed);
				old_probes = ck_rhs_probes(update, offset);
				ck_rhs_set_probes(update, offset, probes);
				probes = old_probes - 1;
				continue;
			}
			ck_rhs_wanted_inc(update, offset);
			offset = ck_rhs_map_probe_next(update, offset, probes);
		}

	}

	ck_pr_fence_store();
	ck_pr_store_ptr(&hs->map, update);
	ck_rhs_map_destroy(hs->m, map, true);
	return true;
}
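
The branch taken when ck_rhs_probes(update, offset) < probes is the Robin Hood displacement rule: an incoming element that has already probed further evicts the "richer" resident, which then continues probing with its own count. A simplified, self-contained illustration of that rule over a linear-probe table of integers (not the library's actual layout):

#include <stddef.h>

#define TOY_SLOTS 8 /* power of two, so the mask below works */

struct toy_slot {
	int key;
	int probes; /* displacement from the home slot; 0 means empty */
};

/* Caller must guarantee that at least one empty slot remains. */
static void toy_rh_insert(struct toy_slot table[TOY_SLOTS], int key, unsigned int hash)
{
	size_t offset = hash & (TOY_SLOTS - 1);
	int probes = 0;

	for (;;) {
		probes++;

		if (table[offset].probes == 0) {
			table[offset].key = key;
			table[offset].probes = probes;
			return;
		}

		if (table[offset].probes < probes) {
			/* The resident is closer to home than we are: swap, and
			 * carry on inserting the displaced key with its count. */
			int evicted_key = table[offset].key;
			int evicted_probes = table[offset].probes;

			table[offset].key = key;
			table[offset].probes = probes;
			key = evicted_key;
			probes = evicted_probes;
		}

		offset = (offset + 1) & (TOY_SLOTS - 1);
	}
}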
Example no. 11
bool
ck_bag_remove_spmc(struct ck_bag *bag, void *entry)
{
	struct ck_bag_block *cursor, *copy, *prev;
	uint16_t block_index, n_entries;

	cursor = bag->head;
	prev = NULL;
	while (cursor != NULL) {
		n_entries = ck_bag_block_count(cursor);

		for (block_index = 0; block_index < n_entries; block_index++) {
			if (cursor->array[block_index] == entry)
				goto found;

		}

		prev = cursor;
		cursor = ck_bag_block_next(cursor->next.ptr);
	}

	return true;

found:
	/* Cursor points to containing block, block_index is index of deletion */
	if (n_entries == 1) {
		/* If a block's single entry is being removed, remove the block. */
		if (prev == NULL) {
			struct ck_bag_block *new_head = ck_bag_block_next(cursor->next.ptr);
			ck_pr_store_ptr(&bag->head, new_head);
		} else {
			uintptr_t next;
#ifdef __x86_64__
			next = ((uintptr_t)prev->next.ptr & (CK_BAG_BLOCK_ENTRIES_MASK)) |
				(uintptr_t)(void *)ck_bag_block_next(cursor->next.ptr);
#else
			next = (uintptr_t)(void *)cursor->next.ptr;
#endif
			ck_pr_store_ptr(&prev->next.ptr, (struct ck_bag_block *)next);
		}

		/* Remove block from available list */
		if (cursor->avail_prev != NULL)
			cursor->avail_prev->avail_next = cursor->avail_next;

		if (cursor->avail_next != NULL)
			cursor->avail_next->avail_prev = cursor->avail_prev;

		bag->n_blocks--;
		copy = cursor->avail_next;
	} else {
		uintptr_t next_ptr;

		copy = allocator.malloc(bag->info.bytes);
		if (copy == NULL)
			return false;

		memcpy(copy, cursor, bag->info.bytes);
		copy->array[block_index] = copy->array[--n_entries];

		next_ptr = (uintptr_t)(void *)ck_bag_block_next(copy->next.ptr);
#ifdef __x86_64__
		copy->next.ptr = (void *)(((uintptr_t)n_entries << 48) | next_ptr);
#else
		copy->next.n_entries = n_entries;
		copy->next.ptr = (struct ck_bag_block *)next_ptr;
#endif

		ck_pr_fence_store();

		if (prev == NULL) {
			ck_pr_store_ptr(&bag->head, copy);
		} else {
#ifdef __x86_64__
			uintptr_t next = ((uintptr_t)prev->next.ptr & (CK_BAG_BLOCK_ENTRIES_MASK)) |
				(uintptr_t)(void *)ck_bag_block_next(copy);
			ck_pr_store_ptr(&prev->next.ptr, (struct ck_bag_block *)next);
#else
			ck_pr_store_ptr(&prev->next.ptr, copy);
#endif
		}

		if (n_entries == bag->info.max - 1) {
			/* Block was previously full, add to head of avail. list */
			copy->avail_next = bag->avail_head;
			copy->avail_prev = NULL;
			bag->avail_head = copy;
		}

	}

	/* Update available list. */
	if (bag->avail_head == cursor)
		bag->avail_head = copy;

	if (bag->avail_tail == cursor)
		bag->avail_tail = copy;

	allocator.free(cursor, sizeof(struct ck_bag_block), true);
	ck_pr_store_uint(&bag->n_entries, bag->n_entries - 1);
	return true;
}
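
The __x86_64__ branches above exploit the fact that user-space pointers only occupy the low 48 bits of the address space, so a block's entry count can ride in the top 16 bits of next.ptr and both fields can be republished with one atomic pointer store. A standalone sketch of that packing, using invented toy_* helper names:

#include <stdint.h>

#define TOY_ENTRIES_SHIFT 48
#define TOY_PTR_MASK (((uintptr_t)1 << TOY_ENTRIES_SHIFT) - 1)

static inline uintptr_t toy_pack(void *next, uint16_t n_entries)
{
	return ((uintptr_t)n_entries << TOY_ENTRIES_SHIFT) |
	    ((uintptr_t)next & TOY_PTR_MASK);
}

static inline void *toy_next(uintptr_t packed)
{
	return (void *)(packed & TOY_PTR_MASK);
}

static inline uint16_t toy_count(uintptr_t packed)
{
	return (uint16_t)(packed >> TOY_ENTRIES_SHIFT);
}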
Example no. 12
bool
ck_bag_put_spmc(struct ck_bag *bag, void *entry)
{
	struct ck_bag_block *cursor, *new_block, *new_block_prev, *new_tail;
	uint16_t n_entries_block;
	size_t blocks_alloc, i;
	uintptr_t next = 0;

	new_block = new_block_prev = new_tail = NULL;

	/*
	 * Blocks with available entries are stored in
	 * the bag's available list.
	 */
	cursor = bag->avail_head;
	if (cursor != NULL) {
		n_entries_block = ck_bag_block_count(cursor);
	} else {
		/* The bag is full, allocate a new set of blocks */
		if (bag->alloc_strat == CK_BAG_ALLOCATE_GEOMETRIC)
			blocks_alloc = (bag->n_blocks + 1) << 1;
		else
			blocks_alloc = 1;

		for (i = 0; i < blocks_alloc-1; i++) {
			new_block = allocator.malloc(bag->info.bytes);

			if (new_block == NULL)
				return false;

			/*
			 * First node is the tail of the Bag.
			 * Second node is the new tail of the Available list.
			 */
			if (i == 0)
				new_tail = new_block;

#ifndef __x86_64__
			new_block->next.n_entries = 0;
#endif

			new_block->next.ptr = new_block_prev;
			new_block->avail_next = new_block_prev;
			if (new_block_prev != NULL)
				new_block_prev->avail_prev = new_block;

			new_block_prev = new_block;
		}

		/*
		 * Insert entry into last allocated block.
		 * cursor is new head of available list.
		 */
		cursor = allocator.malloc(bag->info.bytes);
		if (cursor == NULL)
			return false;

		cursor->avail_next = new_block;
		cursor->avail_prev = NULL;
		if (new_block != NULL)
			new_block->avail_prev = cursor;
		n_entries_block = 0;
		bag->n_blocks += blocks_alloc; /* n_blocks and n_avail_blocks? */
	}

	/* Update the available list */
	if (new_block != NULL) {
		if (bag->avail_tail != NULL) {
			cursor->avail_prev = bag->avail_tail;
			bag->avail_tail->avail_next = cursor;
		} else {
			/* Available list was previously empty */
			bag->avail_head = cursor;
		}

		bag->avail_tail = new_tail;
	} else if (n_entries_block == bag->info.max - 1) {
		/* New entry will fill up block, remove from avail list */
		if (cursor->avail_prev != NULL)
			cursor->avail_prev->avail_next = cursor->avail_next;

		if (cursor->avail_next != NULL)
			cursor->avail_next->avail_prev = cursor->avail_prev;

		if (bag->avail_head == cursor)
			bag->avail_head = cursor->avail_next;

		if (bag->avail_tail == cursor)
			bag->avail_tail = cursor->avail_prev;

		/* For debugging purposes */
		cursor->avail_next = NULL;
		cursor->avail_prev = NULL;
	}

	/* Update array and block->n_entries */
	cursor->array[n_entries_block++] = entry;
	ck_pr_fence_store();

#ifdef __x86_64__
	next = ((uintptr_t)n_entries_block << 48);
#endif

	/* Update bag's list */
	if (n_entries_block == 1) {

		if (bag->head != NULL) {
#ifdef __x86_64__
			next += ((uintptr_t)(void *)ck_bag_block_next(bag->head));
#else
			next = (uintptr_t)(void *)ck_bag_block_next(bag->head);
#endif
		}

#ifndef __x86_64__
		ck_pr_store_ptr(&cursor->next.n_entries, (void *)(uintptr_t)n_entries_block);
#endif

		ck_pr_store_ptr(&cursor->next.ptr, (void *)next);
		ck_pr_store_ptr(&bag->head, cursor);
	} else {

#ifdef __x86_64__
		next += ((uintptr_t)(void *)ck_bag_block_next(cursor->next.ptr));
		ck_pr_store_ptr(&cursor->next, (void *)next);
#else
		ck_pr_store_ptr(&cursor->next.n_entries, (void *)(uintptr_t)n_entries_block);
#endif

	}

	ck_pr_store_uint(&bag->n_entries, bag->n_entries + 1);
	return true;
}
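
Like the other *_spmc routines on this page, ck_bag_put_spmc assumes a single writer; the atomic stores only protect concurrent readers. When several threads may insert, the writer side must be serialized externally, for example with a caller-owned mutex (a hedged sketch, assuming the bag was initialized elsewhere):

#include <ck_bag.h>
#include <pthread.h>
#include <stdbool.h>

static bool locked_put(pthread_mutex_t *bag_lock, struct ck_bag *bag, void *entry)
{
	bool ok;

	pthread_mutex_lock(bag_lock);
	ok = ck_bag_put_spmc(bag, entry);
	pthread_mutex_unlock(bag_lock);

	return ok;
}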