Example #1
static inline as_seeds*
swap_seeds(as_cluster* cluster, as_seeds* seeds)
{
	ck_pr_fence_store();
	as_seeds* old = ck_pr_fas_ptr(&cluster->seeds, seeds);
	ck_pr_fence_store();
	ck_pr_inc_32(&cluster->version);
	return old;
}
Example #2
static inline as_addr_maps*
swap_ip_map(as_cluster* cluster, as_addr_maps* ip_map)
{
	ck_pr_fence_store();
	as_addr_maps* old = ck_pr_fas_ptr(&cluster->ip_map, ip_map);
	ck_pr_fence_store();
	ck_pr_inc_32(&cluster->version);
	return old;
}
Example #3
void
as_cluster_set_async_max_conns_per_node(as_cluster* cluster, uint32_t async_size, uint32_t pipe_size)
{
	// Note: This setting only affects pools in new nodes.  Existing node pools are not changed.
	cluster->async_max_conns_per_node = async_size;
	cluster->pipe_max_conns_per_node = pipe_size;
	ck_pr_fence_store();
	ck_pr_inc_32(&cluster->version);
}
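
The three snippets above are the writer half of one pattern: build the new object, issue ck_pr_fence_store() so its contents are globally visible, then publish it (here by swapping a pointer and bumping a version counter). Below is a minimal, self-contained sketch of the full pairing using only ck_pr primitives; the config structure and field names are illustrative and are not part of the Aerospike client.

#include <ck_pr.h>

struct config {
	int value;
};

static struct config *current;	/* shared: written by one thread, read by many */

/* Writer: initialize the new object, fence, then publish the pointer. */
static void
publish(struct config *fresh)
{
	fresh->value = 42;
	ck_pr_fence_store();			/* contents become visible before the pointer */
	ck_pr_store_ptr(&current, fresh);
}

/* Reader: load the pointer, fence, then read through it. */
static int
consume(void)
{
	struct config *snapshot = ck_pr_load_ptr(&current);

	ck_pr_fence_load();			/* pointer is read before its contents */
	return snapshot->value;
}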
Example #4
bool
ck_array_put(struct ck_array *array, void *value)
{
	struct _ck_array *target;
	unsigned int size;

	/*
	 * If no transaction copy has been necessary, attempt to do in-place
	 * modification of the array.
	 */
	if (array->transaction == NULL) {
		target = array->active;

		if (array->n_entries == target->length) {
			size = target->length << 1;

			target = array->allocator->realloc(target,
			    sizeof(struct _ck_array) + sizeof(void *) * array->n_entries,
			    sizeof(struct _ck_array) + sizeof(void *) * size,
			    true);

			if (target == NULL)
				return false;

			ck_pr_store_uint(&target->length, size);

			/* Serialize with respect to contents. */
			ck_pr_fence_store();
			ck_pr_store_ptr(&array->active, target);
		}

		target->values[array->n_entries++] = value;
		return true;
	}

	target = array->transaction;
	if (array->n_entries == target->length) {
		size = target->length << 1;

		target = array->allocator->realloc(target,
		    sizeof(struct _ck_array) + sizeof(void *) * array->n_entries,
		    sizeof(struct _ck_array) + sizeof(void *) * size,
		    true);

		if (target == NULL)
			return false;

		target->length = size;
		array->transaction = target;
	}

	target->values[array->n_entries++] = value;
	return true;
}
Example #5
int
main(void)
{
	int r = 0;

	/* Below serves as a marker. */
	ck_pr_sub_int(&r, 31337);

	/*
	 * This is a simple test to help ensure all fences compile (or crash)
	 * on the target. The fences below are generated according to the
	 * underlying memory model's ordering.
	 */
	ck_pr_fence_atomic();
	ck_pr_fence_atomic_store();
	ck_pr_fence_atomic_load();
	ck_pr_fence_store_atomic();
	ck_pr_fence_load_atomic();
	ck_pr_fence_load();
	ck_pr_fence_load_store();
	ck_pr_fence_store();
	ck_pr_fence_store_load();
	ck_pr_fence_memory();
	ck_pr_fence_release();
	ck_pr_fence_acquire();
	ck_pr_fence_acqrel();
	ck_pr_fence_lock();
	ck_pr_fence_unlock();

	/* Below serves as a marker. */
	ck_pr_sub_int(&r, 31337);

	/* The following are generated assuming RMO. */
	ck_pr_fence_strict_atomic();
	ck_pr_fence_strict_atomic_store();
	ck_pr_fence_strict_atomic_load();
	ck_pr_fence_strict_store_atomic();
	ck_pr_fence_strict_load_atomic();
	ck_pr_fence_strict_load();
	ck_pr_fence_strict_load_store();
	ck_pr_fence_strict_store();
	ck_pr_fence_strict_store_load();
	ck_pr_fence_strict_memory();
	ck_pr_fence_strict_release();
	ck_pr_fence_strict_acquire();
	ck_pr_fence_strict_acqrel();
	ck_pr_fence_strict_lock();
	ck_pr_fence_strict_unlock();
	return 0;
}
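
The first block exercises the fences that are emitted only when the underlying memory model requires them; the strict variants in the second block are always emitted. As a rough illustration of where the acquire/release pair from the list above typically sits, here is a sketch of a trivial test-and-set spinlock (not part of the test program):

#include <ck_pr.h>

static unsigned int lock_word;	/* 0 = unlocked, 1 = held */

static void
lock(void)
{
	while (ck_pr_fas_uint(&lock_word, 1) != 0)
		ck_pr_stall();

	ck_pr_fence_acquire();	/* critical section cannot float above the acquisition */
}

static void
unlock(void)
{
	ck_pr_fence_release();	/* critical section cannot float below the release */
	ck_pr_store_uint(&lock_word, 0);
}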
Example #6
bool
ck_array_commit(ck_array_t *array)
{
	struct _ck_array *m = array->transaction;

	if (m != NULL) {
		struct _ck_array *p;

		m->n_committed = array->n_entries;
		ck_pr_fence_store();
		p = array->active;
		ck_pr_store_ptr(&array->active, m);
		array->allocator->free(p, sizeof(struct _ck_array) +
		    p->length * sizeof(void *), true);
		array->transaction = NULL;

		return true;
	}

	ck_pr_fence_store();
	ck_pr_store_uint(&array->active->n_committed, array->n_entries);
	return true;
}
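
ck_array_put and ck_array_commit (examples 4 and 6) are the single-writer half of the SPMC array: puts accumulate either in place or in a transaction copy, and commit publishes the new committed length (or the new backing array) behind a store fence. A rough usage sketch, assuming a ck_malloc wrapper around the standard allocator and the iteration interface documented in ck_array(3); treat the details as illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <ck_array.h>

static void *my_malloc(size_t size) { return malloc(size); }
static void *my_realloc(void *p, size_t old, size_t new, bool defer) { (void)old; (void)defer; return realloc(p, new); }
static void my_free(void *p, size_t size, bool defer) { (void)size; (void)defer; free(p); }

static struct ck_malloc allocator = {
	.malloc = my_malloc,
	.realloc = my_realloc,
	.free = my_free
};

int
main(void)
{
	ck_array_t array;
	ck_array_iterator_t iterator;
	void *entry;
	int x = 1, y = 2;

	if (ck_array_init(&array, CK_ARRAY_MODE_SPMC, &allocator, 4) == false)
		return 1;

	/* Writer side: stage entries, then make them visible with one commit. */
	ck_array_put(&array, &x);
	ck_array_put(&array, &y);
	ck_array_commit(&array);

	/* Reader side: iteration only ever observes committed entries. */
	CK_ARRAY_FOREACH(&array, &iterator, &entry)
		printf("%d\n", *(int *)entry);

	return 0;
}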
Example #7
static void *ph_thread_boot(void *arg)
{
  struct ph_thread_boot_data data;
  ph_thread_t *me;
  void *retval;

  /* copy in the boot data from the stack of our creator */
  memcpy(&data, arg, sizeof(data));

  me = ph_thread_init_myself(true);

  /* this publishes that we're ready to run to
   * the thread that spawned us */
  ck_pr_store_ptr(data.thr, ck_pr_load_ptr(&me));
  ck_pr_fence_store();

  retval = data.func(data.arg);
  ck_epoch_barrier(&me->epoch_record);

  return retval;
}
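
Here ck_pr_store_ptr() hands the freshly initialized ph_thread_t back to the spawning thread through data.thr. The spawner's side of the handshake is presumably a spin on the same slot; a minimal illustrative sketch of such a wait (not taken from libphenom):

#include <ck_pr.h>

/* Spin until the new thread publishes its handle into *slot.
 * The spawner is expected to zero *slot before creating the thread. */
static void *
wait_for_publication(void **slot)
{
	void *p;

	while ((p = ck_pr_load_ptr(slot)) == NULL)
		ck_pr_stall();

	ck_pr_fence_load();	/* order the pointer load before any use of the handle */
	return p;
}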
Example #8
static void
ck_barrier_combining_aux(struct ck_barrier_combining *barrier,
    struct ck_barrier_combining_group *tnode,
    unsigned int sense)
{

	/*
	 * If this is the last thread in the group, it moves on to the parent group.
	 * Otherwise, it spins on this group's sense.
	 */
	if (ck_pr_faa_uint(&tnode->count, 1) == tnode->k - 1) {
		/*
		 * If we are the last thread entering the barrier for the
		 * current group, signal the parent group if one exists.
		 */
		if (tnode->parent != NULL)
			ck_barrier_combining_aux(barrier, tnode->parent, sense);

		/*
		 * Once the thread returns from its parent(s), it reinitializes the group's
		 * arrival count and signals other threads to continue by flipping the group
		 * sense. Order of these operations is not important since we assume a static
		 * number of threads are members of a barrier for the lifetime of the barrier.
		 * Since count is explicitly reinitialized, it is guaranteed that at any point
		 * tnode->count is equivalent to tnode->k if and only if that many threads
		 * are at the barrier.
		 */
		ck_pr_store_uint(&tnode->count, 0);
		ck_pr_fence_store();
		ck_pr_store_uint(&tnode->sense, ~tnode->sense);
	} else {
		ck_pr_fence_memory();
		while (sense != ck_pr_load_uint(&tnode->sense))
			ck_pr_stall();
	}

	return;
}
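
The ordering that matters in the signalling branch above is count-reset, store fence, sense flip: waiters released by the sense flip must not observe a stale arrival count. The same sense-reversal idea in its flattest form, for a fixed group of n threads, built only from the ck_pr primitives already used above (a sketch, not the ck_barrier API):

#include <ck_pr.h>

static unsigned int count;	/* arrivals in the current round */
static unsigned int sense;	/* flipped once per round */

/* Each thread keeps its own local_sense, initially 0, and passes it by address. */
static void
flat_barrier(unsigned int n, unsigned int *local_sense)
{
	*local_sense = ~*local_sense;

	if (ck_pr_faa_uint(&count, 1) == n - 1) {
		/* Last arrival: reset the count, then release the waiters. */
		ck_pr_store_uint(&count, 0);
		ck_pr_fence_store();
		ck_pr_store_uint(&sense, *local_sense);
	} else {
		ck_pr_fence_memory();
		while (ck_pr_load_uint(&sense) != *local_sense)
			ck_pr_stall();
	}
}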
Example #9
static struct ck_hs_map *
ck_hs_map_create(struct ck_hs *hs, unsigned long entries)
{
	struct ck_hs_map *map;
	unsigned long size, n_entries, limit;

	n_entries = ck_internal_power_2(entries);
	size = sizeof(struct ck_hs_map) + (sizeof(void *) * n_entries + CK_MD_CACHELINE - 1);

	map = hs->m->malloc(size);
	if (map == NULL)
		return NULL;

	map->size = size;

	/* We should probably use a more intelligent heuristic for default probe length. */
	limit = ck_internal_max(n_entries >> (CK_HS_PROBE_L1_SHIFT + 2), CK_HS_PROBE_L1_DEFAULT);
	if (limit > UINT_MAX)
		limit = UINT_MAX;

	map->probe_limit = (unsigned int)limit;
	map->probe_maximum = 0;
	map->capacity = n_entries;
	map->step = ck_internal_bsf(n_entries);
	map->mask = n_entries - 1;
	map->n_entries = 0;

	/* Align map allocation to cache line. */
	map->entries = (void *)(((uintptr_t)(map + 1) + CK_MD_CACHELINE - 1) & ~(CK_MD_CACHELINE - 1));
	memset(map->entries, 0, sizeof(void *) * n_entries);
	memset(map->generation, 0, sizeof map->generation);

	/* Commit entries purge with respect to map publication. */
	ck_pr_fence_store();
	return map;
}
Example #10
File: ck_rhs.c Project: RaHus/ck
static inline void
ck_rhs_map_bound_set(struct ck_rhs_map *m,
    unsigned long h,
    unsigned long n_probes)
{
	unsigned long offset = h & m->mask;
	struct ck_rhs_entry_desc *desc;

	if (n_probes > m->probe_maximum)
		ck_pr_store_uint(&m->probe_maximum, n_probes);
	if (!(m->read_mostly)) {
		desc = &m->entries.descs[offset];

		if (desc->probe_bound < n_probes) {
			if (n_probes > CK_RHS_WORD_MAX)
				n_probes = CK_RHS_WORD_MAX;

			CK_RHS_STORE(&desc->probe_bound, n_probes);
			ck_pr_fence_store();
		}
	}

	return;
}
Example #11
bool
ck_hs_grow(struct ck_hs *hs,
    unsigned long capacity)
{
	struct ck_hs_map *map, *update;
	void **bucket, *previous;
	unsigned long k, i, j, offset, probes;

restart:
	map = hs->map;

	if (map->capacity > capacity)
		return false;

	update = ck_hs_map_create(hs, capacity);
	if (update == NULL)
		return false;

	for (k = 0; k < map->capacity; k++) {
		unsigned long h;

		previous = map->entries[k];
		if (previous == CK_HS_EMPTY || previous == CK_HS_TOMBSTONE)
			continue;

#ifdef CK_HS_PP
		if (hs->mode & CK_HS_MODE_OBJECT)
			previous = (void *)((uintptr_t)previous & (((uintptr_t)1 << CK_MD_VMA_BITS) - 1));
#endif

		h = hs->hf(previous, hs->seed);
		offset = h & update->mask;
		i = probes = 0;

		for (;;) {
			bucket = (void *)((uintptr_t)&update->entries[offset] & ~(CK_MD_CACHELINE - 1));

			for (j = 0; j < CK_HS_PROBE_L1; j++) {
				void **cursor = bucket + ((j + offset) & (CK_HS_PROBE_L1 - 1));

				if (probes++ == update->probe_limit)
					break;

				if (CK_CC_LIKELY(*cursor == CK_HS_EMPTY)) {
					*cursor = map->entries[k];
					update->n_entries++;

					if (probes > update->probe_maximum)
						update->probe_maximum = probes;

					break;
				}
			}

			if (j < CK_HS_PROBE_L1)
				break;

			offset = ck_hs_map_probe_next(update, offset, h, i++, probes);
		}

		if (probes > update->probe_limit) {
			/*
			 * We have hit the probe limit, map needs to be even larger.
			 */
			ck_hs_map_destroy(hs->m, update, false);
			capacity <<= 1;
			goto restart;
		}
	}

	ck_pr_fence_store();
	ck_pr_store_ptr(&hs->map, update);
	ck_hs_map_destroy(hs->m, map, true);
	return true;
}
Example #12
static inline void
set_nodes(as_cluster* cluster, as_nodes* nodes)
{
	ck_pr_fence_store();
	ck_pr_store_ptr(&cluster->nodes, nodes);
}
Example #13
File: ck_rhs.c Project: RaHus/ck
bool
ck_rhs_grow(struct ck_rhs *hs,
    unsigned long capacity)
{
	struct ck_rhs_map *map, *update;
	void *previous, *prev_saved;
	unsigned long k, offset, probes;

restart:
	map = hs->map;
	if (map->capacity > capacity)
		return false;

	update = ck_rhs_map_create(hs, capacity);
	if (update == NULL)
		return false;

	for (k = 0; k < map->capacity; k++) {
		unsigned long h;

		prev_saved = previous = ck_rhs_entry(map, k);
		if (previous == CK_RHS_EMPTY)
			continue;

#ifdef CK_RHS_PP
		if (hs->mode & CK_RHS_MODE_OBJECT)
			previous = CK_RHS_VMA(previous);
#endif

		h = hs->hf(previous, hs->seed);
		offset = h & update->mask;
		probes = 0;

		for (;;) {
			void **cursor = ck_rhs_entry_addr(update, offset);

			if (probes++ == update->probe_limit) {
				/*
				 * We have hit the probe limit, map needs to be even larger.
				 */
				ck_rhs_map_destroy(hs->m, update, false);
				capacity <<= 1;
				goto restart;
			}

			if (CK_CC_LIKELY(*cursor == CK_RHS_EMPTY)) {
				*cursor = prev_saved;
				update->n_entries++;
				ck_rhs_set_probes(update, offset, probes);
				ck_rhs_map_bound_set(update, h, probes);
				break;
			} else if (ck_rhs_probes(update, offset) < probes) {
				void *tmp = prev_saved;
				unsigned int old_probes;
				prev_saved = previous = *cursor;
#ifdef CK_RHS_PP
				if (hs->mode & CK_RHS_MODE_OBJECT)
					previous = CK_RHS_VMA(previous);
#endif
				*cursor = tmp;
				ck_rhs_map_bound_set(update, h, probes);
				h = hs->hf(previous, hs->seed);
				old_probes = ck_rhs_probes(update, offset);
				ck_rhs_set_probes(update, offset, probes);
				probes = old_probes - 1;
				continue;
			}
			ck_rhs_wanted_inc(update, offset);
			offset = ck_rhs_map_probe_next(update, offset,  probes);
		}

	}

	ck_pr_fence_store();
	ck_pr_store_ptr(&hs->map, update);
	ck_rhs_map_destroy(hs->m, map, true);
	return true;
}
Example #14
File: ck_rhs.c Project: RaHus/ck
static struct ck_rhs_map *
ck_rhs_map_create(struct ck_rhs *hs, unsigned long entries)
{
	struct ck_rhs_map *map;
	unsigned long size, n_entries, limit;

	n_entries = ck_internal_power_2(entries);
	if (n_entries < CK_RHS_PROBE_L1)
		return NULL;

	if (hs->mode & CK_RHS_MODE_READ_MOSTLY)
		size = sizeof(struct ck_rhs_map) +
		    (sizeof(void *) * n_entries +
		     sizeof(struct ck_rhs_no_entry_desc) * n_entries +
		     2 * CK_MD_CACHELINE - 1);
	else
		size = sizeof(struct ck_rhs_map) +
		    (sizeof(struct ck_rhs_entry_desc) * n_entries +
		     CK_MD_CACHELINE - 1);
	map = hs->m->malloc(size);
	if (map == NULL)
		return NULL;
	map->read_mostly = !!(hs->mode & CK_RHS_MODE_READ_MOSTLY);

	map->size = size;
	/* We should probably use a more intelligent heuristic for default probe length. */
	limit = ck_internal_max(n_entries >> (CK_RHS_PROBE_L1_SHIFT + 2), CK_RHS_PROBE_L1_DEFAULT);
	if (limit > UINT_MAX)
		limit = UINT_MAX;

	map->probe_limit = (unsigned int)limit;
	map->probe_maximum = 0;
	map->capacity = n_entries;
	map->step = ck_internal_bsf(n_entries);
	map->mask = n_entries - 1;
	map->n_entries = 0;

	/* Align map allocation to cache line. */
	if (map->read_mostly) {
		map->entries.no_entries.entries = (void *)(((uintptr_t)&map[1] +
		    CK_MD_CACHELINE - 1) & ~(CK_MD_CACHELINE - 1));
		map->entries.no_entries.descs = (void *)(((uintptr_t)map->entries.no_entries.entries + (sizeof(void *) * n_entries) + CK_MD_CACHELINE - 1) &~ (CK_MD_CACHELINE - 1));
		memset(map->entries.no_entries.entries, 0,
		    sizeof(void *) * n_entries);
		memset(map->entries.no_entries.descs, 0,
		    sizeof(struct ck_rhs_no_entry_desc) * n_entries);
		map->offset_mask = (CK_MD_CACHELINE / sizeof(void *)) - 1;
		map->probe_func = ck_rhs_map_probe_rm;

	} else {
		map->entries.descs = (void *)(((uintptr_t)&map[1] +
		    CK_MD_CACHELINE - 1) & ~(CK_MD_CACHELINE - 1));
		memset(map->entries.descs, 0, sizeof(struct ck_rhs_entry_desc) * n_entries);
		map->offset_mask = (CK_MD_CACHELINE / sizeof(struct ck_rhs_entry_desc)) - 1;
		map->probe_func = ck_rhs_map_probe;
	}
	memset(map->generation, 0, sizeof map->generation);

	/* Commit entries purge with respect to map publication. */
	ck_pr_fence_store();
	return map;
}
Example #15
bool
ck_bag_remove_spmc(struct ck_bag *bag, void *entry)
{
	struct ck_bag_block *cursor, *copy, *prev;
	uint16_t block_index, n_entries;

	cursor = bag->head;
	prev = NULL;
	while (cursor != NULL) {
		n_entries = ck_bag_block_count(cursor);

		for (block_index = 0; block_index < n_entries; block_index++) {
			if (cursor->array[block_index] == entry)
				goto found;

		}

		prev = cursor;
		cursor = ck_bag_block_next(cursor->next.ptr);
	}

	return true;

found:
	/* Cursor points to containing block, block_index is index of deletion */
	if (n_entries == 1) {
		/* If a block's single entry is being removed, remove the block. */
		if (prev == NULL) {
			struct ck_bag_block *new_head = ck_bag_block_next(cursor->next.ptr);
			ck_pr_store_ptr(&bag->head, new_head);
		} else {
			uintptr_t next;
#ifdef __x86_64__
			next = ((uintptr_t)prev->next.ptr & (CK_BAG_BLOCK_ENTRIES_MASK)) |
				(uintptr_t)(void *)ck_bag_block_next(cursor->next.ptr);
#else
			next = (uintptr_t)(void *)cursor->next.ptr;
#endif
			ck_pr_store_ptr(&prev->next.ptr, (struct ck_bag_block *)next);
		}

		/* Remove block from available list */
		if (cursor->avail_prev != NULL)
			cursor->avail_prev->avail_next = cursor->avail_next;

		if (cursor->avail_next != NULL)
			cursor->avail_next->avail_prev = cursor->avail_prev;

		bag->n_blocks--;
		copy = cursor->avail_next;
	} else {
		uintptr_t next_ptr;

		copy = allocator.malloc(bag->info.bytes);
		if (copy == NULL)
			return false;

		memcpy(copy, cursor, bag->info.bytes);
		copy->array[block_index] = copy->array[--n_entries];

		next_ptr = (uintptr_t)(void *)ck_bag_block_next(copy->next.ptr);
#ifdef __x86_64__
		copy->next.ptr = (void *)(((uintptr_t)n_entries << 48) | next_ptr);
#else
		copy->next.n_entries = n_entries;
		copy->next.ptr = (struct ck_bag_block *)next_ptr;
#endif

		ck_pr_fence_store();

		if (prev == NULL) {
			ck_pr_store_ptr(&bag->head, copy);
		} else {
#ifdef __x86_64__
			uintptr_t next = ((uintptr_t)prev->next.ptr & (CK_BAG_BLOCK_ENTRIES_MASK)) |
				(uintptr_t)(void *)ck_bag_block_next(copy);
			ck_pr_store_ptr(&prev->next.ptr, (struct ck_bag_block *)next);
#else
			ck_pr_store_ptr(&prev->next.ptr, copy);
#endif
		}

		if (n_entries == bag->info.max - 1) {
			/* Block was previously full, add to head of avail. list */
			copy->avail_next = bag->avail_head;
			copy->avail_prev = NULL;
			bag->avail_head = copy;
		}

	}

	/* Update available list. */
	if (bag->avail_head == cursor)
		bag->avail_head = copy;

	if (bag->avail_tail == cursor)
		bag->avail_tail = copy;

	allocator.free(cursor, sizeof(struct ck_bag_block), true);
	ck_pr_store_uint(&bag->n_entries, bag->n_entries - 1);
	return true;
}
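
ck_bag_remove_spmc removes an entry by building a private copy of the affected block, fixing it up, fencing, and only then linking the copy where the original was, so a concurrent reader sees either the old block or the fully initialized copy, never a half-edited one. The same copy-on-write step in isolation, for a plain singly linked node (an illustrative sketch; reclamation of the old node is left to a safe-memory-reclamation scheme such as epochs):

#include <stdbool.h>
#include <stdlib.h>
#include <ck_pr.h>

struct node {
	struct node *next;
	int value;
};

/* Single writer replaces the node *link points to, without disturbing readers. */
static bool
cow_replace(struct node **link, struct node *victim, int new_value)
{
	struct node *copy = malloc(sizeof(*copy));

	if (copy == NULL)
		return false;

	*copy = *victim;		/* snapshot the node, including its next pointer */
	copy->value = new_value;

	ck_pr_fence_store();		/* copy's contents before its publication */
	ck_pr_store_ptr(link, copy);

	/* The victim may still be referenced by readers; free it only after
	 * they are known to be done with it (e.g. via ck_epoch). */
	return true;
}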
Example #16
bool
ck_bag_put_spmc(struct ck_bag *bag, void *entry)
{
	struct ck_bag_block *cursor, *new_block, *new_block_prev, *new_tail;
	uint16_t n_entries_block;
	size_t blocks_alloc, i;
	uintptr_t next = 0;

	new_block = new_block_prev = new_tail = NULL;

	/*
	 * Blocks with available entries are stored in
	 * the bag's available list.
	 */
	cursor = bag->avail_head;
	if (cursor != NULL) {
		n_entries_block = ck_bag_block_count(cursor);
	} else {
		/* The bag is full, allocate a new set of blocks */
		if (bag->alloc_strat == CK_BAG_ALLOCATE_GEOMETRIC)
			blocks_alloc = (bag->n_blocks + 1) << 1;
		else
			blocks_alloc = 1;

		for (i = 0; i < blocks_alloc-1; i++) {
			new_block = allocator.malloc(bag->info.bytes);

			if (new_block == NULL)
				return false;

			/*
			 * First node is the tail of the Bag.
			 * Second node is the new tail of the Available list.
			 */
			if (i == 0)
				new_tail = new_block;

#ifndef __x86_64__
			new_block->next.n_entries = 0;
#endif

			new_block->next.ptr = new_block_prev;
			new_block->avail_next = new_block_prev;
			if (new_block_prev != NULL)
				new_block_prev->avail_prev = new_block;

			new_block_prev = new_block;
		}

		/*
		 * Insert entry into last allocated block.
		 * cursor is new head of available list.
		 */
		cursor = allocator.malloc(bag->info.bytes);
		cursor->avail_next = new_block;
		cursor->avail_prev = NULL;
		new_block->avail_prev = cursor;
		n_entries_block = 0;
		bag->n_blocks += blocks_alloc; /* n_blocks and n_avail_blocks? */
	}

	/* Update the available list */
	if (new_block != NULL) {
		if (bag->avail_tail != NULL) {
			cursor->avail_prev = bag->avail_tail;
			bag->avail_tail->avail_next = cursor;
		} else {
			/* Available list was previously empty */
			bag->avail_head = cursor;
		}

		bag->avail_tail = new_tail;
	} else if (n_entries_block == bag->info.max - 1) {
		/* New entry will fill up block, remove from avail list */
		if (cursor->avail_prev != NULL)
			cursor->avail_prev->avail_next = cursor->avail_next;

		if (cursor->avail_next != NULL)
			cursor->avail_next->avail_prev = cursor->avail_prev;

		if (bag->avail_head == cursor)
			bag->avail_head = cursor->avail_next;

		if (bag->avail_tail == cursor)
			bag->avail_tail = cursor->avail_prev;

		/* For debugging purposes */
		cursor->avail_next = NULL;
		cursor->avail_prev = NULL;
	}

	/* Update array and block->n_entries */
	cursor->array[n_entries_block++] = entry;
	ck_pr_fence_store();

#ifdef __x86_64__
	next = ((uintptr_t)n_entries_block << 48);
#endif

	/* Update bag's list */
	if (n_entries_block == 1) {

		if (bag->head != NULL) {
#ifdef __x86_64__
			next += ((uintptr_t)(void *)ck_bag_block_next(bag->head));
#else
			next = (uintptr_t)(void *)ck_bag_block_next(bag->head);
#endif
		}

#ifndef __x86_64__
		ck_pr_store_ptr(&cursor->next.n_entries, (void *)(uintptr_t)n_entries_block);
#endif

		ck_pr_store_ptr(&cursor->next.ptr, (void *)next);
		ck_pr_store_ptr(&bag->head, cursor);
	} else {

#ifdef __x86_64__
		next += ((uintptr_t)(void *)ck_bag_block_next(cursor->next.ptr));
		ck_pr_store_ptr(&cursor->next, (void *)next);
#else
		ck_pr_store_ptr(&cursor->next.n_entries, (void *)(uintptr_t)n_entries_block);
#endif

	}

	ck_pr_store_uint(&bag->n_entries, bag->n_entries + 1);
	return true;
}