Example #1
/*
 * alloc_class_assign_by_size -- (internal) chooses the allocation class that
 *	best approximates the provided size
 */
static struct alloc_class *
alloc_class_assign_by_size(struct alloc_class_collection *ac,
	size_t size)
{
	LOG(10, NULL);

	size_t class_map_index = SIZE_TO_CLASS_MAP_INDEX(size,
		ac->granularity);

	struct alloc_class *c = alloc_class_find_min_frag(ac,
		class_map_index * ac->granularity);
	ASSERTne(c, NULL);

	/*
	 * We don't lock this array: locking this section and then bailing out
	 * if someone else was faster would still be slower than simply
	 * calculating the class and failing to assign the variable.
	 * We use a compare-and-swap so that helgrind/drd don't complain.
	 */
	util_bool_compare_and_swap64(
		&ac->class_map_by_alloc_size[class_map_index],
		MAX_ALLOCATION_CLASSES, c->id);

	return c;
}
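
The util_bool_compare_and_swap64 call used throughout these examples behaves
like an atomic "if *ptr equals old, set *ptr to new", returning true on
success. A minimal sketch of the wrapper, assuming it simply maps to GCC's
__sync_bool_compare_and_swap builtin (the helgrind/drd remark above hints at
this); the project's real definition may differ:

/* Sketch only: atomically swap *ptr from oldval to newval, true on success. */
#define util_bool_compare_and_swap64(ptr, oldval, newval) \
	__sync_bool_compare_and_swap((ptr), (oldval), (newval))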
Example #2
/*
 * alloc_class_reservation_clear -- removes the reservation on class id
 */
static void
alloc_class_reservation_clear(struct alloc_class_collection *ac, int id)
{
	int ret = util_bool_compare_and_swap64(&ac->aclasses[id],
		ACLASS_RESERVED, NULL);
	ASSERT(ret);
}
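
For the CAS above to work, ACLASS_RESERVED must be a sentinel pointer
distinct from both NULL and every valid class pointer. A hypothetical
definition, for illustration only (the project's actual sentinel may differ):

/* Hypothetical sentinel: marks a slot as reserved but not yet populated. */
#define ACLASS_RESERVED ((struct alloc_class *)-1)

The ASSERT(ret) then encodes an invariant: clearing a reservation is only
legal on a slot that is currently in the reserved state.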
Example #3
/*
 * alloc_class_reserve -- reserve the specified class id
 */
int
alloc_class_reserve(struct alloc_class_collection *ac, uint8_t id)
{
	LOG(10, NULL);

	return util_bool_compare_and_swap64(&ac->aclasses[id],
			NULL, ACLASS_RESERVED) ? 0 : -1;
}
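
A hypothetical caller showing how examples 2 and 3 pair up: a slot moves from
NULL to ACLASS_RESERVED on a successful reserve, is published once the class
is built, and the reservation is rolled back on failure. Everything here
except the two alloc_class_* functions is assumed for illustration:

static struct alloc_class *
register_class_at(struct alloc_class_collection *ac, uint8_t id)
{
	if (alloc_class_reserve(ac, id) != 0)
		return NULL; /* slot taken or reserved by another thread */

	struct alloc_class *c = build_class(id); /* hypothetical constructor */
	if (c == NULL) {
		alloc_class_reservation_clear(ac, id); /* roll back */
		return NULL;
	}

	ac->aclasses[id] = c; /* the reserved slot is exclusively ours to fill */
	return c;
}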
Example #4
/*
 * alloc_class_find_first_free_slot -- searches for the
 *	first available allocation class slot
 *
 * This function must be thread-safe because allocation classes can be created
 * at runtime.
 */
int
alloc_class_find_first_free_slot(struct alloc_class_collection *ac,
	uint8_t *slot)
{
	for (int n = 0; n < MAX_ALLOCATION_CLASSES; ++n) {
		if (util_bool_compare_and_swap64(&ac->aclasses[n],
				NULL, ACLASS_RESERVED)) {
			*slot = (uint8_t)n;
			return 0;
		}
	}

	return -1;
}
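
A short usage sketch for the slot search: because the CAS both finds and
claims the slot in a single atomic step, two threads creating classes
concurrently can never end up with the same index. The surrounding code is an
assumption:

uint8_t slot;
if (alloc_class_find_first_free_slot(ac, &slot) != 0)
	return -1; /* all MAX_ALLOCATION_CLASSES slots are in use */
/* ac->aclasses[slot] now holds ACLASS_RESERVED and is safe to populate */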
Example #5
File: lane.c Project: ChandKV/nvml
/*
 * get_lane -- (internal) get free lane index
 */
static inline void
get_lane(uint64_t *locks, uint64_t *index, uint64_t nlocks)
{
	while (1) {
		do {
			*index %= nlocks;
			if (likely(util_bool_compare_and_swap64(
					&locks[*index], 0, 1)))
				return;

			++(*index);
		} while (*index < nlocks);

		sched_yield();
	}
}
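
A hypothetical call site for get_lane, assuming the caller keeps a per-thread
starting index as a hint so that threads spread out across the lane array
instead of all contending on lane 0 (lane_hint is an assumed name):

static __thread uint64_t lane_hint; /* per-thread starting point */

uint64_t idx = lane_hint;
get_lane(pop->lanes_desc.lane_locks, &idx, pop->lanes_desc.runtime_nlanes);
lane_hint = idx; /* remember where we succeeded for next time */
/* lane idx is now exclusively ours until it is CASed back to 0 */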
Example #6
File: lane.c Project: ChandKV/nvml
/*
 * lane_release -- drops the per-thread lane
 */
void
lane_release(PMEMobjpool *pop)
{
	if (unlikely(!pop->lanes_desc.runtime_nlanes)) {
		ASSERT(pop->has_remote_replicas);
		return;
	}

	struct lane_info *lane = get_lane_info_record(pop);

	ASSERTne(lane, NULL);
	ASSERTne(lane->lane_idx, UINT64_MAX);

	if (unlikely(lane->nest_count == 0)) {
		FATAL("lane_release");
	} else if (--(lane->nest_count) == 0) {
		if (unlikely(!util_bool_compare_and_swap64(
				&pop->lanes_desc.lane_locks[lane->lane_idx],
				1, 0))) {
			FATAL("util_bool_compare_and_swap64");
		}
	}
}
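
Taken together, examples 5 and 6 implement an array of spinlocks: get_lane
claims a lane by CASing its lock word from 0 to 1, and lane_release hands it
back by CASing 1 to 0. Releasing with a CAS rather than a plain store lets
the code detect a corrupted or double-released lock (the FATAL path) and, as
example 1 notes, keeps helgrind/drd quiet. A self-contained sketch of the
same pattern, using the GCC builtin directly:

#include <sched.h>
#include <stdint.h>
#include <stdlib.h>

static uint64_t locks[4]; /* 0 = free, 1 = held */

/* Spin until some slot is claimed; yield when every slot is busy. */
static uint64_t
acquire_any(void)
{
	for (;;) {
		for (uint64_t i = 0; i < 4; ++i)
			if (__sync_bool_compare_and_swap(&locks[i], 0, 1))
				return i; /* slot i is now ours */
		sched_yield();
	}
}

/* Release must find the lock word held, mirroring lane_release's check. */
static void
release(uint64_t i)
{
	if (!__sync_bool_compare_and_swap(&locks[i], 1, 0))
		abort(); /* corrupted or double-released lock word */
}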