Example #1
File: lane.c Project: ChandKV/nvml
/*
 * lane_boot -- initializes all lanes
 */
int
lane_boot(PMEMobjpool *pop)
{
	int err = 0;

	pop->lanes_desc.lane = Malloc(sizeof(struct lane) * pop->nlanes);
	if (pop->lanes_desc.lane == NULL) {
		err = ENOMEM;
		ERR("!Malloc of volatile lanes");
		goto error_lanes_malloc;
	}

	pop->lanes_desc.next_lane_idx = 0;

	pop->lanes_desc.lane_locks =
		Zalloc(sizeof(*pop->lanes_desc.lane_locks) * pop->nlanes);
	if (pop->lanes_desc.lane_locks == NULL) {
		err = ENOMEM;
		ERR("!Zalloc for lane locks");
		goto error_locks_malloc;
	}

	/* add lanes to pmemcheck ignored list */
	VALGRIND_ADD_TO_GLOBAL_TX_IGNORE((char *)pop + pop->lanes_offset,
		(sizeof(struct lane_layout) * pop->nlanes));

	uint64_t i;
	for (i = 0; i < pop->nlanes; ++i) {
		struct lane_layout *layout = lane_get_layout(pop, i);

		if ((err = lane_init(pop, &pop->lanes_desc.lane[i], layout))) {
			ERR("!lane_init");
			goto error_lane_init;
		}
	}

	return 0;

error_lane_init:
	for (; i >= 1; --i)
		lane_destroy(pop, &pop->lanes_desc.lane[i - 1]);
	Free(pop->lanes_desc.lane_locks);
	pop->lanes_desc.lane_locks = NULL;
error_locks_malloc:
	Free(pop->lanes_desc.lane);
	pop->lanes_desc.lane = NULL;
error_lanes_malloc:
	return err;
}
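Note: the error handling above is the staged goto-cleanup idiom used throughout nvml: each label undoes only what was successfully allocated before the failure, in reverse order, and execution falls through the remaining labels to the final return. A minimal, self-contained sketch of the same shape, with hypothetical resource names that are not part of the original code:

#include <errno.h>
#include <stdlib.h>

/* hypothetical two-stage initializer mirroring lane_boot's unwind order */
static int
boot_two_buffers(double **a, double **b, size_t n)
{
	int err = 0;

	*a = malloc(n * sizeof(**a));
	if (*a == NULL) {
		err = ENOMEM;
		goto error_a_malloc;	/* nothing allocated yet */
	}

	*b = calloc(n, sizeof(**b));
	if (*b == NULL) {
		err = ENOMEM;
		goto error_b_malloc;	/* undo the first allocation only */
	}

	return 0;

error_b_malloc:
	free(*a);
	*a = NULL;
error_a_malloc:
	return err;
}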
Example #2
File: heap.c Project: tomaszkapela/nvml
/*
 * heap_populate_buckets -- (internal) creates volatile state of memory blocks
 */
static int
heap_populate_buckets(struct palloc_heap *heap)
{
	struct heap_rt *h = heap->rt;

	if (h->zones_exhausted == h->max_zone)
		return ENOMEM;

	uint32_t zone_id = h->zones_exhausted++;
	struct zone *z = ZID_TO_ZONE(heap->layout, zone_id);

	/* ignore zone and chunk headers */
	VALGRIND_ADD_TO_GLOBAL_TX_IGNORE(z, sizeof(z->header) +
		sizeof(z->chunk_headers));

	if (z->header.magic != ZONE_HEADER_MAGIC)
		heap_zone_init(heap, zone_id);

	return heap_reclaim_zone_garbage(heap, zone_id, 1 /* init */);
}
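Note: every example on this page registers its persistent ranges with VALGRIND_ADD_TO_GLOBAL_TX_IGNORE so that pmemcheck does not flag stores to those ranges as happening outside a transaction. When the library is built without Valgrind instrumentation, such macros are normally compiled out to no-ops. A hedged sketch of that kind of guard; the guard macro name and header layout are assumptions and may differ from what nvml actually uses:

/* assumption: VG_PMEMCHECK_ENABLED is the build-time switch */
#if VG_PMEMCHECK_ENABLED
#include <valgrind/pmemcheck.h>
#else
#define VALGRIND_ADD_TO_GLOBAL_TX_IGNORE(addr, len)\
	do { (void)(addr); (void)(len); } while (0)
#endif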
Example #3
File: lane.c Project: harrybaa/nvml
/*
 * lane_boot -- initializes all lanes
 */
int
lane_boot(PMEMobjpool *pop)
{
	ASSERTeq(pop->lanes, NULL);

	int err = 0;

	pop->lanes = Malloc(sizeof (struct lane) * pop->nlanes);

	if (pop->lanes == NULL) {
		ERR("!Malloc of volatile lanes");
		err = ENOMEM;
		goto error_lanes_malloc;
	}

	/* add lanes to pmemcheck ignored list */
	VALGRIND_ADD_TO_GLOBAL_TX_IGNORE((char *)pop + pop->lanes_offset,
		(sizeof (struct lane_layout) * pop->nlanes));
	int i;
	for (i = 0; i < pop->nlanes; ++i) {
		struct lane_layout *layout = lane_get_layout(pop, i);

		if ((err = lane_init(&pop->lanes[i], layout)) != 0) {
			ERR("!lane_init");
			goto error_lane_init;
		}
	}

	return 0;

error_lane_init:
	for (i = i - 1; i >= 0; --i)
		if (lane_destroy(&pop->lanes[i]) != 0)
			ERR("!lane_destroy");
	Free(pop->lanes);
	pop->lanes = NULL;
error_lanes_malloc:
	return err;
}
Example #4
File: heap.c Project: sudkannan/nvml
/*
 * heap_populate_buckets -- (internal) creates volatile state of memory blocks
 */
static void
heap_populate_buckets(PMEMobjpool *pop)
{
	struct pmalloc_heap *h = pop->heap;

	if (h->zones_exhausted == h->max_zone)
		return;

	uint32_t zone_id = h->zones_exhausted++;
	struct zone *z = &h->layout->zones[zone_id];

	/* ignore zone and chunk headers */
	VALGRIND_ADD_TO_GLOBAL_TX_IGNORE(z, sizeof (z->header) +
		sizeof (z->chunk_headers));

	if (z->header.magic != ZONE_HEADER_MAGIC)
		heap_zone_init(pop, zone_id);

	struct bucket *def_bucket = h->buckets[DEFAULT_BUCKET];

	/* walk the zone chunk by chunk; each header covers size_idx slots */
	for (uint32_t i = 0; i < z->header.size_idx; ) {
		struct chunk_header *hdr = &z->chunk_headers[i];
		heap_chunk_write_footer(hdr, hdr->size_idx);

		if (hdr->type == CHUNK_TYPE_RUN) {
			struct chunk_run *run =
				(struct chunk_run *)&z->chunks[i];
			heap_populate_run_bucket(pop,
				h->bucket_map[run->block_size], i, zone_id);
		} else if (hdr->type == CHUNK_TYPE_FREE) {
			struct memory_block m = {i, zone_id, hdr->size_idx, 0};
			bucket_insert_block(def_bucket, m);
		}

		i += hdr->size_idx;
	}
}
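Note: the loop above advances by hdr->size_idx rather than by one, because a single chunk header describes a group of that many chunk slots (a run or a free range), so only the first header of each group is visited. A small standalone sketch of the same traversal over a hypothetical header array:

#include <stdint.h>
#include <stdio.h>

struct hdr {
	uint32_t size_idx;	/* slots covered by this header, always >= 1 */
};

/* visit the first header of every chunk group exactly once */
static void
walk_chunks(const struct hdr *hdrs, uint32_t total)
{
	for (uint32_t i = 0; i < total; ) {
		printf("group at %u spans %u slots\n", i, hdrs[i].size_idx);
		i += hdrs[i].size_idx;
	}
}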
Example #5
File: lane.c Project: andreas-bluemle/nvml
/*
 * lane_boot -- initializes all lanes
 */
int
lane_boot(PMEMobjpool *pop)
{
	ASSERTeq(pop->lanes, NULL);

	int err;

	pthread_mutexattr_t lock_attr;
	if ((err = pthread_mutexattr_init(&lock_attr)) != 0) {
		ERR("!pthread_mutexattr_init");
		goto error_mutexattr_destroy; /* attr was never initialized, skip destroy */
	}

	if ((err = pthread_mutexattr_settype(
			&lock_attr, PTHREAD_MUTEX_RECURSIVE)) != 0) {
		ERR("!pthread_mutexattr_settype");
		goto error_lanes_malloc;
	}

	pop->lanes = Malloc(sizeof (struct lane) * pop->nlanes);
	if (pop->lanes == NULL) {
		err = ENOMEM;
		ERR("!Malloc of volatile lanes");
		goto error_lanes_malloc;
	}

	pop->lane_locks = Malloc(sizeof (pthread_mutex_t) * pop->nlanes);
	if (pop->lane_locks == NULL) {
		err = ENOMEM;
		ERR("!Malloc for lane locks");
		goto error_lock_malloc;
	}

	/* add lanes to pmemcheck ignored list */
	VALGRIND_ADD_TO_GLOBAL_TX_IGNORE((char *)pop + pop->lanes_offset,
		(sizeof (struct lane_layout) * pop->nlanes));

	uint64_t i;
	for (i = 0; i < pop->nlanes; ++i) {
		struct lane_layout *layout = lane_get_layout(pop, i);

		if ((err = lane_init(pop, &pop->lanes[i], layout,
				&pop->lane_locks[i], &lock_attr)) != 0) {
			ERR("!lane_init");
			goto error_lane_init;
		}
	}

	if (pthread_mutexattr_destroy(&lock_attr) != 0) {
		ERR("!pthread_mutexattr_destroy");
		goto error_mutexattr_destroy;
	}

	return 0;

error_lane_init:
	for (; i >= 1; --i)
		if (lane_destroy(pop, &pop->lanes[i - 1]) != 0)
			ERR("!lane_destroy");

	Free(pop->lane_locks);
	pop->lane_locks = NULL;

error_lock_malloc:
	Free(pop->lanes);
	pop->lanes = NULL;

error_lanes_malloc:
	if (pthread_mutexattr_destroy(&lock_attr) != 0)
		ERR("!pthread_mutexattr_destroy");

error_mutexattr_destroy:
	return err;
}
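Note: this variant protects each lane with a mutex created as PTHREAD_MUTEX_RECURSIVE, so a thread that already holds its lane lock may acquire it again. A standalone sketch of that attribute setup and teardown, independent of the lane structures above:

#include <pthread.h>
#include <stdio.h>

int
main(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t lock;

	if (pthread_mutexattr_init(&attr) != 0)
		return 1;
	if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE) != 0 ||
	    pthread_mutex_init(&lock, &attr) != 0) {
		pthread_mutexattr_destroy(&attr);
		return 1;
	}
	pthread_mutexattr_destroy(&attr);	/* attr no longer needed */

	pthread_mutex_lock(&lock);
	pthread_mutex_lock(&lock);	/* relock by the same thread succeeds */
	pthread_mutex_unlock(&lock);
	pthread_mutex_unlock(&lock);

	pthread_mutex_destroy(&lock);
	puts("recursive lock ok");
	return 0;
}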