Example #1
int omx_xen_page_alloc(omx_xenif_t * omx_xenif, uint32_t count)
{
	struct omx_xen_page_cookie *cookie;
	struct page *page;
	int err = 0, i;

#ifdef OMX_XEN_COOKIES
	dprintk_in();

	for (i = 0; i < count; i++) {

		cookie =
		    kmalloc(sizeof(struct omx_xen_page_cookie), GFP_KERNEL);
		if (!cookie) {
			printk_err("cannot create cookie\n");
			err = -ENOMEM;
			goto out;
		}
		page = alloc_page(GFP_KERNEL);
		if (!page) {
			printk_err("cannot allocate page\n");
			/* don't leak the cookie allocated just above */
			kfree(cookie);
			err = -ENOMEM;
			goto out;
		}

		cookie->page = page;

		//      write_lock(&omx_xenif->page_cookies_freelock);
		list_add_tail(&cookie->node, &omx_xenif->page_cookies_free);
		//      write_unlock(&omx_xenif->page_cookies_freelock);

		dprintk_deb
		    ("allocated, and appended to list, %#lx, page = %#lx\n",
		     (unsigned long)cookie, (unsigned long)page);

	}

out:
	dprintk_out();
#endif
	return err;
}
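Below is a minimal consumer sketch for the freelist built above. It is an assumption for illustration only: the project's real omx_xen_page_get_cookie() (used in Example #14) also takes a page count and presumably handles locking and refilling; this sketch only shows popping one cookie off page_cookies_free with the standard list helpers.

static struct omx_xen_page_cookie *omx_xen_page_pop_cookie_sketch(omx_xenif_t *omx_xenif)
{
	struct omx_xen_page_cookie *cookie = NULL;

	/* locking elided here, as in the allocator above */
	if (!list_empty(&omx_xenif->page_cookies_free)) {
		cookie = list_first_entry(&omx_xenif->page_cookies_free,
					  struct omx_xen_page_cookie, node);
		list_del(&cookie->node);
	}

	return cookie;
}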
Example #2
File: elf.c Project: codyd51/axle
bool elf_load_segment(page_directory_t* new_dir, unsigned char* src, elf_phdr* seg) {
	//loadable?
	if (seg->type != PT_LOAD) {
		printf_err("Tried to load non-loadable segment");
		printk_err("Tried to load non-loadable segment");
		return false; 
	}

	unsigned char* src_base = src + seg->offset;
	//figure out range to map this binary to in virtual memory
	uint32_t dest_base = seg->vaddr;
	uint32_t dest_limit = dest_base + seg->memsz;

	printf("dest_base %x dest_limit %x\n", dest_base, dest_limit);
	//alloc enough mem for new task
	for (uint32_t i = dest_base, page_counter = 0; i <= dest_limit; i += PAGE_SIZE, page_counter++) {
		page_t* page = get_page(i, 1, new_dir);
		ASSERT(page, "elf_load_segment couldn't get page in new addrspace at %x\n", i);
		bool got_frame = alloc_frame(page, 0, 0);
		ASSERT(got_frame, "elf_load_segment couldn't alloc frame for page %x\n", i);
		
		char* pagebuf = kmalloc_a(PAGE_SIZE);
		page_t* local_page = get_page((uint32_t)pagebuf, 0, page_dir_current());
		ASSERT(local_page, "couldn't get local_page!");
		int old_frame = local_page->frame;
		local_page->frame = page->frame;
		invlpg(pagebuf);

		//create buffer in current address space,
		//copy data,
		//and then map frame into new address space
		memset(pagebuf, 0, PAGE_SIZE);
		//only seg->filesz bytes are guaranteed to be in the file!
		//_not_ memsz
		//any extra bytes between filesz and memsz should be set to 0, which is done above
		//copy at most one page per iteration, and only while still within filesz
		uint32_t file_off = page_counter * PAGE_SIZE;
		if (file_off < seg->filesz) {
			uint32_t chunk = seg->filesz - file_off;
			if (chunk > PAGE_SIZE) chunk = PAGE_SIZE;
			memcpy(pagebuf, src_base + file_off, chunk);
		}

		//now that we've copied the data in the local address space, 
		//get the page in local address space, 
		//and copy backing physical frame data to physical frame of
		//page in new address space

		//now that the buffer has been copied, we can safely free the buffer
		local_page->frame = old_frame;
		invlpg(pagebuf);
		kfree(pagebuf);
	}

	// Copy data
	//memset((void*)dest_base, 0, (void*)(dest_limit - dest_base));

	return true;
}
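A minimal caller sketch (hypothetical; the phdrs/phnum parameters are assumptions, not axle's actual ELF loader interface) showing how elf_load_segment() would typically be applied to every program header, skipping the non-PT_LOAD entries it rejects:

static bool elf_load_all_segments_sketch(page_directory_t* new_dir, unsigned char* src,
					 elf_phdr* phdrs, uint32_t phnum) {
	for (uint32_t i = 0; i < phnum; i++) {
		elf_phdr* seg = &phdrs[i];
		//elf_load_segment only accepts PT_LOAD, so filter here
		if (seg->type != PT_LOAD) continue;
		if (!elf_load_segment(new_dir, src, seg)) return false;
	}
	return true;
}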
Example #3
int omx_xen_destroy_user_region(omx_xenif_t * omx_xenif, uint32_t id,
				uint32_t seqnum, uint8_t eid)
{
	struct backend_info *be = omx_xenif->be;
	struct omxback_dev *dev = be->omxdev;
	struct omx_endpoint *endpoint;
	struct omx_xen_user_region *region;
	int ret = 0;

	dprintk_in();

	TIMER_START(&t_destroy_reg);
	if (eid >= 0 && eid < 255) {
		endpoint = dev->endpoints[eid];
	} else {
		printk_err
		    ("Wrong endpoint number (%u) check your frontend/backend communication!\n",
		     eid);
		ret = -EINVAL;
		goto out;
	}

	region = rcu_dereference_protected(endpoint->xen_regions[id], 1);
	if (unlikely(!region)) {
		printk_err(
		       "%s: Cannot access non-existing region %d\n", __func__, id);
		//ret = -EINVAL;
		goto out;
	}

	rcu_assign_pointer(endpoint->xen_regions[region->id], NULL);
	//omx_xen_user_region_release(region);
	kfree(region);
out:
	TIMER_STOP(&t_destroy_reg);
	dprintk_out();
	return ret;

}
Example #4
int omx_xenfront_init(void)
{
	int ret = 0;
	dprintk_in();

	if (!xen_domain() || xen_initial_domain()) {
		ret = -ENODEV;
		printk_err
		    ("We are not running under Xen, or this "
		     "*is* a privileged domain\n");
		goto out;
	}

	ret = xenbus_register_frontend(&omx_xenfront_driver);
	if (ret) {
		printk_err("XenBus Registration Failed\n");
		goto out;
	}

	printk_inf("init\n");
out:
	dprintk_out();
	return ret;
}
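A minimal wiring sketch (hypothetical; the exit routine is an assumption, not Open-MX's actual module teardown) showing how omx_xenfront_init() would typically be registered as the module entry point, with the matching XenBus unregistration on unload:

static void omx_xenfront_exit(void)
{
	/* undo the xenbus_register_frontend() done in omx_xenfront_init() */
	xenbus_unregister_driver(&omx_xenfront_driver);
}

module_init(omx_xenfront_init);
module_exit(omx_xenfront_exit);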
Example #5
/*!
 * Sets up resources for pmem context.
 * Later this will be split into implementation specific code,
 * one for pmem_block, one for pmem_mem.
 * The pmem_block implementation will allocate a double buffer,
 * the pmem_mem implementation will call DAX to retrieve the virtual
 * addresses for data and metadata for "cache_block" and "cloned_cache_block".
 */
int pmem_context_setup(struct bittern_cache *bc,
		       struct kmem_cache *kmem_slab,
		       struct cache_block *cache_block,
		       struct cache_block *cloned_cache_block,
		       struct pmem_context *ctx)
{
	struct data_buffer_info *dbi;

	ASSERT_BITTERN_CACHE(bc);
	ASSERT(kmem_slab == bc->bc_kmem_map ||
	       kmem_slab == bc->bc_kmem_threads);
	ASSERT(ctx != NULL);
	M_ASSERT(ctx->magic1 == PMEM_CONTEXT_MAGIC1);
	M_ASSERT(ctx->magic2 == PMEM_CONTEXT_MAGIC2);
	dbi = &ctx->dbi;
	/*
	 * this code copied from pagebuf_allocate_dbi()
	 * in bittern_cache_main.h
	 */
	ASSERT(dbi->di_buffer_vmalloc_buffer == NULL);
	ASSERT(dbi->di_buffer_vmalloc_page == NULL);
	ASSERT(dbi->di_buffer_slab == NULL);
	ASSERT(dbi->di_buffer == NULL);
	ASSERT(dbi->di_page == NULL);
	ASSERT(dbi->di_flags == 0x0);
	ASSERT(atomic_read(&dbi->di_busy) == 0);

	dbi->di_buffer_vmalloc_buffer = kmem_cache_alloc(kmem_slab, GFP_NOIO);
	/*TODO_ADD_ERROR_INJECTION*/
	if (dbi->di_buffer_vmalloc_buffer == NULL) {
		BT_DEV_TRACE(BT_LEVEL_ERROR, bc, NULL, cache_block, NULL, NULL,
			     "kmem_cache_alloc kmem_slab failed");
		printk_err("%s: kmem_cache_alloc kmem_slab failed\n",
			   bc->bc_name);
		return -ENOMEM;
	}

	ASSERT(PAGE_ALIGNED(dbi->di_buffer_vmalloc_buffer));
	dbi->di_buffer_vmalloc_page =
				virtual_to_page(dbi->di_buffer_vmalloc_buffer);
	ASSERT(dbi->di_buffer_vmalloc_page != NULL);
	dbi->di_buffer_slab = kmem_slab;

	return 0;
}
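A minimal teardown sketch, symmetric with the setup above (an assumption for illustration, not Bittern's actual pmem_context release path): it returns the buffer to the slab it was allocated from and clears the dbi fields that pmem_context_setup() asserts to be NULL:

void pmem_context_release_sketch(struct pmem_context *ctx)
{
	struct data_buffer_info *dbi = &ctx->dbi;

	M_ASSERT(ctx->magic1 == PMEM_CONTEXT_MAGIC1);
	M_ASSERT(ctx->magic2 == PMEM_CONTEXT_MAGIC2);
	ASSERT(atomic_read(&dbi->di_busy) == 0);

	if (dbi->di_buffer_vmalloc_buffer != NULL) {
		/* give the double buffer back to the slab it came from */
		kmem_cache_free(dbi->di_buffer_slab, dbi->di_buffer_vmalloc_buffer);
		dbi->di_buffer_vmalloc_buffer = NULL;
		dbi->di_buffer_vmalloc_page = NULL;
		dbi->di_buffer_slab = NULL;
	}
}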
Example #6
static void omx_xenfront_backend_changed(struct xenbus_device *dev,
					 enum xenbus_state backend_state)
{
	struct omx_xenfront_info *fe = dev_get_drvdata(&dev->dev);
	int ret = 0;

	dprintk_in();

	dprintk_deb("backend state %s\n", xenbus_strstate(backend_state));

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitWait:
		break;
	case XenbusStateInitialised:
		ret = talk_to_backend(dev, fe);
		if (ret) {
			printk_err("Error trying to talk to backend"
				   ", ret=%d\n", ret);
			//kfree(info);
		}
		break;
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;
	case XenbusStateConnected:
		if (dev->state == XenbusStateConnected)
			break;
		omx_xenfront_connect(fe);
		break;
	case XenbusStateClosing:
		dprintk_deb("Closing Xenbus\n");
		xenbus_frontend_closed(dev);
		break;
	}
	dprintk_out();

	return;
}
Example #7
int pmem_header_initialize(struct bittern_cache *bc)
{
	int ret;
	struct pmem_api *pa = &bc->bc_papi;
	struct pmem_header *pm = &pa->papi_hdr;
	size_t cache_size_bytes;

	ASSERT(bc != NULL);
	ASSERT(pa->papi_bdev_size_bytes > 0);
	ASSERT(pa->papi_bdev != NULL);
	ASSERT(sizeof(struct pmem_header) == PAGE_SIZE);

	cache_size_bytes = pa->papi_bdev_size_bytes;
	printk_info("cache_size_bytes=%lu, cache_size_mbytes=%lu\n",
		    cache_size_bytes, cache_size_bytes / (1024 * 1024));

	memset(pm, 0, sizeof(struct pmem_header));
	pm->lm_magic = LM_MAGIC;
	pm->lm_version = LM_VERSION;
	pm->lm_cache_block_size = PAGE_SIZE;

	printk_info("pmem_layout='%c'\n", pmem_cache_layout(bc));
	ASSERT(pmem_cache_layout(bc) == CACHE_LAYOUT_INTERLEAVED ||
	       pmem_cache_layout(bc) == CACHE_LAYOUT_SEQUENTIAL);

	pmem_initialize_pmem_header_sizes(bc, cache_size_bytes);

	ASSERT(LM_NAME_SIZE == sizeof(bc->bc_name));
	ASSERT(sizeof(pm->lm_uuid) == 16);
	ASSERT(sizeof(pm->lm_device_uuid) == 16);

	generate_random_uuid(pm->lm_uuid);
	snprintf(pm->lm_name, LM_NAME_SIZE, "%s", bc->bc_name);

	generate_random_uuid(pm->lm_device_uuid);
	snprintf(pm->lm_device_name,
		 LM_NAME_SIZE, "%s", bc->bc_cached_device_name);

	printk_info("pm->lm_name=%s\n", pm->lm_name);
	printk_info("pm->lm_uuid=%pUb\n", pm->lm_uuid);
	printk_info("pm->lm_device_name=%s\n", pm->lm_device_name);
	printk_info("pm->lm_device_uuid=%pUb\n", pm->lm_device_uuid);
	printk_info("pm->lm_cache_size_bytes=%llu\n",
		    pm->lm_cache_size_bytes);

	pm->lm_xid_first = 1ULL;
	pm->lm_xid_current = 1ULL;

	__pmem_assert_offsets(bc);

	/*
	 * initialize mem copy #0
	 */
	pm->lm_xid_current++;
	pm->lm_hash = murmurhash3_128(pm, PMEM_HEADER_HASHING_SIZE);
	ASSERT(sizeof(struct pmem_header) <= PAGE_SIZE);
	ret = pmem_write_sync(bc,
			      CACHE_MEM_HEADER_0_OFFSET_BYTES,
			      pm,
			      sizeof(struct pmem_header));
	/*TODO_ADD_ERROR_INJECTION*/
	if (ret != 0) {
		ASSERT(ret < 0);
		BT_DEV_TRACE(BT_LEVEL_ERROR, bc, NULL, NULL, NULL, NULL,
			     "pmem_write_sync header0 failed, ret=%d",
			     ret);
		printk_err("%s: pmem_write_sync header0 failed, ret=%d\n",
			   bc->bc_name,
			   ret);
		return ret;
	}

	/*
	 * initialize mem copy #1
	 */
	pm->lm_xid_current++;
	pm->lm_hash = murmurhash3_128(pm, PMEM_HEADER_HASHING_SIZE);
	ASSERT(sizeof(struct pmem_header) <= PAGE_SIZE);
	ret = pmem_write_sync(bc,
			      CACHE_MEM_HEADER_1_OFFSET_BYTES,
			      pm,
			      sizeof(struct pmem_header));
	/*TODO_ADD_ERROR_INJECTION*/
	if (ret != 0) {
		ASSERT(ret < 0);
		BT_DEV_TRACE(BT_LEVEL_ERROR, bc, NULL, NULL, NULL, NULL,
			     "pmem_write_sync header0 failed, ret=%d",
			     ret);
		printk_err("%s: pmem_write_sync header0 failed, ret=%d\n",
			   bc->bc_name,
			   ret);
		return ret;
	}

	/*
	 * also initialize xid and bc_buffer_entries
	 */
	cache_xid_set(bc, pm->lm_xid_current + 1);

	printk_info("cache_blocks=%llu\n", pm->lm_cache_blocks);

	return 0;
}
Example #8
/*
 * return values:
 * - negative errno values for unrecoverable data corruption.
 * - 1 for successful restore.
 * - 0 for no restore (crash occurred in the middle of a transaction).
 */
int pmem_block_restore(struct bittern_cache *bc,
		       struct cache_block *cache_block)
{
	struct pmem_block_metadata *pmbm;
	uint128_t hash_metadata, hash_data;
	int ret;
	void *buffer_vaddr;
	struct page *buffer_page;
	struct pmem_api *pa = &bc->bc_papi;
	int block_id;

	ASSERT(bc != NULL);
	ASSERT(pa->papi_bdev_size_bytes > 0);
	ASSERT(pa->papi_bdev != NULL);
	ASSERT(sizeof(struct pmem_header) == PAGE_SIZE);

	block_id = cache_block->bcb_block_id;

	ASSERT(pa->papi_hdr.lm_cache_blocks != 0);
	ASSERT(block_id >= 1 && block_id <= pa->papi_hdr.lm_cache_blocks);
	ASSERT(cache_block != NULL);
	ASSERT(cache_block->bcb_block_id == block_id);

	pmbm = kmem_alloc(sizeof(struct pmem_block_metadata), GFP_NOIO);
	/*TODO_ADD_ERROR_INJECTION*/
	if (pmbm == NULL) {
		BT_DEV_TRACE(BT_LEVEL_ERROR, bc, NULL, cache_block, NULL, NULL,
			     "kmem_alloc pmem_block_metadata failed");
		printk_err("%s: kmem_alloc pmem_block_metadata failed\n",
			   bc->bc_name);
		return -ENOMEM;
	}

	ret = pmem_read_sync(bc,
			__cache_block_id_2_metadata_pmem_offset(bc, block_id),
			pmbm,
			sizeof(struct pmem_block_metadata));
	/*TODO_ADD_ERROR_INJECTION*/
	if (ret != 0) {
		ASSERT(ret < 0);
		BT_DEV_TRACE(BT_LEVEL_ERROR, bc, NULL, NULL, NULL, NULL,
			     "pmem_read_sync failed, ret=%d",
			     ret);
		printk_err("%s: pmem_read_sync failed, ret=%d\n",
			   bc->bc_name,
			   ret);
		kmem_free(pmbm, sizeof(struct pmem_block_metadata));
		return ret;
	}

	/*
	 * this can only happen if pmem is corrupt
	 */
	if (pmbm->pmbm_magic != MCBM_MAGIC) {
		pa->papi_stats.restore_corrupt_metadata_blocks++;
		printk_err("block id #%u: error: magic number(s) mismatch, magic=0x%x/0x%x\n",
			   block_id,
			   pmbm->pmbm_magic,
			   MCBM_MAGIC);
		kmem_free(pmbm, sizeof(struct pmem_block_metadata));
		return -EHWPOISON;
	}

	hash_metadata = murmurhash3_128(pmbm, PMEM_BLOCK_METADATA_HASHING_SIZE);

	if (uint128_ne(hash_metadata, pmbm->pmbm_hash_metadata)) {
		printk_err("block id #%u: metadata hash mismatch: stored_hash_metadata=" UINT128_FMT ", computed_hash_metadata" UINT128_FMT "\n",
			   block_id,
			   UINT128_ARG(pmbm->pmbm_hash_metadata),
			   UINT128_ARG(hash_metadata));
		pa->papi_stats.restore_hash_corrupt_metadata_blocks++;
		kmem_free(pmbm, sizeof(struct pmem_block_metadata));
		return -EHWPOISON;
	}

	if (CACHE_STATE_VALID(pmbm->pmbm_status)) {
		printk_info_ratelimited("block id #%u: metadata cache status valid %u(%s)\n",
					block_id,
					pmbm->pmbm_status,
					cache_state_to_str(pmbm->pmbm_status));
	} else {
		/*
		 * this can only happen if pmem is corrupt
		 */
		pa->papi_stats.restore_corrupt_metadata_blocks++;
		printk_err("block id #%u: error: metadata cache status invalid %u(%s)\n",
			   block_id,
			   pmbm->pmbm_status,
		     cache_state_to_str(pmbm->pmbm_status));
		kmem_free(pmbm, sizeof(struct pmem_block_metadata));
		return -EHWPOISON;
	}

	if (pmbm->pmbm_status == S_INVALID) {
		printk_info_ratelimited("block id #%u: warning: metadata cache status is %u(%s), nothing to restore\n",
					block_id,
					pmbm->pmbm_status,
					cache_state_to_str(pmbm->pmbm_status));
		pa->papi_stats.restore_invalid_metadata_blocks++;
		pa->papi_stats.restore_invalid_data_blocks++;
		kmem_free(pmbm, sizeof(struct pmem_block_metadata));
		/*
		 * restore ok
		 */
		return 1;
	}

	if (pmbm->pmbm_status != S_CLEAN && pmbm->pmbm_status != S_DIRTY) {
		printk_info_ratelimited("block id #%u: warning: metadata cache status is %u(%s) (transaction in progress), nothing to restore\n",
					block_id,
					pmbm->pmbm_status,
					cache_state_to_str(pmbm->pmbm_status));
		pa->papi_stats.restore_pending_metadata_blocks++;
		kmem_free(pmbm, sizeof(struct pmem_block_metadata));
		/*
		 * Intermediate state (crashed during a transaction).
		 * Caller will ignore this restore and reinitialize.
		 */
		return 0;
	}

	if (pmbm->pmbm_status == S_CLEAN) {
		pa->papi_stats.restore_valid_clean_metadata_blocks++;
	} else {
		ASSERT(pmbm->pmbm_status == S_DIRTY);
		pa->papi_stats.restore_valid_dirty_metadata_blocks++;
	}

	/*
	 * if the metadata hash is ok, none of this should ever happen.
	 */
	ASSERT(block_id == pmbm->pmbm_block_id);
	ASSERT(is_sector_cache_aligned(pmbm->pmbm_device_sector));

	buffer_vaddr = kmem_cache_alloc(bc->bc_kmem_map, GFP_NOIO);
	/*TODO_ADD_ERROR_INJECTION*/
	if (buffer_vaddr == NULL) {
		BT_DEV_TRACE(BT_LEVEL_ERROR, bc, NULL, cache_block, NULL, NULL,
			     "kmem_alloc kmem_map failed");
		printk_err("%s: kmem_alloc kmem_map failed\n", bc->bc_name);
		kmem_free(pmbm, sizeof(struct pmem_block_metadata));
		return -ENOMEM;
	}

	ASSERT(PAGE_ALIGNED(buffer_vaddr));
	buffer_page = virtual_to_page(buffer_vaddr);
	M_ASSERT(buffer_page != NULL);

	ret = pmem_read_sync(bc,
			     __cache_block_id_2_data_pmem_offset(bc, block_id),
			     buffer_vaddr,
			     PAGE_SIZE);
	/*TODO_ADD_ERROR_INJECTION*/
	if (ret != 0) {
		ASSERT(ret < 0);
		BT_DEV_TRACE(BT_LEVEL_ERROR, bc, NULL, NULL, NULL, NULL,
			     "pmem_read_sync failed, ret=%d",
			     ret);
		printk_err("%s: pmem_read_sync failed, ret=%d\n",
			   bc->bc_name,
			   ret);
		kmem_cache_free(bc->bc_kmem_map, buffer_vaddr);
		kmem_free(pmbm, sizeof(struct pmem_block_metadata));
		return ret;
	}

	hash_data = murmurhash3_128(buffer_vaddr, PAGE_SIZE);

	ASSERT(PAGE_ALIGNED(buffer_vaddr));
	ASSERT(buffer_page != NULL);
	ASSERT(buffer_page == virtual_to_page(buffer_vaddr));

	kmem_cache_free(bc->bc_kmem_map, buffer_vaddr);

	if (uint128_ne(hash_data, pmbm->pmbm_hash_data)) {
		printk_err("block id #%u: data hash mismatch: stored_hash_data=" UINT128_FMT ", computed_hash_data" UINT128_FMT "\n",
			   block_id,
			   UINT128_ARG(pmbm->pmbm_hash_data),
			   UINT128_ARG(hash_data));
		pa->papi_stats.restore_hash_corrupt_data_blocks++;
		kmem_free(pmbm, sizeof(struct pmem_block_metadata));
		return -EHWPOISON;
	}

	if (pmbm->pmbm_status == S_CLEAN) {
		pa->papi_stats.restore_valid_clean_data_blocks++;
	} else {
		ASSERT(pmbm->pmbm_status == S_DIRTY);
		pa->papi_stats.restore_valid_dirty_data_blocks++;
	}

	/*
	 * everything checks out, restore the metadata info into the cache_block descriptor
	 */
	cache_block->bcb_sector = pmbm->pmbm_device_sector;
	cache_block->bcb_state = pmbm->pmbm_status;
	cache_block->bcb_xid = pmbm->pmbm_xid;
	cache_block->bcb_hash_data = pmbm->pmbm_hash_data;
	ASSERT(cache_block->bcb_state == S_CLEAN ||
	       cache_block->bcb_state == S_DIRTY);
	ASSERT(cache_block->bcb_sector != -1);
	ASSERT(is_sector_number_valid(cache_block->bcb_sector));
	ASSERT(cache_block->bcb_sector >= 0);

	printk_info_ratelimited("block id #%u: status=%u(%s), xid=%llu, sector=%llu, hash_metadata=" UINT128_FMT ", hash_data=" UINT128_FMT ": restore ok\n",
				pmbm->pmbm_block_id,
				pmbm->pmbm_status,
				cache_state_to_str(pmbm->pmbm_status),
				pmbm->pmbm_xid,
				pmbm->pmbm_device_sector,
				UINT128_ARG(pmbm->pmbm_hash_metadata),
				UINT128_ARG(pmbm->pmbm_hash_data));

	kmem_free(pmbm, sizeof(struct pmem_block_metadata));

	return 1;
}
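A minimal restore-loop sketch (hypothetical; cache_block_for_id_sketch() is an assumed lookup helper, not Bittern's API) that applies the return-value contract documented above: a negative errno aborts, 0 leaves the block invalid, 1 means the descriptor was restored:

int pmem_restore_all_blocks_sketch(struct bittern_cache *bc)
{
	struct pmem_api *pa = &bc->bc_papi;
	uint64_t block_id;

	for (block_id = 1; block_id <= pa->papi_hdr.lm_cache_blocks; block_id++) {
		/* hypothetical helper mapping a block id to its descriptor */
		struct cache_block *cache_block = cache_block_for_id_sketch(bc, block_id);
		int ret = pmem_block_restore(bc, cache_block);

		if (ret < 0)
			return ret;	/* unrecoverable corruption, abort restore */
		if (ret == 0)
			continue;	/* crashed mid-transaction, caller reinitializes block */
		/* ret == 1: bcb_sector/bcb_state/bcb_xid now hold the restored metadata */
	}

	return 0;
}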
Example #9
int pmem_header_restore(struct bittern_cache *bc)
{
	uint64_t hdr_0_xid = 0, hdr_1_xid = 0;
	int ret;
	struct pmem_api *pa = &bc->bc_papi;
	struct pmem_header *pm = &pa->papi_hdr;

	ASSERT(bc != NULL);
	ASSERT(sizeof(struct pmem_header) == PAGE_SIZE);
	ASSERT(bc != NULL);
	ASSERT(pa->papi_bdev_size_bytes > 0);
	ASSERT(pa->papi_bdev != NULL);
	ASSERT(pmem_cache_layout(bc) == CACHE_LAYOUT_INTERLEAVED ||
	       pmem_cache_layout(bc) == CACHE_LAYOUT_SEQUENTIAL);

	/*
	 * try restore from header #0
	 */
	ret = __pmem_header_restore(bc, 0, &hdr_0_xid);
	printk_info("[0]: ret=%d, hdr_0_xid=%llu\n", ret, hdr_0_xid);
	if (ret == 0)
		pa->papi_stats.restore_header0_valid = 1;

	/*
	 * try restore from header #1
	 */
	ret = __pmem_header_restore(bc, 1, &hdr_1_xid);
	printk_info("[1]: ret=%d, hdr_1_xid=%llu\n", ret, hdr_1_xid);
	if (ret == 0)
		pa->papi_stats.restore_header1_valid = 1;

	/*
	 * use the header with the highest xid
	 */
	if (pa->papi_stats.restore_header0_valid == 0 &&
	    pa->papi_stats.restore_header1_valid == 0) {
		printk_err("error: both headers invalid, ret=%d\n", ret);
		M_ASSERT(ret < 0);
		return ret;
	}

	printk_info("hdr_0_xid=%llu\n", hdr_0_xid);
	printk_info("hdr_1_xid=%llu\n", hdr_1_xid);

	if (pa->papi_stats.restore_header1_valid == 0) {
		/* only header0 valid */
		printk_info("[0/1]: using hdr_0_xid %llu\n", hdr_0_xid);
		ret = __pmem_header_restore(bc, 0, &hdr_0_xid);
		printk_info("[0/1]: using hdr_0_xid %llu\n", hdr_0_xid);
		cache_xid_set(bc, hdr_0_xid + 1);
	} else if (pa->papi_stats.restore_header0_valid == 0) {
		/* only header1 valid */
		printk_info("[1/0]: using hdr_1_xid %llu\n", hdr_1_xid);
		ret = __pmem_header_restore(bc, 1, &hdr_1_xid);
		printk_info("[1/0]: using hdr_1_xid %llu\n", hdr_1_xid);
		cache_xid_set(bc, hdr_1_xid + 1);
	} else if (hdr_0_xid > hdr_1_xid) {
		/* both headers valid, use header0 as it has highest xid */
		printk_info("[1/1]: using hdr_0_xid=%llu\n", hdr_0_xid);
		ret = __pmem_header_restore(bc, 0, &hdr_0_xid);
		printk_info("[1/1]: using hdr_0_xid=%llu\n", hdr_0_xid);
		cache_xid_set(bc, hdr_0_xid + 1);
	} else {
		/* both headers valid, use header1 as its xid is highest or equal */
		printk_info("[1/1]: using hdr_1_xid=%llu\n", hdr_1_xid);
		ret = __pmem_header_restore(bc, 1, &hdr_1_xid);
		printk_info("[1/1]: using hdr_1_xid=%llu\n", hdr_1_xid);
		cache_xid_set(bc, hdr_1_xid + 1);
	}

	/*TODO_ADD_ERROR_INJECTION*/
	if (ret != 0) {
		printk_err("error: header re-read failed, ret=%d\n", ret);
		return ret;
	}
	M_ASSERT(pa->papi_stats.restore_header0_valid == 1 ||
		pa->papi_stats.restore_header1_valid == 1);

	cache_xid_inc(bc);

	printk_info("bc->bc_xid=%lu\n", atomic64_read(&bc->bc_xid));
	printk_info("pm->lm_cache_layout='%c'(0x%x)\n",
		    pm->lm_cache_layout,
		    pm->lm_cache_layout);
	printk_info("pm->lm_cache_block_size=%llu\n", pm->lm_cache_block_size);
	printk_info("pm->lm_xid_current=%llu\n", pm->lm_xid_current);

	printk_info("bc->bc_name=%s\n", bc->bc_name);
	printk_info("bc->bc_cache_device_name=%s\n", bc->bc_cache_device_name);
	printk_info("bc->bc_cached_device_name=%s\n",
		    bc->bc_cached_device_name);
	printk_info("pm->lm_name=%s\n", pm->lm_name);
	printk_info("pm->lm_uuid=%pUb\n", pm->lm_uuid);
	printk_info("pm->lm_device_name=%s\n", pm->lm_device_name);
	printk_info("pm->lm_device_uuid=%pUb\n", pm->lm_device_uuid);
	printk_info("pm->lm_cache_size_bytes=%llu\n", pm->lm_cache_size_bytes);
	printk_info("pm->lm_mcb_size_bytes=%llu\n", pm->lm_mcb_size_bytes);

	/*TODO_ADD_ERROR_INJECTION*/
	if (pm->lm_header_size_bytes != sizeof(struct pmem_header)) {
		printk_err("lm_header_size mismatch %u/%lu\n",
			   pm->lm_header_size_bytes,
			   sizeof(struct pmem_header));
		return -EBADMSG;
	}

	if (pm->lm_cache_block_size != PAGE_SIZE) {
		printk_err("lm_header_cache_block_size mismatch %llu/%lu\n",
			   pm->lm_cache_block_size,
			   PAGE_SIZE);
		return -EBADMSG;
	}

	if (pm->lm_cache_layout != pmem_cache_layout(bc)) {
		printk_err("lm_cache_layout mismatch 0x%x/0x%x\n",
			   pm->lm_cache_layout,
			   pmem_cache_layout(bc));
		return -EBADMSG;
	}

	if (pm->lm_mcb_size_bytes != sizeof(struct pmem_block_metadata) &&
	    pm->lm_mcb_size_bytes != PAGE_SIZE) {
		printk_err("lm_mcb_size mismatch %llu:%lu/%lu\n",
			   pm->lm_mcb_size_bytes,
			   sizeof(struct pmem_header), PAGE_SIZE);
		return -EBADMSG;
	}

	if (pmem_page_size_transfer_only(bc)) {
		if (pm->lm_mcb_size_bytes != PAGE_SIZE) {
			printk_err("lm_mcb_size is %llu, provider only hass PAGE_SIZE transfers\n",
				   pm->lm_mcb_size_bytes);
			return -EINVAL;
		}
	} else {
		if (pm->lm_mcb_size_bytes !=
		    sizeof(struct pmem_block_metadata)) {
			printk_err("lm_mcb_size %llu does not match struct\n",
				   pm->lm_mcb_size_bytes);
			return -EINVAL;
		}
	}

	if (pm->lm_first_offset_bytes != CACHE_MEM_FIRST_OFFSET_BYTES) {
		printk_err("lm_first_offset_bytes mismatch %llu/%lu\n",
			   pm->lm_first_offset_bytes,
			   CACHE_MEM_FIRST_OFFSET_BYTES);
		return -EBADMSG;
	}

	if (pm->lm_cache_layout == CACHE_LAYOUT_SEQUENTIAL) {
		uint64_t m = pm->lm_first_offset_bytes;

		m += pm->lm_cache_blocks * pm->lm_mcb_size_bytes;
		m = round_up(m, PAGE_SIZE);
		if (m != pm->lm_first_data_block_offset_bytes) {
			printk_err("first_data_block_offset mismatch %llu\n",
				   pm->lm_first_data_block_offset_bytes);
			return -EBADMSG;
		}
		m += pm->lm_cache_blocks * PAGE_SIZE;
		if (m > pm->lm_cache_size_bytes) {
			printk_err("last offset exceeds cache size %llu/%llu\n",
				   m,
				   pm->lm_cache_size_bytes);
			return -EBADMSG;
		}
	} else {
		uint64_t m = pm->lm_first_offset_bytes;

		m += pm->lm_cache_blocks * (PAGE_SIZE * 2);
		ASSERT(pm->lm_cache_layout == 'I');
		if (pm->lm_first_data_block_offset_bytes !=
						CACHE_MEM_FIRST_OFFSET_BYTES) {
			printk_err("first_data_block_offset mismatch %llu\n",
				   pm->lm_first_data_block_offset_bytes);
			return -EBADMSG;
		}
		if (m > pm->lm_cache_size_bytes) {
			printk_err("last offset exceeds cache size %llu/%llu\n",
				   m,
				   pm->lm_cache_size_bytes);
			return -EBADMSG;
		}
	}

	ASSERT(pa->papi_bdev_size_bytes > 0);
	ASSERT(pa->papi_bdev != NULL);
	if (pm->lm_cache_size_bytes < pa->papi_bdev_size_bytes) {
		printk_warning("size %llu less than allocated size %lu\n",
				pm->lm_cache_size_bytes,
				pa->papi_bdev_size_bytes);
		printk_warning("size %llumb less than allocated size %llumb\n",
				pm->lm_cache_size_bytes / 1024ULL / 1024ULL,
				pa->papi_bdev_size_bytes / 1024ULL / 1024ULL);
	}
	if (pm->lm_cache_size_bytes > pa->papi_bdev_size_bytes) {
		printk_err("device size %llu exceeds allocated size %lu\n",
				pm->lm_cache_size_bytes,
				pa->papi_bdev_size_bytes);
		printk_err("device size %llumb exceeds allocated size %llumb\n",
				pm->lm_cache_size_bytes / 1024ULL / 1024ULL,
				pa->papi_bdev_size_bytes / 1024ULL / 1024ULL);
		return -EBADMSG;
	}
	if (pm->lm_cache_size_bytes == pa->papi_bdev_size_bytes) {
		printk_info("device size %llu equals allocated size %lu\n",
				pm->lm_cache_size_bytes,
				pa->papi_bdev_size_bytes);
		printk_info("device size %llumb equals allocated size %llumb\n",
				pm->lm_cache_size_bytes / 1024ULL / 1024ULL,
				pa->papi_bdev_size_bytes / 1024ULL / 1024ULL);
	}

	__pmem_assert_offsets(bc);

	printk_info("cache '%s' on '%s' restore ok, %llu cache blocks\n",
			pm->lm_name,
			pm->lm_device_name,
			pm->lm_cache_blocks);

	pa->papi_stats.restore_header_valid = 1;

	return 0;
}
Example #10
int __pmem_header_restore(struct bittern_cache *bc,
			  int header_block_number,
			  uint64_t *out_xid)
{
	uint32_t header_block_offset_bytes;
	uint128_t computed_hash;
	int ret;
	struct pmem_api *pa = &bc->bc_papi;
	struct pmem_header *pm = &pa->papi_hdr;

	M_ASSERT(bc != NULL);
	M_ASSERT(sizeof(struct pmem_header) == PAGE_SIZE);
	M_ASSERT(header_block_number == 0 || header_block_number == 1);
	M_ASSERT(bc != NULL);
	ASSERT(pa->papi_bdev_size_bytes > 0);
	ASSERT(pa->papi_bdev != NULL);
	ASSERT(pmem_cache_layout(bc) == CACHE_LAYOUT_INTERLEAVED ||
	       pmem_cache_layout(bc) == CACHE_LAYOUT_SEQUENTIAL);

	header_block_offset_bytes = header_block_number == 0 ?
	    CACHE_MEM_HEADER_0_OFFSET_BYTES :
	    CACHE_MEM_HEADER_1_OFFSET_BYTES;
	printk_info("[%d]: header_block_offset_bytes=%u\n",
		    header_block_number, header_block_offset_bytes);

	/*
	 * load requested block
	 */
	M_ASSERT(sizeof(struct pmem_header) <= PAGE_SIZE);
	ret = pmem_read_sync(bc,
			     header_block_offset_bytes,
			     pm,
			     sizeof(struct pmem_header));
	/*TODO_ADD_ERROR_INJECTION*/
	if (ret != 0) {
		ASSERT(ret < 0);
		BT_DEV_TRACE(BT_LEVEL_ERROR, bc, NULL, NULL, NULL, NULL,
			     "pmem_read_sync failed, ret=%d",
			     ret);
		printk_err("%s: pmem_read_sync failed, ret=%d\n",
			   bc->bc_name,
			   ret);
		return ret;
	}

	/*TODO_ADD_ERROR_INJECTION*/
	if (pm->lm_magic != LM_MAGIC) {
		printk_err("[%d]: magic number invalid (0x%x/0x%x)\n",
			   header_block_number,
			   pm->lm_magic,
			   LM_MAGIC);
		return -EBADMSG;
	}
	/*TODO_ADD_ERROR_INJECTION*/
	if (pm->lm_version != LM_VERSION) {
		printk_err("[%d]: error: version number is incorrect %d/%d\n",
			   header_block_number, pm->lm_version, LM_VERSION);
		return -EBADMSG;
	}

	computed_hash = murmurhash3_128(pm, PMEM_HEADER_HASHING_SIZE);
	/*TODO_ADD_ERROR_INJECTION*/
	if (uint128_ne(computed_hash, pm->lm_hash)) {
		printk_err("[%d]: hash mismatch: stored_hash=" UINT128_FMT ", computed_hash" UINT128_FMT "\n",
			    header_block_number,
			    UINT128_ARG(pm->lm_hash),
			    UINT128_ARG(computed_hash));
		return -EBADMSG;
	}
	printk_info("[%d]: stored_hash=" UINT128_FMT ", computed_hash" UINT128_FMT "\n",
		    header_block_number,
		    UINT128_ARG(pm->lm_hash),
		    UINT128_ARG(computed_hash));

	printk_info("[%d]: restore: xid_first=%llu, xid_current=%llu: %llu\n",
		    header_block_number,
		    pm->lm_xid_first,
		    pm->lm_xid_current, cache_xid_get(bc));

	*out_xid = pm->lm_xid_current;

	return 0;
}
Example #11
int pmem_header_update(struct bittern_cache *bc, int update_both)
{
	int ret;
	struct pmem_api *pa = &bc->bc_papi;

	ASSERT(bc != NULL);
	ASSERT_BITTERN_CACHE(bc);
	ASSERT(pa->papi_bdev_size_bytes > 0);
	ASSERT(pa->papi_bdev != NULL);
	ASSERT(sizeof(struct pmem_header) == PAGE_SIZE);

	M_ASSERT(pa->papi_hdr.lm_xid_current <= cache_xid_get(bc));

	if (pa->papi_hdr.lm_xid_current == cache_xid_get(bc))
		return 0;

	pa->papi_hdr.lm_xid_current = cache_xid_get(bc);

	if (pa->papi_hdr_updated_last == 1 || update_both) {
		/*
		 * update mem copy #0
		 */
		pa->papi_hdr.lm_hash = murmurhash3_128(&pa->papi_hdr,
						PMEM_HEADER_HASHING_SIZE);
		ret = pmem_write_sync(bc,
				      CACHE_MEM_HEADER_0_OFFSET_BYTES,
				      &pa->papi_hdr,
				      sizeof(struct pmem_header));
		/*TODO_ADD_ERROR_INJECTION*/
		if (ret != 0) {
			ASSERT(ret < 0);
			BT_DEV_TRACE(BT_LEVEL_ERROR, bc, NULL, NULL, NULL, NULL,
				     "pmem_write_sync header0 failed, ret=%d",
				     ret);
			printk_err("%s: pmem_write_sync header0 failed, ret=%d\n",
				   bc->bc_name,
				   ret);
			return ret;
		}
	}
	if (pa->papi_hdr_updated_last == 0 || update_both) {
		/*
		 * update mem copy #1
		 */
		pa->papi_hdr.lm_hash = murmurhash3_128(&pa->papi_hdr,
						PMEM_HEADER_HASHING_SIZE);
		ret = pmem_write_sync(bc,
				      CACHE_MEM_HEADER_1_OFFSET_BYTES,
				      &pa->papi_hdr,
				      sizeof(struct pmem_header));
		/*TODO_ADD_ERROR_INJECTION*/
		if (ret != 0) {
			ASSERT(ret < 0);
			BT_DEV_TRACE(BT_LEVEL_ERROR, bc, NULL, NULL, NULL, NULL,
				     "pmem_write_sync header1 failed, ret=%d",
				     ret);
			printk_err("%s: pmem_write_sync header1 failed, ret=%d\n",
				   bc->bc_name,
				   ret);
			return ret;
		}
	}
	pa->papi_hdr_updated_last = (pa->papi_hdr_updated_last + 1) % 2;

	return 0;
}
Example #12
int
omx_xen_user_region_offset_cache_init(struct omx_xen_user_region *region,
				      struct omx_user_region_offset_cache
				      *cache, unsigned long offset,
				      unsigned long length)
{
	struct omx_xen_user_region_segment *seg;
	unsigned long segoff;
	int ret = 0;
	dprintk_in();

	if (unlikely
	    (!region->nr_segments || offset + length > region->total_length)) {
		ret = -EINVAL;
		printk_err("Invalid Offset\n");
		goto out;
	}

	dprintk(REG, "Cache -> XEN = 1\n");
	cache->xen = 1;
	cache->xregion = region;

	if (unlikely(region->nr_segments > 1)) {
		unsigned long tmp;
		printk(KERN_INFO
		       "It is highly unlikely to cross this code path\n");
		ret = -EINVAL;
		goto out;

		/* vectorial callbacks */
		cache->append_pages_to_skb =
		    omx_user_region_offset_cache_vect_append_callback;
		cache->copy_pages_to_buf =
		    omx_user_region_offset_cache_vect_copy_callback;
#ifdef OMX_HAVE_DMA_ENGINE
		cache->dma_memcpy_from_pg =
		    omx_user_region_offset_cache_dma_vect_memcpy_from_pg_callback;
		cache->dma_memcpy_from_buf =
		    omx_user_region_offset_cache_dma_vect_memcpy_from_buf_callback;
#endif

		/* find the segment */
		for (tmp = 0, seg =
		     (struct omx_xen_user_region_segment *)&region->segments[0];
		     tmp + seg->length <= offset; tmp += seg->length, seg++) ;

		/* find the segment offset */
		segoff = offset - tmp;

	} else {
		/* contiguous callbacks */
		cache->append_pages_to_skb =
		    omx_user_region_offset_cache_contig_append_callback;
		cache->copy_pages_to_buf =
		    omx_user_region_offset_cache_contig_copy_callback;
#ifdef OMX_HAVE_DMA_ENGINE
		cache->dma_memcpy_from_pg =
		    omx_user_region_offset_cache_dma_contig_memcpy_from_pg_callback;
		cache->dma_memcpy_from_buf =
		    omx_user_region_offset_cache_dma_contig_memcpy_from_buf_callback;
#endif

		/* use the first segment */
		seg =
		    (struct omx_xen_user_region_segment *)&region->segments[0];
		segoff = offset;
	}

	/* setup the segment and offset */
	cache->xseg = seg;
	cache->segoff = segoff;

	dprintk_deb("seg->pages@%#lx \n", (unsigned long)seg->pages);
	dprintk_deb("seg@%#lx, segoff = %#lx, first_page_offset=%#x\n",
		    (unsigned long)seg, segoff, seg->first_page_offset);
#ifdef EXTRA_DEBUG_OMX
	if (seg->first_page_offset > PAGE_SIZE) {
		printk_err("Something is really really wrong:S\n");
		ret = -EINVAL;
		goto out;
	}
#endif
#ifdef EXTRA_DEBUG_OMX
	if (seg->pages) {
#endif
		/* find the page and offset */
		cache->page =
		    &seg->
		    pages[(segoff + seg->first_page_offset) >> PAGE_SHIFT];
		cache->pageoff =
		    (segoff + seg->first_page_offset) & (~PAGE_MASK);

		dprintk_deb
		    ("initialized region offset cache to seg (@%#lx) #%ld offset %ld page (@%#lx) #%ld offset %d\n",
		     (unsigned long)(seg),
		     (unsigned long)(seg - &region->segments[0]), segoff,
		     (unsigned long)(cache->page),
		     (unsigned long)(cache->page - &seg->pages[0]),
		     cache->pageoff);
#ifdef EXTRA_DEBUG_OMX
	} else {
Example #13
int omx_xen_deregister_user_segment(omx_xenif_t * omx_xenif, uint32_t id,
				    uint32_t sid, uint8_t eid)
{
	struct gnttab_unmap_grant_ref ops;
	struct backend_info *be = omx_xenif->be;
	struct omxback_dev *dev = be->omxdev;
	struct omx_endpoint *endpoint = dev->endpoints[eid];
	struct omx_xen_user_region *region;
	struct omx_xen_user_region_segment *seg;
	int i, k, ret = 0;
	unsigned int level;

	dprintk_in();

	TIMER_START(&t_dereg_seg);
	if (eid >= 255) {
		printk_err
		    ("Wrong endpoint number (%u) check your frontend/backend communication!\n",
		     eid);
		ret = -EINVAL;
		goto out;
	}

	region = rcu_dereference_protected(endpoint->xen_regions[id], 1);
	if (unlikely(!region)) {
		printk_err(
		       "%s: Cannot access non-existing region %d\n", __func__, id);
		//ret = -EINVAL;
		goto out;
	}
	seg = &region->segments[sid];


	TIMER_START(&t_release_grants);
	if (!seg->unmap) {
		printk_err("seg->unmap is NULL\n");
		ret = -EINVAL;
		goto out;
	}
	gnttab_unmap_refs(seg->unmap, NULL, seg->pages, seg->nr_pages);
	TIMER_STOP(&t_release_grants);

	TIMER_START(&t_release_gref_list);
	for (k = 0; k < seg->nr_parts; k++) {
#ifdef EXTRA_DEBUG_OMX
		if (!seg->vm_gref) {
			printk(KERN_ERR "vm_gref is NULL\n");
			ret = -EFAULT;
			goto out;
		}
		if (!seg->vm_gref[k]) {
			printk(KERN_ERR "vm_gref[%d] is NULL\n", k);
			ret = -EFAULT;
			goto out;
		}
		if (!seg->vm_gref[k]->addr) {
			printk(KERN_ERR "vm_gref[%d]->addr is NULL\n", k);
			ret = -EFAULT;
			goto out;
		}
		if (!seg->all_handle[k]) {
			printk(KERN_ERR "all_handle[%d] is NULL\n", k);
			ret = -EINVAL;
			goto out;
		}
#endif
		gnttab_set_unmap_op(&ops, (unsigned long)seg->vm_gref[k]->addr,
				    GNTMAP_host_map | GNTMAP_contains_pte,
				    seg->all_handle[k]);
		ops.host_addr =
		    arbitrary_virt_to_machine(lookup_address
					      ((unsigned long)(seg->vm_gref[k]->
							       addr),
					       &level)).maddr;

		dprintk_deb("putting vm_area[%d] %#lx, handle = %#x \n", k,
			    (unsigned long)seg->vm_gref[k], seg->all_handle[k]);
		if (HYPERVISOR_grant_table_op
		    (GNTTABOP_unmap_grant_ref, &ops, 1)){
			printk_err
				("HYPERVISOR operation failed\n");
			//BUG();
		}
		if (ops.status) {
			printk_err
				("HYPERVISOR unmap grant ref[%d]=%#lx failed status = %d",
				 k, seg->all_handle[k], ops.status);
			ret = ops.status;
			goto out;
		}
	}
	TIMER_STOP(&t_release_gref_list);

	TIMER_START(&t_free_pages);
	for (k = 0; k < seg->nr_parts; k++)
		if (ops.status == GNTST_okay)
			free_vm_area(seg->vm_gref[k]);

	kfree(seg->map);
	kfree(seg->unmap);
	kfree(seg->gref_list);
#ifdef OMX_XEN_COOKIES
	omx_xen_page_put_cookie(omx_xenif, seg->cookie);
#else
	free_xenballooned_pages(seg->nr_pages, seg->pages);
	kfree(seg->pages);
#endif
	TIMER_STOP(&t_free_pages);

out:
	TIMER_STOP(&t_dereg_seg);
	dprintk_out();
	return ret;

}
Example #14
static int omx_xen_accept_gref_list(omx_xenif_t * omx_xenif,
				    struct omx_xen_user_region_segment *seg,
				    uint32_t gref, void **vaddr, uint8_t part)
{
	int ret = 0;
	struct backend_info *be = omx_xenif->be;
	struct vm_struct *area;
	pte_t *pte;
	struct gnttab_map_grant_ref ops = {
		.flags = GNTMAP_host_map | GNTMAP_contains_pte,
		//.flags = GNTMAP_host_map,
		.ref = gref,
		.dom = be->remoteDomain,
	};

	dprintk_in();

	area = alloc_vm_area(PAGE_SIZE, &pte);
	if (!area) {
		ret = -ENOMEM;
		goto out;
	}

	seg->vm_gref[part] = area;

	ops.host_addr = arbitrary_virt_to_machine(pte).maddr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &ops, 1)) {
		printk_err("HYPERVISOR map grant ref failed");
		ret = -ENOSYS;
		goto out;
	}
	dprintk_deb("addr=%#lx, mfn=%#lx, kaddr=%#lx\n",
		    (unsigned long)area->addr, ops.dev_bus_addr >> PAGE_SHIFT,
		    ops.host_addr);
	if (ops.status) {
		printk_err("HYPERVISOR map grant ref failed status = %d",
			   ops.status);

		ret = ops.status;
		goto out;
	}

	dprintk_deb("gref_offset = %#x\n", seg->gref_offset);
	*vaddr = (area->addr + seg->gref_offset);

	ret = ops.handle;
#if 0
	for (i = 0; i < (size + 2); i++) {
		dprintk_deb("gref_list[%d] = %u\n", i,
			    *(((uint32_t *) * vaddr) + i));
	}
#endif

	seg->all_handle[part] = ops.handle;
	dprintk_deb("vaddr = %p, area->addr=%p, handle[%d]=%d\n", vaddr,
		    area->addr, part, seg->all_handle[part]);

out:
	dprintk_out();
	return ret;
}

int omx_xen_register_user_segment(omx_xenif_t * omx_xenif,
				  struct omx_ring_msg_register_user_segment *req)
{

	struct backend_info *be = omx_xenif->be;
	void *vaddr = NULL;
	uint32_t **gref_list;
	struct page **page_list;
	struct omxback_dev *omxdev = be->omxdev;
	struct omx_endpoint *endpoint;
	struct omx_xen_user_region *region;
	struct omx_xen_user_region_segment *seg;
	int ret = 0;
	int i = 0, k = 0;
	uint8_t eid, nr_parts;
	uint16_t first_page_offset, gref_offset;
	uint32_t sid, id, nr_grefs, nr_pages, length,
	    gref[OMX_XEN_GRANT_PAGES_MAX];
	uint64_t domU_vaddr;
	int idx = 0, sidx = 0;
	struct gnttab_map_grant_ref *map;
	struct gnttab_unmap_grant_ref *unmap;

	dprintk_in();

	TIMER_START(&t_reg_seg);
	sid = req->sid;
	id = req->rid;
	eid = req->eid;
	domU_vaddr = req->aligned_vaddr;
	nr_grefs = req->nr_grefs;
	nr_pages = req->nr_pages;
	nr_parts = req->nr_parts;
	length = req->length;
	dprintk_deb("nr_parts = %#x\n", nr_parts);
	for (k = 0; k < nr_parts; k++) {
		gref[k] = req->gref[k];
		dprintk_deb("printing gref = %lu\n", gref[k]);
	}
	gref_offset = req->gref_offset;
	first_page_offset = req->first_page_offset;
	endpoint = omxdev->endpoints[eid];

	region = rcu_dereference_protected(endpoint->xen_regions[id], 1);
	if (unlikely(!region)) {
		printk_err(KERN_ERR "Cannot access non-existing region %d\n",
			   id);
		ret = -EINVAL;
		goto out;
	}
	dprintk_deb("Got region @%#lx id=%u\n", (unsigned long)region, id);

	seg = &region->segments[sid];
	if (unlikely(!seg)) {
		printk(KERN_ERR "Cannot access non-existing segment %d\n", sid);
		ret = -EINVAL;
		goto out;
	}
	dprintk_deb("Got segment @%#lx id=%u\n", (unsigned long)seg, sid);

	seg->gref_offset = gref_offset;
	dprintk_deb
	    ("Offset of actual list of grant references (in the frontend) = %#x\n",
	     gref_offset);

	for (k = 0; k < nr_parts; k++) {
		seg->all_gref[k] = gref[k];
		dprintk_deb("grant reference for list of grefs = %#x\n",
			    gref[k]);
	}
	seg->nr_parts = nr_parts;
	dprintk_deb("parts of gref list = %#x\n", nr_parts);

	TIMER_START(&t_alloc_pages);
	gref_list = kzalloc(sizeof(uint32_t *) * nr_parts, GFP_ATOMIC);
	if (!gref_list) {
		ret = -ENOMEM;
		printk_err("gref list is NULL, ENOMEM!!!\n");
		goto out;
	}

	map =
	    kzalloc(sizeof(struct gnttab_map_grant_ref) * nr_pages,
		    GFP_ATOMIC);
	if (!map) {
		ret = -ENOMEM;
		printk_err(" map is NULL, ENOMEM!!!\n");
		goto out;
	}
	unmap =
	    kzalloc(sizeof(struct gnttab_unmap_grant_ref) * nr_pages,
		    GFP_ATOMIC);
	if (!unmap) {
		ret = -ENOMEM;
		printk_err(" unmap is NULL, ENOMEM!!!\n");
		goto out;
	}

#ifdef OMX_XEN_COOKIES
	seg->cookie = omx_xen_page_get_cookie(omx_xenif, nr_pages);
	if (!seg->cookie) {
		printk_err("cannot get cookie\n");
		ret = -ENOMEM;
		goto out;
	}
	page_list = seg->cookie->pages;
#else
	page_list = kzalloc(sizeof(struct page *) * nr_pages, GFP_ATOMIC);
	if (!page_list) {
		ret = -ENOMEM;
		printk_err(" page list is NULL, ENOMEM!!!\n");
		goto out;
	}

	ret = alloc_xenballooned_pages(nr_pages, page_list, false /* lowmem */);
	if (ret) {
		printk_err("cannot allocate xenballooned_pages\n");
		goto out;
	}
#endif
	TIMER_STOP(&t_alloc_pages);

	TIMER_START(&t_accept_gref_list);
	for (k = 0; k < nr_parts; k++) {
		ret =
		    omx_xen_accept_gref_list(omx_xenif, seg, gref[k], &vaddr,
					     k);
		if (ret < 0) {
			printk_err("Cannot accept gref list, = %d\n", ret);
			goto out;
		}

		gref_list[k] = (uint32_t *) vaddr;
		if (!gref_list[k]) {
			printk_err("gref_list[%d] is NULL!!!\n", k);
			ret = -ENOSYS;
			goto out;
		}
	}
	TIMER_STOP(&t_accept_gref_list);
	seg->gref_list = gref_list;

	seg->nr_pages = nr_pages;
	seg->first_page_offset = first_page_offset;

	i = 0;
	idx = 0;
	sidx = 0;
	seg->map = map;
	seg->unmap = unmap;
	while (i < nr_pages) {
		void *tmp_vaddr;
		unsigned long addr = (unsigned long)pfn_to_kaddr(page_to_pfn(page_list[i]));
		if (sidx % 256 == 0)
			dprintk_deb("gref_list[%d][%d] = %#x\n", idx, sidx,
				    gref_list[idx][sidx]);


		gnttab_set_map_op(&map[i], addr, GNTMAP_host_map,
				  gref_list[idx][sidx], be->remoteDomain);
		gnttab_set_unmap_op(&unmap[i], addr, GNTMAP_host_map, -1 /* handle */ );
		i++;
		if ((unlikely(i % nr_grefs == 0))) {
			idx++;
			sidx = 0;
		} else {
			sidx++;
		}
		//printk(KERN_INFO "idx=%d, i=%d, sidx=%d\n", idx, i, sidx);
	}
	TIMER_START(&t_accept_grants);
	ret = gnttab_map_refs(map, NULL, page_list, nr_pages);
	if (ret) {
		printk_err("Error mapping, ret= %d\n", ret);
		goto out;
	}
	TIMER_STOP(&t_accept_grants);

	for (i = 0; i < nr_pages; i++) {
		if (map[i].status) {
			ret = -EINVAL;
			printk_err("idx %d, status =%d\n", i, map[i].status);
			goto out;
		} else {
			//BUG_ON(map->map_ops[i].handle == -1);
			unmap[i].handle = map[i].handle;
			dprintk_deb("map handle=%d\n", map[i].handle);
		}
	}

	seg->pages = page_list;
	seg->nr_pages = nr_pages;
	seg->length = length;
	region->total_length += length;
	dprintk_deb("total_length = %#lx, nrpages=%lu, pages = %#lx\n",
		    region->total_length, seg->nr_pages,
		    (unsigned long)seg->pages);
	goto all_ok;
out:
	printk_err("error registering, try to debug MORE!!!!\n");

all_ok:
	TIMER_STOP(&t_reg_seg);
	dprintk_out();
	return ret;
}

int omx_xen_create_user_region(omx_xenif_t * omx_xenif, uint32_t id,
			       uint64_t vaddr, uint32_t nr_segments,
			       uint32_t nr_pages, uint32_t nr_grefs,
			       uint8_t eid)
{

	struct backend_info *be = omx_xenif->be;
	struct omxback_dev *omxdev = be->omxdev;
	struct omx_endpoint *endpoint = omxdev->endpoints[eid];
	struct omx_xen_user_region *region;
	int ret = 0;

	dprintk_in();
	TIMER_START(&t_create_reg);
	//udelay(1000);
	/* allocate the relevant region */
	region =
	    kzalloc(sizeof(struct omx_xen_user_region) +
		    nr_segments * sizeof(struct omx_xen_user_region_segment),
		    GFP_KERNEL);
	if (!region) {
		printk_err
		    ("No memory to allocate the region/segment buffers\n");
		ret = -ENOMEM;
		goto out;
	}

	/* init stuff needed :S */
	kref_init(&region->refcount);
	region->total_length = 0;
	region->nr_vmalloc_segments = 0;

	region->total_registered_length = 0;

	region->id = id;
	region->nr_segments = nr_segments;
	region->eid = eid;

	region->endpoint = endpoint;
	region->dirty = 0;

	if (unlikely(rcu_access_pointer(endpoint->xen_regions[id]) != NULL)) {
		printk(KERN_ERR "Cannot create busy region %d\n", id);
		/* don't leak the region allocated above */
		kfree(region);
		ret = -EBUSY;
		goto out;
	}

	rcu_assign_pointer(endpoint->xen_regions[id], region);

out:
	TIMER_STOP(&t_create_reg);
	dprintk_out();
	return ret;
}

/* Various region/segment handler functions */

void
omx_xen_user_region_destroy_segments(struct omx_xen_user_region *region,
				     struct omx_endpoint *endpoint)
{
	int i;

	dprintk_in();
	if (!endpoint) {
		printk_err("endpoint is null!!\n");
		return;
	}
	for (i = 0; i < region->nr_segments; i++)
		omx_xen_deregister_user_segment(endpoint->be->omx_xenif,
						region->id, i,
						endpoint->endpoint_index);

	dprintk_out();
}
Example #15
static int omx_xenfront_probe(struct xenbus_device *dev,
			      const struct xenbus_device_id *id)
{
	struct omx_xenfront_info *fe;
	struct omx_xenif_sring *sring, *recv_sring;
	int err = 0;
	int i = 0;

	dprintk_in();

	dprintk_deb("Frontend Probe Fired!\n");
	fe = kzalloc(sizeof(*fe), GFP_KERNEL);
	dprintk_deb("fe info is @%#llx!\n", (unsigned long long)fe);
	if (!fe) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
		err = -ENOMEM;
		goto out;
	}
	__omx_xen_frontend = fe;

	for (i = 0; i < OMX_XEN_MAX_ENDPOINTS; i++) {
		fe->endpoints[i] = NULL;
	}

	fe->requests = kzalloc(OMX_MAX_INFLIGHT_REQUESTS * sizeof(enum frontend_status), GFP_KERNEL);
	if (!fe->requests) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating request status array");
		err = -ENOMEM;
		goto out;
	}

	spin_lock_init(&fe->status_lock);

	fe->xbdev = dev;
	fe->connected = OMXIF_STATE_DISCONNECTED;

	init_waitqueue_head(&fe->wq);
	fe->msg_workq = create_singlethread_workqueue("ReQ_FE");
	if (unlikely(!fe->msg_workq)) {
		printk_err("Couldn't create msg_workq!\n");
		err = -ENOMEM;
		goto out;
	}

	INIT_WORK(&fe->msg_workq_task, omx_xenif_interrupt);


	spin_lock_init(&fe->lock);
	dprintk_deb("Setting up shared ring\n");

	sring =
	    (struct omx_xenif_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!sring) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		err = -ENOMEM;
		goto out;
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&fe->ring, sring, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(fe->ring.sring));
	if (err < 0) {
		free_page((unsigned long)sring);
		fe->ring.sring = NULL;
		printk_err("Failed to grant ring\n");
		goto out;
	}
	fe->ring_ref = err;


	recv_sring =
	    (struct omx_xenif_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!recv_sring) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		err = -ENOMEM;
		goto out;
	}
	SHARED_RING_INIT(recv_sring);
	FRONT_RING_INIT(&fe->recv_ring, recv_sring, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(fe->recv_ring.sring));
	if (err < 0) {
		free_page((unsigned long)recv_sring);
		fe->recv_ring.sring = NULL;
		printk_err("Failed to grant recv_ring\n");
		goto out;
	}
	fe->recv_ring_ref = err;

	fe->handle = simple_strtoul(strrchr(dev->nodename, '/') + 1, NULL, 0);
	dprintk_deb("setting handle = %u\n", fe->handle);
	dev_set_drvdata(&dev->dev, fe);
	err = 0;
	//omx_xenfront_dev->info = info;
	//fe->endpoints = kzalloc(sizeof(struct omx_endpoint*) * OMX_XEN_MAX_ENDPOINTS, GFP_KERNEL);
	xenbus_switch_state(dev, XenbusStateInitialising);

out:
	dprintk_out();
	return err;

}