Example #1
static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
	int i, rc;
	int dma_bits;

	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

	i = 0;
	do {
		int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

		do {
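			/*
			 * Exchange this segment for machine-contiguous memory,
			 * widening the DMA address restriction one bit per
			 * retry, up to max_dma_bits.
			 */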
			rc = xen_create_contiguous_region(
				(unsigned long)buf + (i << IO_TLB_SHIFT),
				get_order(slabs << IO_TLB_SHIFT),
				dma_bits);
		} while (rc && dma_bits++ < max_dma_bits);
		if (rc)
			return rc;

		i += slabs;
	} while (i < nslabs);
	return 0;
}
Example #2
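/*
 * Constructor for the per-order skbuff caches: under Xen, make multi-page
 * objects machine-contiguous, then scrub the pages.
 */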
static void skbuff_ctor(void *buf, kmem_cache_t *cachep, unsigned long unused)
{
	int order = 0;

	while (skbuff_order_cachep[order] != cachep)
		order++;

	/* Do our best to allocate contiguous memory but fall back to IOMMU. */
	if (order != 0)
		(void)xen_create_contiguous_region(
			(unsigned long)buf, order, 0);

	scrub_pages(buf, 1 << order);
}
Example #3
void *dma_alloc_coherent(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	unsigned int order = get_order(size);
	unsigned long vstart;
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (mem) {
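		/* Try the device's dedicated coherent pool first. */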
		int page = bitmap_find_free_region(mem->bitmap, mem->size,
						     order);
		if (page >= 0) {
			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
			ret = mem->virt_base + (page << PAGE_SHIFT);
			memset(ret, 0, size);
			return ret;
		}
		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
			return NULL;
	}

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;

	vstart = __get_free_pages(gfp, order);
	ret = (void *)vstart;

	if (ret != NULL) {
		/* NB. Hardcode 31 address bits for now: aacraid limitation. */
		if (xen_create_contiguous_region(vstart, order, 31) != 0) {
			free_pages(vstart, order);
			return NULL;
		}
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}
	return ret;
}
Example #4
void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
	unsigned long flags;

	if (PTRS_PER_PMD > 1) {
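		/*
		 * PAE: unless the hypervisor supports pgds above 4GB, exchange
		 * this page so its machine address fits in 32 bits.
		 */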
		if (!xen_feature(XENFEAT_pae_pgdir_above_4gb)) {
			int rc = xen_create_contiguous_region(
				(unsigned long)pgd, 0, 32);
			BUG_ON(rc);
		}
		if (HAVE_SHARED_KERNEL_PMD)
			clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
					swapper_pg_dir + USER_PTRS_PER_PGD,
					KERNEL_PGD_PTRS);
	} else {
		spin_lock_irqsave(&pgd_lock, flags);
		clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
				swapper_pg_dir + USER_PTRS_PER_PGD,
				KERNEL_PGD_PTRS);
		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
		pgd_list_add(pgd);
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
}
Example #5
static int create_packet(void *data, size_t length)
{
	struct packet_data *newpacket;
	int ordernum = 0;
	int retval = 0;
	unsigned int packet_array_size = 0;
	void **invalid_addr_packet_array = NULL;
	void *packet_data_temp_buf = NULL;
	unsigned int idx = 0;

	pr_debug("create_packet: entry\n");

	if (!rbu_data.packetsize) {
		pr_debug("create_packet: packetsize not specified\n");
		retval = -EINVAL;
		goto out_noalloc;
	}

	spin_unlock(&rbu_data.lock);

	newpacket = kzalloc(sizeof(struct packet_data), GFP_KERNEL);

	if (!newpacket) {
		printk(KERN_WARNING
			"dell_rbu:%s: failed to allocate new "
			"packet\n", __func__);
		retval = -ENOMEM;
		spin_lock(&rbu_data.lock);
		goto out_noalloc;
	}

	ordernum = get_order(length);

	/*
	 * BIOS errata mean we cannot allocate packets below 1MB or they will
	 * be overwritten by the BIOS.
	 *
	 * invalid_addr_packet_array temporarily holds packets that land below
	 * the allocation floor until a usable one is obtained.
	 *
	 * NOTE: very simplistic, because the floor only needs to be at 1MB
	 *       due to the BIOS errata. This shouldn't be used for higher
	 *       floors or you will run out of memory trying to allocate the
	 *       array.
	 */
	packet_array_size = max(
			(unsigned int)(allocation_floor / rbu_data.packetsize),
			(unsigned int)1);
	invalid_addr_packet_array = kzalloc(packet_array_size * sizeof(void *),
						GFP_KERNEL);

	if (!invalid_addr_packet_array) {
		printk(KERN_WARNING
			"dell_rbu:%s: failed to allocate "
			"invalid_addr_packet_array\n",
			__func__);
		retval = -ENOMEM;
		spin_lock(&rbu_data.lock);
		goto out_alloc_packet;
	}

	while (!packet_data_temp_buf) {
		packet_data_temp_buf = (unsigned char *)
			__get_free_pages(GFP_KERNEL, ordernum);
		if (!packet_data_temp_buf) {
			printk(KERN_WARNING
				"dell_rbu:%s: failed to allocate new "
				"packet\n", __func__);
			retval = -ENOMEM;
			spin_lock(&rbu_data.lock);
			goto out_alloc_packet_array;
		}
#ifdef CONFIG_XEN
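		/*
		 * Under Xen, guest-contiguous pages are not necessarily
		 * machine-contiguous; exchange them so the firmware sees one
		 * contiguous packet buffer.
		 */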
		if (ordernum && xen_create_contiguous_region(
			(unsigned long)packet_data_temp_buf, ordernum, 0)) {
			free_pages((unsigned long)packet_data_temp_buf,
				   ordernum);
			printk(KERN_WARNING
				"dell_rbu:%s: failed to adjust new "
				"packet\n", __func__);
			retval = -ENOMEM;
			spin_lock(&rbu_data.lock);
			goto out_alloc_packet_array;
		}
#endif

		if ((unsigned long)virt_to_bus(packet_data_temp_buf)
				< allocation_floor) {
#ifdef CONFIG_XEN
			if (ordernum)
				xen_destroy_contiguous_region(
					(unsigned long)packet_data_temp_buf,
					ordernum);
#endif
			pr_debug("packet 0x%lx below floor at 0x%lx.\n",
					(unsigned long)virt_to_phys(
						packet_data_temp_buf),
					allocation_floor);
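			/*
			 * Park the below-floor buffer so the next allocation
			 * is forced to a higher address; it is freed once a
			 * usable buffer has been obtained.
			 */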
			invalid_addr_packet_array[idx++] = packet_data_temp_buf;
			packet_data_temp_buf = NULL;
		}
	}
	spin_lock(&rbu_data.lock);

	newpacket->data = packet_data_temp_buf;

	pr_debug("create_packet: newpacket at physical addr %lx\n",
		(unsigned long)virt_to_bus(newpacket->data));

	/* packets may not have fixed size */
	newpacket->length = length;
	newpacket->ordernum = ordernum;
	++rbu_data.num_packets;

	/* initialize the newly created packet headers */
	INIT_LIST_HEAD(&newpacket->list);
	list_add_tail(&newpacket->list, &packet_data_head.list);

	memcpy(newpacket->data, data, length);

	pr_debug("create_packet: exit\n");

out_alloc_packet_array:
	/* always free packet array */
	for (; idx > 0; idx--) {
		pr_debug("freeing unused packet below floor 0x%lx.\n",
			(unsigned long)virt_to_bus(
				invalid_addr_packet_array[idx-1]));
		free_pages((unsigned long)invalid_addr_packet_array[idx-1],
			ordernum);
	}
	kfree(invalid_addr_packet_array);

out_alloc_packet:
	/* if error, free data */
	if (retval)
		kfree(newpacket);

out_noalloc:
	return retval;
}
Example #6
static int
dom0_memory_reserve(uint32_t rsv_size)
{
	uint64_t pfn, vstart, vaddr;
	uint32_t i, num_block, size, allocated_size = 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
	dma_addr_t dma_handle;
#endif

	/* each memory block is 2M */
	num_block = rsv_size / SIZE_PER_BLOCK;

	rsv_mm_info = vmalloc(sizeof(struct memblock_info) * num_block);
	if (!rsv_mm_info) {
		XEN_ERR("Unable to allocate device memory information\n");
		return -ENOMEM;
	}
	memset(rsv_mm_info, 0, sizeof(struct memblock_info) * num_block);

	/* first try to allocate 4M (two blocks) at a time */
	for (i = 0; i < num_block; i += 2) {
		vstart = (unsigned long)
			__get_free_pages(GFP_ATOMIC, MAX_NUM_ORDER);
		if (vstart == 0)
			break;

		dom0_dev.num_bigblock = i / 2 + 1;
		allocated_size = SIZE_PER_BLOCK * (i + 2);

		/* size of 4M */
		size = DOM0_MEMBLOCK_SIZE * 2;

		vaddr = vstart;
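		/* Mark each page reserved so the memory manager leaves it alone. */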
		while (size > 0) {
			SetPageReserved(virt_to_page(vaddr));
			vaddr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}

		pfn = virt_to_pfn(vstart);
		rsv_mm_info[i].pfn = pfn;
		rsv_mm_info[i].vir_addr = vstart;
		rsv_mm_info[i + 1].pfn =
				pfn + DOM0_MEMBLOCK_SIZE / PAGE_SIZE;
		rsv_mm_info[i + 1].vir_addr =
				vstart + DOM0_MEMBLOCK_SIZE;
	}

	/* if 4M allocations failed, fall back to allocating 2M at a time */
	for (; i < num_block; i++) {
		vstart = (unsigned long)
			__get_free_pages(GFP_ATOMIC, DOM0_CONTIG_NUM_ORDER);
		if (vstart == 0) {
			XEN_ERR("failed to allocate memory\n");
			dom0_memory_free(allocated_size);
			return -ENOMEM;
		}

		allocated_size += SIZE_PER_BLOCK;

		size = DOM0_MEMBLOCK_SIZE;
		vaddr = vstart;
		while (size > 0) {
			SetPageReserved(virt_to_page(vaddr));
			vaddr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
		pfn = virt_to_pfn(vstart);
		rsv_mm_info[i].pfn = pfn;
		rsv_mm_info[i].vir_addr = vstart;
	}

	sort_viraddr(rsv_mm_info, num_block);

	for (i = 0; i < num_block; i++) {
		/*
		 * Exchange MFNs with the hypervisor to obtain a block of
		 * contiguous machine addresses; the maximum size is 2M.
		 */
	#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
		if (xen_create_contiguous_region(rsv_mm_info[i].vir_addr,
				DOM0_CONTIG_NUM_ORDER, 0) == 0) {
	#else
		if (xen_create_contiguous_region(rsv_mm_info[i].pfn * PAGE_SIZE,
				DOM0_CONTIG_NUM_ORDER, 0, &dma_handle) == 0) {
	#endif
			rsv_mm_info[i].exchange_flag = 1;
			rsv_mm_info[i].mfn =
				pfn_to_mfn(rsv_mm_info[i].pfn);
			rsv_mm_info[i].used = 0;
		} else {
			XEN_ERR("memory exchange failed\n");
			rsv_mm_info[i].exchange_flag = 0;
			dom0_dev.fail_times++;
			if (dom0_dev.fail_times > MAX_EXCHANGE_FAIL_TIME) {
				dom0_memory_free(rsv_size);
				return -EFAULT;
			}
		}
	}

	return 0;
}

static int
dom0_prepare_memsegs(struct memory_info *meminfo, struct dom0_mm_data *mm_data)
{
	uint32_t num_block;
	int idx;

	/* check if there is a free name buffer */
	memcpy(mm_data->name, meminfo->name, DOM0_NAME_MAX);
	mm_data->name[DOM0_NAME_MAX - 1] = '\0';
	idx = dom0_find_mempos();
	if (idx < 0)
		return -1;

	num_block = meminfo->size / SIZE_PER_BLOCK;
	/* find free memory and new memory segments */
	find_free_memory(num_block, mm_data);
	find_memseg(num_block, mm_data);

	/* update private memory data */
	mm_data->refcnt++;
	mm_data->mem_size = meminfo->size;

	/* update global memory data */
	dom0_dev.mm_data[idx] = mm_data;
	dom0_dev.num_mem_ctx++;
	dom0_dev.used_memsize += mm_data->mem_size;

	return 0;
}