Example #1
int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
{
	xen_pfn_t *pfn;
	unsigned int max_nr_gframes = __max_nr_grant_frames();
	unsigned int i;
	void *vaddr;

	if (xen_auto_xlat_grant_frames.count)
		return -EINVAL;

	vaddr = xen_remap(addr, XEN_PAGE_SIZE * max_nr_gframes);
	if (vaddr == NULL) {
		pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
			&addr);
		return -ENOMEM;
	}
	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
	if (!pfn) {
		xen_unmap(vaddr);
		return -ENOMEM;
	}
	/* The shared frames are physically contiguous, so frame i sits at
	 * addr + i * XEN_PAGE_SIZE and its Xen frame number is
	 * XEN_PFN_DOWN(addr) + i. */
	for (i = 0; i < max_nr_gframes; i++)
		pfn[i] = XEN_PFN_DOWN(addr) + i;

	xen_auto_xlat_grant_frames.vaddr = vaddr;
	xen_auto_xlat_grant_frames.pfn = pfn;
	xen_auto_xlat_grant_frames.count = max_nr_gframes;

	return 0;
}
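
Every example on this page is built around XEN_PFN_DOWN, which converts a physical or bus address into a 4 KiB Xen frame number. For reference, the macro and its companions live in include/xen/page.h and look roughly like this (lightly simplified; Xen frames are always 4 KiB even when the kernel's own PAGE_SIZE is larger, e.g. 64 KiB on some arm64 configurations):

#define XEN_PAGE_SHIFT	12
#define XEN_PAGE_SIZE	(1UL << XEN_PAGE_SHIFT)		/* 4096 */
#define XEN_PAGE_MASK	(~(XEN_PAGE_SIZE - 1))

#define XEN_PFN_DOWN(x)	((x) >> XEN_PAGE_SHIFT)		/* round down to a frame */
#define XEN_PFN_UP(x)	(((x) + XEN_PAGE_SIZE - 1) >> XEN_PAGE_SHIFT)
#define XEN_PFN_PHYS(x)	((phys_addr_t)(x) << XEN_PAGE_SHIFT)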
Example #2
static int xen_unmap_device_mmio(const struct resource *resources,
				 unsigned int count)
{
	unsigned int i, j, nr;
	int rc = 0;
	const struct resource *r;
	struct xen_remove_from_physmap xrp;

	for (i = 0; i < count; i++) {
		r = &resources[i];
		nr = DIV_ROUND_UP(resource_size(r), XEN_PAGE_SIZE);
		if ((resource_type(r) != IORESOURCE_MEM) || (nr == 0))
			continue;

		for (j = 0; j < nr; j++) {
			xrp.domid = DOMID_SELF;
			xrp.gpfn = XEN_PFN_DOWN(r->start) + j;
			rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap,
						  &xrp);
			if (rc)
				return rc;
		}
	}

	return rc;
}
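
Here DIV_ROUND_UP(resource_size(r), XEN_PAGE_SIZE) turns the region's byte length into a count of 4 KiB Xen frames (a 0x1100-byte region, for instance, yields DIV_ROUND_UP(0x1100, 0x1000) = 2 frames), and one XENMEM_remove_from_physmap hypercall is issued per frame. Reusing xrp across iterations is safe because both of its fields are rewritten every time.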
Example #3
/*
 * Both of these functions should avoid XEN_PFN_PHYS because phys_addr_t
 * can be 32-bit while dma_addr_t is 64-bit, which would lose the high
 * bits if the shift were done before the cast to 64-bit.
 */
static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
	dma_addr_t dma = (dma_addr_t)bfn << XEN_PAGE_SHIFT;

	dma |= paddr & ~XEN_PAGE_MASK;

	return dma;
}
Example #4
static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
	unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
	dma_addr_t dma = (dma_addr_t)xen_pfn << XEN_PAGE_SHIFT;
	phys_addr_t paddr = dma;

	paddr |= baddr & ~XEN_PAGE_MASK;

	return paddr;
}
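
The comment shared by Examples #3 and #4 deserves a concrete illustration: if the widening cast happens after the shift, the shift is evaluated in 32 bits and the high bits of the bus address are silently lost. A minimal standalone sketch (plain C with hypothetical values, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A hypothetical frame number whose byte address exceeds 4 GiB. */
	uint32_t bfn = 0x00150000u;

	/* Wrong: the shift wraps in 32 bits, 0x150000000 becomes 0x50000000,
	 * and only then is the result widened. */
	uint64_t truncated = (uint64_t)(bfn << 12);

	/* Right: widen first, then shift, as xen_phys_to_bus() does with
	 * (dma_addr_t)bfn << XEN_PAGE_SHIFT. */
	uint64_t correct = (uint64_t)bfn << 12;

	printf("truncated=%#llx correct=%#llx\n",
	       (unsigned long long)truncated, (unsigned long long)correct);
	return 0;
}

This prints truncated=0x50000000 correct=0x150000000; the kernel versions above take care to perform the cast before the shift for exactly this reason.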
Example #5
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long xen_pfn = XEN_PFN_DOWN(p);
	unsigned int offset = p & ~XEN_PAGE_MASK;

	if (offset + size <= XEN_PAGE_SIZE)
		return 0;
	if (check_pages_physically_contiguous(xen_pfn, offset, size))
		return 0;
	return 1;
}
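
A worked case: with XEN_PAGE_SIZE of 4096, a 512-byte buffer at physical address 0x1000f00 has offset 0xf00 inside its Xen frame, and 0xf00 + 512 = 0x1100 > 0x1000, so the fast path fails; the range is then reported as straddling unless check_pages_physically_contiguous() finds the underlying frames contiguous in bus (machine) address space as well.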
Example #6
static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
	unsigned long bfn = XEN_PFN_DOWN(dma_addr);
	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
	phys_addr_t paddr = XEN_PFN_PHYS(xen_pfn);

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore, _only_ check addresses within our domain.
	 */
	if (pfn_valid(PFN_DOWN(paddr))) {
		return paddr >= virt_to_phys(xen_io_tlb_start) &&
		       paddr < virt_to_phys(xen_io_tlb_end);
	}
	return 0;
}
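
This runs Example #4's translation one step further: DMA address to bus frame, bus frame to local pfn, and back to a physical address via XEN_PFN_PHYS (arguably safe here, since a local pfn by definition yields an address that fits in phys_addr_t). The pfn_valid() guard enforces the comment: bfn-to-pfn translation is only meaningful for frames this domain owns, so foreign addresses are never compared against the swiotlb window.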
Example #7
void gnttab_foreach_grant_in_range(struct page *page,
				   unsigned int offset,
				   unsigned int len,
				   xen_grant_fn_t fn,
				   void *data)
{
	unsigned int goffset;
	unsigned int glen;
	unsigned long xen_pfn;

	len = min_t(unsigned int, PAGE_SIZE - offset, len);
	goffset = xen_offset_in_page(offset);

	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);

	while (len) {
		glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len);
		fn(pfn_to_gfn(xen_pfn), goffset, glen, data);

		goffset = 0;
		xen_pfn++;
		len -= glen;
	}
}
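
gnttab_foreach_grant_in_range() chops a range of a kernel page into chunks that never cross a 4 KiB Xen frame boundary and invokes the callback once per chunk, which is what makes grant handling work when PAGE_SIZE exceeds XEN_PAGE_SIZE. A minimal sketch of a callback; the signature is the xen_grant_fn_t typedef from include/xen/grant_table.h, while the accumulation logic is purely hypothetical:

/* Hypothetical callback: tallies how many bytes the walk covered. */
static void count_grant_chunk(unsigned long gfn, unsigned int offset,
			      unsigned int len, void *data)
{
	unsigned int *total = data;

	/* Each invocation covers at most XEN_PAGE_SIZE - offset bytes
	 * of the single Xen frame gfn. */
	*total += len;
}

Called as gnttab_foreach_grant_in_range(page, 0, PAGE_SIZE, count_grant_chunk, &total), it fires once per 4 KiB frame backing the page.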
Example #8
static int xen_map_device_mmio(const struct resource *resources,
			       unsigned int count)
{
	unsigned int i, j, nr;
	int rc = 0;
	const struct resource *r;
	xen_pfn_t *gpfns;
	xen_ulong_t *idxs;
	int *errs;

	for (i = 0; i < count; i++) {
		struct xen_add_to_physmap_range xatp = {
			.domid = DOMID_SELF,
			.space = XENMAPSPACE_dev_mmio
		};

		r = &resources[i];
		nr = DIV_ROUND_UP(resource_size(r), XEN_PAGE_SIZE);
		if ((resource_type(r) != IORESOURCE_MEM) || (nr == 0))
			continue;

		gpfns = kcalloc(nr, sizeof(xen_pfn_t), GFP_KERNEL);
		idxs = kcalloc(nr, sizeof(xen_ulong_t), GFP_KERNEL);
		errs = kcalloc(nr, sizeof(int), GFP_KERNEL);
		if (!gpfns || !idxs || !errs) {
			kfree(gpfns);
			kfree(idxs);
			kfree(errs);
			rc = -ENOMEM;
			goto unmap;
		}

		for (j = 0; j < nr; j++) {
			/*
			 * The regions are always mapped 1:1 to DOM0 and this is
			 * fine because the memory map for DOM0 is the same as
			 * the host (except for the RAM).
			 */
			gpfns[j] = XEN_PFN_DOWN(r->start) + j;
			idxs[j] = XEN_PFN_DOWN(r->start) + j;
		}

		xatp.size = nr;

		set_xen_guest_handle(xatp.gpfns, gpfns);
		set_xen_guest_handle(xatp.idxs, idxs);
		set_xen_guest_handle(xatp.errs, errs);

		rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
		kfree(gpfns);
		kfree(idxs);
		kfree(errs);
		if (rc)
			goto unmap;
	}

	return rc;

unmap:
	xen_unmap_device_mmio(resources, i);
	return rc;
}
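
Two details of the error path are worth noting. On failure the code jumps to unmap with i still indexing the resource being processed, so xen_unmap_device_mmio(resources, i) (Example #2) rolls back only the resources that were mapped in full. And at least in this version, only the overall hypercall return value is checked; the per-frame errs array is allocated and passed to the hypervisor but never inspected.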