Example #1
0
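/*
 * Map a batch of grant references with a single GNTTABOP_map_grant_ref
 * hypercall. On PV guests (no auto-translated physmap), each successfully
 * mapped machine frame is then registered in the m2p override table so
 * that later mfn-to-pfn lookups resolve to the local struct page.
 */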
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;
	pte_t *pte;
	unsigned long mfn;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return ret;

	for (i = 0; i < count; i++) {
		/* Do not add to override if the map failed. */
		if (map_ops[i].status)
			continue;

		if (map_ops[i].flags & GNTMAP_contains_pte) {
			pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
				(map_ops[i].host_addr & ~PAGE_MASK));
			mfn = pte_mfn(*pte);
		} else {
			mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
		}
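		/* Register the mfn -> local struct page mapping (and the kernel
		 * mapping from kmap_ops, if one was supplied) in the m2p
		 * override table. */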
		ret = m2p_add_override(mfn, pages[i], kmap_ops ?
				       &kmap_ops[i] : NULL);
		if (ret)
			return ret;
	}

	return ret;
}
Example #2
0
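/*
 * Record foreign grant mappings in the p2m: each page is marked as holding
 * a foreign frame (PagePrivate plus the MFN in page->private), its p2m
 * entry is set to FOREIGN_FRAME(mfn), and, when kernel mappings were
 * requested via kmap_ops, an m2p override entry is added as well.
 */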
int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
			    struct gnttab_map_grant_ref *kmap_ops,
			    struct page **pages, unsigned int count)
{
	int i, ret = 0;
	bool lazy = false;
	pte_t *pte;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	if (kmap_ops &&
	    !in_interrupt() &&
	    paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
		arch_enter_lazy_mmu_mode();
		lazy = true;
	}

	for (i = 0; i < count; i++) {
		unsigned long mfn, pfn;

		/* Do not add to override if the map failed. */
		if (map_ops[i].status)
			continue;

		if (map_ops[i].flags & GNTMAP_contains_pte) {
			pte = (pte_t *)(mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
				(map_ops[i].host_addr & ~PAGE_MASK));
			mfn = pte_mfn(*pte);
		} else {
			mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
		}
		pfn = page_to_pfn(pages[i]);

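		/* Tag the page as foreign: stash the MFN in page->private and
		 * point the p2m entry for this pfn at FOREIGN_FRAME(mfn). */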
		WARN_ON(PagePrivate(pages[i]));
		SetPagePrivate(pages[i]);
		set_page_private(pages[i], mfn);
		pages[i]->index = pfn_to_mfn(pfn);

		if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
			ret = -ENOMEM;
			goto out;
		}

		if (kmap_ops) {
			ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]);
			if (ret)
				goto out;
		}
	}

out:
	if (lazy)
		arch_leave_lazy_mmu_mode();

	return ret;
}
Example #3
0
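/*
 * As in example #1, but transient GNTST_eagain results are retried per
 * entry before being processed, and the m2p override updates are batched
 * under lazy MMU mode when it is safe to do so.
 */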
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
                    struct gnttab_map_grant_ref *kmap_ops,
                    struct page **pages, unsigned int count)
{
    int i, ret;
    bool lazy = false;
    pte_t *pte;
    unsigned long mfn;

    ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
    if (ret)
        return ret;

    /* Retry eagain maps */
    for (i = 0; i < count; i++)
        if (map_ops[i].status == GNTST_eagain)
            gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
                                    &map_ops[i].status, __func__);

    if (xen_feature(XENFEAT_auto_translated_physmap))
        return ret;

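    /* Batch the page-table updates issued by m2p_add_override below: enter
     * lazy MMU mode unless we are in interrupt context or a lazy section
     * is already active. */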
    if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
        arch_enter_lazy_mmu_mode();
        lazy = true;
    }

    for (i = 0; i < count; i++) {
        /* Do not add to override if the map failed. */
        if (map_ops[i].status)
            continue;

        if (map_ops[i].flags & GNTMAP_contains_pte) {
            pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
                             (map_ops[i].host_addr & ~PAGE_MASK));
            mfn = pte_mfn(*pte);
        } else {
            mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
        }
        ret = m2p_add_override(mfn, pages[i], kmap_ops ?
                               &kmap_ops[i] : NULL);
        if (ret)
            goto out;
    }

out:
    if (lazy)
        arch_leave_lazy_mmu_mode();

    return ret;
}
Example #4
0
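/*
 * Variant without a kmap_ops parameter: only mappings that carry
 * GNTMAP_contains_pte are handled; anything else is rejected with
 * -EOPNOTSUPP (see the comment in the else branch below).
 */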
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;
	pte_t *pte;
	unsigned long mfn;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return ret;

	for (i = 0; i < count; i++) {
		/* Do not add to override if the map failed. */
		if (map_ops[i].status)
			continue;

		if (map_ops[i].flags & GNTMAP_contains_pte) {
			pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
				(map_ops[i].host_addr & ~PAGE_MASK));
			mfn = pte_mfn(*pte);
		} else {
			/* If you really wanted to do this:
			 * mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
			 *
			 * The reason we do not implement it is because on the
			 * unmap path (gnttab_unmap_refs) we have no means of
			 * checking whether the page is !GNTMAP_contains_pte.
			 *
			 * That is without some extra data-structure to carry
			 * the struct page, bool clear_pte, and list_head next
			 * tuples and deal with allocation/deallocation, etc.
			 *
			 * The users of this API set the GNTMAP_contains_pte
			 * flag so let's just return not supported until it
			 * becomes necessary to implement.
			 */
			return -EOPNOTSUPP;
		}
		ret = m2p_add_override(mfn, pages[i],
				       map_ops[i].flags & GNTMAP_contains_pte);
		if (ret)
			return ret;
	}

	return ret;
}