int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;
	pte_t *pte;
	unsigned long mfn;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return ret;

	for (i = 0; i < count; i++) {
		/* Do not add to override if the map failed. */
		if (map_ops[i].status)
			continue;

		if (map_ops[i].flags & GNTMAP_contains_pte) {
			pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
				(map_ops[i].host_addr & ~PAGE_MASK));
			mfn = pte_mfn(*pte);
		} else {
			mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
		}
		ret = m2p_add_override(mfn, pages[i], kmap_ops ?
				       &kmap_ops[i] : NULL);
		if (ret)
			return ret;
	}

	return ret;
}
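For context, a minimal usage sketch (not from the original file; the helper name map_one_grant and the lack of per-op status checking are simplifications) showing how a caller might feed gnttab_map_refs() a single grant reference:

/* Hypothetical caller: map one foreign grant into @page.
 * gnttab_set_map_op() fills in the hypercall arguments; GNTMAP_host_map
 * asks for a kernel mapping of the granted frame. */
static int map_one_grant(grant_ref_t ref, domid_t otherdomid,
			 struct page *page)
{
	struct gnttab_map_grant_ref op;
	unsigned long addr = (unsigned long)pfn_to_kaddr(page_to_pfn(page));

	gnttab_set_map_op(&op, addr, GNTMAP_host_map, ref, otherdomid);
	return gnttab_map_refs(&op, NULL, &page, 1);
}

Note that gnttab_map_refs() only reports hypercall-level failure; a real caller still has to inspect op.status for the per-operation result.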
Example #2
File: p2m.c  Project: quadcores/cbs_4.2.4
/*
 * Build the parallel p2m_top_mfn and p2m_mid_mfn structures
 *
 * This is called both at boot time, and after resuming from suspend:
 * - At boot time we're called rather early, and must use alloc_bootmem*()
 *   to allocate memory.
 *
 * - After resume we're called from within stop_machine, but the mfn
 *   tree should already be completely allocated.
 */
void __ref xen_build_mfn_list_list(void)
{
    unsigned long pfn, mfn;
    pte_t *ptep;
    unsigned int level, topidx, mididx;
    unsigned long *mid_mfn_p;

    if (xen_feature(XENFEAT_auto_translated_physmap))
        return;

    /* Pre-initialize p2m_top_mfn to be completely missing */
    if (p2m_top_mfn == NULL) {
        p2m_mid_missing_mfn = alloc_p2m_page();
        p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing);

        p2m_top_mfn_p = alloc_p2m_page();
        p2m_top_mfn_p_init(p2m_top_mfn_p);

        p2m_top_mfn = alloc_p2m_page();
        p2m_top_mfn_init(p2m_top_mfn);
    } else {
        /* Reinitialise: MFNs all change after migration */
        p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing);
    }

    for (pfn = 0; pfn < xen_max_p2m_pfn && pfn < MAX_P2M_PFN;
            pfn += P2M_PER_PAGE) {
        topidx = p2m_top_index(pfn);
        mididx = p2m_mid_index(pfn);

        mid_mfn_p = p2m_top_mfn_p[topidx];
        ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn),
                              &level);
        BUG_ON(!ptep || level != PG_LEVEL_4K);
        mfn = pte_mfn(*ptep);
        ptep = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1));

        /* Don't bother allocating any mfn mid levels if
         * they're just missing, just update the stored mfn,
         * since all could have changed over a migrate.
         */
        if (ptep == p2m_missing_pte || ptep == p2m_identity_pte) {
            BUG_ON(mididx);
            BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
            p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn);
            pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE;
            continue;
        }

        if (mid_mfn_p == p2m_mid_missing_mfn) {
            mid_mfn_p = alloc_p2m_page();
            p2m_mid_mfn_init(mid_mfn_p, p2m_missing);

            p2m_top_mfn_p[topidx] = mid_mfn_p;
        }

        p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
        mid_mfn_p[mididx] = mfn;
    }
}
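For reference, the p2m_top_index()/p2m_mid_index() helpers used above decompose a PFN into indices for the three-level p2m tree. A sketch of their usual definitions in the same p2m.c (reproduced from memory, so treat the exact details as an assumption):

/* Each leaf page stores P2M_PER_PAGE mfn entries; each mid page stores
 * P2M_MID_PER_PAGE pointers to leaf pages. */
static inline unsigned p2m_top_index(unsigned long pfn)
{
	BUG_ON(pfn >= MAX_P2M_PFN);
	return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
}

static inline unsigned p2m_mid_index(unsigned long pfn)
{
	return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
}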
Example #3
xmaddr_t arbitrary_virt_to_machine(unsigned long address)
{
	pte_t *pte = lookup_address(address);
	unsigned offset = address & ~PAGE_MASK;	/* offset within the page */

	BUG_ON(pte == NULL);

	return XMADDR((pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
Example #4
int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
			    struct gnttab_map_grant_ref *kmap_ops,
			    struct page **pages, unsigned int count)
{
	int i, ret = 0;
	bool lazy = false;
	pte_t *pte;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	if (kmap_ops &&
	    !in_interrupt() &&
	    paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
		arch_enter_lazy_mmu_mode();
		lazy = true;
	}

	for (i = 0; i < count; i++) {
		unsigned long mfn, pfn;

		/* Do not add to override if the map failed. */
		if (map_ops[i].status)
			continue;

		if (map_ops[i].flags & GNTMAP_contains_pte) {
			pte = (pte_t *)(mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
				(map_ops[i].host_addr & ~PAGE_MASK));
			mfn = pte_mfn(*pte);
		} else {
			mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
		}
		pfn = page_to_pfn(pages[i]);

		WARN_ON(PagePrivate(pages[i]));
		SetPagePrivate(pages[i]);
		set_page_private(pages[i], mfn);
		pages[i]->index = pfn_to_mfn(pfn);

		if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
			ret = -ENOMEM;
			goto out;
		}

		if (kmap_ops) {
			ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]);
			if (ret)
				goto out;
		}
	}

out:
	if (lazy)
		arch_leave_lazy_mmu_mode();

	return ret;
}
Example #5
File: grant-table.c  Project: mbgg/linux
static xen_pfn_t pvh_get_grant_pfn(int grant_idx)
{
	unsigned long vaddr;
	unsigned int level;
	pte_t *pte;

	vaddr = (unsigned long)(gnttab_shared.addr) + grant_idx * PAGE_SIZE;
	pte = lookup_address(vaddr, &level);
	BUG_ON(pte == NULL);
	return pte_mfn(*pte);
}
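A hedged usage sketch (the helper name and the pfns array are assumptions, not part of the file): gathering the frames backing every grant-table page, e.g. to hand to a remapping hypercall:

/* Hypothetical helper: collect the frames backing the first
 * nr_gframes grant-table pages. */
static void collect_grant_pfns(xen_pfn_t *pfns, unsigned int nr_gframes)
{
	unsigned int i;

	for (i = 0; i < nr_gframes; i++)
		pfns[i] = pvh_get_grant_pfn(i);
}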
Example #6
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
                    struct gnttab_map_grant_ref *kmap_ops,
                    struct page **pages, unsigned int count)
{
    int i, ret;
    bool lazy = false;
    pte_t *pte;
    unsigned long mfn;

    ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
    if (ret)
        return ret;

    /* Retry eagain maps */
    for (i = 0; i < count; i++)
        if (map_ops[i].status == GNTST_eagain)
            gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
                                    &map_ops[i].status, __func__);

    if (xen_feature(XENFEAT_auto_translated_physmap))
        return ret;

    if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
        arch_enter_lazy_mmu_mode();
        lazy = true;
    }

    for (i = 0; i < count; i++) {
        /* Do not add to override if the map failed. */
        if (map_ops[i].status)
            continue;

        if (map_ops[i].flags & GNTMAP_contains_pte) {
            pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
                             (map_ops[i].host_addr & ~PAGE_MASK));
            mfn = pte_mfn(*pte);
        } else {
            mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
        }
        ret = m2p_add_override(mfn, pages[i], kmap_ops ?
                               &kmap_ops[i] : NULL);
        if (ret)
            goto out;
    }

out:
    if (lazy)
        arch_leave_lazy_mmu_mode();

    return ret;
}
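The retry helper called above is not part of this snippet. As a sketch of what it does, modeled on the drivers/xen/grant-table.c helper of the same name (the linear backoff policy and the MAX_DELAY value are assumptions):

#define MAX_DELAY 256

static void gnttab_retry_eagain_gop(unsigned int cmd, void *gop,
				    int16_t *status, const char *func)
{
	unsigned delay = 1;

	do {
		/* Re-issue just the one failed operation. */
		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
		if (*status == GNTST_eagain)
			msleep(delay++);	/* back off a little each round */
	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));

	if (delay >= MAX_DELAY) {
		pr_err("%s: %s eagain grant\n", func, current->comm);
		*status = GNTST_bad_page;	/* give up with a hard error */
	}
}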
Example #7
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;
	pte_t *pte;
	unsigned long mfn;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return ret;

	for (i = 0; i < count; i++) {
		/* Do not add to override if the map failed. */
		if (map_ops[i].status)
			continue;

		if (map_ops[i].flags & GNTMAP_contains_pte) {
			pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
				(map_ops[i].host_addr & ~PAGE_MASK));
			mfn = pte_mfn(*pte);
		} else {
			/* If you really wanted to do this:
			 * mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
			 *
			 * The reason we do not implement it is because on the
			 * unmap path (gnttab_unmap_refs) we have no means of
			 * checking whether the page is !GNTMAP_contains_pte.
			 *
			 * That is, not without some extra data structure to
			 * carry the (struct page, bool clear_pte, list_head
			 * next) tuples and deal with allocation/deallocation,
			 * etc.
			 *
			 * The users of this API set the GNTMAP_contains_pte
			 * flag, so let's just return not supported until it
			 * becomes necessary to implement.
			 */
			return -EOPNOTSUPP;
		}
		ret = m2p_add_override(mfn, pages[i],
				       map_ops[i].flags & GNTMAP_contains_pte);
		if (ret)
			return ret;
	}

	return ret;
}
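The comment above enumerates exactly the bookkeeping the unimplemented dev_bus_addr path would need on unmap. Purely as an illustration (the struct name is made up, not kernel API):

/* Hypothetical per-mapping record carrying the (struct page,
 * bool clear_pte, list_head next) tuple the comment describes, so
 * gnttab_unmap_refs() could tell whether a PTE needs clearing. */
struct gnttab_override_entry {
	struct page *page;	/* page whose p2m entry was overridden */
	bool clear_pte;		/* must the PTE be torn down on unmap? */
	struct list_head next;	/* linkage in an override list */
};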
Example #8
xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
Example #9
File: p2m.c  Project: DenisLug/mptcp
int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
			    struct gnttab_map_grant_ref *kmap_ops,
			    struct page **pages, unsigned int count)
{
	int i, ret = 0;
	pte_t *pte;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	if (kmap_ops) {
		ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
						kmap_ops, count);
		if (ret)
			goto out;
	}

	for (i = 0; i < count; i++) {
		unsigned long mfn, pfn;

		/* Do not add to override if the map failed. */
		if (map_ops[i].status)
			continue;

		if (map_ops[i].flags & GNTMAP_contains_pte) {
			pte = (pte_t *)(mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
				(map_ops[i].host_addr & ~PAGE_MASK));
			mfn = pte_mfn(*pte);
		} else {
			mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
		}
		pfn = page_to_pfn(pages[i]);

		WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned");

		if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
			ret = -ENOMEM;
			goto out;
		}
	}

out:
	return ret;
}
Example #10
xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * if the PFN is in the linear mapped vaddr range, we can just use
	 * the (quick) virt_to_machine() p2m lookup
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
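A minimal usage sketch (the caller name is an assumption): a vmalloc'd address fails virt_addr_valid() and therefore exercises the slower page-table-walk branch above:

/* Hypothetical caller: fetch the machine address backing a vmalloc
 * page, e.g. for a hypercall that takes machine addresses. */
static phys_addr_t machine_addr_of(void *vmalloced)
{
	xmaddr_t xm = arbitrary_virt_to_machine(vmalloced);

	return xm.maddr;	/* xmaddr_t wraps a single maddr field */
}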
Example #11
static int dealloc_pte_fn(
	pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
{
	unsigned long mfn = pte_mfn(*pte);
	int ret;
	struct xen_memory_reservation reservation = {
		.nr_extents   = 1,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};
	set_xen_guest_handle(reservation.extent_start, &mfn);
	set_pte_at(&init_mm, addr, pte, __pte_ma(0));
	set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY);
	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
	BUG_ON(ret != 1);
	return 0;
}
#endif

struct page **alloc_empty_pages_and_pagevec(int nr_pages)
{
	unsigned long vaddr, flags;
	struct page *page, **pagevec;
	int i, ret;

	pagevec = kmalloc(sizeof(page) * nr_pages, GFP_KERNEL);
	if (pagevec == NULL)
		return NULL;

	for (i = 0; i < nr_pages; i++) {
		page = pagevec[i] = alloc_page(GFP_KERNEL);
		if (page == NULL)
			goto err;

		vaddr = (unsigned long)page_address(page);

		scrub_pages(vaddr, 1);

		balloon_lock(flags);

		if (xen_feature(XENFEAT_auto_translated_physmap)) {
			unsigned long gmfn = page_to_pfn(page);
			struct xen_memory_reservation reservation = {
				.nr_extents   = 1,
				.extent_order = 0,
				.domid        = DOMID_SELF
			};
			set_xen_guest_handle(reservation.extent_start, &gmfn);
			ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
						   &reservation);
			if (ret == 1)
				ret = 0; /* success */
		} else {
#ifdef CONFIG_XEN
			ret = apply_to_page_range(&init_mm, vaddr, PAGE_SIZE,
						  dealloc_pte_fn, NULL);
#else
			/* Cannot handle non-auto translate mode. */
			ret = 1;
#endif
		}

		if (ret != 0) {
			balloon_unlock(flags);
			__free_page(page);
			goto err;
		}

		totalram_pages = --current_pages;

		balloon_unlock(flags);
	}

 out:
	schedule_work(&balloon_worker);
#ifdef CONFIG_XEN
	flush_tlb_all();
#endif
	return pagevec;

 err:
	balloon_lock(flags);
	while (--i >= 0)
		balloon_append(pagevec[i]);
	balloon_unlock(flags);
	kfree(pagevec);
	pagevec = NULL;
	goto out;
}