Example #1
/* This is called just after an mm has been created, but before it has
   been used.  We need to make sure that its pagetable is all read-only
   and can be pinned. */
void xen_pgd_pin(pgd_t *pgd)
{
	struct multicall_space mcs;
	struct mmuext_op *op;

	xen_mc_batch();

	if (pgd_walk(pgd, pin_page, TASK_SIZE)) {
		/* re-enable interrupts for kmap_flush_unused */
		xen_mc_issue(0);
		kmap_flush_unused();
		xen_mc_batch();
	}

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;

#ifdef CONFIG_X86_PAE
	op->cmd = MMUEXT_PIN_L3_TABLE;
#else
	op->cmd = MMUEXT_PIN_L2_TABLE;
#endif
	op->arg1.mfn = pfn_to_mfn(PFN_DOWN(__pa(pgd)));
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(0);
}
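
Every example in this listing follows the same skeleton: xen_mc_batch() opens (or nests into) a multicall batch, one or more hypercall entries are queued, and xen_mc_issue() closes the batch, flushing immediately unless the CPU is in the matching paravirt lazy mode. Below is a minimal sketch of that skeleton; queue_one_op() is a hypothetical placeholder for whatever queueing helper the real code uses (__xen_mc_entry(), xen_extend_mmu_update(), and so on).

/* Hypothetical sketch of the shared batch/queue/issue pattern;
 * queue_one_op() stands in for a real queueing helper such as
 * __xen_mc_entry() or xen_extend_mmu_update(). */
static void sketch_batched_update(void)
{
	xen_mc_batch();				/* open or nest into a batch */
	queue_one_op();				/* append hypercall entries */
	xen_mc_issue(PARAVIRT_LAZY_MMU);	/* flush unless in lazy MMU mode */
}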
Example #2
void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
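
This commit function is the second half of the two-step ptep_modify_prot protocol: a caller first takes the old PTE with the matching *_start hook, modifies it, then commits it through this batched path. A hedged sketch of a typical caller, assuming the (mm, addr, ptep) signatures that match the examples in this listing:

/* Assumed caller of the start/commit pair (illustration only). */
static void sketch_wrprotect_one(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep)
{
	pte_t pte = ptep_modify_prot_start(mm, addr, ptep); /* fetch old PTE */

	pte = pte_wrprotect(pte);	/* example change: drop write access */
	ptep_modify_prot_commit(mm, addr, ptep, pte); /* lands in the code above */
}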
Example #3
void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	xen_mc_batch();

	u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	ADD_STATS(prot_commit, 1);
	ADD_STATS(prot_commit_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
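
Compared with Example #2, this variant resolves the PTE's machine address with arbitrary_virt_to_machine(), which does a pagetable walk and therefore also handles addresses outside the direct map (vmalloc or ioremap), where plain virt_to_machine() would be invalid; the ADD_STATS counters additionally record whether the commit landed inside an open lazy-MMU batch.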
Example #4
static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
{
	struct mmu_update u;

	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
		return false;

	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
	u.val = pte_val_ma(pteval);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	return true;
}
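
Because xen_batched_set_pte() returns false whenever no lazy-MMU batch is open, its caller needs an unbatched fallback path. A hedged sketch of such a caller follows; the fallback to native_set_pte() is an assumption about the surrounding code, not part of this listing.

/* Assumed wrapper: try the batched path, fall back to a direct write. */
static void sketch_set_pte(pte_t *ptep, pte_t pteval)
{
	if (!xen_batched_set_pte(ptep, pteval))
		native_set_pte(ptep, pteval);	/* unbatched plain store */
}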
Example #5
static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}
Example #6
static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
{
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
#ifdef CONFIG_X86_32
		lazy_load_gs(0);
#else
		loadsegment(fs, 0);
#endif
	}

	xen_mc_batch();

	load_TLS_descriptor(t, cpu, 0);
	load_TLS_descriptor(t, cpu, 1);
	load_TLS_descriptor(t, cpu, 2);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}
Example #7
/* Release a pagetable's pages back as normal RW */
static void xen_pgd_unpin(pgd_t *pgd)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	xen_mc_batch();

	mcs = __xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_UNPIN_TABLE;
	op->arg1.mfn = pfn_to_mfn(PFN_DOWN(__pa(pgd)));

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	pgd_walk(pgd, unpin_page, TASK_SIZE);

	xen_mc_issue(0);
}
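
Note the ordering relative to Example #1: pinning walks the pagetable first (marking every page read-only) and only then queues MMUEXT_PIN_*_TABLE, while unpinning queues MMUEXT_UNPIN_TABLE first and walks the pagetable afterwards to restore the pages to RW, with both sequences carried by a single batch.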
Example #8
static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
{
	/*
	 * XXX sleazy hack: If we're being called in a lazy-cpu zone
	 * and lazy gs handling is enabled, it means we're in a
	 * context switch, and %gs has just been saved.  This means we
	 * can zero it out to prevent faults on exit from the
	 * hypervisor if the next process has no %gs.  Either way, it
	 * has been saved, and the new value will get loaded properly.
	 * This will go away as soon as Xen has been modified to not
	 * save/restore %gs for normal hypercalls.
	 *
	 * On x86_64, this hack is not used for %gs, because gs points
	 * to KERNEL_GS_BASE (and uses it for PDA references), so we
	 * must not zero %gs on x86_64.
	 *
	 * For x86_64, we need to zero %fs, otherwise we may get an
	 * exception between the new %fs descriptor being loaded and
	 * %fs being effectively cleared at __switch_to().
	 */
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
#ifdef CONFIG_X86_32
		lazy_load_gs(0);
#else
		loadsegment(fs, 0);
#endif
	}

	xen_mc_batch();

	load_TLS_descriptor(t, cpu, 0);
	load_TLS_descriptor(t, cpu, 1);
	load_TLS_descriptor(t, cpu, 2);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}
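
The load_TLS_descriptor() helper queued three times above is what actually appends the GDT updates to the batch. Below is a hedged reconstruction of what such a helper looks like in kernels of this era; treat the details (get_cpu_gdt_table(), the use of MULTI_update_descriptor()) as assumptions rather than a quote from this listing.

/* Assumed shape of the queueing helper used above (sketch). */
static void load_TLS_descriptor(struct thread_struct *t,
				unsigned int cpu, unsigned int i)
{
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
	xmaddr_t maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN + i]);
	struct multicall_space mc = __xen_mc_entry(0);	/* no extra arg space */

	MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
}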
Example #9
static int m2p_remove_override(struct page *page,
			       struct gnttab_map_grant_ref *kmap_op,
			       unsigned long mfn)
{
	unsigned long flags;
	unsigned long pfn;
	unsigned long uninitialized_var(address);
	unsigned level;
	pte_t *ptep = NULL;

	pfn = page_to_pfn(page);

	if (!PageHighMem(page)) {
		address = (unsigned long)__va(pfn << PAGE_SHIFT);
		ptep = lookup_address(address, &level);

		if (WARN(ptep == NULL || level != PG_LEVEL_4K,
			 "m2p_remove_override: pfn %lx not mapped", pfn))
			return -EINVAL;
	}

	spin_lock_irqsave(&m2p_override_lock, flags);
	list_del(&page->lru);
	spin_unlock_irqrestore(&m2p_override_lock, flags);

	if (kmap_op != NULL) {
		if (!PageHighMem(page)) {
			struct multicall_space mcs;
			struct gnttab_unmap_and_replace *unmap_op;
			struct page *scratch_page = get_balloon_scratch_page();
			unsigned long scratch_page_address = (unsigned long)
				__va(page_to_pfn(scratch_page) << PAGE_SHIFT);

			/*
			 * It might be that we queued all the m2p grant table
			 * hypercalls in a multicall and that m2p_remove_override
			 * gets called before the multicall has actually been
			 * issued. In that case the handle is still -1 because
			 * it hasn't been modified yet.
			 */
			if (kmap_op->handle == -1)
				xen_mc_flush();
			/*
			 * Now if kmap_op->handle is negative it means that the
			 * hypercall actually returned an error.
			 */
			if (kmap_op->handle == GNTST_general_error) {
				pr_warn("m2p_remove_override: pfn %lx mfn %lx, failed to modify kernel mappings",
					pfn, mfn);
				put_balloon_scratch_page();
				return -1;
			}

			xen_mc_batch();

			mcs = __xen_mc_entry(
				sizeof(struct gnttab_unmap_and_replace));
			unmap_op = mcs.args;
			unmap_op->host_addr = kmap_op->host_addr;
			unmap_op->new_addr = scratch_page_address;
			unmap_op->handle = kmap_op->handle;

			MULTI_grant_table_op(mcs.mc,
				GNTTABOP_unmap_and_replace, unmap_op, 1);

			mcs = __xen_mc_entry(0);
			MULTI_update_va_mapping(mcs.mc, scratch_page_address,
					pfn_pte(page_to_pfn(scratch_page),
					PAGE_KERNEL_RO), 0);

			xen_mc_issue(PARAVIRT_LAZY_MMU);

			kmap_op->host_addr = 0;
			put_balloon_scratch_page();
		}
	}

	/* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present
	 * somewhere in this domain, even before being added to the
	 * m2p_override (see comment above in m2p_add_override).
	 * If there are no other entries in the m2p_override corresponding
	 * to this mfn, then remove the FOREIGN_FRAME_BIT from the p2m for
	 * the original pfn (the one shared by the frontend): the backend
	 * cannot do any IO on this page anymore because it has been
	 * unshared. Removing the FOREIGN_FRAME_BIT from the p2m entry of
	 * the original pfn causes mfn_to_pfn(mfn) to return the frontend
	 * pfn again. */
	mfn &= ~FOREIGN_FRAME_BIT;
	pfn = mfn_to_pfn_no_overrides(mfn);
	if (__pfn_to_mfn(pfn) == FOREIGN_FRAME(mfn) &&
			m2p_find_override(mfn) == NULL)
		set_phys_to_machine(pfn, mfn);

	return 0;
}
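
For the batching pattern, the interesting part is the middle of the function: one batch carries two entries, a GNTTABOP_unmap_and_replace that tears down the grant mapping and an update_va_mapping that re-points the scratch page at its own read-only mapping, and both are issued together via xen_mc_issue(PARAVIRT_LAZY_MMU); the explicit xen_mc_flush() beforehand handles the case where the kmap_op is still sitting, unissued, in an earlier batch (handle == -1).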