Example #1
static void lazy_hcall1(unsigned long call, unsigned long arg1)
{
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
		hcall(call, arg1, 0, 0, 0);
	else
		async_hcall(call, arg1, 0, 0, 0);
}
Example #2
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_map_grant_ref *kmap_ops,
		      struct page **pages, unsigned int count)
{
	int i, ret;
	bool lazy = false;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
	if (ret)
		return ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return ret;

	if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
		arch_enter_lazy_mmu_mode();
		lazy = true;
	}

	for (i = 0; i < count; i++) {
		ret = m2p_remove_override(pages[i], kmap_ops ?
				       &kmap_ops[i] : NULL);
		if (ret)
			return ret;
	}

	if (lazy)
		arch_leave_lazy_mmu_mode();

	return ret;
}
Example #3
void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	ADD_STATS(set_pte_at, 1);
//	ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
	ADD_STATS(set_pte_at_current, mm == current->mm);
	ADD_STATS(set_pte_at_kernel, mm == &init_mm);

	if (mm == current->mm || mm == &init_mm) {
		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			ADD_STATS(set_pte_at_batched, 1);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			goto out;
		} else
			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
				goto out;
	}
	xen_set_pte(ptep, pteval);

out:	return;
}
Example #4
/* You can imagine what lazy_hcall2, 3 and 4 look like. :*/
static void lazy_hcall2(unsigned long call,
		       unsigned long arg1,
		       unsigned long arg2)
{
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
		kvm_hypercall2(call, arg1, arg2);
	else
		async_hcall(call, arg1, arg2, 0, 0);
}
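The comment is an invitation worth taking up: the remaining variants follow mechanically. A minimal sketch of lazy_hcall3 under the same pattern, assuming the hcall()/async_hcall() helpers used in Examples #1 and #7 rather than the kvm_hypercall2() seen in this particular version:

static void lazy_hcall3(unsigned long call,
			unsigned long arg1,
			unsigned long arg2,
			unsigned long arg3)
{
	/* Outside lazy mode, issue the hypercall immediately;
	 * inside lazy mode, queue it to be flushed later. */
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
		hcall(call, arg1, arg2, arg3, 0);
	else
		async_hcall(call, arg1, arg2, arg3, 0);
}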
Example #5
int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
			    struct gnttab_map_grant_ref *kmap_ops,
			    struct page **pages, unsigned int count)
{
	int i, ret = 0;
	bool lazy = false;
	pte_t *pte;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	if (kmap_ops &&
	    !in_interrupt() &&
	    paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
		arch_enter_lazy_mmu_mode();
		lazy = true;
	}

	for (i = 0; i < count; i++) {
		unsigned long mfn, pfn;

		/* Do not add to override if the map failed. */
		if (map_ops[i].status)
			continue;

		if (map_ops[i].flags & GNTMAP_contains_pte) {
			pte = (pte_t *)(mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
				(map_ops[i].host_addr & ~PAGE_MASK));
			mfn = pte_mfn(*pte);
		} else {
			mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
		}
		pfn = page_to_pfn(pages[i]);

		WARN_ON(PagePrivate(pages[i]));
		SetPagePrivate(pages[i]);
		set_page_private(pages[i], mfn);
		pages[i]->index = pfn_to_mfn(pfn);

		if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
			ret = -ENOMEM;
			goto out;
		}

		if (kmap_ops) {
			ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]);
			if (ret)
				goto out;
		}
	}

out:
	if (lazy)
		arch_leave_lazy_mmu_mode();

	return ret;
}
Example #6
void paravirt_flush_lazy_mmu(void)
{
	preempt_disable();

	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
		arch_leave_lazy_mmu_mode();
		arch_enter_lazy_mmu_mode();
	}

	preempt_enable();
}
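The leave/re-enter pair above only flushes anything because a backend's leave hook drains its batch queue. For illustration, a sketch of a Xen-style MMU leave hook, modeled on the xen_mc_flush() usage in Example #18; treat it as an assumption, not the exact hook in any particular kernel version:

static void xen_leave_lazy_mmu(void)
{
	preempt_disable();
	xen_mc_flush();			/* drain any batched multicalls */
	paravirt_leave_lazy_mmu();	/* reset the per-cpu lazy mode */
	preempt_enable();
}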
Example #7
static void lazy_hcall4(unsigned long call,
			unsigned long arg1,
			unsigned long arg2,
			unsigned long arg3,
			unsigned long arg4)
{
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
		hcall(call, arg1, arg2, arg3, arg4);
	else
		async_hcall(call, arg1, arg2, arg3, arg4);
}
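For context, a sketch of the async_hcall() these wrappers fall back to, along the lines of lguest's ring-buffer implementation; the lguest_data.hcalls / hcall_status fields and LHCALL_RING_SIZE are lguest names and may differ across kernel versions:

static void async_hcall(unsigned long call, unsigned long arg1,
			unsigned long arg2, unsigned long arg3,
			unsigned long arg4)
{
	/* Note: this assumes we're uniprocessor. */
	static unsigned int next_call;
	unsigned long flags;

	/* Disable interrupts so an interrupt handler can't race with us. */
	local_irq_save(flags);
	if (lguest_data.hcall_status[next_call] != 0xFF) {
		/* Ring full: make a synchronous call, which flushes it. */
		hcall(call, arg1, arg2, arg3, arg4);
	} else {
		lguest_data.hcalls[next_call].arg0 = call;
		lguest_data.hcalls[next_call].arg1 = arg1;
		lguest_data.hcalls[next_call].arg2 = arg2;
		lguest_data.hcalls[next_call].arg3 = arg3;
		lguest_data.hcalls[next_call].arg4 = arg4;
		/* All arguments must be written before the slot is marked ready. */
		wmb();
		lguest_data.hcall_status[next_call] = 0;
		if (++next_call == LHCALL_RING_SIZE)
			next_call = 0;
	}
	local_irq_restore(flags);
}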
Example #8
void arch_flush_lazy_cpu_mode(void)
{
	preempt_disable();

	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
		WARN_ON(preempt_count() == 1);
		arch_leave_lazy_cpu_mode();
		arch_enter_lazy_cpu_mode();
	}

	preempt_enable();
}
Example #9
static void kvm_deferred_mmu_op(void *buffer, int len)
{
	struct kvm_para_state *state = kvm_para_state();

	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU) {
		kvm_mmu_op(buffer, len);
		return;
	}
	if (state->mmu_queue_len + len > sizeof state->mmu_queue)
		mmu_queue_flush(state);
	memcpy(state->mmu_queue + state->mmu_queue_len, buffer, len);
	state->mmu_queue_len += len;
}
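The mmu_queue_flush() used above just hands any queued bytes to the host in one batch; a plausible sketch, assuming the kvm_para_state layout implied by this example:

static void mmu_queue_flush(struct kvm_para_state *state)
{
	if (state->mmu_queue_len) {
		/* Submit the whole queue to the host as a single mmu op. */
		kvm_mmu_op(state->mmu_queue, state->mmu_queue_len);
		state->mmu_queue_len = 0;
	}
}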
Example #10
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
                    struct gnttab_map_grant_ref *kmap_ops,
                    struct page **pages, unsigned int count)
{
    int i, ret;
    bool lazy = false;
    pte_t *pte;
    unsigned long mfn;

    ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
    if (ret)
        return ret;

    /* Retry eagain maps */
    for (i = 0; i < count; i++)
        if (map_ops[i].status == GNTST_eagain)
            gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
                                    &map_ops[i].status, __func__);

    if (xen_feature(XENFEAT_auto_translated_physmap))
        return ret;

    if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
        arch_enter_lazy_mmu_mode();
        lazy = true;
    }

    for (i = 0; i < count; i++) {
        /* Do not add to override if the map failed. */
        if (map_ops[i].status)
            continue;

        if (map_ops[i].flags & GNTMAP_contains_pte) {
            pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
                             (map_ops[i].host_addr & ~PAGE_MASK));
            mfn = pte_mfn(*pte);
        } else {
            mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
        }
        ret = m2p_add_override(mfn, pages[i], kmap_ops ?
                               &kmap_ops[i] : NULL);
        if (ret)
            goto out;
    }

out:
    if (lazy)
        arch_leave_lazy_mmu_mode();

    return ret;
}
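The retry helper referenced above resubmits a single failing op while the hypervisor keeps returning GNTST_eagain, backing off between attempts. A sketch of the idea; the MAX_DELAY cap and the exact error reporting are assumptions:

static void gnttab_retry_eagain_gop(unsigned int cmd, void *gop,
				    int16_t *status, const char *func)
{
	int delay = 1;

	do {
		/* Resubmit just this one op, sleeping a little longer each try. */
		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
		if (*status == GNTST_eagain)
			msleep(delay++);
	} while (*status == GNTST_eagain && delay < MAX_DELAY);

	if (delay >= MAX_DELAY) {
		pr_err("%s: %s eagain grant\n", func, current->comm);
		*status = GNTST_bad_page;	/* give up: report a hard failure */
	}
}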
Example #11
void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	xen_mc_batch();

	u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	ADD_STATS(prot_commit, 1);
	ADD_STATS(prot_commit_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
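This commit is the second half of a start/commit pair. Because the commit uses MMU_PT_UPDATE_PRESERVE_AD, the start half can be as simple as returning the current pte; a sketch:

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep)
{
	/* No need to atomically clear the pte here: the commit uses
	 * MMU_PT_UPDATE_PRESERVE_AD, so accessed/dirty updates that
	 * race with us are preserved by the hypervisor. */
	return *ptep;
}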
Example #12
static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
{
	struct mmu_update u;

	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
		return false;

	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
	u.val = pte_val_ma(pteval);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	return true;
}
Example #13
static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
{
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
#ifdef CONFIG_X86_32
		lazy_load_gs(0);
#else
		loadsegment(fs, 0);
#endif
	}

	xen_mc_batch();

	load_TLS_descriptor(t, cpu, 0);
	load_TLS_descriptor(t, cpu, 1);
	load_TLS_descriptor(t, cpu, 2);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}
Example #14
void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);

	ADD_STATS(pmd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}
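xen_set_pmd_hyper() is the slow path; a plausible fast-path wrapper writes the entry directly when the page is not pinned as a live pagetable. A sketch, assuming a xen_page_pinned() helper like the one referenced in Example #3's commented-out line:

void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	ADD_STATS(pmd_update, 1);

	/* If the page isn't pinned, the hypervisor isn't validating it
	 * as a pagetable yet, so a plain store is enough. */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pmd_hyper(ptr, val);
}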
Example #15
int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
			      struct gnttab_map_grant_ref *kmap_ops,
			      struct page **pages, unsigned int count)
{
	int i, ret = 0;
	bool lazy = false;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	if (kmap_ops &&
	    !in_interrupt() &&
	    paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
		arch_enter_lazy_mmu_mode();
		lazy = true;
	}

	for (i = 0; i < count; i++) {
		unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
		unsigned long pfn = page_to_pfn(pages[i]);

		if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
			ret = -EINVAL;
			goto out;
		}

		set_page_private(pages[i], INVALID_P2M_ENTRY);
		WARN_ON(!PagePrivate(pages[i]));
		ClearPagePrivate(pages[i]);
		set_phys_to_machine(pfn, pages[i]->index);

		if (kmap_ops)
			ret = m2p_remove_override(pages[i], &kmap_ops[i], mfn);
		if (ret)
			goto out;
	}

out:
	if (lazy)
		arch_leave_lazy_mmu_mode();
	return ret;
}
Example #16
static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
{
	/*
	 * XXX sleazy hack: If we're being called in a lazy-cpu zone
	 * and lazy gs handling is enabled, it means we're in a
	 * context switch, and %gs has just been saved.  This means we
	 * can zero it out to prevent faults on exit from the
	 * hypervisor if the next process has no %gs.  Either way, it
	 * has been saved, and the new value will get loaded properly.
	 * This will go away as soon as Xen has been modified to not
	 * save/restore %gs for normal hypercalls.
	 *
	 * On x86_64, this hack is not used for %gs, because gs points
	 * to KERNEL_GS_BASE (and uses it for PDA references), so we
	 * must not zero %gs on x86_64
	 *
	 * For x86_64, we need to zero %fs, otherwise we may get an
	 * exception between the new %fs descriptor being loaded and
	 * %fs being effectively cleared at __switch_to().
	 */
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
#ifdef CONFIG_X86_32
		lazy_load_gs(0);
#else
		loadsegment(fs, 0);
#endif
	}

	xen_mc_batch();

	load_TLS_descriptor(t, cpu, 0);
	load_TLS_descriptor(t, cpu, 1);
	load_TLS_descriptor(t, cpu, 2);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}
Example #17
/* When lazy mode is turned off reset the per-cpu lazy mode variable and then
 * issue the do-nothing hypercall to flush any stored calls. */
static void lguest_leave_lazy_mode(void)
{
	paravirt_leave_lazy(paravirt_get_lazy_mode());
	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
}
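Examples #17, #18 and #20 all reset the per-cpu mode via paravirt_leave_lazy() before flushing; a sketch of what that common helper amounts to (the per-cpu accessor spelling varies across kernel versions):

void paravirt_leave_lazy(enum paravirt_lazy_mode mode)
{
	/* Leaving a mode we never entered is a bug. */
	BUG_ON(__get_cpu_var(paravirt_lazy_mode) != mode);
	__get_cpu_var(paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;
}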
Example #18
void xen_leave_lazy(void)
{
	paravirt_leave_lazy(paravirt_get_lazy_mode());
	xen_mc_flush();
}
Example #19
File: kvm.c Project: AppEngine/linux-2.6
static void kvm_flush_tlb(void)
{
	struct kvm_mmu_op_flush_tlb ftlb = {
		.header.op = KVM_MMU_OP_FLUSH_TLB,
	};

	kvm_deferred_mmu_op(&ftlb, sizeof ftlb);
}

static void kvm_release_pt(unsigned long pfn)
{
	struct kvm_mmu_op_release_pt rpt = {
		.header.op = KVM_MMU_OP_RELEASE_PT,
		.pt_phys = (u64)pfn << PAGE_SHIFT,
	};

	kvm_mmu_op(&rpt, sizeof rpt);
}

static void kvm_enter_lazy_mmu(void)
{
	struct kvm_para_state *state = kvm_para_state();

	paravirt_enter_lazy_mmu();
	state->mode = paravirt_get_lazy_mode();
}

static void kvm_leave_lazy_mmu(void)
{
	struct kvm_para_state *state = kvm_para_state();

	mmu_queue_flush(state);
	paravirt_leave_lazy_mmu();
	state->mode = paravirt_get_lazy_mode();
}

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";
	pv_info.paravirt_enabled = 1;

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

	if (kvm_para_has_feature(KVM_FEATURE_MMU_OP)) {
		pv_mmu_ops.set_pte = kvm_set_pte;
		pv_mmu_ops.set_pte_at = kvm_set_pte_at;
		pv_mmu_ops.set_pmd = kvm_set_pmd;
#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
		pv_mmu_ops.set_pte_atomic = kvm_set_pte_atomic;
		pv_mmu_ops.pte_clear = kvm_pte_clear;
		pv_mmu_ops.pmd_clear = kvm_pmd_clear;
#endif
		pv_mmu_ops.set_pud = kvm_set_pud;
#if PAGETABLE_LEVELS == 4
		pv_mmu_ops.set_pgd = kvm_set_pgd;
#endif
#endif
		pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
		pv_mmu_ops.release_pte = kvm_release_pt;
		pv_mmu_ops.release_pmd = kvm_release_pt;
		pv_mmu_ops.release_pud = kvm_release_pt;

		pv_mmu_ops.lazy_mode.enter = kvm_enter_lazy_mmu;
		pv_mmu_ops.lazy_mode.leave = kvm_leave_lazy_mmu;
	}
#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

void __init kvm_guest_init(void)
{
	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
}
Example #20
/* When lazy mode is turned off reset the per-cpu lazy mode variable and then
 * issue the do-nothing hypercall to flush any stored calls. */
static void lguest_leave_lazy_mode(void)
{
	paravirt_leave_lazy(paravirt_get_lazy_mode());
	kvm_hypercall0(LHCALL_FLUSH_ASYNC);
}