static void xen_clts(void)
{
	struct multicall_space mcs;

	mcs = xen_mc_entry(0);

	MULTI_fpu_taskswitch(mcs.mc, 0);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}
static void xen_write_cr0(unsigned long cr0)
{
	struct multicall_space mcs;

	/* Only pay attention to cr0.TS; everything else is
	   ignored. */
	mcs = xen_mc_entry(0);

	MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}
static void xen_write_cr0(unsigned long cr0)
{
	struct multicall_space mcs;

	/* Cache cr0 so that xen_read_cr0() can return it without a
	   hypercall. Only cr0.TS is actually forwarded to Xen;
	   everything else is ignored. */
	this_cpu_write(xen_cr0_value, cr0);

	mcs = xen_mc_entry(0);

	MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}
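/*
 * Every function in this section ends with xen_mc_issue(), and what that
 * call does is easy to miss: the queued multicall is left pending only if
 * the CPU is currently in the matching lazy mode; otherwise it is flushed
 * to the hypervisor immediately. Below is a minimal sketch of the
 * batch/issue pair, modeled on the mainline arch/x86/xen/multicalls.h
 * helpers; the per-CPU xen_mc_irq_flags variable is an assumption carried
 * over from that header, not something defined in this section.
 */
static inline void xen_mc_batch(void)
{
	unsigned long flags;

	/* need to disable interrupts until this entry is complete */
	local_irq_save(flags);
	__this_cpu_write(xen_mc_irq_flags, flags);
}

static inline void xen_mc_issue(unsigned mode)
{
	/* Not inside a matching lazy region: issue the batch right away. */
	if ((paravirt_get_lazy_mode() & mode) == 0)
		xen_mc_flush();

	/* restore flags saved in xen_mc_batch() */
	local_irq_restore(this_cpu_read(xen_mc_irq_flags));
}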
/* Add an MFN override for a particular page */
static int m2p_add_override(unsigned long mfn, struct page *page,
			    struct gnttab_map_grant_ref *kmap_op)
{
	unsigned long flags;
	unsigned long pfn;
	unsigned long uninitialized_var(address);
	unsigned level;
	pte_t *ptep = NULL;

	pfn = page_to_pfn(page);
	if (!PageHighMem(page)) {
		address = (unsigned long)__va(pfn << PAGE_SHIFT);
		ptep = lookup_address(address, &level);
		if (WARN(ptep == NULL || level != PG_LEVEL_4K,
			 "m2p_add_override: pfn %lx not mapped", pfn))
			return -EINVAL;
	}

	if (kmap_op != NULL) {
		if (!PageHighMem(page)) {
			struct multicall_space mcs =
				xen_mc_entry(sizeof(*kmap_op));

			MULTI_grant_table_op(mcs.mc,
					GNTTABOP_map_grant_ref, kmap_op, 1);

			xen_mc_issue(PARAVIRT_LAZY_MMU);
		}
	}
	spin_lock_irqsave(&m2p_override_lock, flags);
	list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]);
	spin_unlock_irqrestore(&m2p_override_lock, flags);

	/* p2m(m2p(mfn)) == mfn: the mfn is already present somewhere in
	 * this domain. Set the FOREIGN_FRAME_BIT in the p2m for the other
	 * pfn so that the following mfn_to_pfn(mfn) calls will return the
	 * pfn from the m2p_override (the backend pfn) instead.
	 * We need to do this because the pages shared by the frontend
	 * (xen-blkfront) can already be locked (lock_page, called by
	 * do_read_cache_page); when the userspace backend tries to use them
	 * with direct_IO, mfn_to_pfn returns the pfn of the frontend, so
	 * do_blockdev_direct_IO is going to try to lock the same pages
	 * again, resulting in a deadlock.
	 * As a side effect, get_user_pages_fast might not be safe on the
	 * frontend pages while they are being shared with the backend,
	 * because mfn_to_pfn (which ends up being called by GUPF) will
	 * return the backend pfn rather than the frontend pfn. */
	pfn = mfn_to_pfn_no_overrides(mfn);
	if (__pfn_to_mfn(pfn) == mfn)
		set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));

	return 0;
}
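/*
 * m2p_add_override() hangs the page off a hash table keyed by mfn. The
 * table itself is not shown in this section; the sketch below of the
 * supporting declarations is modeled on arch/x86/xen/p2m.c, and the
 * RESERVE_BRK_ARRAY allocation and the hash shift value are assumptions.
 * hash_long() comes from <linux/hash.h>, and each bucket is expected to be
 * INIT_LIST_HEAD'd once at init time before the first override is added.
 */
#define M2P_OVERRIDE_HASH_SHIFT	10
#define M2P_OVERRIDE_HASH	(1 << M2P_OVERRIDE_HASH_SHIFT)

static RESERVE_BRK_ARRAY(struct list_head, m2p_overrides, M2P_OVERRIDE_HASH);
static DEFINE_SPINLOCK(m2p_override_lock);

static unsigned long mfn_hash(unsigned long mfn)
{
	return hash_long(mfn, M2P_OVERRIDE_HASH_SHIFT);
}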
static void xen_set_ldt(const void *addr, unsigned entries)
{
	struct mmuext_op *op;
	struct multicall_space mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_SET_LDT;
	op->arg1.linear_addr = (unsigned long)addr;
	op->arg2.nr_ents = entries;

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}
void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	xen_mc_batch();

	u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	ADD_STATS(prot_commit, 1);
	ADD_STATS(prot_commit_batched,
		  paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}
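/*
 * Several functions above rely on xen_extend_mmu_update(), which is not
 * shown in this section. A minimal sketch of its behavior, modeled on the
 * mainline arch/x86/xen/mmu.c helper and assuming xen_mc_extend_args()
 * with this signature: it first tries to append the new struct mmu_update
 * to the argument area of a pending MULTI_mmu_update entry (bumping its
 * count), and only starts a fresh multicall entry when that fails. This is
 * why consecutive updates issued under PARAVIRT_LAZY_MMU collapse into a
 * single mmu_update hypercall.
 */
static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		/* A compatible entry is pending: just add one more argument. */
		mcs.mc->args[1]++;
	} else {
		/* No room to extend: queue a new single-update multicall. */
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *update;
}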
void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	/* For current or kernel mappings, take the cheap paths: queue a
	   va-mapping update when batching in lazy MMU mode, or make one
	   direct hypercall. Otherwise fall back to a plain PTE write. */
	if (mm == current->mm || mm == &init_mm) {
		if (xen_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			return;
		} else if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
			return;
	}
	xen_set_pte(ptep, pteval);
}
static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
{
	struct mmu_update u;

	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
		return false;

	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
	u.val = pte_val_ma(pteval);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	return true;
}
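/*
 * For context, a sketch of the caller one would expect for the helper
 * above, modeled on the mainline xen_set_pte (treat it as an assumption,
 * not part of this section): batching is purely an optimization, so when
 * it is refused the PTE is simply written directly.
 */
static void xen_set_pte(pte_t *ptep, pte_t pteval)
{
	if (!xen_batched_set_pte(ptep, pteval))
		native_set_pte(ptep, pteval);
}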
void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;
	u->ptr = virt_to_machine(ptr).maddr;
	u->val = pmd_val_ma(val);
	MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}
void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	trace_xen_mmu_set_domain_pte(ptep, pteval, domid);

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;

	/* ptep might be kmapped when using 32-bit HIGHPTE */
	u->ptr = virt_to_machine(ptep).maddr;
	u->val = pte_val_ma(pteval);

	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
{
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
#ifdef CONFIG_X86_32
		lazy_load_gs(0);
#else
		loadsegment(fs, 0);
#endif
	}

	xen_mc_batch();

	load_TLS_descriptor(t, cpu, 0);
	load_TLS_descriptor(t, cpu, 1);
	load_TLS_descriptor(t, cpu, 2);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}
/* Release a pagetable's pages back as normal RW */
static void xen_pgd_unpin(pgd_t *pgd)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	xen_mc_batch();

	mcs = __xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_UNPIN_TABLE;
	op->arg1.mfn = pfn_to_mfn(PFN_DOWN(__pa(pgd)));

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	pgd_walk(pgd, unpin_page, TASK_SIZE);

	xen_mc_issue(0);
}
static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
{
	/*
	 * XXX sleazy hack: If we're being called in a lazy-cpu zone
	 * and lazy gs handling is enabled, it means we're in a
	 * context switch, and %gs has just been saved.  This means we
	 * can zero it out to prevent faults on exit from the
	 * hypervisor if the next process has no %gs.  Either way, it
	 * has been saved, and the new value will get loaded properly.
	 * This will go away as soon as Xen has been modified to not
	 * save/restore %gs for normal hypercalls.
	 *
	 * On x86_64, this hack is not used for %gs, because gs points
	 * to KERNEL_GS_BASE (and uses it for PDA references), so we
	 * must not zero %gs on x86_64.
	 *
	 * For x86_64, we need to zero %fs, otherwise we may get an
	 * exception between the new %fs descriptor being loaded and
	 * %fs being effectively cleared at __switch_to().
	 */
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
#ifdef CONFIG_X86_32
		lazy_load_gs(0);
#else
		loadsegment(fs, 0);
#endif
	}

	xen_mc_batch();

	load_TLS_descriptor(t, cpu, 0);
	load_TLS_descriptor(t, cpu, 1);
	load_TLS_descriptor(t, cpu, 2);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}
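/*
 * load_TLS_descriptor() is not shown in this section. A minimal sketch of
 * what it plausibly looks like, modeled on the simpler mainline helper
 * (the GDT accessor has changed names across kernel versions, so treat
 * get_cpu_gdt_table() here as an assumption): each call only queues one
 * MULTI_update_descriptor entry, which is why the three TLS slots above
 * go out to the hypervisor as a single multicall batch.
 */
static void load_TLS_descriptor(struct thread_struct *t,
				unsigned int cpu, unsigned int i)
{
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
	xmaddr_t maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN + i]);
	struct multicall_space mc = __xen_mc_entry(0);

	MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
}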
static int m2p_remove_override(struct page *page,
			       struct gnttab_map_grant_ref *kmap_op,
			       unsigned long mfn)
{
	unsigned long flags;
	unsigned long pfn;
	unsigned long uninitialized_var(address);
	unsigned level;
	pte_t *ptep = NULL;

	pfn = page_to_pfn(page);

	if (!PageHighMem(page)) {
		address = (unsigned long)__va(pfn << PAGE_SHIFT);
		ptep = lookup_address(address, &level);

		if (WARN(ptep == NULL || level != PG_LEVEL_4K,
			 "m2p_remove_override: pfn %lx not mapped", pfn))
			return -EINVAL;
	}

	spin_lock_irqsave(&m2p_override_lock, flags);
	list_del(&page->lru);
	spin_unlock_irqrestore(&m2p_override_lock, flags);

	if (kmap_op != NULL) {
		if (!PageHighMem(page)) {
			struct multicall_space mcs;
			struct gnttab_unmap_and_replace *unmap_op;
			struct page *scratch_page = get_balloon_scratch_page();
			unsigned long scratch_page_address = (unsigned long)
				__va(page_to_pfn(scratch_page) << PAGE_SHIFT);

			/*
			 * It might be that we queued all the m2p grant table
			 * hypercalls in a multicall, then m2p_remove_override
			 * gets called before the multicall has actually been
			 * issued. In this case the handle is going to be -1
			 * because it hasn't been modified yet.
			 */
			if (kmap_op->handle == -1)
				xen_mc_flush();
			/*
			 * Now if kmap_op->handle is negative it means that the
			 * hypercall actually returned an error.
			 */
			if (kmap_op->handle == GNTST_general_error) {
				pr_warn("m2p_remove_override: pfn %lx mfn %lx, failed to modify kernel mappings",
					pfn, mfn);
				put_balloon_scratch_page();
				return -1;
			}

			xen_mc_batch();

			mcs = __xen_mc_entry(
					sizeof(struct gnttab_unmap_and_replace));
			unmap_op = mcs.args;
			unmap_op->host_addr = kmap_op->host_addr;
			unmap_op->new_addr = scratch_page_address;
			unmap_op->handle = kmap_op->handle;

			MULTI_grant_table_op(mcs.mc,
					GNTTABOP_unmap_and_replace, unmap_op, 1);

			mcs = __xen_mc_entry(0);
			MULTI_update_va_mapping(mcs.mc, scratch_page_address,
					pfn_pte(page_to_pfn(scratch_page),
					PAGE_KERNEL_RO), 0);

			xen_mc_issue(PARAVIRT_LAZY_MMU);

			kmap_op->host_addr = 0;
			put_balloon_scratch_page();
		}
	}

	/* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present
	 * somewhere in this domain, even before being added to the
	 * m2p_override (see comment above in m2p_add_override).
	 * If there are no other entries in the m2p_override corresponding
	 * to this mfn, then remove the FOREIGN_FRAME_BIT from the p2m for
	 * the original pfn (the one shared by the frontend): the backend
	 * cannot do any IO on this page anymore because it has been
	 * unshared. Removing the FOREIGN_FRAME_BIT from the p2m entry of
	 * the original pfn causes mfn_to_pfn(mfn) to return the frontend
	 * pfn again. */
	mfn &= ~FOREIGN_FRAME_BIT;
	pfn = mfn_to_pfn_no_overrides(mfn);
	if (__pfn_to_mfn(pfn) == FOREIGN_FRAME(mfn) &&
			m2p_find_override(mfn) == NULL)
		set_phys_to_machine(pfn, mfn);

	return 0;
}
int m2p_remove_override(struct page *page, bool clear_pte)
{
	unsigned long flags;
	unsigned long mfn;
	unsigned long pfn;
	unsigned long uninitialized_var(address);
	unsigned level;
	pte_t *ptep = NULL;
	int ret = 0;

	pfn = page_to_pfn(page);
	mfn = get_phys_to_machine(pfn);
	if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT))
		return -EINVAL;

	if (!PageHighMem(page)) {
		address = (unsigned long)__va(pfn << PAGE_SHIFT);
		ptep = lookup_address(address, &level);

		if (WARN(ptep == NULL || level != PG_LEVEL_4K,
			 "m2p_remove_override: pfn %lx not mapped", pfn))
			return -EINVAL;
	}

	spin_lock_irqsave(&m2p_override_lock, flags);
	list_del(&page->lru);
	spin_unlock_irqrestore(&m2p_override_lock, flags);
	WARN_ON(!PagePrivate(page));
	ClearPagePrivate(page);

	if (clear_pte) {
		struct gnttab_map_grant_ref *map_op =
			(struct gnttab_map_grant_ref *) page->index;
		set_phys_to_machine(pfn, map_op->dev_bus_addr);
		if (!PageHighMem(page)) {
			struct multicall_space mcs;
			struct gnttab_unmap_grant_ref *unmap_op;

			/*
			 * It might be that we queued all the m2p grant table
			 * hypercalls in a multicall, then m2p_remove_override
			 * gets called before the multicall has actually been
			 * issued. In this case the handle is going to be -1
			 * because it hasn't been modified yet.
			 */
			if (map_op->handle == -1)
				xen_mc_flush();
			/*
			 * Now if map_op->handle is negative it means that the
			 * hypercall actually returned an error.
			 */
			if (map_op->handle == GNTST_general_error) {
				printk(KERN_WARNING "m2p_remove_override: "
						"pfn %lx mfn %lx, failed to modify kernel mappings",
						pfn, mfn);
				return -1;
			}

			mcs = xen_mc_entry(
					sizeof(struct gnttab_unmap_grant_ref));
			unmap_op = mcs.args;
			unmap_op->host_addr = map_op->host_addr;
			unmap_op->handle = map_op->handle;
			unmap_op->dev_bus_addr = 0;

			MULTI_grant_table_op(mcs.mc,
					GNTTABOP_unmap_grant_ref, unmap_op, 1);

			xen_mc_issue(PARAVIRT_LAZY_MMU);

			set_pte_at(&init_mm, address, ptep,
					pfn_pte(pfn, PAGE_KERNEL));
			__flush_tlb_single(address);
			map_op->host_addr = 0;
		}
	} else
		set_phys_to_machine(pfn, page->index);

	/* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present
	 * somewhere in this domain, even before being added to the
	 * m2p_override (see comment above in m2p_add_override).
	 * If there are no other entries in the m2p_override corresponding
	 * to this mfn, then remove the FOREIGN_FRAME_BIT from the p2m for
	 * the original pfn (the one shared by the frontend): the backend
	 * cannot do any IO on this page anymore because it has been
	 * unshared. Removing the FOREIGN_FRAME_BIT from the p2m entry of
	 * the original pfn causes mfn_to_pfn(mfn) to return the frontend
	 * pfn again. */
	mfn &= ~FOREIGN_FRAME_BIT;
	ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
	if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
			m2p_find_override(mfn) == NULL)
		set_phys_to_machine(pfn, mfn);

	return 0;
}