static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		/*
		 * A compatible mmu_update multicall is already pending:
		 * args[1] holds its update count, so bump it and move the
		 * debugfs histogram entry to the new count.
		 */
		ADD_STATS(mmu_update_extended, 1);
		ADD_STATS(mmu_update_histo[mcs.mc->args[1]], -1);

		mcs.mc->args[1]++;

		if (mcs.mc->args[1] < MMU_UPDATE_HISTO)
			ADD_STATS(mmu_update_histo[mcs.mc->args[1]], 1);
		else
			ADD_STATS(mmu_update_histo[0], 1);
	} else {
		/* No multicall to extend; start a fresh single-entry one. */
		ADD_STATS(mmu_update, 1);
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
		ADD_STATS(mmu_update_histo[1], 1);
	}

	u = mcs.args;
	*u = *update;
}
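/*
 * Illustrative sketch, not taken from this file: a caller that batches a
 * single PMD write through xen_extend_mmu_update() instead of opening its
 * own MULTI_mmu_update entry. The function name xen_set_pmd_batched is
 * hypothetical; xen_mc_batch()/xen_mc_issue() are assumed to come from the
 * Xen multicall helpers (arch/x86/xen/multicalls.h).
 */
static void xen_set_pmd_batched(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();
	xen_mc_batch();				/* join or open a multicall batch */

	u.ptr = virt_to_machine(ptr).maddr;	/* machine address of the PMD slot */
	u.val = pmd_val_ma(val);		/* new entry in machine-frame form */
	xen_extend_mmu_update(&u);		/* append to the pending mmu_update multicall */

	xen_mc_issue(PARAVIRT_LAZY_MMU);	/* flushes now unless in lazy MMU mode */
	preempt_enable();
}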
void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;
	u->ptr = virt_to_machine(ptr).maddr;
	u->val = pmd_val_ma(val);
	MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}
void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	trace_xen_mmu_set_domain_pte(ptep, pteval, domid);

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;

	/* ptep might be kmapped when using 32-bit HIGHPTE */
	u->ptr = virt_to_machine(ptep).maddr;
	u->val = pte_val_ma(pteval);

	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}