/*
 * Copy a page. If either page lives in highmem and the hypervisor offers
 * XENFEAT_highmem_assist, let Xen copy the frame directly; the
 * mfn_to_pfn() round trips verify that both frames really belong to this
 * domain before issuing the hypercall. Otherwise fall back to an atomic
 * kmap copy.
 */
void copy_highpage(struct page *to, struct page *from)
{
        void *vfrom, *vto;

        if (likely(xen_feature(XENFEAT_highmem_assist)) &&
            (PageHighMem(from) || PageHighMem(to))) {
                unsigned long from_pfn = page_to_pfn(from);
                unsigned long to_pfn = page_to_pfn(to);
                struct mmuext_op meo;

                meo.cmd = MMUEXT_COPY_PAGE;
                meo.arg1.mfn = pfn_to_mfn(to_pfn);
                meo.arg2.src_mfn = pfn_to_mfn(from_pfn);
                if (mfn_to_pfn(meo.arg2.src_mfn) == from_pfn &&
                    mfn_to_pfn(meo.arg1.mfn) == to_pfn &&
                    HYPERVISOR_mmuext_op(&meo, 1, NULL, DOMID_SELF) == 0)
                        return;
        }

        vfrom = kmap_atomic(from, KM_USER0);
        vto = kmap_atomic(to, KM_USER1);
        copy_page(vto, vfrom);
        kunmap_atomic(vfrom, KM_USER0);
        kunmap_atomic(vto, KM_USER1);
}
void xen_invlpg_all(unsigned long ptr)
{
        struct mmuext_op op;

        op.cmd = MMUEXT_INVLPG_ALL;
        op.arg1.linear_addr = ptr & PAGE_MASK;
        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
void xen_pgd_unpin(unsigned long ptr)
{
        struct mmuext_op op;

        op.cmd = MMUEXT_UNPIN_TABLE;
        op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
void xen_new_user_pt(unsigned long ptr)
{
        struct mmuext_op op;

        op.cmd = MMUEXT_NEW_USER_BASEPTR;
        op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
void xen_set_ldt(unsigned long ptr, unsigned long len)
{
        struct mmuext_op op;

        op.cmd = MMUEXT_SET_LDT;
        op.arg1.linear_addr = ptr;
        op.arg2.nr_ents = len;
        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
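/*
 * The helpers above issue one op per hypercall, but HYPERVISOR_mmuext_op()
 * accepts an array and processes it in order. A minimal sketch of batching
 * two local invalidations into a single trap; xen_invlpg_pair() is an
 * illustrative name, not taken from the sources above.
 */
void xen_invlpg_pair(unsigned long va1, unsigned long va2)
{
        struct mmuext_op ops[2];

        ops[0].cmd = MMUEXT_INVLPG_LOCAL;
        ops[0].arg1.linear_addr = va1 & PAGE_MASK;
        ops[1].cmd = MMUEXT_INVLPG_LOCAL;
        ops[1].arg1.linear_addr = va2 & PAGE_MASK;
        /* One hypercall instead of two; ops complete in array order. */
        BUG_ON(HYPERVISOR_mmuext_op(ops, 2, NULL, DOMID_SELF) < 0);
}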
void xen_tlb_flush_mask(cpumask_t *mask)
{
        struct mmuext_op op;

        if (cpus_empty(*mask))
                return;
        op.cmd = MMUEXT_TLB_FLUSH_MULTI;
        op.arg2.vcpumask = mask->bits;
        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr)
{
        struct mmuext_op op;

        if (cpus_empty(*mask))
                return;
        op.cmd = MMUEXT_INVLPG_MULTI;
        op.arg1.linear_addr = ptr & PAGE_MASK;
        op.arg2.vcpumask = mask->bits;
        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
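/*
 * A hedged usage sketch for the _MULTI variants above: invalidate one page
 * on every online CPU except the caller. cpu_online_map, cpu_clear() and
 * smp_processor_id() are the cpumask API of the same kernel era;
 * flush_page_on_others() itself is hypothetical.
 */
static void flush_page_on_others(unsigned long va)
{
        cpumask_t mask = cpu_online_map;

        cpu_clear(smp_processor_id(), mask);
        xen_invlpg_mask(&mask, va);
}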
void xpq_queue_invlpg(vaddr_t va)
{
        struct mmuext_op op;

        xpq_flush_queue();
        XENPRINTK2(("xpq_queue_invlpg %#" PRIxVADDR "\n", va));
        op.cmd = MMUEXT_INVLPG_LOCAL;
        op.arg1.linear_addr = va & ~PAGE_MASK;
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xpq_queue_invlpg");
}
/* Zero a physical page */
void xen_pagezero(paddr_t pa)
{
        mmuext_op_t op;

        op.cmd = MMUEXT_CLEAR_PAGE;
        op.arg1.mfn = xpmap_ptom(pa) >> PAGE_SHIFT;
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic(__func__);
}
void xpq_queue_tlb_flush(void)
{
        struct mmuext_op op;

        xpq_flush_queue();
        XENPRINTK2(("xpq_queue_tlb_flush\n"));
        op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xpq_queue_tlb_flush");
}
void xpq_queue_unpin_table(paddr_t pa)
{
        struct mmuext_op op;

        xpq_flush_queue();
        XENPRINTK2(("xpq_queue_unpin_table: %#" PRIxPADDR "\n", pa));
        op.arg1.mfn = pa >> PAGE_SHIFT;
        op.cmd = MMUEXT_UNPIN_TABLE;
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xpq_queue_unpin_table");
}
void xpq_queue_pt_switch(paddr_t pa)
{
        struct mmuext_op op;

        xpq_flush_queue();
        XENPRINTK2(("xpq_queue_pt_switch: %#" PRIxPADDR "\n", pa));
        op.cmd = MMUEXT_NEW_BASEPTR;
        op.arg1.mfn = pa >> PAGE_SHIFT;
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xpq_queue_pt_switch");
}
/* Copy a page */
void xen_copy_page(paddr_t srcpa, paddr_t dstpa)
{
        mmuext_op_t op;

        op.cmd = MMUEXT_COPY_PAGE;
        op.arg1.mfn = xpmap_ptom(dstpa) >> PAGE_SHIFT;
        op.arg2.src_mfn = xpmap_ptom(srcpa) >> PAGE_SHIFT;
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic(__func__);
}
void xen_pgd_pin(unsigned long ptr)
{
        struct mmuext_op op;

#ifdef CONFIG_X86_64
        op.cmd = MMUEXT_PIN_L4_TABLE;
#elif defined(CONFIG_X86_PAE)
        op.cmd = MMUEXT_PIN_L3_TABLE;
#else
        op.cmd = MMUEXT_PIN_L2_TABLE;
#endif
        op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
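/*
 * Pinning makes Xen validate the whole table once and type-lock it, so
 * later MMUEXT_NEW_BASEPTR loads are cheap. A minimal lifecycle sketch
 * around the helpers above; pgd_lifecycle_sketch() is illustrative, and a
 * real port must also make the table read-only before pinning.
 */
static void pgd_lifecycle_sketch(pgd_t *pgd)
{
        unsigned long ptr = __pa(pgd);

        xen_pgd_pin(ptr);       /* validate + type-pin the top-level table */
        /* ... the pgd may now be loaded with MMUEXT_NEW_BASEPTR ... */
        xen_pgd_unpin(ptr);     /* must unpin before the page is freed */
}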
void xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
{
        struct mmuext_op op;

        xpq_flush_queue();
        XENPRINTK2(("xpq_queue_set_ldt\n"));
        KASSERT(va == (va & ~PAGE_MASK));
        op.cmd = MMUEXT_SET_LDT;
        op.arg1.linear_addr = va;
        op.arg2.nr_ents = entries;
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xpq_queue_set_ldt");
}
void xen_set_ldt(user_desc_t *ldt, uint_t nsels)
{
        struct mmuext_op op;
        long err;

        op.cmd = MMUEXT_SET_LDT;
        op.arg1.linear_addr = (uintptr_t)ldt;
        op.arg2.nr_ents = nsels;
        if ((err = HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF)) != 0) {
                panic("xen_set_ldt(%p, %d): error %d",
                    (void *)ldt, nsels, -(int)err);
        }
}
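/*
 * Passing a NULL base with zero entries reverts the vcpu to the default
 * (null) LDT; a one-line sketch on top of the helper above, where
 * xen_clear_ldt() is an illustrative name.
 */
static void xen_clear_ldt(void)
{
        xen_set_ldt(NULL, 0);
}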
/*
 * lvl is one of the MMUEXT_PIN_L[1-4]_TABLE command constants (0..3),
 * which is why the trace message prints lvl + 1.
 */
void xpq_queue_pin_table(paddr_t pa, int lvl)
{
        struct mmuext_op op;

        xpq_flush_queue();
        XENPRINTK2(("xpq_queue_pin_l%d_table: %#" PRIxPADDR "\n",
            lvl + 1, pa));
        op.arg1.mfn = pa >> PAGE_SHIFT;
        op.cmd = lvl;
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xpq_queue_pin_table");
}
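/*
 * Thin per-level wrappers in the NetBSD style might look like this sketch;
 * the names follow the trace message above but are written here for
 * illustration.
 */
static inline void
xpq_queue_pin_l2_table(paddr_t pa)
{
        xpq_queue_pin_table(pa, MMUEXT_PIN_L2_TABLE);
}

static inline void
xpq_queue_pin_l4_table(paddr_t pa)
{
        xpq_queue_pin_table(pa, MMUEXT_PIN_L4_TABLE);
}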
/* This is a synchronous call. */
void xen_bcast_tlbflush(void)
{
        mmuext_op_t op;

        /* Flush pending page updates */
        xpq_flush_queue();

        op.cmd = MMUEXT_TLB_FLUSH_ALL;
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xen_bcast_tlbflush");
}
void xen_bcast_invlpg(vaddr_t va)
{
        mmuext_op_t op;

        /* Flush pending page updates */
        xpq_flush_queue();

        op.cmd = MMUEXT_INVLPG_ALL;
        op.arg1.linear_addr = va;
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xen_bcast_invlpg");
}
void clear_highpage(struct page *page)
{
        void *kaddr;

        if (likely(xen_feature(XENFEAT_highmem_assist)) &&
            PageHighMem(page)) {
                struct mmuext_op meo;

                meo.cmd = MMUEXT_CLEAR_PAGE;
                meo.arg1.mfn = pfn_to_mfn(page_to_pfn(page));
                if (HYPERVISOR_mmuext_op(&meo, 1, NULL, DOMID_SELF) == 0)
                        return;
        }

        kaddr = kmap_atomic(page, KM_USER0);
        clear_page(kaddr);
        kunmap_atomic(kaddr, KM_USER0);
}
/* This is a synchronous call. */
void xen_mcast_tlbflush(kcpuset_t *kc)
{
        xcpumask_t xcpumask;
        mmuext_op_t op;

        kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));

        /* Flush pending page updates */
        xpq_flush_queue();

        op.cmd = MMUEXT_TLB_FLUSH_MULTI;
        op.arg2.vcpumask = &xcpumask.xcpum_xm;
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xen_mcast_tlbflush");
}
static int
privcmd_HYPERVISOR_mmuext_op(struct mmuext_op *op, int count, uint_t *scount,
    domid_t domid)
{
        int error, bytes;
        uint_t kscount;
        struct mmuext_op *kop, single_kop;
        import_export_t op_ie, scnt_ie;

        op_ie = scnt_ie = null_ie;
        error = 0;

        if (count >= 1) {
                bytes = count * sizeof (*kop);
                kop = (count == 1) ? &single_kop : kmem_alloc(bytes, KM_SLEEP);
                error = import_buffer(&op_ie, op, kop, bytes, IE_IMPORT);
        }

        DTRACE_XPV2(mmu__ext__op__start, int, count, struct mmuext_op *,
            ((error == -X_EFAULT) ? op : kop));

        if (scount != NULL && error == 0)
                error = import_buffer(&scnt_ie, scount, &kscount,
                    sizeof (kscount), IE_EXPORT);

        if (error == 0)
                error = HYPERVISOR_mmuext_op(kop, count, &kscount, domid);
        export_buffer(&op_ie, &error);
        export_buffer(&scnt_ie, &error);

        DTRACE_XPV1(mmu__ext__op__end, int, error);

        if (count > 1)
                kmem_free(kop, bytes);
        return (error);
}
/*
 * Fault-injection jprobe handler hooked onto xen_pgd_pin(): it rebuilds
 * the pin op, randomly corrupts either the command (wrong pin level,
 * fault == 0) or one bit of the target mfn (fault == 1), and issues the
 * corrupted op on the next entry (leave == 1), to test how the hypervisor
 * reacts to bad MMUEXT pin requests.
 */
void my_xen_pgd_pin(unsigned long ptr)
{
        if (aim != xen_pgd_pin_id || signal == 0)
                jprobe_return();
        if (leave == 1) {
                HYPERVISOR_mmuext_op(&mmuextop, 1, NULL, DOMID_SELF);
                leave = 0;
        }
        if (time > 0)
                --time;
        else {
                signal = 0;
                printk("Done.\n");
                jprobe_return();
        }
        printk("Fortune: xen_pgd_pin from %s\n", current->comm);
#ifdef CONFIG_X86_64
        mmuextop.cmd = MMUEXT_PIN_L4_TABLE;
#elif defined(CONFIG_X86_PAE)
        mmuextop.cmd = MMUEXT_PIN_L3_TABLE;
#else
        mmuextop.cmd = MMUEXT_PIN_L2_TABLE;
#endif
        mmuextop.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
        printk("Fortune: cmd=0x%x\tmfn=0x%lx\n", mmuextop.cmd,
               mmuextop.arg1.mfn);
        if (fault == 0) {
                getrando(3);
                if (rando == 0)
                        mmuextop.cmd = MMUEXT_PIN_L4_TABLE;
                if (rando == 1)
                        mmuextop.cmd = MMUEXT_PIN_L3_TABLE;
                if (rando == 2)
                        mmuextop.cmd = MMUEXT_PIN_L2_TABLE;
                printk("Fortune: change cmd to 0x%x\n", mmuextop.cmd);
                leave = 1;
        }
        if (fault == 1) {
                getrando(32);
                mmuextop.arg1.mfn ^= (1 << rando);
                printk("Fortune: change mfn to 0x%lx\n", mmuextop.arg1.mfn);
                leave = 1;
        }
        jprobe_return();
}
void xen_tlb_flush_all(void)
{
        struct mmuext_op op;

        op.cmd = MMUEXT_TLB_FLUSH_ALL;
        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
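/*
 * When batching ops, the third argument to HYPERVISOR_mmuext_op() receives
 * the number of ops that completed, which locates the failing entry on
 * error. A hedged sketch; apply_mmuext_batch() is an illustrative name,
 * not part of any source above.
 */
static int apply_mmuext_batch(struct mmuext_op *ops, unsigned int nr)
{
        unsigned int done = 0;
        int rc;

        rc = HYPERVISOR_mmuext_op(ops, nr, &done, DOMID_SELF);
        if (rc < 0)
                printk("mmuext batch failed at op %u of %u (rc %d)\n",
                       done, nr, rc);
        return rc;
}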