/* jprobe handler: inject a single-bit fault into xen_l3_entry_update */
void my_xen_l3_entry_update(pud_t *ptr, pud_t val)
{
	if (aim != xen_l3_entry_update_id || signal == 0)
		jprobe_return();

	/* Replay the corrupted update prepared on the previous hit. */
	if (leave == 1) {
		BUG_ON(HYPERVISOR_mmu_update(&mmuupdateop, 1, NULL, DOMID_SELF) < 0);
		leave = 0;
	}

	if (time > 0)
		--time;
	else {
		signal = 0;
		printk("Done.\n");
		jprobe_return();
	}

	printk("Fortune: xen_l3_entry_update from %s\n", current->comm);
	printk("Fortune: pud=0x%llx\tval=0x%llx\n", ptr->pgd.pgd, val.pgd.pgd);
	printk("Fortune: pud real=0x%llx\n", virt_to_machine(ptr));

	mmuupdateop.ptr = virt_to_machine(ptr);
	mmuupdateop.val = val.pgd.pgd;

	if (fault == 0) {	/* flip one bit in the machine address */
		getrando(64);
		mmuupdateop.ptr ^= (1ULL << rando);
		printk("Fortune: change ptr to 0x%llx\n", mmuupdateop.ptr);
		leave = 1;
	}
	if (fault == 1) {	/* flip one bit in the PUD entry itself */
		getrando(64);
		ptr->pgd.pgd ^= (1ULL << rando);
		printk("Fortune: change pud to 0x%llx\n", ptr->pgd.pgd);
		/* no need to set leave = 1 */
	}
	if (fault == 2) {	/* flip one bit in the new value */
		getrando(64);
		mmuupdateop.val ^= (1ULL << rando);
		printk("Fortune: change val to 0x%llx\n", mmuupdateop.val);
		leave = 1;
	}
	jprobe_return();
}
/* jprobe handler: inject a single-bit fault into xen_l2_entry_update */
void my_xen_l2_entry_update(pmd_t *ptr, pmd_t val)
{
	if (aim != xen_l2_entry_update_id || signal == 0)
		jprobe_return();

	/* Replay the corrupted update prepared on the previous hit. */
	if (leave == 1) {
		BUG_ON(HYPERVISOR_mmu_update(&mmuupdateop, 1, NULL, DOMID_SELF) < 0);
		leave = 0;
	}

	/*
	 * if ((strcmp(current->comm, "xm") == 0) ||
	 *     (strcmp(current->comm, "xend") == 0))
	 *	printk("Fortune: %s\n", current->comm);
	 * else
	 *	jprobe_return();
	 */

	if (time > 0)
		--time;
	else {
		signal = 0;
		printk("Done.\n");
		jprobe_return();
	}

	printk("Fortune: xen_l2_entry_update from %s\n", current->comm);
	printk("Fortune: pmd=0x%llx\tval=0x%llx\n", ptr->pmd, val.pmd);
	printk("Fortune: ptr real=0x%llx\n", virt_to_machine(ptr));

	mmuupdateop.ptr = virt_to_machine(ptr);
	mmuupdateop.val = pmd_val_ma(val);

	if (fault == 0) {	/* flip one bit in the machine address */
		getrando(64);
		mmuupdateop.ptr ^= (1ULL << rando);
		printk("Fortune: change ptr to 0x%llx\n", mmuupdateop.ptr);
		leave = 1;
	}
	if (fault == 1) {	/* flip one bit in the PMD entry itself */
		getrando(64);
		ptr->pmd ^= (1ULL << rando);
		printk("Fortune: change pmd to 0x%llx\n", ptr->pmd);
		/* no need to set leave = 1 */
	}
	if (fault == 2) {	/* flip one bit in the new value */
		getrando(64);
		mmuupdateop.val ^= (1ULL << rando);
		printk("Fortune: change val to 0x%llx\n", mmuupdateop.val);
		leave = 1;
	}
	jprobe_return();
}
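/*
 * A minimal sketch of how the two fault-injection handlers above could be
 * attached with the (since-removed) jprobes API. Everything here is an
 * assumption for illustration: the original excerpt does not show getrando(),
 * the module boilerplate, or the shared global state (aim, signal, leave,
 * time, fault, rando, mmuupdateop).
 */
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/random.h>

static unsigned int rando;

/* Hypothetical helper: pick a random bit position in [0, max). */
static void getrando(unsigned int max)
{
	get_random_bytes(&rando, sizeof(rando));
	rando %= max;
}

static struct jprobe jp_l3 = {
	.entry = (void *)my_xen_l3_entry_update,
	.kp.symbol_name = "xen_l3_entry_update",
};

static struct jprobe jp_l2 = {
	.entry = (void *)my_xen_l2_entry_update,
	.kp.symbol_name = "xen_l2_entry_update",
};

static int __init fortune_init(void)
{
	int ret = register_jprobe(&jp_l3);

	if (ret)
		return ret;
	ret = register_jprobe(&jp_l2);
	if (ret)
		unregister_jprobe(&jp_l3);
	return ret;
}

static void __exit fortune_exit(void)
{
	unregister_jprobe(&jp_l2);
	unregister_jprobe(&jp_l3);
}

module_init(fortune_init);
module_exit(fortune_exit);
MODULE_LICENSE("GPL");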
void xen_l4_entry_update(pgd_t *ptr, pgd_t val)
{
	mmu_update_t u;
	u.ptr = virt_to_machine(ptr);
	u.val = val.pgd;
	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
}
void xen_l3_entry_update(pud_t *ptr, pud_t val)
{
	mmu_update_t u;
	u.ptr = virt_to_machine(ptr);
	u.val = pud_val_ma(val);
	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
}
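/*
 * For completeness: my_xen_l2_entry_update above probes xen_l2_entry_update,
 * which is not shown in this excerpt. A sketch assuming it mirrors the
 * l3/l4 variants from the same (pre-pvops) XenLinux tree.
 */
void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
{
	mmu_update_t u;
	u.ptr = virt_to_machine(ptr);
	u.val = pmd_val_ma(val);
	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
}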
void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
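/*
 * For context, xen_ptep_modify_prot_commit() is the second half of a
 * start/commit pair. A sketch of the matching start half, consistent with
 * mainline: it just reads the current PTE, since the accessed/dirty bits
 * are preserved at commit time via MMU_PT_UPDATE_PRESERVE_AD. Details may
 * differ between kernel versions.
 */
pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep)
{
	/* Return the pte as-is; the A/D bits are preserved on commit. */
	trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
	return *ptep;
}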
static void xen_load_user_cs_desc(int cpu, struct mm_struct *mm)
{
	void *gdt;
	xmaddr_t mgdt;
	u64 descriptor;
	struct desc_struct user_cs;

	gdt = &get_cpu_gdt_table(cpu)[GDT_ENTRY_DEFAULT_USER_CS];
	mgdt = virt_to_machine(gdt);

	user_cs = mm->context.user_cs;
	descriptor = (u64) user_cs.a | ((u64) user_cs.b) << 32;

	HYPERVISOR_update_descriptor(mgdt.maddr, descriptor);
}
void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;
	u->ptr = virt_to_machine(ptr).maddr;
	u->val = pmd_val_ma(val);
	MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}
static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
{
	struct mmu_update u;

	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
		return false;

	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
	u.val = pte_val_ma(pteval);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	return true;
}
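/*
 * A sketch of how a caller could use xen_batched_set_pte(): try the batched
 * path first, and fall back to a one-off mmu_update hypercall (or a native
 * write) when no lazy-MMU batch is open. The fallback shown is a hedged
 * reconstruction; the exact behaviour varies between kernel versions.
 */
static void __xen_set_pte(pte_t *ptep, pte_t pteval)
{
	if (!xen_batched_set_pte(ptep, pteval)) {
		struct mmu_update u;

		u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
		u.val = pte_val_ma(pteval);
		if (HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF))
			native_set_pte(ptep, pteval);
	}
}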
/*
 * Version of write_gdt_entry for use at early boot-time needed to
 * update an entry as simply as possible.
 */
static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
					    const void *desc, int type)
{
	switch (type) {
	case DESC_LDT:
	case DESC_TSS:
		/* ignore */
		break;

	default: {
		xmaddr_t maddr = virt_to_machine(&dt[entry]);

		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
			dt[entry] = *(struct desc_struct *)desc;
	}
	}
}
void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	trace_xen_mmu_set_domain_pte(ptep, pteval, domid);

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;

	/* ptep might be kmapped when using 32-bit HIGHPTE */
	u->ptr = virt_to_machine(ptep).maddr;
	u->val = pte_val_ma(pteval);

	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * if the PFN is in the linear mapped vaddr range, we can just use
	 * the (quick) virt_to_machine() p2m lookup
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */
	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
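/*
 * Usage sketch: arbitrary_virt_to_machine() is the safe choice when ptep may
 * live outside the linear mapping (e.g. a kmapped 32-bit HIGHPTE page),
 * where plain virt_to_machine() would be wrong. set_pte_machine() is a
 * hypothetical helper name, not part of the original code.
 */
static void set_pte_machine(pte_t *ptep, pte_t pteval)
{
	struct mmu_update u;

	u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
	u.val = pte_val_ma(pteval);
	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
}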
void __init xen_start_kernel(void)
{
	unsigned int i;
	struct xen_machphys_mapping mapping;
	unsigned long machine_to_phys_nr_ents;
#ifdef CONFIG_X86_32
	struct xen_platform_parameters pp;
	extern pte_t swapper_pg_fixmap[PTRS_PER_PTE];
	unsigned long addr;
#endif

	xen_setup_features();

	if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
		machine_to_phys_mapping = (unsigned long *)mapping.v_start;
		machine_to_phys_nr_ents = mapping.max_mfn + 1;
	} else
		machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
	while ((1UL << machine_to_phys_order) < machine_to_phys_nr_ents)
		machine_to_phys_order++;

	if (!xen_feature(XENFEAT_auto_translated_physmap))
		phys_to_machine_mapping =
			(unsigned long *)xen_start_info->mfn_list;

	WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
				     VMASST_TYPE_writable_pagetables));

	reserve_early(ALIGN(__pa_symbol(&_end), PAGE_SIZE),
		      __pa(xen_start_info->pt_base)
		      + (xen_start_info->nr_pt_frames << PAGE_SHIFT),
		      "Xen provided");

#ifdef CONFIG_X86_32
	WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
				     VMASST_TYPE_4gb_segments));

	init_mm.pgd = swapper_pg_dir = (pgd_t *)xen_start_info->pt_base;

	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0) {
		hypervisor_virt_start = pp.virt_start;
		reserve_top_address(0UL - pp.virt_start);
	}

	BUG_ON(pte_index(hypervisor_virt_start));

	/* Do an early initialization of the fixmap area */
	make_lowmem_page_readonly(swapper_pg_fixmap,
				  XENFEAT_writable_page_tables);
	addr = __fix_to_virt(FIX_EARLYCON_MEM_BASE);
	set_pmd(pmd_offset(pud_offset(swapper_pg_dir + pgd_index(addr), addr),
			   addr),
		__pmd(__pa_symbol(swapper_pg_fixmap) | _PAGE_TABLE));
#else
	check_efer();
	xen_init_pt();
#endif

#define __FIXADDR_TOP (-PAGE_SIZE)
#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
#define FIX_BUG_ON(fix) BUILD_BUG_ON(pmd_index(__fix_to_virt(FIX_##fix)) \
	!= pmd_index(__fix_to_virt(FIX_EARLYCON_MEM_BASE)))
	FIX_BUG_ON(SHARED_INFO);
	FIX_BUG_ON(ISAMAP_BEGIN);
	FIX_BUG_ON(ISAMAP_END);
#undef pmd_index
#undef __FIXADDR_TOP

	/* Switch to the real shared_info page, and clear the dummy page. */
	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
	memset(empty_zero_page, 0, sizeof(empty_zero_page));

	setup_vcpu_info(0);

	/* Set up mapping of lowest 1MB of physical memory. */
	for (i = 0; i < NR_FIX_ISAMAPS; i++)
		if (is_initial_xendomain())
			set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
		else
			__set_fixmap(FIX_ISAMAP_BEGIN - i,
				     virt_to_machine(empty_zero_page),
				     PAGE_KERNEL_RO);
}