static asmlinkage void
memdump_gvirt (void *data)
{
	struct memdump_gvirt_data *dd;
	struct memdump_data *d;
	int i;
	u64 ent[5];
	int levels;
	u64 physaddr;

	dd = data;
	d = dd->d;
	for (i = 0; i < dd->sendlen; i++) {
		if (cpu_mmu_get_pte (d->virtaddr + i, d->cr0, d->cr3, d->cr4,
				     d->efer, false, false, false, ent,
				     &levels) == VMMERR_SUCCESS) {
			physaddr = (ent[0] & PTE_ADDR_MASK64) |
				((d->virtaddr + i) & 0xFFF);
		} else {
			snprintf (dd->errbuf, dd->errlen,
				  "get_pte failed (virt=0x%lX)",
				  d->virtaddr + i);
			break;
		}
		read_gphys_b (physaddr, &dd->q[i], 0);
	}
}
/* Dump data->sendlen bytes starting at guest virtual address
 * data->virtaddr into the buffer pointed to by value, translating the
 * address through the guest page tables and copying eight bytes at a
 * time; each chunk is also printed via printhex().  Returns the last
 * result of cpu_mmu_get_pte(). */
int
memdump_gvirt (struct memdump_data *dumpdata, void *value)
{
	long gvirt = 0xffffffff81c0e000ULL -
		(0xffffffff81000000ULL - 0x0000000001000000ULL);
		/* computed but not used below */
	struct memdump_data *data = dumpdata;
	int i, r = VMMERR_SUCCESS;
	u64 ent[5];
	int levels;
	u64 physaddr;
	int width = 8;
	char str[width];

	memset (ent, 0, sizeof (ent));
	get_control_regs ((ulong *)&data->cr0, (ulong *)&data->cr3,
			  (ulong *)&data->cr4, &data->efer);
	for (i = 0; i < data->sendlen; i += width) {
		r = cpu_mmu_get_pte (data->virtaddr + i, (ulong)data->cr0,
				     (ulong)data->cr3, (ulong)data->cr4,
				     data->efer, true, false, false, ent,
				     &levels);
		if (r == VMMERR_SUCCESS) {
			physaddr = (ent[0] & PTE_ADDR_MASK64) |
				((data->virtaddr + i) & 0xFFF);
			read_hphys_q (physaddr, str, 0);
			memcpy ((char *)value + i, str, width);
			printhex (str, width, physaddr);
			memset (str, 0, width);
		} else {
			printf ("error: r = %d\n", r);
			break;
		}
	}
	return r;
}
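/* Illustrative usage sketch (not part of the original code): dump 64
 * bytes starting at a guest kernel virtual address into a local
 * buffer.  The address below is hypothetical; a real caller would pass
 * the address of the guest structure it wants to inspect. */
static void
example_memdump_gvirt (void)
{
	struct memdump_data d;
	char buf[64];

	memset (&d, 0, sizeof (struct memdump_data));
	d.virtaddr = 0xffffffff81c0e000ULL;	/* hypothetical address */
	d.sendlen = sizeof (buf);
	if (memdump_gvirt (&d, buf) != VMMERR_SUCCESS)
		printf ("example: dump failed\n");
}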
/* Translate the guest virtual address gvirt to a guest physical
 * address using the current guest page tables and store the result in
 * *gphys.  *gphys is left unchanged on failure. */
void
convert_gvirt_to_gphys (long gvirt, long *gphys)
{
	struct memdump_data data;
	int r;
	u64 ent[5];
	int levels;
	u64 physaddr;

	memset (&data, 0, sizeof (struct memdump_data));
	data.virtaddr = gvirt;
	memset (ent, 0, sizeof (ent));
	get_control_regs ((ulong *)&data.cr0, (ulong *)&data.cr3,
			  (ulong *)&data.cr4, &data.efer);
	r = cpu_mmu_get_pte (data.virtaddr, (ulong)data.cr0, (ulong)data.cr3,
			     (ulong)data.cr4, data.efer, true, false, false,
			     ent, &levels);
	if (r == VMMERR_SUCCESS) {
		physaddr = (ent[0] & PTE_ADDR_MASK64) |
			(data.virtaddr & 0xFFF);
		*gphys = physaddr;
	} else {
		printf ("error!: r = %d\n", r);
	}
}
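/* Illustrative usage sketch (not part of the original code): translate
 * a guest kernel virtual address to a guest physical address.  The
 * virtual address used here is hypothetical. */
static void
example_convert_gvirt_to_gphys (void)
{
	long gphys = 0;

	convert_gvirt_to_gphys ((long)0xffffffff81c0e000ULL, &gphys);
	printf ("gphys = 0x%lx\n", gphys);
}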
/* Read a 24-byte string starting at guest virtual address vaddr and
 * return it in a newly allocated, NUL-terminated buffer.  The read
 * uses a single translation, so the 24 bytes are assumed not to cross
 * a page boundary; on translation failure an empty string is
 * returned. */
char *
read_str_va (u64 vaddr)
{
	char *ret_val;
	u64 x = 0x0ULL, y = 0x0ULL, z = 0x0ULL;
	struct memdump_data data;
	u64 ent[5];
	int i, r, levels;
	int c = 0;

	ret_val = (char *)alloc (25 * sizeof (char));
	memset (ret_val, 0, 25);
	memset (ent, 0, sizeof (ent));
	memset (&data, 0, sizeof (struct memdump_data));
	data.virtaddr = vaddr;
	get_control_regs ((ulong *)&data.cr0, (ulong *)&data.cr3,
			  (ulong *)&data.cr4, &data.efer);
	r = cpu_mmu_get_pte (data.virtaddr, (ulong)data.cr0, (ulong)data.cr3,
			     (ulong)data.cr4, data.efer, false, false, false,
			     ent, &levels);
	if (r == VMMERR_SUCCESS) {
		data.physaddr = (ent[0] & PTE_ADDR_MASK64) |
			(data.virtaddr & 0xFFF);
		read_hphys_q (data.physaddr, &x, 0);
		read_hphys_q (data.physaddr + 8, &y, 0);
		read_hphys_q (data.physaddr + 16, &z, 0);
		for (i = 0; i < 8; i++) {
			ret_val[c++] = x & 0xff;
			x >>= 8;
		}
		for (i = 0; i < 8; i++) {
			ret_val[c++] = y & 0xff;
			y >>= 8;
		}
		for (i = 0; i < 8; i++) {
			ret_val[c++] = z & 0xff;
			z >>= 8;
		}
	}
	return ret_val;
}
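/* Illustrative usage sketch (not part of the original code): read a
 * short string, such as a name field, from a guest virtual address.
 * The address is hypothetical, and the buffer returned by read_str_va()
 * is allocated with alloc() and not released here. */
static void
example_read_str_va (void)
{
	char *name;

	name = read_str_va (0xffffffff81c0e4f0ULL);	/* hypothetical */
	printf ("name = %s\n", name);
}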
/* Copy nr_bytes (4 or 8) from guest virtual address virtaddr into
 * value.  Returns 0 on success, -1 if the address could not be
 * translated. */
int
virt_memcpy (ulong virtaddr, int nr_bytes, void *value)
{
	struct memdump_data data;
	u64 ent[5];
	int r, levels;

	memset (ent, 0, sizeof (ent));
	memset (&data, 0, sizeof (struct memdump_data));
	data.virtaddr = virtaddr;
	get_control_regs ((ulong *)&data.cr0, (ulong *)&data.cr3,
			  (ulong *)&data.cr4, &data.efer);
	r = cpu_mmu_get_pte (data.virtaddr, (ulong)data.cr0, (ulong)data.cr3,
			     (ulong)data.cr4, data.efer, false, false, false,
			     ent, &levels);
	if (r == VMMERR_SUCCESS) {
		data.physaddr = (ent[0] & PTE_ADDR_MASK64) |
			(data.virtaddr & 0xFFF);
		if (nr_bytes == 4)
			read_hphys_l (data.physaddr, value, 0);
		if (nr_bytes == 8)
			read_hphys_q (data.physaddr, value, 0);
		return 0;
	}
	return -1;
}
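/* Illustrative usage sketch (not part of the original code): read an
 * 8-byte value from a guest virtual address.  The address is
 * hypothetical. */
static void
example_virt_memcpy (void)
{
	u64 value = 0;

	if (virt_memcpy (0xffffffff81c0e000UL, 8, &value) == 0)
		printf ("value = 0x%llx\n", value);
	else
		printf ("translation failed\n");
}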
/* Microcode updates cannot be loaded in VMX non-root operation on
 * Intel CPUs.  This function loads the updates in VMX root
 * operation. */
static bool
ia32_bios_updt (virt_t addr)
{
	u64 vmm_addr = PAGESIZE | (addr & PAGESIZE_MASK);
	phys_t phys, mm_phys;
	struct msrarg m;
	int num;
	ulong cr2, lastcr2 = 0, guest_addr;
	int levels;
	enum vmmerr r;
	u64 entries[5];
	u64 efer;
	ulong cr0, cr3, cr4;
	phys_t hphys, gphys;
	int in_mmio_range;
	bool ret = false;

	m.msrindex = MSR_IA32_BIOS_UPDT_TRIG;
	m.msrdata = &vmm_addr;
	current->vmctl.read_control_reg (CONTROL_REG_CR0, &cr0);
	current->vmctl.read_control_reg (CONTROL_REG_CR3, &cr3);
	current->vmctl.read_control_reg (CONTROL_REG_CR4, &cr4);
	current->vmctl.read_msr (MSR_IA32_EFER, &efer);
	if (0)
		printf ("CPU%d: old IA32_BIOS_SIGN_ID %016llX\n",
			get_cpu_id (), get_ia32_bios_sign_id ());
	/* Allocate an empty page directory for address 0-0x3FFFFFFF
	 * and switch to it. */
	if (mm_process_alloc (&phys) < 0)
		panic ("%s: mm_process_alloc failed", __func__);
	mm_phys = mm_process_switch (phys);
	for (;;) {
		/* Do update! */
		num = callfunc_and_getint (do_write_msr_sub, &m);
		if (num == -1)	/* Success */
			break;
		if (num == EXCEPTION_GP) {
			ret = true;
			break;
		}
		if (num != EXCEPTION_PF)
			panic ("%s: exception %d", __func__, num);
		/* Handle a page fault.  Get the guest physical
		 * address of the page. */
		/* FIXME: Set access bit in PTE */
		asm_rdcr2 (&cr2);
		if (lastcr2 == cr2)	/* check to avoid infinite loop */
			panic ("%s: second page fault at 0x%lX", __func__,
			       cr2);
		else
			lastcr2 = cr2;
		guest_addr = (addr & ~PAGESIZE_MASK) + (cr2 - PAGESIZE);
		r = cpu_mmu_get_pte (guest_addr, cr0, cr3, cr4, efer, false,
				     false, false, entries, &levels);
		if (r == VMMERR_PAGE_NOT_PRESENT) {
			ret = true;
			if (0)
				printf ("%s: guest page fault at 0x%lX\n",
					__func__, guest_addr);
			current->vmctl.generate_pagefault (0, guest_addr);
			break;
		}
		if (r != VMMERR_SUCCESS)
			panic ("%s: cpu_mmu_get_pte failed %d", __func__, r);
		gphys = entries[0] & current->pte_addr_mask;
		/* Find MMIO hooks. */
		mmio_lock ();
		in_mmio_range = mmio_access_page (gphys, false);
		mmio_unlock ();
		if (in_mmio_range)
			panic ("%s: mmio check failed cr2=0x%lX ent=0x%llX",
			       __func__, cr2, entries[0]);
		/* Convert the address and map it. */
		/* FIXME: Handle cache flags in the PTE. */
		hphys = current->gmm.gp2hp (gphys, NULL);
		ASSERT (!(hphys & PAGESIZE_MASK));
		if (mm_process_map_shared_physpage (cr2, hphys, false))
			panic ("%s: mm_process_map_shared_physpage failed"
			       " cr2=0x%lX guest=0x%lX ent=0x%llX",
			       __func__, cr2, guest_addr, entries[0]);
		if (0)
			printf ("%s: cr2=0x%lX guest=0x%lX ent=0x%llX\n",
				__func__, cr2, guest_addr, entries[0]);
	}
	/* Free page tables, switch to previous address space and free
	 * the page directory. */
	mm_process_unmapall ();
	mm_process_switch (mm_phys);
	mm_process_free (phys);
	if (0)
		printf ("CPU%d: new IA32_BIOS_SIGN_ID %016llX\n",
			get_cpu_id (), get_ia32_bios_sign_id ());
	return ret;
}