/*
** Print diagnostic details for a #GP fault whose error code references
** an IDT descriptor.
**
** Does nothing unless the #GP error code has the IDT flag set and the
** guest is not in long mode (NOTE(review): _xx_lmode() presumably tests
** long mode -- confirm against its definition). When guest paging is
** enabled, the IDT base is translated through a full guest page walk
** before the faulting descriptor is dumped.
*/
static void vmx_vmexit_show_gp_event(void)
{
   int_desc_t *idt;
   pg_wlk_t    wlk;

   /* ignore #GP not related to an IDT descriptor, or while in long mode */
   if(!vm_exit_info.int_err_code.sl.idt || _xx_lmode())
      return;

   /* legacy/protected mode: IDT base truncated to 32 bits */
   idt = (int_desc_t*)(vm_state.idtr.base.raw & 0xffffffff);

   if(!__paging())
      goto __show_gp;
   else if(vm_full_walk((offset_t)idt, &wlk))
   {
      /* use the translated (guest physical) IDT location */
      idt = (int_desc_t*)wlk.addr;
      goto __show_gp;
   }

   printf("#GP related to IDT and IDT is not mapped\n");
   return;

__show_gp:
   printf("#GP related to IDT entry 0x%x [0x%X]\n"
          ,vm_exit_info.int_err_code.sl.idx
          ,idt[vm_exit_info.int_err_code.sl.idx].raw);
}
/*
** GDB stub command: translate a guest virtual address into a guest
** physical address and send the result back to the debugger.
**
** Parses the address from the packet payload; NAKs on parse failure,
** replies with a memory error if the page walk fails.
*/
static void gdb_vmm_translate(uint8_t *data, size_t len)
{
   offset_t gv, gp;
   size_t   pg_sz, nibbles;

   if(!gdb_get_number(data, len, (uint64_t*)&gv, 0))
   {
      gdb_nak();
      return;
   }

   debug(GDBSTUB_CMD, "translating 0x%X\n", gv);

   if(!__paging())
      gp = gv;   /* identity mapping while paging is disabled */
   else if(!__pg_walk(info->vmm.ctrl.active_cr3, gv, &gp, &pg_sz, 1))
   {
      debug(GDBSTUB, "memory translation failure\n");
      gdb_err_mem();
      return;
   }

   debug(GDBSTUB_CMD, "sending 0x%X\n", gp);

   /* XXX: gdb seems to wait for 32 bits regs at least */
   nibbles = (cpu_addr_sz() == 64) ? sizeof(uint64_t)*2 : sizeof(uint32_t)*2;

   gdb_add_number(gp, nibbles, 0);
   gdb_send_packet();
}
/*
** Perform a guest memory access described by 'access', splitting it
** into page-sized chunks when guest paging is enabled.
**
** For each chunk: the guest virtual address is resolved through a page
** walk (result written into access->addr), access->len is clamped to
** the remaining bytes within the current page, and the operator
** callback is invoked on that chunk.
**
** Returns 1 on full success, 0 on walk or operator failure (the access
** struct may be left partially updated in that case).
*/
int __vm_access_mem(vm_access_t *access)
{
   offset_t cur, boundary;
   size_t   pg_sz, remaining;

   /* zero-length access trivially succeeds */
   if(!access->len)
      return 1;

   /* no paging: addresses are already physical, single shot */
   if(!__paging())
      return access->operator(access);

   cur       = access->addr;
   remaining = access->len;

   for( ; remaining ; cur = boundary)
   {
      if(!__pg_walk(access->cr3, cur, &access->addr, &pg_sz, 1))
      {
         debug(VM_ACCESS, "#PF on vm access 0x%X sz 0x%X\n", cur, remaining);
         return 0;
      }

      /* clamp this chunk to the end of the current page */
      boundary    = __align_next(cur, pg_sz);
      access->len = min(remaining, (boundary - cur));

      if(!access->operator(access))
         return 0;

      remaining -= access->len;
   }

   return 1;
}
/*
** Resolve a guest virtual address into a guest physical address.
**
** Delegates to __pg_walk() with the guest CR3; returns its result.
** Returns 0 (with a debug trace) when guest paging is disabled, since
** a walk makes no sense in that case.
*/
int vm_pg_walk(offset_t vaddr, offset_t *paddr, size_t *psz)
{
   if(__paging())
      return __pg_walk(&__cr3, vaddr, paddr, psz, 1);

   debug(VM, "walk while paging disabled !\n");
   return 0;
}