/*
** Pretty-print a guest #GP whose error code points into the IDT.
**
** . bails out when the error code does not reference the IDT, or when
**   the guest is in long mode (only the legacy 32-bit IDT layout is
**   handled here)
** . when guest paging is off, the IDTR base is used as-is (physical)
** . when guest paging is on, the IDT base is translated through a full
**   page walk first; if the IDT page is not mapped we can only report
**   that fact
*/
static void vmx_vmexit_show_gp_event(void)
{
   int_desc_t *idt;
   pg_wlk_t    wlk;

   /* only #GP error codes flagged as IDT-related, legacy mode only */
   if(!vm_exit_info.int_err_code.sl.idt || _xx_lmode())
      return;

   /* legacy IDT base is 32-bit; mask off any stale upper bits */
   idt = (int_desc_t*)(vm_state.idtr.base.raw & 0xffffffff);

   if(!__paging())
      goto show_gp;              /* no paging: base is already physical */
   else if(vm_full_walk((offset_t)idt, &wlk))
   {
      idt = (int_desc_t*)wlk.addr;  /* translated IDT location */
      goto show_gp;
   }

   /* paging is on but the IDT page could not be resolved */
   printf("#GP related to IDT and IDT is not mapped\n");
   return;

show_gp:
   printf("#GP related to IDT entry 0x%x [0x%X]\n"
          ,vm_exit_info.int_err_code.sl.idx
          ,idt[vm_exit_info.int_err_code.sl.idx].raw);
}
/*
** Page walking service
**
** . we use VMM cpu skillz as we depend
**   upon system mmu and not nested one
**
** . we support :
**    - long/compatibility mode
**    - legacy protected mode + paging + pae
**    - legacy protected mode + paging
**
** Dispatches to the mode-specific walker based on the current
** guest execution mode, filling 'wlk' with the walk result.
*/
int __pg_walk(cr3_reg_t *cr3, offset_t vaddr, pg_wlk_t *wlk)
{
   int rc;

   debug(PG_WLK, "cr3 0x%X\n", cr3->raw);

   if(_xx_lmode())
      rc = __pg_walk_lmode(cr3, vaddr, wlk);
   else if(__cr4.pae)
      rc = __pg_walk_pmode_pae(cr3, vaddr, wlk);
   else
      rc = __pg_walk_pmode(cr3, vaddr, wlk);

   return rc;
}
/*
** Page walking services
**
** . we use VMM cpu skillz as we depend
**   upon system mmu and not nested one
**
** . we support :
**    - long/compatibility mode
**    - legacy protected mode + paging + pae
**    - legacy protected mode + paging
**
** Selects the mode-specific walker for 'vaddr', returning the
** physical address in 'paddr' and page size in 'psz'; 'chk' is
** forwarded to the walker untouched.
**
** NOTE(review): this file appears to contain a second __pg_walk with a
** different (pg_wlk_t-based) signature — only one definition can
** compile/link; confirm which variant is current.
*/
int __pg_walk(cr3_reg_t *cr3, offset_t vaddr, offset_t *paddr, size_t *psz, int chk)
{
   debug(PG_W, "cr3 0x%X\n", cr3->raw);

   if(_xx_lmode())
      return pg_walk_lmode(cr3, vaddr, paddr, psz, chk);

   if(__cr4.pae)
      return pg_walk_pmode_pae(cr3, vaddr, paddr, psz, chk);

   return pg_walk_pmode(cr3, vaddr, paddr, psz, chk);
}