static int mips_vcpu_map_guest_to_host(struct vmm_vcpu *vcpu,
				       mips32_tlb_entry_t *gtlbe)
{
	struct vmm_guest *guest = NULL;
	struct vmm_region *guest_region = NULL;
	physical_addr_t gphys_addr = 0, hphys_addr = 0;
	physical_addr_t gphys_addr2map = 0, gphys_offset = 0;
	physical_addr_t hphys_addr2map = 0;
	int do_map = 0;

	guest = vcpu->guest;

	if (vcpu->is_normal) {
		/*
		 * Only if the guest is making a valid TLB entry, try to
		 * map the gphys to hphys.
		 */
		if (TBE_ELO_VALID(gtlbe, entrylo0)) {
			gphys_addr2map = (gtlbe->entrylo0._s_entrylo.pfn
					  << PAGE_SHIFT);
			guest_region = vmm_guest_find_region(guest,
						gphys_addr2map, TRUE);
			if (!guest_region) {
				TBE_ELO_INVALIDATE(gtlbe, entrylo0);
				return VMM_EFAIL;
			}

			gphys_addr = guest_region->gphys_addr;
			hphys_addr = guest_region->hphys_addr;

			/*
			 * Offset of the address being mapped within its
			 * guest region. The region lookup above already
			 * guarantees gphys_addr2map >= gphys_addr, so no
			 * sign check is needed (physical_addr_t is
			 * unsigned anyway).
			 */
			gphys_offset = gphys_addr2map - gphys_addr;
			hphys_addr2map = hphys_addr + gphys_offset;
			gtlbe->entrylo0._s_entrylo.pfn =
				(hphys_addr2map >> PAGE_SHIFT);
			/*
			 * We can keep the valid and other bits same
			 * for now.
			 */
			do_map = 1;
		}

		if (TBE_ELO_VALID(gtlbe, entrylo1)) {
			gphys_addr2map = (gtlbe->entrylo1._s_entrylo.pfn
					  << PAGE_SHIFT);
			guest_region = vmm_guest_find_region(guest,
						gphys_addr2map, TRUE);
			if (!guest_region) {
				TBE_ELO_INVALIDATE(gtlbe, entrylo1);
				return VMM_EFAIL;
			}

			gphys_addr = guest_region->gphys_addr;
			hphys_addr = guest_region->hphys_addr;

			gphys_offset = gphys_addr2map - gphys_addr;
			hphys_addr2map = hphys_addr + gphys_offset;
			gtlbe->entrylo1._s_entrylo.pfn =
				(hphys_addr2map >> PAGE_SHIFT);
			/*
			 * We can keep the valid and other bits same
			 * for now.
			 */
			do_map = 1;
		}
	}

	/*
	 * The original excerpt is truncated here; presumably do_map
	 * gates installation of the translated entry into the shadow
	 * TLB. Report failure if neither half produced a mapping.
	 */
	return (do_map) ? VMM_OK : VMM_EFAIL;
}
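/*
 * Illustrative sketch, not part of the original source: emulation of a
 * guest TLBWI would translate the guest's entry with
 * mips_vcpu_map_guest_to_host() before installing it into the shadow
 * TLB. Here "index" stands for the guest's CP0 Index value and "gtlbe"
 * for its decoded TLB entry; how both are fetched from the virtualized
 * CP0 state is elided, and this function name is assumed.
 */
static int mips_emulate_guest_tlbwi(struct vmm_vcpu *vcpu, u32 index,
				    mips32_tlb_entry_t *gtlbe)
{
	/* Rewrite guest PFNs in-place to host PFNs. */
	if (mips_vcpu_map_guest_to_host(vcpu, gtlbe) != VMM_OK)
		return VMM_EFAIL;

	/* Install the translated entry into the per-VCPU shadow TLB. */
	vmm_memcpy((void *)&mips_sregs(vcpu)->shadow_tlb_entries[index],
		   (void *)gtlbe, sizeof(mips32_tlb_entry_t));

	return VMM_OK;
}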
static int map_guest_region(struct vmm_vcpu *vcpu, int region_type,
			    int tlb_index)
{
	mips32_tlb_entry_t shadow_entry;
	physical_addr_t gphys;
	physical_addr_t hphys, paddr;
	virtual_addr_t vaddr2map;
	u32 gphys_size;
	struct vmm_region *region;
	struct vmm_guest *aguest = vcpu->guest;

	vaddr2map = (region_type == VMM_REGION_TYPE_ROM) ?
						0x3FC00000 : 0x0;
	paddr = (region_type == VMM_REGION_TYPE_ROM) ? 0x1FC00000 : 0x0;

	/*
	 * Create the initial TLB entry mapping complete RAM promised
	 * to the guest. The idea is that guest vcpu shouldn't fault
	 * on this address.
	 */
	region = vmm_guest_find_region(aguest, paddr, TRUE);
	if (region == NULL) {
		vmm_printf("Bummer!!! No guest region defined for "
			   "VCPU RAM/ROM.\n");
		return VMM_EFAIL;
	}

	gphys = region->gphys_addr;
	hphys = region->hphys_addr;
	gphys_size = region->phys_size;

	switch (gphys_size) {
	case TLB_PAGE_SIZE_1K:
	case TLB_PAGE_SIZE_4K:
	case TLB_PAGE_SIZE_16K:
	case TLB_PAGE_SIZE_256K:
	case TLB_PAGE_SIZE_1M:
	case TLB_PAGE_SIZE_4M:
	case TLB_PAGE_SIZE_16M:
	case TLB_PAGE_SIZE_64M:
	case TLB_PAGE_SIZE_256M:
		/* One TLB entry maps two halves of gphys_size / 2 each. */
		shadow_entry.page_mask = ((gphys_size / 2) - 1);
		break;
	default:
		vmm_panic("Guest physical memory region size should match "
			  "one of the page sizes available on MIPS32.\n");
	}

	/* FIXME: Guest physical/virtual should be from DTS */
	shadow_entry.entryhi._s_entryhi.vpn2 = (vaddr2map >> VPN2_SHIFT);
	shadow_entry.entryhi._s_entryhi.asid = (u8)(2 << 6);
	shadow_entry.entryhi._s_entryhi.reserved = 0;
	shadow_entry.entryhi._s_entryhi.vpn2x = 0;

	shadow_entry.entrylo0._s_entrylo.global = 0;
	shadow_entry.entrylo0._s_entrylo.valid = 1;
	shadow_entry.entrylo0._s_entrylo.dirty = 1;
	shadow_entry.entrylo0._s_entrylo.cacheable = 1;
	shadow_entry.entrylo0._s_entrylo.pfn = (hphys >> PAGE_SHIFT);

	shadow_entry.entrylo1._s_entrylo.global = 0;
	shadow_entry.entrylo1._s_entrylo.valid = 0;
	shadow_entry.entrylo1._s_entrylo.dirty = 0;
	shadow_entry.entrylo1._s_entrylo.cacheable = 0;
	shadow_entry.entrylo1._s_entrylo.pfn = 0;

	vmm_memcpy((void *)&mips_sregs(vcpu)->shadow_tlb_entries[tlb_index],
		   (void *)&shadow_entry, sizeof(mips32_tlb_entry_t));

	return VMM_OK;
}
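/*
 * Illustrative sketch, not part of the original source: a VCPU init
 * path would typically install both the ROM mapping (guest reset
 * vector backed by host memory at the region found for 0x1FC00000)
 * and the RAM mapping into fixed shadow TLB slots. The function name,
 * VMM_REGION_TYPE_RAM, and the slot numbers are assumptions here.
 */
static int mips_vcpu_init_mappings(struct vmm_vcpu *vcpu)
{
	/* Slot 0: guest ROM / reset vector. */
	if (map_guest_region(vcpu, VMM_REGION_TYPE_ROM, 0) != VMM_OK)
		return VMM_EFAIL;

	/* Slot 1: guest RAM promised via the region list. */
	if (map_guest_region(vcpu, VMM_REGION_TYPE_RAM, 1) != VMM_OK)
		return VMM_EFAIL;

	return VMM_OK;
}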