Example #1
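/* Emulate the TLBP instruction for a guest: probe the vcpu's copy of
 * the hardware TLB for an entry matching the guest EntryHi, and report
 * the result through the guest Index register (bit 31, the P bit, set
 * on a failed probe; the matching index otherwise). */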
u32 mips_probe_vcpu_tlb(struct vmm_vcpu *vcpu, arch_regs_t *uregs)
{
	u32 guest_cp0_index = (0x01UL << 31);
	u32 tlb_counter;
	mips32_tlb_entry_t *c_tlb_entry, *tempe, t_tlb_entry;
	mips32_entryhi_t g_probed_ehi;

	g_probed_ehi._entryhi = mips_sregs(vcpu)->cp0_regs[CP0_ENTRYHI_IDX];

	for (tlb_counter = 0; tlb_counter < CPU_TLB_COUNT; tlb_counter++) {
		c_tlb_entry = &mips_sregs(vcpu)->hw_tlb_entries[tlb_counter];

		t_tlb_entry.page_mask = c_tlb_entry->page_mask;
		t_tlb_entry.entryhi._entryhi = g_probed_ehi._entryhi;
		tempe = &t_tlb_entry;

		if (TBE_PGMSKD_VPN2(c_tlb_entry) == TBE_PGMSKD_VPN2(tempe)
		    && (TBE_ELO_VALID(c_tlb_entry, entrylo0)
			|| TBE_ELO_VALID(c_tlb_entry, entrylo1))
		    && (TBE_ASID(c_tlb_entry) == TBE_ASID(tempe)
			|| TBE_ELO_GLOBAL(c_tlb_entry, entrylo0)
			|| TBE_ELO_GLOBAL(c_tlb_entry, entrylo1))) {
			/* Match found: store the index, which also
			 * clears the probe-failure bit. */
			guest_cp0_index = tlb_counter;
			break;
		}
	}

	mips_sregs(vcpu)->cp0_regs[CP0_INDEX_IDX] = guest_cp0_index;

	return VMM_OK;
}
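For reference, software reads the probe result from the Index register: bit 31 is the architectural P (probe failure) bit that the code above sets and clears. A minimal sketch of the check (the helper name is ours, not from the source):

/* Hypothetical helper: a TLBP that found no match leaves bit 31 (the
 * MIPS32 P bit) set in the Index register. */
static inline int tlb_probe_failed(u32 cp0_index)
{
	return (int)((cp0_index >> 31) & 0x1);
}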
Example #2
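/* Set up the initial register state of a vcpu. An orphan vcpu starts
 * at its entry point with the current Status and EntryHi; a normal
 * vcpu running a guest additionally gets an emulated CP0 register
 * file, a user-mode Status, and its own ASID in EntryHi. */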
int arch_vcpu_regs_init(struct vmm_vcpu *vcpu)
{
	vmm_memset(mips_uregs(vcpu), 0, sizeof(arch_regs_t));

	if (!vcpu->is_normal) {
		/* For orphan vcpu */
		mips_uregs(vcpu)->cp0_epc = vcpu->start_pc;
		mips_uregs(vcpu)->regs[SP_IDX] = vcpu->start_sp;
		mips_uregs(vcpu)->regs[S8_IDX] = mips_uregs(vcpu)->regs[SP_IDX];
		mips_uregs(vcpu)->cp0_status = read_c0_status();
		mips_uregs(vcpu)->cp0_entryhi = read_c0_entryhi();
	} else {
		/* For normal vcpu running guests */
		mips_sregs(vcpu)->cp0_regs[CP0_CAUSE_IDX] = 0x400;
		mips_sregs(vcpu)->cp0_regs[CP0_STATUS_IDX] = 0x40004;
		mips_uregs(vcpu)->cp0_status = read_c0_status() | (0x01UL << CP0_STATUS_UM_SHIFT);
		mips_uregs(vcpu)->cp0_entryhi = read_c0_entryhi();
		/* Clear the ASID field, then install the guest ASID. */
		mips_uregs(vcpu)->cp0_entryhi &= ~ASID_MASK;
		mips_uregs(vcpu)->cp0_entryhi |= (0x2 << ASID_SHIFT);
		mips_uregs(vcpu)->cp0_epc = vcpu->start_pc;

		/* All guest run from 0 and fault */
		mips_sregs(vcpu)->cp0_regs[CP0_EPC_IDX] = vcpu->start_pc;
		/* Give guest the same CPU cap as we have */
		mips_sregs(vcpu)->cp0_regs[CP0_PRID_IDX] = read_c0_prid();
	}

	return VMM_OK;
}
Example #3
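/* Shadow TLB miss handler: walk the current vcpu's shadow TLB for an
 * entry covering the faulting address and, on a hit, install it in the
 * hardware TLB (the -1 index apparently selects a random slot, as a
 * TLBWR would). */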
int do_vcpu_tlbmiss(arch_regs_t *uregs)
{
	u32 badvaddr = read_c0_badvaddr();
	struct vmm_vcpu *current_vcpu;
	int counter = 0;
	mips32_tlb_entry_t *c_tlbe;

	current_vcpu = vmm_scheduler_current_vcpu();
	for (counter = 0; counter < 2 * CPU_TLB_COUNT; counter++) {
		c_tlbe = &mips_sregs(current_vcpu)->shadow_tlb_entries[counter];
		if (TBE_PGMSKD_VPN2(c_tlbe) ==
		    (badvaddr & ~c_tlbe->page_mask)) {
			mips_fill_tlb_entry(c_tlbe, -1);
			return 0;
		}
	}

	/* Nothing in the shadow TLB covers the faulting address; the
	 * fault should eventually be forwarded to the guest. */
	vmm_panic("No TLB entry in shadow. Send fault to guest.\n");

	return VMM_EFAIL;
}
Example #4
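/* Preload a shadow TLB entry for a guest region: ROM is looked up at
 * guest physical 0x1FC00000 (the MIPS reset vector) and mapped at
 * virtual 0x3FC00000, everything else at 0. The region size must be
 * one of the MIPS32 TLB page sizes, and only the even half (entrylo0)
 * of the entry is made valid. */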
static int map_guest_region(struct vmm_vcpu *vcpu, int region_type, int tlb_index)
{
	mips32_tlb_entry_t shadow_entry;
	physical_addr_t hphys, paddr;
	virtual_addr_t vaddr2map;
	u32 gphys_size;
	struct vmm_region *region;
	struct vmm_guest *aguest = vcpu->guest;

	vaddr2map = (region_type == VMM_REGION_TYPE_ROM ? 0x3FC00000 : 0x0);
	paddr = (region_type == VMM_REGION_TYPE_ROM ? 0x1FC00000 : 0x0);

	/*
	 * Create the initial TLB entry mapping complete RAM promised
	 * to the guest. The idea is that guest vcpu shouldn't fault
	 * on this address.
	 */
	region = vmm_guest_find_region(aguest, paddr, TRUE);
	if (region == NULL) {
		vmm_printf("Bummer!!! No guest region defined for VCPU RAM.\n");
		return VMM_EFAIL;
	}

	hphys = region->hphys_addr;
	gphys_size = region->phys_size;

	switch (gphys_size) {
	case TLB_PAGE_SIZE_1K:
	case TLB_PAGE_SIZE_4K:
	case TLB_PAGE_SIZE_16K:
	case TLB_PAGE_SIZE_256K:
	case TLB_PAGE_SIZE_1M:
	case TLB_PAGE_SIZE_4M:
	case TLB_PAGE_SIZE_16M:
	case TLB_PAGE_SIZE_64M:
	case TLB_PAGE_SIZE_256M:
		shadow_entry.page_mask = ((gphys_size / 2) - 1);
		break;
	default:
		vmm_panic("Guest physical memory region should be same as page"
			  " sizes available for MIPS32.\n");
	}

	/* FIXME: Guest physical/virtual should be from DTS */
	shadow_entry.entryhi._s_entryhi.vpn2 = (vaddr2map >> VPN2_SHIFT);
	shadow_entry.entryhi._s_entryhi.asid = (u8)(2 << 6);
	shadow_entry.entryhi._s_entryhi.reserved = 0;
	shadow_entry.entryhi._s_entryhi.vpn2x = 0;

	shadow_entry.entrylo0._s_entrylo.global = 0;
	shadow_entry.entrylo0._s_entrylo.valid = 1;
	shadow_entry.entrylo0._s_entrylo.dirty = 1;
	shadow_entry.entrylo0._s_entrylo.cacheable = 1;
	shadow_entry.entrylo0._s_entrylo.pfn = (hphys >> PAGE_SHIFT);

	shadow_entry.entrylo1._s_entrylo.global = 0;
	shadow_entry.entrylo1._s_entrylo.valid = 0;
	shadow_entry.entrylo1._s_entrylo.dirty = 0;
	shadow_entry.entrylo1._s_entrylo.cacheable = 0;
	shadow_entry.entrylo1._s_entrylo.pfn = 0;

	vmm_memcpy((void *)&mips_sregs(vcpu)->shadow_tlb_entries[tlb_index],
		   (void *)&shadow_entry, sizeof(mips32_tlb_entry_t));

	return VMM_OK;
}
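A minimal caller sketch, assuming a VMM_REGION_TYPE_RAM constant alongside the VMM_REGION_TYPE_ROM used above (the constant, the helper name, and the fixed shadow TLB indices are our assumptions, not taken from the source):

/* Hypothetical setup path: preload one shadow TLB entry for guest ROM
 * and one for guest RAM before the vcpu first runs. */
static int setup_guest_mappings(struct vmm_vcpu *vcpu)
{
	int rc;

	rc = map_guest_region(vcpu, VMM_REGION_TYPE_ROM, 0);
	if (rc != VMM_OK)
		return rc;

	return map_guest_region(vcpu, VMM_REGION_TYPE_RAM, 1);
}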
Example #5
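/* Resolve a guest CP0 access to its slot in the emulated register
 * file. sreg and sel name the coprocessor register; when do_load is
 * set the emulated value is copied into *treg (MFC0 direction),
 * otherwise *treg is written to the emulated register (MTC0). */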
static u32 load_store_emulated_reg(u8 sreg, u8 sel,
				   u32 *treg,
				   struct vmm_vcpu *vcpu, u8 do_load)
{
	u32 _err = VMM_OK;
	u32 *emulated_reg = NULL;

	switch(sreg) {
	case 0: /* index register */
	case 1: /* Random register */
	case 2: /* entry lo0 */
	case 3: /* entry lo1 */
	case 4: /* context */
	case 5:
	case 6:
	case 7:
	case 8:
	case 9:
	case 10:
	case 11:
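		/* CP0 registers 0 through 11 are stored at their own
		 * architectural number in the emulated register file. */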
		emulated_reg = &mips_sregs(vcpu)->cp0_regs[sreg];
		break;
	case 12:
		switch(sel) {
		case 0:
			emulated_reg = &mips_sregs(vcpu)->cp0_regs[CP0_STATUS_IDX];
			break;
		case 1:
			emulated_reg = &mips_sregs(vcpu)->cp0_regs[CP0_INTCTL_IDX];
			break;
		case 2:
			emulated_reg = &mips_sregs(vcpu)->cp0_regs[CP0_SRSCTL_IDX];
			break;
		case 3:
			emulated_reg = &mips_sregs(vcpu)->cp0_regs[CP0_SRSMAP_IDX];
			break;
		}
		break;
	case 13: /* Cause register */
		emulated_reg = &mips_sregs(vcpu)->cp0_regs[CP0_CAUSE_IDX];
		break;
	case 14:
		emulated_reg = &mips_sregs(vcpu)->cp0_regs[CP0_EPC_IDX];
		break;
	case 15:
		switch (sel) {
		case 0:
			emulated_reg = &mips_sregs(vcpu)->cp0_regs[CP0_PRID_IDX];
			break;
		case 1:
			emulated_reg = &mips_sregs(vcpu)->cp0_regs[CP0_EBASE_IDX];
			break;
		}
		break;
	case 16:
		switch(sel) {
		case 0:
			emulated_reg = &mips_sregs(vcpu)->cp0_regs[CP0_CONFIG_IDX];
			break;
		case 1:
			emulated_reg = &mips_sregs(vcpu)->cp0_regs[CP0_CONFIG1_IDX];
			break;
		case 2:
			emulated_reg = &mips_sregs(vcpu)->cp0_regs[CP0_CONFIG2_IDX];
			break;
		case 3:
			emulated_reg = &mips_sregs(vcpu)->cp0_regs[CP0_CONFIG3_IDX];
			break;
		}
		break;
	case 17:
		emulated_reg = &mips_sregs(vcpu)->cp0_regs[CP0_LLADDR_IDX];
		break;
	case 18:
		emulated_reg = &mips_sregs(vcpu)->cp0_regs[CP0_WATCHLO_IDX];
		break;
	case 19:
		emulated_reg = &mips_sregs(vcpu)->cp0_regs[CP0_WATCHHI_IDX];
		break;
	case 23:
		emulated_reg = &mips_sregs(vcpu)->cp0_regs[CP0_DEBUG_IDX];
		break;
	case 24:
		emulated_reg = &mips_sregs(vcpu)->cp0_regs[CP0_DEPC_IDX];
		break;
	case 25:
		switch(sel) {
		case 0:
			emulated_reg = &mips_sregs(vcpu)->cp0_regs[CP0_PERFCTL_IDX];
			break;
		case 1:
			emulated_reg = &mips_sregs(vcpu)->cp0_regs[CP0_PERFCNT_IDX];
			break;
		}
		break;
	case 26:
		emulated_reg = &mips_sregs(vcpu)->cp0_regs[CP0_ECC_IDX];
		break;
	case 27:
		emulated_reg = &mips_sregs(vcpu)->cp0_regs[CP0_CACHEERR_IDX];
		break;
	case 28:
		switch(sel) {
		case 0:
			emulated_reg = &mips_sregs(vcpu)->cp0_regs[CP0_TAGLO_IDX];
			break;
		case 1:
			emulated_reg = &mips_sregs(vcpu)->cp0_regs[CP0_DATALO_IDX];
			break;
		}
		break;
	case 29:
		switch(sel) {
		case 0:
			emulated_reg = &mips_sregs(vcpu)->cp0_regs[CP0_TAGHI_IDX];
			break;
		case 1:
			emulated_reg = &mips_sregs(vcpu)->cp0_regs[CP0_DATAHI_IDX];
			break;
		}
		break;
	case 31:
		emulated_reg = &mips_sregs(vcpu)->cp0_regs[CP0_ERRORPC_IDX];
		break;
	default:
		_err = VMM_EFAIL;
		emulated_reg = NULL;
		break;
	}

	if (emulated_reg && _err == VMM_OK) {
		if (do_load)
			*treg = *emulated_reg;
		else
			*emulated_reg = *treg;
	}

	return _err;
}
Example #6
/* Coprocessor-unusable exception: decode and emulate the trapping
 * coprocessor instruction on behalf of the guest. Only CP0 accesses
 * are handled; any other coprocessor is fatal. */
u32 cpu_vcpu_emulate_cop_inst(struct vmm_vcpu *vcpu, u32 inst,
			      arch_regs_t *uregs)
{
	u32 cp0_cause = read_c0_cause();
	u8 rt, rd, sel;
	mips32_entryhi_t ehi;

	u32 cop_id = UNUSABLE_COP_ID(cp0_cause);
	if (cop_id != 0) {
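		/* Restore what appears to be the hypervisor's own ASID
		 * in EntryHi before panicking. */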
		ehi._entryhi = (read_c0_entryhi() & ~0xFF);
		ehi._s_entryhi.asid = (0x1 << ASID_SHIFT);
		write_c0_entryhi(ehi._entryhi);
		vmm_panic("COP%d unusable exeption!\n", cop_id);
	}

	switch(MIPS32_OPCODE(inst)) {
	case MIPS32_OPC_CP0_ACSS:
		switch(MIPS32_OPC_CP0_DIR(inst)) {
		case MIPS32_OPC_CP0_MF:
			rt = MIPS32_OPC_CP0_RT(inst);
			rd = MIPS32_OPC_CP0_RD(inst);
			sel = MIPS32_OPC_CP0_SEL(inst);
			if (load_store_emulated_reg(rd, sel,
						    &uregs->regs[rt],
						    vcpu, 1)) {
				ehi._entryhi = read_c0_entryhi() & ~0xFF;
				ehi._s_entryhi.asid = (0x1 << ASID_SHIFT);
				write_c0_entryhi(ehi._entryhi);
				vmm_panic("Can't load emulated register.\n");
			}

			break;
		case MIPS32_OPC_CP0_MT:
			rt = MIPS32_OPC_CP0_RT(inst);
			rd = MIPS32_OPC_CP0_RD(inst);
			sel = MIPS32_OPC_CP0_SEL(inst);
			if (load_store_emulated_reg(rd, sel,
						    &uregs->regs[rt],
						    vcpu, 0)) {
				ehi._entryhi = read_c0_entryhi() & ~0xFF;
				ehi._s_entryhi.asid = (0x1 << ASID_SHIFT);
				write_c0_entryhi(ehi._entryhi);
				vmm_panic("Can't load emulated register.\n");
			}

			break;

		case MIPS32_OPC_CP0_DIEI:
			rt = MIPS32_OPC_CP0_RT(inst);
			/* Only when rt names a non-zero register is
			 * the current guest status saved there. */
			if (rt)
				uregs->regs[rt] =
					mips_sregs(vcpu)->cp0_regs[CP0_STATUS_IDX];

			if (!MIPS32_OPC_CP0_SC(inst))
				/* Opcode says disable interrupts (for vcpu) */
				mips_sregs(vcpu)->cp0_regs[CP0_STATUS_IDX] &= ~0x01UL;
			else
				/* Opcode says enable interrupts (for vcpu) */
				mips_sregs(vcpu)->cp0_regs[CP0_STATUS_IDX] |= 0x01UL;

			break;
		default:
			if (IS_TLB_ACCESS_INST(inst)) {
				return cpu_vcpu_emulate_tlb_inst(vcpu,
								 inst, uregs);
			}
			break;
		}
		break;
	}

	return VMM_OK;
}