Example #1
void vmm_scheduler_yield(void)
{
	irq_flags_t flags;
	struct vmm_scheduler_ctrl *schedp = &this_cpu(sched);

	arch_cpu_irq_save(flags);

	if (schedp->irq_context) {
		vmm_panic("%s: Cannot yield in IRQ context\n", __func__);
	}

	if (!schedp->current_vcpu) {
		vmm_panic("%s: NULL VCPU pointer\n", __func__);
	}

	if (schedp->current_vcpu->is_normal) {
		/* For a Normal VCPU
		 * just enable yield on IRQ exit; the rest will be
		 * taken care of by vmm_scheduler_irq_exit().
		 */
		if (vmm_manager_vcpu_get_state(schedp->current_vcpu) == 
						VMM_VCPU_STATE_RUNNING) {
			schedp->yield_on_irq_exit = TRUE;
		}
	} else {
		/* For an Orphan VCPU
		 * forcefully preempt it right away.
		 */
		arch_vcpu_preempt_orphan();
	}

	arch_cpu_irq_restore(flags);
}
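
A typical caller is an orphan VCPU (a hypervisor thread) that polls for work and releases the processor between polls. A minimal sketch of such a loop; work_pending() and process_work() are hypothetical helpers, shown only to frame the call:

static void poll_thread_main(void)
{
	while (1) {
		if (work_pending()) {		/* hypothetical */
			process_work();		/* hypothetical */
		}
		/* Orphan VCPUs are preempted immediately via
		 * arch_vcpu_preempt_orphan(); normal VCPUs yield
		 * on the next IRQ exit.
		 */
		vmm_scheduler_yield();
	}
}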
Example #2
/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void m_copydata(struct vmm_mbuf *m, int off, int len, void *vp)
{
	unsigned count;
	void *cp = vp;

	if (m == NULL || vp == NULL)
		vmm_panic("%s: either m or vp is NULL\n", __func__);
	if (off < 0 || len < 0)
		vmm_panic("%s: off %d, len %d", __func__, off, len);
	while (off > 0) {
		if (m == NULL)
			vmm_panic("%s: m == NULL, off %d", __func__, off);
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == NULL)
			vmm_panic("%s: m == NULL, len %d", __func__, len);
		count = min(m->m_len - off, len);
		memcpy(cp, mtod(m, char *) + off, count);
		len -= count;
		cp = (char *)cp + count;
		off = 0;
		m = m->m_next;
	}
}
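
For instance, a caller that needs the first bytes of a received chain in one flat buffer can use it as below (a minimal sketch; the mbuf chain m is assumed to come from the network layer and to carry at least 14 bytes):

	u8 ethhdr[14];	/* Ethernet header: dst MAC, src MAC, ethertype */

	/* Flatten the first 14 bytes into ethhdr, regardless of how
	 * the data is split across the mbufs of the chain.
	 */
	m_copydata(m, 0, sizeof(ethhdr), ethhdr);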
Example #3
static int netdev_register_port(struct net_device *ndev)
{
	struct vmm_netport *port;
	const char *attr;
	struct vmm_netswitch *nsw;
	struct vmm_device *dev = ndev->vmm_dev;

	port = vmm_netport_alloc(ndev->name, VMM_NETPORT_DEF_QUEUE_SIZE);
	if (!port) {
		vmm_printf("Failed to allocate netport for %s\n", ndev->name);
		return VMM_ENOMEM;
	}

	port->dev.parent = dev;
	port->mtu = ndev->mtu;
	port->link_changed = netdev_set_link;
	port->can_receive = netdev_can_receive;
	port->switch2port_xfer = netdev_switch2port_xfer;
	port->priv = ndev;
	memcpy(port->macaddr, ndev->dev_addr, ETH_ALEN);

	ndev->nsw_priv = port;

	vmm_netport_register(port);

	if (dev) {
		if (dev->of_node &&
		    vmm_devtree_read_string(dev->of_node,
					    "switch", &attr) == VMM_OK) {
			nsw = vmm_netswitch_find(attr);
			if (!nsw) {
				vmm_panic("%s: Cannot find netswitch \"%s\"\n",
					  ndev->name, attr);
			}
			vmm_netswitch_port_add(nsw, port);
		} else {
			/* Add port to default switch if not specified. */
			nsw = vmm_netswitch_default();
			if (nsw) {
				vmm_netswitch_port_add(nsw, port);
			} else {
				vmm_panic("%s: Failed to find default "
					  "netswitch\n", ndev->name);
			}
		}
	}

	return VMM_OK;
}
Example #4
void do_soft_irq(arch_regs_t * uregs)
{
	int rc = VMM_OK;
	struct vmm_vcpu * vcpu;

	if ((uregs->cpsr & CPSR_MODE_MASK) != CPSR_MODE_USER) {
		vmm_panic("%s: unexpected exception\n", __func__);
	}

	vmm_scheduler_irq_enter(uregs, TRUE);

	vcpu = vmm_scheduler_current_vcpu();

	/* If the vcpu privilege is user then generate an exception
	 * and return without emulating the instruction.
	 */
	if ((arm_priv(vcpu)->cpsr & CPSR_MODE_MASK) == CPSR_MODE_USER) {
		vmm_vcpu_irq_assert(vcpu, CPU_SOFT_IRQ, 0x0);
	} else {
		if (uregs->cpsr & CPSR_THUMB_ENABLED) {
			rc = cpu_vcpu_hypercall_thumb(vcpu, uregs, 
							*((u32 *)uregs->pc));
		} else {
			rc = cpu_vcpu_hypercall_arm(vcpu, uregs, 
							*((u32 *)uregs->pc));
		}
	}

	if (rc) {
		vmm_printf("%s: error %d\n", __func__, rc);
	}

	vmm_scheduler_irq_exit(uregs);
}
Example #5
void do_undef_inst(vmm_user_regs_t * uregs)
{
	int rc = VMM_OK;
	vmm_vcpu_t * vcpu;

	if ((uregs->cpsr & CPSR_MODE_MASK) != CPSR_MODE_USER) {
		vmm_panic("%s: unexpected exception\n", __func__);
	}

	vmm_scheduler_irq_enter(uregs, TRUE);

	vcpu = vmm_scheduler_current_vcpu();

	/* If the vcpu privilege is user then generate an exception
	 * and return without emulating the instruction.
	 */
	if ((vcpu->sregs->cpsr & CPSR_MODE_MASK) == CPSR_MODE_USER) {
		vmm_vcpu_irq_assert(vcpu, CPU_UNDEF_INST_IRQ, 0x0);
	} else {
		if (uregs->cpsr & CPSR_THUMB_ENABLED) {
			rc = cpu_vcpu_emulate_thumb_inst(vcpu, uregs, FALSE);
		} else {
			rc = cpu_vcpu_emulate_arm_inst(vcpu, uregs, FALSE);
		}
	}

	if (rc) {
		vmm_printf("%s: error %d\n", __func__, rc);
	}

	vmm_scheduler_irq_exit(uregs);
}
Example #6
void do_bad_mode(arch_regs_t *regs, unsigned long mode)
{
	u32 ec, il, iss;
	u64 esr, far, elr;
	struct vmm_vcpu *vcpu;

	esr = mrs(esr_el2);
	far = mrs(far_el2);
	elr = mrs(elr_el2);

	ec = (esr & ESR_EC_MASK) >> ESR_EC_SHIFT;
	il = (esr & ESR_IL_MASK) >> ESR_IL_SHIFT;
	iss = (esr & ESR_ISS_MASK) >> ESR_ISS_SHIFT;

	vcpu = vmm_scheduler_current_vcpu();

	vmm_printf("%s: CPU%d VCPU=%s unexpected exception\n",
		   __func__, vmm_smp_processor_id(),
		   (vcpu) ? vcpu->name : "(NULL)");
	vmm_printf("%s: ESR=0x%016lx EC=0x%x IL=0x%x ISS=0x%x\n",
		   __func__, esr, ec, il, iss);
	vmm_printf("%s: ELR=0x%016lx FAR=0x%016lx HPFAR=0x%016lx\n",
		   __func__, elr, far, mrs(hpfar_el2));
	cpu_vcpu_dump_user_reg(regs);
	vmm_panic("%s: please reboot ...\n", __func__);
}
Example #7
void vmm_scheduler_yield(void)
{
	if (vmm_scheduler_irq_context()) {
		vmm_panic("%s: Tried to yield in IRQ context\n", __func__);
	}

	vmm_timer_event_start(sched.ev, 0);
}
Example #8
u32 do_general_exception(arch_regs_t *uregs)
{
    u32 cp0_cause = read_c0_cause();
    u32 cp0_status = read_c0_status();
    mips32_entryhi_t ehi;
    u32 victim_asid;
    u32 victim_inst;
    struct vmm_vcpu *c_vcpu;
    u8 delay_slot_exception = IS_BD_SET(cp0_cause);

    ehi._entryhi = read_c0_entryhi();
    victim_asid = ehi._s_entryhi.asid >> ASID_SHIFT;
    c_vcpu = vmm_scheduler_current_vcpu();

    /*
     * When the exception happens in a delay slot, we need to emulate
     * the corresponding branch instruction first. If it is one of the
     * "likely" instructions, we don't need to emulate the faulting
     * instruction, since "likely" instructions don't allow the slot to
     * be executed when the branch is not taken.
     */
    if (delay_slot_exception) {
        victim_inst = *((u32 *)(uregs->cp0_epc + 4));

        /*
         * If this function returns zero, the branch instruction was a
         * "likely" instruction and the branch wasn't taken. So don't
         * execute the delay slot, just return. The correct EPC to return
         * to will be programmed under our feet.
         */
        if (!cpu_vcpu_emulate_branch_and_jump_inst(c_vcpu, *((u32 *)uregs->cp0_epc), uregs)) {
            return VMM_OK;
        }
    } else {
        victim_inst = *((u32 *)uregs->cp0_epc);
    }

    switch (EXCEPTION_CAUSE(cp0_cause)) {
    case EXEC_CODE_COPU:
        cpu_vcpu_emulate_cop_inst(c_vcpu, victim_inst, uregs);

        if (!delay_slot_exception)
            uregs->cp0_epc += 4;

        break;

    case EXEC_CODE_TLBL:
        if (CPU_IN_USER_MODE(cp0_status) && is_vmm_asid(ehi._s_entryhi.asid)) {
            ehi._s_entryhi.asid = (0x1 << ASID_SHIFT);
            write_c0_entryhi(ehi._entryhi);
            vmm_panic("CPU is in user mode and ASID is pointing to VMM!!\n");
        }
        break;
    }

    return VMM_OK;
}
Example #9
int uip_netport_loopback_send(struct vmm_mbuf *mbuf)
{
	struct uip_port_state *s = &uip_port_state;
	if (mbuf == NULL)
		vmm_panic("%s: mbuf NULL\n", __func__);
	u8 *dstmac = ether_dstmac(mtod(mbuf, u8 *));
	u8 *srcmac = ether_srcmac(mtod(mbuf, u8 *));

	memcpy(dstmac, s->port->macaddr, 6);
	memcpy(srcmac, s->port->macaddr, 6);
	return uip_switch2port_xfer(s->port, mbuf);
}
Example #10
/**
 *  Fills uip_buf with a packet from the RX queue. If the RX queue is
 *  empty, we wait for some time before giving up.
 */
int uip_netport_read(void)
{
	struct vmm_mbuf *mbuf;
	struct dlist *node;
	unsigned long flags;
	u64 timeout = 50000000;
	struct uip_port_state *s = &uip_port_state;

	/* Keep trying until the RX buffer is non-empty */
	vmm_spin_lock_irqsave(&s->lock, flags);
	while (list_empty(&s->rxbuf)) {
		vmm_spin_unlock_irqrestore(&s->lock, flags);
		if (timeout) {
			/* Still time left before the timeout, so we wait */
			vmm_completion_wait_timeout(&s->rx_possible, &timeout);
		} else {
			/* We timed-out and buffer is still empty, so return */
			uip_len = 0;
			return uip_len;
		}
		vmm_spin_lock_irqsave(&s->lock, flags);
	}
	/* At this point we are sure rxbuf is non-empty, so we just
	 * dequeue a packet */
	node = list_pop(&s->rxbuf);
	mbuf = m_list_entry(node);
	vmm_spin_unlock_irqrestore(&s->lock, flags);
	if (mbuf == NULL) {
		vmm_panic("%s: mbuf is null\n", __func__);
	}
	if (!uip_buf) {
		vmm_panic("%s: uip_buf is null\n", __func__);
	}
	/* Copy the data from mbuf to uip_buf */
	uip_len = min(UIP_BUFSIZE, mbuf->m_pktlen);
	m_copydata(mbuf, 0, uip_len, uip_buf);
	/* Free the mbuf */
	m_freem(mbuf);
	return uip_len;
}
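
The intended usage is a receive loop that keeps calling uip_netport_read() until the timeout path returns zero. A minimal sketch; uip_input() is the standard uIP entry point for processing the frame now sitting in uip_buf/uip_len, assumed available in this port:

	/* Drain the RX queue: uip_netport_read() waits up to the
	 * built-in timeout and returns 0 when nothing arrived,
	 * otherwise the length of the packet now present in uip_buf.
	 */
	while (uip_netport_read() > 0) {
		uip_input();
	}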
Example #11
void do_error(struct vmm_vcpu *vcpu, arch_regs_t *regs,
	      unsigned long scause, const char *msg, int err, bool panic)
{
	u32 cpu = vmm_smp_processor_id();

	vmm_printf("%s: CPU%d: VCPU=%s %s (error %d)\n",
		   __func__, cpu, (vcpu) ? vcpu->name : "(NULL)", msg, err);
	cpu_vcpu_dump_general_regs(NULL, regs);
	cpu_vcpu_dump_exception_regs(NULL, scause, csr_read(CSR_STVAL));
	if (panic) {
		vmm_panic("%s: please reboot ...\n", __func__);
	}
}
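
With this helper, each failure class in a trap handler reduces to a single call. A hedged sketch of a call site (the message and error code are illustrative, not taken from the source):

	/* Report an unhandled trap; passing TRUE for panic turns the
	 * diagnostic dump into a full stop via vmm_panic().
	 */
	do_error(vcpu, regs, scause, "unhandled trap", VMM_EFAIL, TRUE);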
Example #12
void do_data_abort(arch_regs_t *regs)
{
	struct vmm_vcpu *vcpu = vmm_scheduler_current_vcpu();

	vmm_printf("%s: CPU%d unexpected exception\n",
		   __func__, vmm_smp_processor_id());
	vmm_printf("%s: Current VCPU=%s HSR=0x%08x\n",
		   __func__, (vcpu) ? vcpu->name : "(NULL)", 
		   read_hsr());
	vmm_printf("%s: HPFAR=0x%08x HIFAR=0x%08x HDFAR=0x%08x\n",
		   __func__, read_hpfar(), read_hifar(), read_hdfar());

	cpu_vcpu_dump_user_reg(regs);

	vmm_panic("%s: please reboot ...\n", __func__);
}
Example #13
static int __init daemon_mterm_init(void)
{
	u8 mterm_priority;
	u32 mterm_time_slice;
	struct vmm_devtree_node * node;
	const char * attrval;

	/* Reset the control structure */
	vmm_memset(&mtctrl, 0, sizeof(mtctrl));

	/* Retrieve mterm priority and time slice */
	node = vmm_devtree_getnode(VMM_DEVTREE_PATH_SEPARATOR_STRING
				   VMM_DEVTREE_VMMINFO_NODE_NAME);
	if (!node) {
		return VMM_EFAIL;
	}
	attrval = vmm_devtree_attrval(node,
				      "mterm_priority");
	if (attrval) {
		mterm_priority = *((u32 *) attrval);
	} else {
		mterm_priority = VMM_THREAD_DEF_PRIORITY;
	}
	attrval = vmm_devtree_attrval(node,
				      "mterm_time_slice");
	if (attrval) {
		mterm_time_slice = *((u32 *) attrval);
	} else {
		mterm_time_slice = VMM_THREAD_DEF_TIME_SLICE;
	}

	/* Create mterm thread */
	mtctrl.thread = vmm_threads_create("mterm", 
					   &mterm_main, 
					   NULL, 
					   mterm_priority,
					   mterm_time_slice);
	if (!mtctrl.thread) {
		vmm_panic("Creation of system critical thread failed.\n");
	}

	/* Start the mterm thread */
	vmm_threads_start(mtctrl.thread);

	return VMM_OK;
}
Example #14
void vmm_shutdown(void)
{
	int rc;

	/* Stop hypervisor timer subsystem */
	vmm_printf("Stopping Hypervisor Timer Subsystem\n");
	vmm_timer_stop();

	/* FIXME: Do other cleanup stuff. */

	/* Issue board shutdown */
	vmm_printf("Issuing Board Shutdown\n");
	if ((rc = arch_board_shutdown())) {
		vmm_panic("Error: Board shutdown failed.\n");
	}

	/* Wait here. Nothing else to do. */
	vmm_hang();
}
Example #15
void vmm_reset(void)
{
	int rc;

	/* Stop hypervisor timer */
	vmm_printf("Stopping Hypervisor Timer\n");
	vmm_timer_stop();

	/* FIXME: Do other cleanup stuff. */

	/* Issue board reset */
	vmm_printf("Issuing Board Reset\n");
	if ((rc = arch_board_reset())) {
		vmm_panic("Error: Board reset failed.\n");
	}

	/* Wait here. Nothing else to do. */
	vmm_hang();
}
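
vmm_reset() delegates the actual reset to the board port through arch_board_reset(). A minimal sketch of what a board port might implement, assuming a memory-mapped reset controller; the physical address and bit value below are hypothetical, and vmm_host_iomap()/vmm_writel() are used as the usual Xvisor I/O helpers:

int arch_board_reset(void)
{
	/* Hypothetical reset controller; address and value are made up */
	virtual_addr_t reg = vmm_host_iomap(0x10000000, 0x1000);

	if (!reg) {
		return VMM_EFAIL;
	}
	vmm_writel(0x1, (void *)reg);

	return VMM_OK;
}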
Example #16
static void vmm_scheduler_switch(struct vmm_scheduler_ctrl *schedp,
				 arch_regs_t *regs)
{
	struct vmm_vcpu *vcpu = schedp->current_vcpu;

	if (!regs) {
		/* This should never happen !!! */
		vmm_panic("%s: null pointer to regs.\n", __func__);
	}

	if (vcpu) {
		if (!vcpu->preempt_count) {
			vmm_scheduler_next(schedp, &schedp->ev, regs);
		} else {
			vmm_timer_event_restart(&schedp->ev);
		}
	} else {
		vmm_scheduler_next(schedp, &schedp->ev, regs);
	}
}
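
The preempt_count test above is what makes preemption disabling work: while the current VCPU holds a non-zero preempt_count, the scheduler only restarts its timer event instead of switching. A minimal sketch of the caller side, assuming Xvisor's vmm_scheduler_preempt_disable()/vmm_scheduler_preempt_enable() helpers bump and drop that count:

	/* Sketch: a section that must not lose the CPU mid-way.
	 * With preempt_count raised, vmm_scheduler_switch() takes the
	 * vmm_timer_event_restart() branch instead of vmm_scheduler_next().
	 */
	vmm_scheduler_preempt_disable();
	/* ... manipulate per-CPU state safely ... */
	vmm_scheduler_preempt_enable();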
Example #17
int do_vcpu_tlbmiss(arch_regs_t *uregs)
{
	u32 badvaddr = read_c0_badvaddr();
	struct vmm_vcpu *current_vcpu;
	int counter = 0;
	mips32_tlb_entry_t *c_tlbe;

	current_vcpu = vmm_scheduler_current_vcpu();
	for (counter = 0; counter < 2 * CPU_TLB_COUNT; counter++) {
		c_tlbe = &mips_sregs(current_vcpu)->shadow_tlb_entries[counter];
		if (TBE_PGMSKD_VPN2(c_tlbe) ==
		    (badvaddr & ~c_tlbe->page_mask)) {
			mips_fill_tlb_entry(c_tlbe, -1);
			return 0;
		}
	}

	/* No shadow TLB entry matched, so the fault cannot be served here. */
	vmm_panic("No TLB entry in shadow."
		  " Send fault to guest.\n");

	return VMM_EFAIL;
}
Example #18
void do_soft_irq(arch_regs_t *regs)
{
	struct vmm_vcpu *vcpu;

	if ((regs->cpsr & CPSR_MODE_MASK) == CPSR_MODE_HYPERVISOR) {
		vmm_scheduler_preempt_orphan(regs);
		return;
	} else {
		vcpu = vmm_scheduler_current_vcpu();

		vmm_printf("%s: CPU%d unexpected exception\n",
			   __func__, vmm_smp_processor_id());
		vmm_printf("%s: Current VCPU=%s HSR=0x%08x\n",
			   __func__, (vcpu) ? vcpu->name : "(NULL)", 
			   read_hsr());
		vmm_printf("%s: HPFAR=0x%08x HIFAR=0x%08x HDFAR=0x%08x\n",
			   __func__, read_hpfar(), read_hifar(), read_hdfar());

		cpu_vcpu_dump_user_reg(regs);

		vmm_panic("%s: please reboot ...\n", __func__);
	}
}
Example #19
static int map_guest_region(struct vmm_vcpu *vcpu, int region_type, int tlb_index)
{
	mips32_tlb_entry_t shadow_entry;
	physical_addr_t gphys;
	physical_addr_t hphys, paddr;
	virtual_addr_t vaddr2map;
	u32 gphys_size;
	struct vmm_region *region;
	struct vmm_guest *aguest = vcpu->guest;

	vaddr2map = (region_type == VMM_REGION_TYPE_ROM ? 0x3FC00000 : 0x0);
	paddr = (region_type == VMM_REGION_TYPE_ROM ? 0x1FC00000 : 0x0);

	/*
	 * Create the initial TLB entry mapping complete RAM promised
	 * to the guest. The idea is that guest vcpu shouldn't fault
	 * on this address.
	 */
	region = vmm_guest_find_region(aguest, paddr, TRUE);
	if (region == NULL) {
		vmm_printf("Bummer!!! No guest region defined for VCPU RAM.\n");
		return VMM_EFAIL;
	}

	gphys = region->gphys_addr;
	hphys = region->hphys_addr;
	gphys_size = region->phys_size;

	switch (gphys_size) {
	case TLB_PAGE_SIZE_1K:
	case TLB_PAGE_SIZE_4K:
	case TLB_PAGE_SIZE_16K:
	case TLB_PAGE_SIZE_256K:
	case TLB_PAGE_SIZE_1M:
	case TLB_PAGE_SIZE_4M:
	case TLB_PAGE_SIZE_16M:
	case TLB_PAGE_SIZE_64M:
	case TLB_PAGE_SIZE_256M:
		shadow_entry.page_mask = ((gphys_size / 2) - 1);
		break;
	default:
		vmm_panic("Guest physical memory region should be same as page"
			  " sizes available for MIPS32.\n");
	}

	/* FIXME: Guest physical/virtual should be from DTS */
	shadow_entry.entryhi._s_entryhi.vpn2 = (vaddr2map >> VPN2_SHIFT);
	shadow_entry.entryhi._s_entryhi.asid = (u8)(2 << 6);
	shadow_entry.entryhi._s_entryhi.reserved = 0;
	shadow_entry.entryhi._s_entryhi.vpn2x = 0;

	shadow_entry.entrylo0._s_entrylo.global = 0;
	shadow_entry.entrylo0._s_entrylo.valid = 1;
	shadow_entry.entrylo0._s_entrylo.dirty = 1;
	shadow_entry.entrylo0._s_entrylo.cacheable = 1;
	shadow_entry.entrylo0._s_entrylo.pfn = (hphys >> PAGE_SHIFT);

	shadow_entry.entrylo1._s_entrylo.global = 0;
	shadow_entry.entrylo1._s_entrylo.valid = 0;
	shadow_entry.entrylo1._s_entrylo.dirty = 0;
	shadow_entry.entrylo1._s_entrylo.cacheable = 0;
	shadow_entry.entrylo1._s_entrylo.pfn = 0;

	vmm_memcpy((void *)&mips_sregs(vcpu)->shadow_tlb_entries[tlb_index],
		   (void *)&shadow_entry, sizeof(mips32_tlb_entry_t));

	return VMM_OK;
}
Example #20
void do_data_abort(vmm_user_regs_t * uregs)
{
	int rc = VMM_EFAIL; 
	bool crash_dump = FALSE;
	u32 dfsr, dfar, fs, dom, wnr;
	vmm_vcpu_t * vcpu;
	cpu_l1tbl_t * l1;
	cpu_page_t pg;

	dfsr = read_dfsr();
	dfar = read_dfar();
	fs = (dfsr & DFSR_FS4_MASK) >> DFSR_FS4_SHIFT;
	fs = (fs << 4) | (dfsr & DFSR_FS_MASK);
	wnr = (dfsr & DFSR_WNR_MASK) >> DFSR_WNR_SHIFT;
	dom = (dfsr & DFSR_DOM_MASK) >> DFSR_DOM_SHIFT;

	if ((uregs->cpsr & CPSR_MODE_MASK) != CPSR_MODE_USER) {
		if (fs != DFSR_FS_TRANS_FAULT_SECTION &&
		    fs != DFSR_FS_TRANS_FAULT_PAGE) {
			vmm_panic("%s: unexpected data abort\n"
				  "%s: dfsr = 0x%08x, dfar = 0x%08x\n",
				  __func__, __func__, dfsr, dfar);
		}
		rc = cpu_mmu_get_reserved_page(dfar, &pg);
		if (rc) {
			vmm_panic("%s: cannot find reserved page\n"
				  "%s: dfsr = 0x%08x, dfar = 0x%08x\n", 
				  __func__, __func__, dfsr, dfar);
		}
		l1 = cpu_mmu_l1tbl_current();
		if (!l1) {
			vmm_panic("%s: cannot find l1 table\n"
				  "%s: dfsr = 0x%08x, dfar = 0x%08x\n",
				  __func__, __func__, dfsr, dfar);
		}
		rc = cpu_mmu_map_page(l1, &pg);
		if (rc) {
			vmm_panic("%s: cannot map page in l1 table\n"
				  "%s: dfsr = 0x%08x, dfar = 0x%08x\n",
				  __func__, __func__, dfsr, dfar);
		}
		return;
	}

	vmm_scheduler_irq_enter(uregs, TRUE);

	vcpu = vmm_scheduler_current_vcpu();

	switch(fs) {
	case DFSR_FS_ALIGN_FAULT:
		break;
	case DFSR_FS_ICACHE_MAINT_FAULT:
		break;
	case DFSR_FS_TTBL_WALK_SYNC_EXT_ABORT_1:
	case DFSR_FS_TTBL_WALK_SYNC_EXT_ABORT_2:
		break;
	case DFSR_FS_TTBL_WALK_SYNC_PARITY_ERROR_1:
	case DFSR_FS_TTBL_WALK_SYNC_PARITY_ERROR_2:
		break;
	case DFSR_FS_TRANS_FAULT_SECTION:
	case DFSR_FS_TRANS_FAULT_PAGE:
		rc = cpu_vcpu_cp15_trans_fault(vcpu, uregs, 
						dfar, fs, dom, wnr, 1, FALSE);
		crash_dump = TRUE;
		break;
	case DFSR_FS_ACCESS_FAULT_SECTION:
	case DFSR_FS_ACCESS_FAULT_PAGE:
		rc = cpu_vcpu_cp15_access_fault(vcpu, uregs, 
						dfar, fs, dom, wnr, 1);
		crash_dump = TRUE;
		break;
	case DFSR_FS_DOMAIN_FAULT_SECTION:
	case DFSR_FS_DOMAIN_FAULT_PAGE:
		rc = cpu_vcpu_cp15_domain_fault(vcpu, uregs, 
						dfar, fs, dom, wnr, 1);
		crash_dump = TRUE;
		break;
	case DFSR_FS_PERM_FAULT_SECTION:
	case DFSR_FS_PERM_FAULT_PAGE:
		rc = cpu_vcpu_cp15_perm_fault(vcpu, uregs, 
						dfar, fs, dom, wnr, 1);
		if ((dfar & ~(sizeof(vcpu->sregs->cp15.ovect) - 1)) != 
						vcpu->sregs->cp15.ovect_base) {
			crash_dump = FALSE;
		}
		break;
	case DFSR_FS_DEBUG_EVENT:
	case DFSR_FS_SYNC_EXT_ABORT:
	case DFSR_FS_IMP_VALID_LOCKDOWN:
	case DFSR_FS_IMP_VALID_COPROC_ABORT:
	case DFSR_FS_MEM_ACCESS_SYNC_PARITY_ERROR:
	case DFSR_FS_ASYNC_EXT_ABORT:
	case DFSR_FS_MEM_ACCESS_ASYNC_PARITY_ERROR:
		break;
	default:
		break;
	};

	if (rc && crash_dump) {
		vmm_printf("\n");
		vmm_printf("%s: error %d\n", __func__, rc);
		vmm_printf("%s: vcpu_id = %d, dfar = 0x%x, dfsr = 0x%x\n", 
				__func__, vcpu->id, dfar, dfsr);
		cpu_vcpu_dump_user_reg(vcpu, uregs);
	}

	vmm_scheduler_irq_exit(uregs);
}
Example #21
static void system_init_work(struct vmm_work *work)
{
#define BOOTCMD_WIDTH		256
	int ret;
	char bcmd[BOOTCMD_WIDTH];
	const char *str;
	u32 c, freed;
	struct vmm_chardev *cdev;
#if defined(CONFIG_RTC)
	struct vmm_rtcdev *rdev;
#endif
	struct vmm_devtree_node *node, *node1;

	/* Initialize command manager */
	vmm_printf("Initialize Command Manager\n");
	ret = vmm_cmdmgr_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Initialize device driver framework */
	vmm_printf("Initialize Device Driver Framework\n");
	ret = vmm_devdrv_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Initialize device emulation framework */
	vmm_printf("Initialize Device Emulation Framework\n");
	ret = vmm_devemu_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Initialize character device framework */
	vmm_printf("Initialize Character Device Framework\n");
	ret = vmm_chardev_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Initialize virtual serial port framework */
	vmm_printf("Initialize Virtual Serial Port Framework\n");
	ret = vmm_vserial_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

#if defined(CONFIG_SMP)
	/* Poll for all present CPUs to become online */
	/* Note: There is a timeout of 1 second */
	/* Note: The modules might use SMP IPIs or might have per-cpu context,
	 * so we do this before vmm_modules_init() in order to make sure that
	 * the correct number of online CPUs is visible to all modules.
	 */
	ret = 1000;
	while(ret--) {
		int all_cpu_online = 1;

		for_each_present_cpu(c) {
			if (!vmm_cpu_online(c)) {
				all_cpu_online = 0;
			}
		}

		if (all_cpu_online) {
			break;
		}

		vmm_mdelay(1);
	}
#endif

	/* Initialize hypervisor modules */
	vmm_printf("Initialize Hypervisor Modules\n");
	ret = vmm_modules_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Initialize cpu final */
	vmm_printf("Initialize CPU Final\n");
	ret = arch_cpu_final_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Initialize board final */
	vmm_printf("Initialize Board Final\n");
	ret = arch_board_final_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Print status of present host CPUs */
	for_each_present_cpu(c) {
		if (vmm_cpu_online(c)) {
			vmm_printf("CPU%d: Online\n", c);
		} else {
			vmm_printf("CPU%d: Possible\n", c);
		}
	}
	vmm_printf("Brought Up %d CPUs\n", vmm_num_online_cpus());

	/* Free init memory */
	vmm_printf("Freeing init memory: ");
	freed = vmm_host_free_initmem();
	vmm_printf("%dK\n", freed);

	/* Process attributes in chosen node */
	node = vmm_devtree_getnode(VMM_DEVTREE_PATH_SEPARATOR_STRING
				   VMM_DEVTREE_CHOSEN_NODE_NAME);
	if (node) {
		/* Find character device based on console attribute */
		str = vmm_devtree_attrval(node, VMM_DEVTREE_CONSOLE_ATTR_NAME);
		if (!(cdev = vmm_chardev_find(str))) {
			if ((node1 = vmm_devtree_getnode(str))) {
				cdev = vmm_chardev_find(node1->name);
			}
		}
		/* Set chosen console device as stdio device */
		if (cdev) {
			vmm_printf("Change stdio device to %s\n", cdev->name);
			vmm_stdio_change_device(cdev);
		}

#if defined(CONFIG_RTC)
		/* Find rtc device based on rtcdev attribute */
		str = vmm_devtree_attrval(node, VMM_DEVTREE_RTCDEV_ATTR_NAME);
		if (!(rdev = vmm_rtcdev_find(str))) {
			if ((node1 = vmm_devtree_getnode(str))) {
				rdev = vmm_rtcdev_find(node1->name);
			}
		}
		/* Syncup wallclock time with chosen rtc device */
		if (rdev) {
			ret = vmm_rtcdev_sync_wallclock(rdev);
			vmm_printf("Syncup wallclock using %s", rdev->name);
			if (ret) {
				vmm_printf("(error %d)", ret);
			}
			vmm_printf("\n");
		}
#endif

		/* Execute boot commands */
		str = vmm_devtree_attrval(node, VMM_DEVTREE_BOOTCMD_ATTR_NAME);
		if (str) {
			c = vmm_devtree_attrlen(node, VMM_DEVTREE_BOOTCMD_ATTR_NAME);
			while (c) {
#if defined(CONFIG_VERBOSE_MODE)
				/* Print boot command */
				vmm_printf("bootcmd: %s\n", str);
#endif
				/* Execute boot command */
				strlcpy(bcmd, str, sizeof(bcmd));
				cdev = vmm_stdio_device();
				vmm_cmdmgr_execute_cmdstr(cdev, bcmd, NULL);
				/* Next boot command */
				c -= strlen(str) + 1;
				str += strlen(str) + 1;
			}
		}
	}
}
Example #22
void vmm_init(void)
{
	int ret;
#if defined(CONFIG_SMP)
	u32 c;
#endif
	u32 cpu = vmm_smp_processor_id();
	struct vmm_work sysinit;

	/* Mark this CPU possible & present */
	vmm_set_cpu_possible(cpu, TRUE);
	vmm_set_cpu_present(cpu, TRUE);

	/* Print version string */
	vmm_printf("\n");
	vmm_printf("%s v%d.%d.%d (%s %s)\n", VMM_NAME, 
		   VMM_VERSION_MAJOR, VMM_VERSION_MINOR, VMM_VERSION_RELEASE,
		   __DATE__, __TIME__);
	vmm_printf("\n");

	/* Initialize host virtual address space */
	vmm_printf("Initialize Host Address Space\n");
	ret = vmm_host_aspace_init();
	if (ret) {
		vmm_hang();
	}

	/* Initialize heap */
	vmm_printf("Initialize Heap Management\n");
	ret = vmm_heap_init();
	if (ret) {
		vmm_hang();
	}

	/* Initialize per-cpu area */
	vmm_printf("Initialize PerCPU Areas\n");
	ret = vmm_percpu_init();
	if (ret) {
		vmm_hang();
	}

	/* Initialize device tree */
	vmm_printf("Initialize Device Tree\n");
	ret = vmm_devtree_init();
	if (ret) {
		vmm_hang();
	}

	/* Initialize host interrupts */
	vmm_printf("Initialize Host IRQ\n");
	ret = vmm_host_irq_init();
	if (ret) {
		vmm_hang();
	}

	/* Initialize CPU early */
	vmm_printf("Initialize CPU Early\n");
	ret = arch_cpu_early_init();
	if (ret) {
		vmm_hang();
	}

	/* Initialize Board early */
	vmm_printf("Initialize Board Early\n");
	ret = arch_board_early_init();
	if (ret) {
		vmm_hang();
	}

	/* Initialize standard input/output */
	vmm_printf("Initialize Standard I/O\n");
	ret = vmm_stdio_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Initialize clocksource manager */
	vmm_printf("Initialize Clocksource Manager\n");
	ret = vmm_clocksource_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Initialize clockchip manager */
	vmm_printf("Initialize Clockchip Manager\n");
	ret = vmm_clockchip_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Initialize hypervisor timer */
	vmm_printf("Initialize Hypervisor Timer\n");
	ret = vmm_timer_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Initialize soft delay */
	vmm_printf("Initialize Soft Delay\n");
	ret = vmm_delay_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Initialize hypervisor manager */
	vmm_printf("Initialize Hypervisor Manager\n");
	ret = vmm_manager_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Initialize hypervisor scheduler */
	vmm_printf("Initialize Hypervisor Scheduler\n");
	ret = vmm_scheduler_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Initialize hypervisor threads */
	vmm_printf("Initialize Hypervisor Threads\n");
	ret = vmm_threads_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

#ifdef CONFIG_PROFILE
	/* Initialize hypervisor profiler */
	vmm_printf("Initialize Hypervisor Profiler\n");
	ret = vmm_profiler_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}
#endif

#if defined(CONFIG_SMP)
	/* Initialize inter-processor interrupts */
	vmm_printf("Initialize Inter Processor Interrupts\n")
	ret = vmm_smp_ipi_init();
	if (ret) {
		vmm_hang();
	}

	/* Initialize secondary CPUs */
	vmm_printf("Initialize Secondary CPUs\n");
	ret = arch_smp_init_cpus();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Prepare secondary CPUs */
	ret = arch_smp_prepare_cpus(vmm_num_possible_cpus());
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Start each present secondary CPUs */
	for_each_present_cpu(c) {
		if (c == cpu) {
			continue;
		}
		ret = arch_smp_start_cpu(c);
		if (ret) {
			vmm_printf("Failed to start CPU%d\n", ret);
		}
	}

	/* Initialize hypervisor load balancer */
	vmm_printf("Initialize Hypervisor Load Balancer\n");
	ret = vmm_loadbal_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}
#endif

	/* Initialize workqueue framework */
	vmm_printf("Initialize Workqueue Framework\n");
	ret = vmm_workqueue_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Initialize wallclock */
	vmm_printf("Initialize Wallclock Subsystem\n");
	ret = vmm_wallclock_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Schedule system initialization work */
	INIT_WORK(&sysinit, &system_init_work);
	vmm_workqueue_schedule_work(NULL, &sysinit);

	/* Start timer (Must be last step) */
	vmm_timer_start();

	/* Wait here till scheduler gets invoked by timer */
	vmm_hang();
}
Example #23
int uip_netport_init(void)
{
	struct vmm_netswitch *nsw;
	struct uip_port_state *s = &uip_port_state;
	struct uip_fw_netif *netif;
	uip_ipaddr_t ipaddr;
	char tname[64];

	uip_buf = vmm_malloc(UIP_BUFSIZE + 2);
	if (!uip_buf) {
		vmm_panic("%s: uip_buf alloc failed\n", __func__);
	}

	INIT_SPIN_LOCK(&s->lock);
	INIT_LIST_HEAD(&s->rxbuf);
	INIT_COMPLETION(&s->rx_possible);

	/* Get the first netswitch */
	nsw = vmm_netswitch_get(0);
	if (!nsw) {
		vmm_panic("No netswitch found\n");
	}
	/* Create a port-name */
	vmm_sprintf(tname, "%s-uip", nsw->name); 
	/* Allocate a netport for this netswitch */
	s->port = vmm_netport_alloc(tname);
	if (!s->port) {
		vmm_printf("UIP->netport alloc failed\n");
		return VMM_EFAIL;
	}
	/* Allocate a uip_fw_netif */ 
	netif = vmm_malloc(sizeof(struct uip_fw_netif));
	if (!netif) {
		vmm_printf("UIP->netif alloc failed\n");
		vmm_netport_free(s->port);
		return VMM_EFAIL;
	}
	/* Register the netport */
	s->port->mtu = UIP_BUFSIZE;
	s->port->link_changed = uip_set_link;
	s->port->can_receive = uip_can_receive;
	s->port->switch2port_xfer = uip_switch2port_xfer;
	s->port->priv = s;
	s->netif = netif;

	vmm_netport_register(s->port);
	/* Attach with the netswitch */
	vmm_netswitch_port_add(nsw, s->port);
	/* Notify our ethernet address */
	uip_setethaddr(((struct uip_eth_addr *)(s->port->macaddr)));
	/* Generate an IP address */
	uip_ipaddr(ipaddr, 192,168,0,1);
	uip_fw_setipaddr(netif, ipaddr);
	uip_ipaddr(ipaddr, 255,255,255,0);
	uip_fw_setnetmask(netif, ipaddr);
	/* Register the netif with uip stack */
	netif->output = &uip_netport_output;
	netif->priv = s;
	uip_fw_register(netif);
	/* Set this interface as default one */
	uip_fw_default(netif);
	return 0;
}
Example #24
/* Co-processor un-usable exception */
u32 cpu_vcpu_emulate_cop_inst(struct vmm_vcpu *vcpu, u32 inst,
			      arch_regs_t *uregs)
{
	u32 cp0_cause = read_c0_cause();
	u8 rt, rd, sel;
	mips32_entryhi_t ehi;

	u32 cop_id = UNUSABLE_COP_ID(cp0_cause);
	if (cop_id != 0) {
		ehi._entryhi = (read_c0_entryhi() & ~0xFF);
		ehi._s_entryhi.asid = (0x1 << ASID_SHIFT);
		write_c0_entryhi(ehi._entryhi);
		vmm_panic("COP%d unusable exeption!\n", cop_id);
	}

	switch(MIPS32_OPCODE(inst)) {
	case MIPS32_OPC_CP0_ACSS:
		switch(MIPS32_OPC_CP0_DIR(inst)) {
		case MIPS32_OPC_CP0_MF:
			rt = MIPS32_OPC_CP0_RT(inst);
			rd = MIPS32_OPC_CP0_RD(inst);
			sel = MIPS32_OPC_CP0_SEL(inst);
			if (load_store_emulated_reg(rd, sel,
						    &uregs->regs[rt],
						    vcpu, 1)) {
				ehi._entryhi = read_c0_entryhi() & ~0xFF;
				ehi._s_entryhi.asid = (0x1 << ASID_SHIFT);
				write_c0_entryhi(ehi._entryhi);
				vmm_panic("Can't load emulated register.\n");
			}

			break;
		case MIPS32_OPC_CP0_MT:
			rt = MIPS32_OPC_CP0_RT(inst);
			rd = MIPS32_OPC_CP0_RD(inst);
			sel = MIPS32_OPC_CP0_SEL(inst);
			if (load_store_emulated_reg(rd, sel,
						    &uregs->regs[rt],
						    vcpu, 0)) {
				ehi._entryhi = read_c0_entryhi() & ~0xFF;
				ehi._s_entryhi.asid = (0x1 << ASID_SHIFT);
				write_c0_entryhi(ehi._entryhi);
				vmm_panic("Can't load emulated register.\n");
			}

			break;

		case MIPS32_OPC_CP0_DIEI:
			if (!MIPS32_OPC_CP0_SC(inst)) {
				rt = MIPS32_OPC_CP0_RT(inst);
				/* Only when rt points to a non-zero register,
				 * save the current status there. */
				if (rt)
					uregs->regs[rt] =
						mips_sregs(vcpu)->cp0_regs[CP0_STATUS_IDX];

				/* Opcode says disable interrupts (for vcpu) */
				mips_sregs(vcpu)->cp0_regs[CP0_STATUS_IDX] &= ~0x1UL;
			} else {
				rt = MIPS32_OPC_CP0_RT(inst);
				/* Only when rt points to a non-zero register,
				 * save the current status there. */
				if (rt)
					uregs->regs[rt] =
						mips_sregs(vcpu)->cp0_regs[CP0_STATUS_IDX];

				/* Opcode says enable interrupts (for vcpu) */
				mips_sregs(vcpu)->cp0_regs[CP0_STATUS_IDX] |= 0x01UL;
			}

			break;
		default:
			if (IS_TLB_ACCESS_INST(inst)) {
				return cpu_vcpu_emulate_tlb_inst(vcpu,
								 inst, uregs);
			}
			break;
		}
		break;
	}

	return VMM_OK;
}
Example #25
void do_sync(arch_regs_t *regs, unsigned long mode)
{
	int rc = VMM_OK;
	u32 ec, il, iss;
	u64 esr, far, elr;
	physical_addr_t fipa = 0;
	struct vmm_vcpu *vcpu;

	esr = mrs(esr_el2);
	far = mrs(far_el2);
	elr = mrs(elr_el2);

	ec = (esr & ESR_EC_MASK) >> ESR_EC_SHIFT;
	il = (esr & ESR_IL_MASK) >> ESR_IL_SHIFT;
	iss = (esr & ESR_ISS_MASK) >> ESR_ISS_SHIFT;

	vcpu = vmm_scheduler_current_vcpu();

	/* We don't expect any faults from hypervisor code itself,
	 * so any trap we get from hypervisor mode means something
	 * unexpected has occurred.
	 */
	if ((regs->pstate & PSR_EL_MASK) == PSR_EL_2) {
		if ((ec == EC_TRAP_HVC_A64) && (iss == 0)) {
			vmm_scheduler_preempt_orphan(regs);
			return;
		}
		vmm_printf("%s: CPU%d VCPU=%s unexpected exception\n",
			   __func__, vmm_smp_processor_id(),
			   (vcpu) ? vcpu->name : "(NULL)");
		vmm_printf("%s: ESR=0x%016lx EC=0x%x IL=0x%x ISS=0x%x\n",
			   __func__, esr, ec, il, iss);
		vmm_printf("%s: ELR=0x%016lx FAR=0x%016lx HPFAR=0x%016lx\n",
			   __func__, elr, far, mrs(hpfar_el2));
		cpu_vcpu_dump_user_reg(regs);
		vmm_panic("%s: please reboot ...\n", __func__);
	}

	vmm_scheduler_irq_enter(regs, TRUE);

	switch (ec) {
	case EC_UNKNOWN:
		/* We don't expect to get this trap, so error */
		rc = VMM_EFAIL;
		break;
	case EC_TRAP_WFI_WFE:
		/* WFI emulation */
		rc = cpu_vcpu_emulate_wfi_wfe(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MCR_MRC_CP15_A32:
		/* MCR/MRC CP15 emulation */
		rc = cpu_vcpu_emulate_mcr_mrc_cp15(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MCRR_MRRC_CP15_A32:
		/* MCRR/MRRC CP15 emulation */
		rc = cpu_vcpu_emulate_mcrr_mrrc_cp15(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MCR_MRC_CP14_A32:
		/* MCR/MRC CP14 emulation */
		rc = cpu_vcpu_emulate_mcr_mrc_cp14(vcpu, regs, il, iss);
		break;
	case EC_TRAP_LDC_STC_CP14_A32:
		/* LDC/STC CP14 emulation */
		rc = cpu_vcpu_emulate_ldc_stc_cp14(vcpu, regs, il, iss);
		break;
	case EC_SIMD_FPU:
		/* Advanced SIMD and FPU emulation */
		rc = cpu_vcpu_emulate_simd_fp_regs(vcpu, regs, il, iss);
		break;
	case EC_FPEXC_A32:
	case EC_FPEXC_A64:
		/* We don't expect any FP execution faults */
		rc = VMM_EFAIL;
		break;
	case EC_TRAP_MRC_VMRS_CP10_A32:
		/* MRC (or VMRS) to CP10 for MVFR0, MVFR1 or FPSID */
		rc = cpu_vcpu_emulate_vmrs(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MCRR_MRRC_CP14_A32:
		/* MRRC to CP14 emulation */
		rc = cpu_vcpu_emulate_mcrr_mrrc_cp14(vcpu, regs, il, iss);
		break;
	case EC_TRAP_SVC_A32:
	case EC_TRAP_SVC_A64:
		/* We don't expect to get these traps, so error */
		rc = VMM_EFAIL;
		break;
	case EC_TRAP_SMC_A32:
		/* SMC emulation for A32 guest */
		rc = cpu_vcpu_emulate_smc32(vcpu, regs, il, iss);
		break;
	case EC_TRAP_SMC_A64:
		/* SMC emulation for A64 guest */
		rc = cpu_vcpu_emulate_smc64(vcpu, regs, il, iss);
		break;
	case EC_TRAP_HVC_A32:
		/* HVC emulation for A32 guest */
		rc = cpu_vcpu_emulate_hvc32(vcpu, regs, il, iss);
		break;
	case EC_TRAP_HVC_A64:
		/* HVC emulation for A64 guest */
		rc = cpu_vcpu_emulate_hvc64(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MSR_MRS_SYSTEM:
		/* MSR/MRS/SystemRegs emulation */
		rc = cpu_vcpu_emulate_msr_mrs_system(vcpu, regs, il, iss);
		break;
	case EC_TRAP_LWREL_INST_ABORT:
		/* Stage2 instruction abort */
		fipa = (mrs(hpfar_el2) & HPFAR_FIPA_MASK) >> HPFAR_FIPA_SHIFT;
		fipa = fipa << HPFAR_FIPA_PAGE_SHIFT;
		fipa = fipa | (mrs(far_el2) & HPFAR_FIPA_PAGE_MASK);
		rc = cpu_vcpu_inst_abort(vcpu, regs, il, iss, fipa);
		break;
	case EC_TRAP_LWREL_DATA_ABORT:
		/* Stage2 data abort */
		fipa = (mrs(hpfar_el2) & HPFAR_FIPA_MASK) >> HPFAR_FIPA_SHIFT;
		fipa = fipa << HPFAR_FIPA_PAGE_SHIFT;
		fipa = fipa | (mrs(far_el2) & HPFAR_FIPA_PAGE_MASK);
		rc = cpu_vcpu_data_abort(vcpu, regs, il, iss, fipa);
		break;
	case EC_CUREL_INST_ABORT:
	case EC_CUREL_DATA_ABORT:
	case EC_SERROR:
		/* We don't expect to get aborts from EL2, so error */
		rc = VMM_EFAIL;
		break;
	case EC_PC_UNALIGNED:
	case EC_SP_UNALIGNED:
		/* We don't expect to get alignment faults from EL2 */
		rc = VMM_EFAIL;
		break;
	default:
		/* Unhandled or unknown EC value so error */
		rc = VMM_EFAIL;
		break;
	};

	if (rc) {
		vmm_printf("%s: CPU%d VCPU=%s sync failed (error %d)\n", 
			   __func__, vmm_smp_processor_id(), 
			   (vcpu) ? vcpu->name : "(NULL)", rc);
		vmm_printf("%s: ESR=0x%016lx EC=0x%x IL=0x%x ISS=0x%x\n",
			   __func__, esr, ec, il, iss);
		vmm_printf("%s: ELR=0x%016lx FAR=0x%016lx HPFAR=0x%016lx\n",
			   __func__, elr, far, mrs(hpfar_el2));
		if (vmm_manager_vcpu_get_state(vcpu) != VMM_VCPU_STATE_HALTED) {
			cpu_vcpu_halt(vcpu, regs);
		}
	}

	vmm_scheduler_irq_exit(regs);
}
Example #26
void do_not_used(arch_regs_t * uregs)
{
	vmm_panic("%s: unexpected exception\n", __func__);
}
Example #27
void do_data_abort(arch_regs_t * uregs)
{
	int rc = VMM_EFAIL; 
	bool crash_dump = FALSE;
	u32 dfsr, dfar, fs, dom, wnr;
	struct vmm_vcpu * vcpu;
	struct cpu_l1tbl * l1;
	struct cpu_page pg;

	dfsr = read_dfsr();
	dfar = read_dfar();

	fs = (dfsr & DFSR_FS_MASK);
#if !defined(CONFIG_ARMV5)
	fs |= (dfsr & DFSR_FS4_MASK) >> (DFSR_FS4_SHIFT - 4);
#endif
	wnr = (dfsr & DFSR_WNR_MASK) >> DFSR_WNR_SHIFT;
	dom = (dfsr & DFSR_DOM_MASK) >> DFSR_DOM_SHIFT;

	if ((uregs->cpsr & CPSR_MODE_MASK) != CPSR_MODE_USER) {
		if (fs != DFSR_FS_TRANS_FAULT_SECTION &&
		    fs != DFSR_FS_TRANS_FAULT_PAGE) {
			vmm_panic("%s: unexpected data abort\n"
				  "%s: pc = 0x%08x, dfsr = 0x%08x, dfar = 0x%08x\n", 
				  __func__, __func__, uregs->pc, dfsr, dfar);
		}
		rc = cpu_mmu_get_reserved_page(dfar, &pg);
		if (rc) {
			/* If we were in normal context then just handle
			 * trans fault for current normal VCPU and exit
			 * else there is nothing we can do so panic.
			 */
			if (vmm_scheduler_normal_context()) {
				vcpu = vmm_scheduler_current_vcpu();
				cpu_vcpu_cp15_trans_fault(vcpu, uregs, 
						dfar, fs, dom, wnr, 1, FALSE);
				return;
			}
			vmm_panic("%s: cannot find reserved page\n"
				  "%s: dfsr = 0x%08x, dfar = 0x%08x\n", 
				  __func__, __func__, dfsr, dfar);
		}
		l1 = cpu_mmu_l1tbl_current();
		if (!l1) {
			vmm_panic("%s: cannot find l1 table\n"
				  "%s: dfsr = 0x%08x, dfar = 0x%08x\n",
				  __func__, __func__, dfsr, dfar);
		}
		rc = cpu_mmu_map_page(l1, &pg);
		if (rc) {
			vmm_panic("%s: cannot map page in l1 table\n"
				  "%s: dfsr = 0x%08x, dfar = 0x%08x\n",
				  __func__, __func__, dfsr, dfar);
		}
		return;
	}

	vcpu = vmm_scheduler_current_vcpu();

	vmm_scheduler_irq_enter(uregs, TRUE);

	switch(fs) {
	case DFSR_FS_ALIGN_FAULT:
		break;
	case DFSR_FS_ICACHE_MAINT_FAULT:
		break;
	case DFSR_FS_TTBL_WALK_SYNC_EXT_ABORT_1:
	case DFSR_FS_TTBL_WALK_SYNC_EXT_ABORT_2:
		break;
	case DFSR_FS_TTBL_WALK_SYNC_PARITY_ERROR_1:
	case DFSR_FS_TTBL_WALK_SYNC_PARITY_ERROR_2:
		break;
	case DFSR_FS_TRANS_FAULT_SECTION:
	case DFSR_FS_TRANS_FAULT_PAGE:
		rc = cpu_vcpu_cp15_trans_fault(vcpu, uregs, 
						dfar, fs, dom, wnr, 1, FALSE);
		crash_dump = TRUE;
		break;
	case DFSR_FS_ACCESS_FAULT_SECTION:
	case DFSR_FS_ACCESS_FAULT_PAGE:
		rc = cpu_vcpu_cp15_access_fault(vcpu, uregs, 
						dfar, fs, dom, wnr, 1);
		crash_dump = TRUE;
		break;
	case DFSR_FS_DOMAIN_FAULT_SECTION:
	case DFSR_FS_DOMAIN_FAULT_PAGE:
		rc = cpu_vcpu_cp15_domain_fault(vcpu, uregs, 
						dfar, fs, dom, wnr, 1);
		crash_dump = TRUE;
		break;
	case DFSR_FS_PERM_FAULT_SECTION:
	case DFSR_FS_PERM_FAULT_PAGE:
		rc = cpu_vcpu_cp15_perm_fault(vcpu, uregs, 
						dfar, fs, dom, wnr, 1);
		if ((dfar & ~(TTBL_L2TBL_SMALL_PAGE_SIZE - 1)) != 
						arm_priv(vcpu)->cp15.ovect_base) {
			crash_dump = FALSE;
		}
		break;
	case DFSR_FS_DEBUG_EVENT:
	case DFSR_FS_SYNC_EXT_ABORT:
	case DFSR_FS_IMP_VALID_LOCKDOWN:
	case DFSR_FS_IMP_VALID_COPROC_ABORT:
	case DFSR_FS_MEM_ACCESS_SYNC_PARITY_ERROR:
	case DFSR_FS_ASYNC_EXT_ABORT:
	case DFSR_FS_MEM_ACCESS_ASYNC_PARITY_ERROR:
		break;
	default:
		break;
	};

	if (rc && crash_dump) {
		vmm_printf("\n");
		vmm_printf("%s: error %d\n", __func__, rc);
		vmm_printf("%s: vcpu_id = %d, dfar = 0x%x, dfsr = 0x%x\n", 
				__func__, vcpu->id, dfar, dfsr);
		cpu_vcpu_dump_user_reg(vcpu, uregs);
	}

	vmm_scheduler_irq_exit(uregs);
}
Example #28
void do_prefetch_abort(arch_regs_t * uregs)
{
	int rc = VMM_EFAIL;
	bool crash_dump = FALSE;
	u32 ifsr, ifar, fs;
	struct vmm_vcpu * vcpu;

	ifsr = read_ifsr();
	ifar = read_ifar();

	fs = (ifsr & IFSR_FS_MASK);
#if !defined(CONFIG_ARMV5)
	fs |= (ifsr & IFSR_FS4_MASK) >> (IFSR_FS4_SHIFT - 4);
#endif

	if ((uregs->cpsr & CPSR_MODE_MASK) != CPSR_MODE_USER) {
		struct cpu_l1tbl * l1;
		struct cpu_page pg;
		if (fs != IFSR_FS_TRANS_FAULT_SECTION &&
		    fs != IFSR_FS_TRANS_FAULT_PAGE) {
			vmm_panic("%s: unexpected prefetch abort\n"
				  "%s: pc = 0x%08x, ifsr = 0x%08x, ifar = 0x%08x\n", 
				  __func__, __func__, uregs->pc, ifsr, ifar);
		}
		rc = cpu_mmu_get_reserved_page((virtual_addr_t)ifar, &pg);
		if (rc) {
			vmm_panic("%s: cannot find reserved page\n"
				  "%s: ifsr = 0x%08x, ifar = 0x%08x\n", 
				  __func__, __func__, ifsr, ifar);
		}
		l1 = cpu_mmu_l1tbl_current();
		if (!l1) {
			vmm_panic("%s: cannot find l1 table\n"
				  "%s: ifsr = 0x%08x, ifar = 0x%08x\n",
				  __func__, __func__, ifsr, ifar);
		}
		rc = cpu_mmu_map_page(l1, &pg);
		if (rc) {
			vmm_panic("%s: cannot map page in l1 table\n"
				  "%s: ifsr = 0x%08x, ifar = 0x%08x\n",
				  __func__, __func__, ifsr, ifar);
		}
		return;
	}

	vcpu = vmm_scheduler_current_vcpu();

	if ((uregs->pc & ~(TTBL_L2TBL_SMALL_PAGE_SIZE - 1)) == 
	    arm_priv(vcpu)->cp15.ovect_base) {
		uregs->pc = (virtual_addr_t)arm_guest_priv(vcpu->guest)->ovect 
			    + (uregs->pc & (TTBL_L2TBL_SMALL_PAGE_SIZE - 1));
		return;
	}

	vmm_scheduler_irq_enter(uregs, TRUE);

	switch(fs) {
	case IFSR_FS_TTBL_WALK_SYNC_EXT_ABORT_1:
	case IFSR_FS_TTBL_WALK_SYNC_EXT_ABORT_2:
		break;
	case IFSR_FS_TTBL_WALK_SYNC_PARITY_ERROR_1:
	case IFSR_FS_TTBL_WALK_SYNC_PARITY_ERROR_2:
		break;
	case IFSR_FS_TRANS_FAULT_SECTION:
	case IFSR_FS_TRANS_FAULT_PAGE:
		rc = cpu_vcpu_cp15_trans_fault(vcpu, uregs, 
						ifar, fs, 0, 0, 0, FALSE);
		crash_dump = TRUE;
		break;
	case IFSR_FS_ACCESS_FAULT_SECTION:
	case IFSR_FS_ACCESS_FAULT_PAGE:
		rc = cpu_vcpu_cp15_access_fault(vcpu, uregs, 
						ifar, fs, 0, 0, 0);
		crash_dump = TRUE;
		break;
	case IFSR_FS_DOMAIN_FAULT_SECTION:
	case IFSR_FS_DOMAIN_FAULT_PAGE:
		rc = cpu_vcpu_cp15_domain_fault(vcpu, uregs, 
						ifar, fs, 0, 0, 0);
		crash_dump = TRUE;
		break;
	case IFSR_FS_PERM_FAULT_SECTION:
	case IFSR_FS_PERM_FAULT_PAGE:
		rc = cpu_vcpu_cp15_perm_fault(vcpu, uregs, 
						ifar, fs, 0, 0, 0);
		crash_dump = TRUE;
		break;
	case IFSR_FS_DEBUG_EVENT:
	case IFSR_FS_SYNC_EXT_ABORT:
	case IFSR_FS_IMP_VALID_LOCKDOWN:
	case IFSR_FS_IMP_VALID_COPROC_ABORT:
	case IFSR_FS_MEM_ACCESS_SYNC_PARITY_ERROR:
		break;
	default:
		break; 
	};

	if (rc && crash_dump) {
		vmm_printf("\n");
		vmm_printf("%s: error %d\n", __func__, rc);
		vmm_printf("%s: vcpu_id = %d, ifar = 0x%x, ifsr = 0x%x\n", 
				__func__, vcpu->id, ifar, ifsr);
		cpu_vcpu_dump_user_reg(vcpu, uregs);
	}

	vmm_scheduler_irq_exit(uregs);
}
Example #29
static int __init lwip_netstack_init(void)
{
	int rc;
	struct vmm_netswitch *nsw;
	struct vmm_devtree_node *node;
	const char *str;
	u8 ip[] = {169, 254, 1, 1};
	u8 mask[] = {255, 255, 255, 0};
	ip_addr_t __ip, __nm, __gw;

	/* Clear lwIP state */
	memset(&lns, 0, sizeof(lns));

	/* Get netstack device tree node if available */
	node = vmm_devtree_getnode(VMM_DEVTREE_PATH_SEPARATOR_STRING
				   VMM_DEVTREE_VMMINFO_NODE_NAME
				   VMM_DEVTREE_PATH_SEPARATOR_STRING
				   VMM_DEVTREE_VMMNET_NODE_NAME
				   VMM_DEVTREE_PATH_SEPARATOR_STRING
				   VMM_DEVTREE_NETSTACK_NODE_NAME);

	/* Retrieve preferred IP address */
	if (vmm_devtree_read_string(node, "ipaddr", &str) == VMM_OK) {
		/* Read ip address from netstack node */
		str2ipaddr(ip, str);
	}

	/* Retrieve preferred network mask */
	if (vmm_devtree_read_string(node, "netmask", &str) == VMM_OK) {
		/* Read network mask from netstack node */
		str2ipaddr(mask, str);
	}

	/* Retrieve preferred netswitch */
	if (vmm_devtree_read_string(node, "netswitch", &str) == VMM_OK) {
		/* Find netswitch with given name */
		nsw = vmm_netswitch_find(str);
	} else {
		/* Get default netswitch */
		nsw = vmm_netswitch_default();
	}
	if (!nsw) {
		vmm_panic("%s: No netswitch found\n", __func__);
	}

	/* Release netstack device tree node */
	vmm_devtree_dref_node(node);

	/* Allocate a netport */
	lns.port = vmm_netport_alloc("lwip-netport", VMM_NETPORT_DEF_QUEUE_SIZE);
	if (!lns.port) {
		vmm_printf("%s: vmm_netport_alloc() failed\n", __func__);
		rc = VMM_ENOMEM;
		goto fail;
	}

	/* Setup a netport */
	lns.port->mtu = 1500;
	lns.port->link_changed = lwip_set_link;
	lns.port->can_receive = lwip_can_receive;
	lns.port->switch2port_xfer = lwip_switch2port_xfer;
	lns.port->priv = &lns;

	/* Register a netport */
	rc = vmm_netport_register(lns.port);
	if (rc) {
		goto fail1;
	}

	/* Initialize lwIP + TCP/IP APIs */
	tcpip_init(NULL, NULL);

	/* Add netif */
	IP4_ADDR(&__ip, ip[0],ip[1],ip[2],ip[3]);
	IP4_ADDR(&__nm, mask[0],mask[1],mask[2],mask[3]);
	IP4_ADDR(&__gw, ip[0],ip[1],ip[2],ip[3]);
	netif_add(&lns.nif, &__ip, &__nm, &__gw, &lns,
		  lwip_netstack_netif_init, ethernet_input);

	/* Set default netif */
	netif_set_default(&lns.nif);

	/* Attach netport with netswitch 
	 * Note: This will cause netport link_change()
	 */
	rc = vmm_netswitch_port_add(nsw, lns.port);
	if (rc) {
		goto fail2;
	}

#if !defined(PING_USE_SOCKETS)
	/* Initialize RAW PCB for ping */
	ping_raw_init();
#endif

	return VMM_OK;

fail2:
	vmm_netport_unregister(lns.port);
fail1:
	vmm_netport_free(lns.port);
fail:
	return rc;
}
Example #30
static void vmm_scheduler_next(struct vmm_scheduler_ctrl *schedp,
			       struct vmm_timer_event *ev, 
			       arch_regs_t *regs)
{
	irq_flags_t cf, nf;
	u64 tstamp = vmm_timer_timestamp();
	struct vmm_vcpu *next = NULL; 
	struct vmm_vcpu *tcurrent = NULL, *current = schedp->current_vcpu;
	u32 current_state;

	/* First time scheduling */
	if (!current) {
		next = rq_dequeue(schedp);
		if (!next) {
			/* This should never happen !!! */
			vmm_panic("%s: no vcpu to switch to.\n", __func__);
		}

		vmm_write_lock_irqsave_lite(&next->sched_lock, nf);

		arch_vcpu_switch(NULL, next, regs);
		next->state_ready_nsecs += tstamp - next->state_tstamp;
		arch_atomic_write(&next->state, VMM_VCPU_STATE_RUNNING);
		next->state_tstamp = tstamp;
		schedp->current_vcpu = next;
		vmm_timer_event_start(ev, next->time_slice);

		vmm_write_unlock_irqrestore_lite(&next->sched_lock, nf);

		return;
	}

	/* Normal scheduling */
	vmm_write_lock_irqsave_lite(&current->sched_lock, cf);

	current_state = arch_atomic_read(&current->state);

	if (current_state & VMM_VCPU_STATE_SAVEABLE) {
		if (current_state == VMM_VCPU_STATE_RUNNING) {
			current->state_running_nsecs += 
				tstamp - current->state_tstamp;
			arch_atomic_write(&current->state, VMM_VCPU_STATE_READY);
			current->state_tstamp = tstamp;
			rq_enqueue(schedp, current);
		}
		tcurrent = current;
	}

	next = rq_dequeue(schedp);
	if (!next) {
		/* This should never happen !!! */
		vmm_panic("%s: no vcpu to switch to.\n", 
			  __func__);
	}

	if (next != current) {
		vmm_write_lock_irqsave_lite(&next->sched_lock, nf);
		arch_vcpu_switch(tcurrent, next, regs);
	}

	next->state_ready_nsecs += tstamp - next->state_tstamp;
	arch_atomic_write(&next->state, VMM_VCPU_STATE_RUNNING);
	next->state_tstamp = tstamp;
	schedp->current_vcpu = next;
	vmm_timer_event_start(ev, next->time_slice);

	if (next != current) {
		vmm_write_unlock_irqrestore_lite(&next->sched_lock, nf);
	}

	vmm_write_unlock_irqrestore_lite(&current->sched_lock, cf);
}