Example 1
void do_bad_mode(arch_regs_t *regs, unsigned long mode)
{
	u32 ec, il, iss;
	u64 esr, far, elr;
	struct vmm_vcpu *vcpu;

	esr = mrs(esr_el2);
	far = mrs(far_el2);
	elr = mrs(elr_el2);

	ec = (esr & ESR_EC_MASK) >> ESR_EC_SHIFT;
	il = (esr & ESR_IL_MASK) >> ESR_IL_SHIFT;
	iss = (esr & ESR_ISS_MASK) >> ESR_ISS_SHIFT;

	vcpu = vmm_scheduler_current_vcpu();

	vmm_printf("%s: CPU%d VCPU=%s unexpected exception\n",
		   __func__, vmm_smp_processor_id(),
		   (vcpu) ? vcpu->name : "(NULL)");
	vmm_printf("%s: ESR=0x%016lx EC=0x%x IL=0x%x ISS=0x%x\n",
		   __func__, esr, ec, il, iss);
	vmm_printf("%s: ELR=0x%016lx FAR=0x%016lx HPFAR=0x%016lx\n",
		   __func__, elr, far, mrs(hpfar_el2));
	cpu_vcpu_dump_user_reg(regs);
	vmm_panic("%s: please reboot ...\n", __func__);
}
Example 2
int __cpuinit vmm_scheduler_init(void)
{
	int rc;
	char vcpu_name[VMM_FIELD_NAME_SIZE];
	u32 cpu = vmm_smp_processor_id();
	struct vmm_scheduler_ctrl *schedp = &this_cpu(sched);

	/* Reset the scheduler control structure */
	memset(schedp, 0, sizeof(struct vmm_scheduler_ctrl));

	/* Create ready queue (Per Host CPU) */
	schedp->rq = vmm_schedalgo_rq_create();
	if (!schedp->rq) {
		return VMM_EFAIL;
	}
	INIT_SPIN_LOCK(&schedp->rq_lock);

	/* Initialize current VCPU. (Per Host CPU) */
	schedp->current_vcpu = NULL;

	/* Initialize IRQ state (Per Host CPU) */
	schedp->irq_context = FALSE;
	schedp->irq_regs = NULL;

	/* Initialize yield on exit (Per Host CPU) */
	schedp->yield_on_irq_exit = FALSE;

	/* Create timer event and start it. (Per Host CPU) */
	INIT_TIMER_EVENT(&schedp->ev, &vmm_scheduler_timer_event, schedp);

	/* Create idle orphan vcpu with default time slice. (Per Host CPU) */
	vmm_snprintf(vcpu_name, sizeof(vcpu_name), "idle/%d", cpu);
	schedp->idle_vcpu = vmm_manager_vcpu_orphan_create(vcpu_name,
						(virtual_addr_t)&idle_orphan,
						IDLE_VCPU_STACK_SZ,
						IDLE_VCPU_PRIORITY, 
						IDLE_VCPU_TIMESLICE);
	if (!schedp->idle_vcpu) {
		return VMM_EFAIL;
	}

	/* The idle vcpu needs to stay on this cpu */
	if ((rc = vmm_manager_vcpu_set_affinity(schedp->idle_vcpu,
						vmm_cpumask_of(cpu)))) {
		return rc;
	}

	/* Kick idle orphan vcpu */
	if ((rc = vmm_manager_vcpu_kick(schedp->idle_vcpu))) {
		return rc;
	}

	/* Start scheduler timer event */
	vmm_timer_event_start(&schedp->ev, 0);

	/* Mark this CPU online */
	vmm_set_cpu_online(cpu, TRUE);

	return VMM_OK;
}
Example 3
static void __init gic_dist_init(struct gic_chip_data *gic)
{
	unsigned int i;
	u32 cpumask = 1 << vmm_smp_processor_id();
	virtual_addr_t base = gic->dist_base;

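	/* Replicate this CPU's target mask into all four byte lanes so the
	 * same 32-bit value can be written to each GIC_DIST_TARGET register
	 * (one byte per interrupt).
	 */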
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;

	/* Disable IRQ distribution */
	gic_write(0, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to be level triggered, active low.
	 */
	for (i = 32; i < gic->max_irqs; i += 16) {
		gic_write(0, base + GIC_DIST_CONFIG + i * 4 / 16);
	}

	/*
	 * Set all global interrupts to this CPU only.
	 */
	for (i = 32; i < gic->max_irqs; i += 4) {
		gic_write(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
	}

	/*
	 * Set priority on all interrupts.
	 */
	for (i = 0; i < gic->max_irqs; i += 4) {
		gic_write(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
	}

	/*
	 * Disable all interrupts.
	 */
	for (i = 0; i < gic->max_irqs; i += 32) {
		gic_write(0xffffffff,
			  base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
	}

	/*
	 * Setup the Host IRQ subsystem.
	 * Note: We handle all interrupts including SGIs and PPIs via C code.
	 * The Linux kernel handles peripheral interrupts via C code and
	 * SGI/PPI via assembly code.
	 */
	for (i = gic->irq_start; i < (gic->irq_start + gic->max_irqs); i++) {
		vmm_host_irq_set_chip(i, &gic_chip);
		vmm_host_irq_set_chip_data(i, gic);
		vmm_host_irq_set_handler(i, vmm_handle_fast_eoi);
		/* Mark SGIs and PPIs as per-CPU IRQs */
		if (i < 32) {
			vmm_host_irq_mark_per_cpu(i);
		}
	}

	/* Enable IRQ distribution */
	gic_write(1, base + GIC_DIST_CTRL);
}
Example 4
bool vmm_smp_is_bootcpu(void)
{
	if (smp_bootcpu_id == UINT_MAX) {
		return FALSE;
	}

	return (smp_bootcpu_id == vmm_smp_processor_id()) ? TRUE : FALSE;
}
Example 5
void vmm_smp_set_bootcpu(void)
{
	u32 cpu = vmm_smp_processor_id();

	if ((smp_bootcpu_id == UINT_MAX) &&
	    (cpu < CONFIG_CPU_COUNT)) {
		smp_bootcpu_id = cpu;
	}
}
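
A minimal usage sketch for the two helpers above (the wrapper function and call site are hypothetical; only vmm_smp_set_bootcpu() and vmm_smp_is_bootcpu() come from the examples):

static void boot_cpu_example(void)
{
	/* The first CPU through the boot path records itself as boot CPU */
	vmm_smp_set_bootcpu();

	/* Later code can branch on whether it runs on the boot CPU */
	if (vmm_smp_is_bootcpu()) {
		/* boot-CPU-only initialization would go here */
	}
}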
Example 6
int __cpuinit twd_clockchip_init(virtual_addr_t base, 
				virtual_addr_t ref_counter_addr,
				u32 ref_counter_freq,
				u32 ppi_hirq)
{
	int rc;
	u32 cpu = vmm_smp_processor_id();
	struct twd_clockchip *cc = &this_cpu(twd_cc);

	memset(cc, 0, sizeof(struct twd_clockchip));

	twd_caliberate_freq(base, ref_counter_addr, ref_counter_freq);

	vmm_sprintf(cc->name, "twd/%d", cpu);

	cc->base = base;
	cc->clkchip.name = cc->name;
	cc->clkchip.hirq = ppi_hirq;
	cc->clkchip.rating = 350;
	cc->clkchip.cpumask = vmm_cpumask_of(cpu);
	cc->clkchip.features = 
		VMM_CLOCKCHIP_FEAT_PERIODIC | VMM_CLOCKCHIP_FEAT_ONESHOT;
	cc->clkchip.shift = 20;
	cc->clkchip.mult = vmm_clockchip_hz2mult(twd_freq_hz, cc->clkchip.shift);
	cc->clkchip.min_delta_ns = vmm_clockchip_delta2ns(0xF, &cc->clkchip);
	cc->clkchip.max_delta_ns = 
			vmm_clockchip_delta2ns(0xFFFFFFFF, &cc->clkchip);
	cc->clkchip.set_mode = &twd_clockchip_set_mode;
	cc->clkchip.set_next_event = &twd_clockchip_set_next_event;
	cc->clkchip.expire = &twd_clockchip_expire;
	cc->clkchip.priv = cc;

	if (!cpu) {
		/* Register interrupt handler */
		if ((rc = vmm_host_irq_register(ppi_hirq, "twd",
						&twd_clockchip_irq_handler, 
						cc))) {
			return rc;
		}

		/* Mark interrupt as per-cpu */
		if ((rc = vmm_host_irq_mark_per_cpu(ppi_hirq))) {
			return rc;
		}
	}

	/* Explicitly enable local timer PPI in GIC 
	 * Note: Local timer requires PPI support hence requires GIC
	 */
	gic_enable_ppi(ppi_hirq);

	return vmm_clockchip_register(&cc->clkchip);
}
Example 7
void do_error(struct vmm_vcpu *vcpu, arch_regs_t *regs,
	      unsigned long scause, const char *msg, int err, bool panic)
{
	u32 cpu = vmm_smp_processor_id();

	vmm_printf("%s: CPU%d: VCPU=%s %s (error %d)\n",
		   __func__, cpu, (vcpu) ? vcpu->name : "(NULL)", msg, err);
	cpu_vcpu_dump_general_regs(NULL, regs);
	cpu_vcpu_dump_exception_regs(NULL, scause, csr_read(CSR_STVAL));
	if (panic) {
		vmm_panic("%s: please reboot ...\n", __func__);
	}
}
Example 8
int __cpuinit arch_clockchip_init(void)
{
	int rc;
	struct vmm_devtree_node *node;
	u32 val, cpu = vmm_smp_processor_id();

	if (!cpu) {
		/* Map timer0 registers */
		node = vmm_devtree_getnode(VMM_DEVTREE_PATH_SEPARATOR_STRING
					   "mct");
		if (!node) {
			goto skip_mct_timer_init;
		}

		if (!mct_timer_base) {
			rc = vmm_devtree_regmap(node, &mct_timer_base, 0);
			if (rc) {
				return rc;
			}
		}

		rc = vmm_devtree_clock_frequency(node, &mct_clk_rate);
		if (rc) {
			return rc;
		}

		/* Get MCT irq */
		rc = vmm_devtree_irq_get(node, &val, 0);
		if (rc) {
			return rc;
		}

		/* Initialize MCT as clockchip */
		rc = exynos4_clockchip_init(mct_timer_base, val, node->name,
					    300, mct_clk_rate, 0);
		if (rc) {
			return rc;
		}

	}
 skip_mct_timer_init:

#if CONFIG_SAMSUNG_MCT_LOCAL_TIMERS
	if (mct_timer_base) {
		exynos4_local_timer_init(mct_timer_base, 0, "mct_tick", 450,
					 mct_clk_rate);
	}
#endif

	return VMM_OK;
}
Example 9
void do_data_abort(arch_regs_t *regs)
{
	struct vmm_vcpu *vcpu = vmm_scheduler_current_vcpu();

	vmm_printf("%s: CPU%d unexpected exception\n",
		   __func__, vmm_smp_processor_id());
	vmm_printf("%s: Current VCPU=%s HSR=0x%08x\n",
		   __func__, (vcpu) ? vcpu->name : "(NULL)", 
		   read_hsr());
	vmm_printf("%s: HPFAR=0x%08x HIFAR=0x%08x HDFAR=0x%08x\n",
		   __func__, read_hpfar(), read_hifar(), read_hdfar());

	cpu_vcpu_dump_user_reg(regs);

	vmm_panic("%s: please reboot ...\n", __func__);
}
Example 10
/*
 * Set the executing CPUs power mode as defined.  This will be in
 * preparation for it executing a WFI instruction.
 *
 * This function must be called with preemption disabled, and as it
 * has the side effect of disabling coherency, caches must have been
 * flushed.  Interrupts must also have been disabled.
 */
int scu_power_mode(void *scu_base, u32 mode)
{
	u32 val, cpu;

	cpu = vmm_smp_processor_id();

	if (mode > SCU_PM_POWEROFF || mode == SCU_PM_EINVAL || cpu > 3) {
		return VMM_EFAIL;
	}

	val = vmm_readb(scu_base + SCU_CPU_STATUS + cpu) & ~0x03;
	val |= mode;
	vmm_writeb(val, scu_base + SCU_CPU_STATUS + cpu);

	return VMM_OK;
}
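
A hedged caller sketch for the contract stated in the header comment (preemption and interrupts already disabled, caches flushed); the wrapper function and the inline WFI are illustrative, only scu_power_mode(), SCU_PM_POWEROFF and VMM_OK come from the example above:

static void scu_powerdown_example(void *scu_base)
{
	/* Ask the SCU to power off this CPU once it idles */
	if (scu_power_mode(scu_base, SCU_PM_POWEROFF) != VMM_OK) {
		return;
	}

	/* Enter WFI; the SCU can now remove power from this CPU */
	asm volatile ("wfi");
}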
Example 11
void __init cpu_init(void)
{
#if defined(CONFIG_SMP)
	u32 cpu = vmm_smp_processor_id();

	if (!cpu) { /* Primary CPU */
		vmm_init();
	} else { /* Secondary CPUs */
		vmm_init_secondary();
	}
#else
	/* Initialize VMM (APIs only available after this) */
	vmm_init();
#endif

	/* We will never come back here. */
	vmm_hang();
}
Example 12
void __init cpu_init(void)
{
#if defined(CONFIG_SMP)
	u32 cpu = vmm_smp_processor_id();

#if defined(ARCH_HAS_DEFTERM_EARLY_PRINT)
	arch_defterm_early_putc('0' + cpu);
#endif

	if (!cpu) { /* Primary CPU */
		vmm_init();
	} else { /* Secondary CPUs */
		vmm_init_secondary();
	}
#else
	/* Initialize VMM (APIs only available after this) */
	vmm_init();
#endif

	/* We will never come back here. */
	vmm_hang();
}
Example 13
void do_soft_irq(arch_regs_t *regs)
{
	struct vmm_vcpu *vcpu;

	if ((regs->cpsr & CPSR_MODE_MASK) == CPSR_MODE_HYPERVISOR) {
		vmm_scheduler_preempt_orphan(regs);
		return;
	} else {
		vcpu = vmm_scheduler_current_vcpu();

		vmm_printf("%s: CPU%d unexpected exception\n",
			   __func__, vmm_smp_processor_id());
		vmm_printf("%s: Current VCPU=%s HSR=0x%08x\n",
			   __func__, (vcpu) ? vcpu->name : "(NULL)", 
			   read_hsr());
		vmm_printf("%s: HPFAR=0x%08x HIFAR=0x%08x HDFAR=0x%08x\n",
			   __func__, read_hpfar(), read_hifar(), read_hdfar());

		cpu_vcpu_dump_user_reg(regs);

		vmm_panic("%s: please reboot ...\n", __func__);
	}
}
Example 14
int __cpuinit twd_clockchip_init(virtual_addr_t ref_counter_addr,
                                 u32 ref_counter_freq)
{
    int rc;
    u32 cpu = vmm_smp_processor_id();
    struct vmm_devtree_node *node;
    struct twd_clockchip *cc = &this_cpu(twd_cc);

    node = vmm_devtree_find_matching(NULL, twd_match);
    if (!node) {
        return VMM_ENODEV;
    }

    if (!twd_base) {
        rc = vmm_devtree_regmap(node, &twd_base, 0);
        if (rc) {
            return rc;
        }
    }

    if (!twd_ppi_irq) {
        rc = vmm_devtree_irq_get(node, &twd_ppi_irq, 0);
        if (rc) {
            return rc;
        }
    }

    twd_caliberate_freq(twd_base, ref_counter_addr, ref_counter_freq);

    memset(cc, 0, sizeof(struct twd_clockchip));

    vmm_sprintf(cc->name, "twd/%d", cpu);

    cc->clkchip.name = cc->name;
    cc->clkchip.hirq = twd_ppi_irq;
    cc->clkchip.rating = 350;
    cc->clkchip.cpumask = vmm_cpumask_of(cpu);
    cc->clkchip.features =
        VMM_CLOCKCHIP_FEAT_PERIODIC | VMM_CLOCKCHIP_FEAT_ONESHOT;
    vmm_clocks_calc_mult_shift(&cc->clkchip.mult, &cc->clkchip.shift,
                               VMM_NSEC_PER_SEC, twd_freq_hz, 10);
    cc->clkchip.min_delta_ns = vmm_clockchip_delta2ns(0xF, &cc->clkchip);
    cc->clkchip.max_delta_ns =
        vmm_clockchip_delta2ns(0xFFFFFFFF, &cc->clkchip);
    cc->clkchip.set_mode = &twd_clockchip_set_mode;
    cc->clkchip.set_next_event = &twd_clockchip_set_next_event;
    cc->clkchip.priv = cc;

    if (!cpu) {
        /* Register interrupt handler */
        if ((rc = vmm_host_irq_register(twd_ppi_irq, "twd",
                                        &twd_clockchip_irq_handler,
                                        cc))) {
            return rc;
        }

        /* Mark interrupt as per-cpu */
        if ((rc = vmm_host_irq_mark_per_cpu(twd_ppi_irq))) {
            return rc;
        }
    }

    /* Explicitly enable local timer PPI in GIC
     * Note: Local timer requires PPI support hence requires GIC
     */
    gic_enable_ppi(twd_ppi_irq);

    return vmm_clockchip_register(&cc->clkchip);
}
Example 15
int __cpuinit arch_cpu_irq_setup(void)
{
	static const struct cpu_page zero_filled_cpu_page = { 0 };

	int rc;
	extern u32 _start_vect[];
	u32 *vectors, *vectors_data;
	u32 vec, cpu = vmm_smp_processor_id();
	struct cpu_page vec_page;

#if defined(CONFIG_ARM32_HIGHVEC)
	/* Enable high vectors in SCTLR */
	write_sctlr(read_sctlr() | SCTLR_V_MASK);
	vectors = (u32 *) CPU_IRQ_HIGHVEC_BASE;
#else
#if defined(CONFIG_ARMV7A_SECUREX)
	write_vbar(CPU_IRQ_LOWVEC_BASE);
#endif
	vectors = (u32 *) CPU_IRQ_LOWVEC_BASE;
#endif
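	/* The literal/data words used by the vectors are laid out right
	 * after the CPU_IRQ_NR vector instructions.
	 */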
	vectors_data = vectors + CPU_IRQ_NR;

	/* For secondary CPUs nothing else to be done. */
	if (cpu) {
		return VMM_OK;
	}

	/* If vectors are at correct location then do nothing */
	if ((u32) _start_vect == (u32) vectors) {
		return VMM_OK;
	}

	/* If vectors are not mapped in virtual memory then map them. */
	vec_page = zero_filled_cpu_page;
	rc = cpu_mmu_get_reserved_page((virtual_addr_t)vectors, &vec_page);
	if (rc) {
		rc = vmm_host_ram_alloc(&vec_page.pa, 
					TTBL_L2TBL_SMALL_PAGE_SIZE, 
					TRUE);
		if (rc) {
			return rc;
		}

		vec_page.va = (virtual_addr_t)vectors;
		vec_page.sz = TTBL_L2TBL_SMALL_PAGE_SIZE;
		vec_page.dom = TTBL_L1TBL_TTE_DOM_RESERVED;
		vec_page.ap = TTBL_AP_SRW_U;

		if ((rc = cpu_mmu_map_reserved_page(&vec_page))) {
			return rc;
		}
	}

	/*
	 * Loop through the vectors we're taking over, and copy the
	 * vector's insn and data word.
	 */
	for (vec = 0; vec < CPU_IRQ_NR; vec++) {
		vectors[vec] = _start_vect[vec];
		vectors_data[vec] = _start_vect[vec + CPU_IRQ_NR];
	}

	return VMM_OK;
}
Example 16
void arch_vcpu_switch(struct vmm_vcpu *tvcpu, 
		      struct vmm_vcpu *vcpu, 
		      arch_regs_t *regs)
{
	u32 ite;
	irq_flags_t flags;

	/* Save user registers & banked registers */
	if (tvcpu) {
		arm_regs(tvcpu)->pc = regs->pc;
		arm_regs(tvcpu)->lr = regs->lr;
		arm_regs(tvcpu)->sp = regs->sp;
		for (ite = 0; ite < CPU_GPR_COUNT; ite++) {
			arm_regs(tvcpu)->gpr[ite] = regs->gpr[ite];
		}
		arm_regs(tvcpu)->pstate = regs->pstate;
		if (tvcpu->is_normal) {
			/* Update last host CPU */
			arm_priv(tvcpu)->last_hcpu = vmm_smp_processor_id();
			/* Save VGIC context */
			arm_vgic_save(tvcpu);
			/* Save sysregs context */
			cpu_vcpu_sysregs_save(tvcpu);
			/* Save VFP and SIMD context */
			cpu_vcpu_vfp_save(tvcpu);
			/* Save generic timer */
			if (arm_feature(tvcpu, ARM_FEATURE_GENERIC_TIMER)) {
				generic_timer_vcpu_context_save(tvcpu,
						arm_gentimer_context(tvcpu));
			}
		}
	}
	/* Restore user registers & special registers */
	regs->pc = arm_regs(vcpu)->pc;
	regs->lr = arm_regs(vcpu)->lr;
	regs->sp = arm_regs(vcpu)->sp;
	for (ite = 0; ite < CPU_GPR_COUNT; ite++) {
		regs->gpr[ite] = arm_regs(vcpu)->gpr[ite];
	}
	regs->pstate = arm_regs(vcpu)->pstate;
	if (vcpu->is_normal) {
		/* Restore hypervisor context */
		vmm_spin_lock_irqsave(&arm_priv(vcpu)->hcr_lock, flags);
		msr(hcr_el2, arm_priv(vcpu)->hcr);
		vmm_spin_unlock_irqrestore(&arm_priv(vcpu)->hcr_lock, flags);
		msr(cptr_el2, arm_priv(vcpu)->cptr);
		msr(hstr_el2, arm_priv(vcpu)->hstr);
		/* Restore Stage2 MMU context */
		mmu_lpae_stage2_chttbl(vcpu->guest->id, 
			       arm_guest_priv(vcpu->guest)->ttbl);
		/* Restore generic timer */
		if (arm_feature(vcpu, ARM_FEATURE_GENERIC_TIMER)) {
			generic_timer_vcpu_context_restore(vcpu,
						arm_gentimer_context(vcpu));
		}
		/* Restore VFP and SIMD context */
		cpu_vcpu_vfp_restore(vcpu);
		/* Restore sysregs context */
		cpu_vcpu_sysregs_restore(vcpu);
		/* Restore VGIC context */
		arm_vgic_restore(vcpu);
		/* Flush TLB if moved to new host CPU */
		if (arm_priv(vcpu)->last_hcpu != vmm_smp_processor_id()) {
			/* Invalidate all guest TLB entries because
			 * we might have stale guest TLB entries from
			 * our previous run on this host CPU
			 */
			inv_tlb_guest_allis();
			/* Ensure changes are visible */
			dsb();
			isb();
		}
	}
	/* Clear exclusive monitor */
	clrex();
}
Example 17
void do_sync(arch_regs_t *regs, unsigned long mode)
{
	int rc = VMM_OK;
	u32 ec, il, iss;
	u64 esr, far, elr;
	physical_addr_t fipa = 0;
	struct vmm_vcpu *vcpu;

	esr = mrs(esr_el2);
	far = mrs(far_el2);
	elr = mrs(elr_el2);

	ec = (esr & ESR_EC_MASK) >> ESR_EC_SHIFT;
	il = (esr & ESR_IL_MASK) >> ESR_IL_SHIFT;
	iss = (esr & ESR_ISS_MASK) >> ESR_ISS_SHIFT;

	vcpu = vmm_scheduler_current_vcpu();

	/* We don't expect any faults from hypervisor code itself,
	 * so any trap we get from hypervisor mode means something
	 * unexpected has occurred.
	 */
	if ((regs->pstate & PSR_EL_MASK) == PSR_EL_2) {
		if ((ec == EC_TRAP_HVC_A64) && (iss == 0)) {
			vmm_scheduler_preempt_orphan(regs);
			return;
		}
		vmm_printf("%s: CPU%d VCPU=%s unexpected exception\n",
			   __func__, vmm_smp_processor_id(),
			   (vcpu) ? vcpu->name : "(NULL)");
		vmm_printf("%s: ESR=0x%016lx EC=0x%x IL=0x%x ISS=0x%x\n",
			   __func__, esr, ec, il, iss);
		vmm_printf("%s: ELR=0x%016lx FAR=0x%016lx HPFAR=0x%016lx\n",
			   __func__, elr, far, mrs(hpfar_el2));
		cpu_vcpu_dump_user_reg(regs);
		vmm_panic("%s: please reboot ...\n", __func__);
	}

	vmm_scheduler_irq_enter(regs, TRUE);

	switch (ec) {
	case EC_UNKNOWN:
		/* We don't expect to get this trap, so error */
		rc = VMM_EFAIL;
		break;
	case EC_TRAP_WFI_WFE:
		/* WFI emulation */
		rc = cpu_vcpu_emulate_wfi_wfe(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MCR_MRC_CP15_A32:
		/* MCR/MRC CP15 emulation */
		rc = cpu_vcpu_emulate_mcr_mrc_cp15(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MCRR_MRRC_CP15_A32:
		/* MCRR/MRRC CP15 emulation */
		rc = cpu_vcpu_emulate_mcrr_mrrc_cp15(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MCR_MRC_CP14_A32:
		/* MCR/MRC CP14 emulation */
		rc = cpu_vcpu_emulate_mcr_mrc_cp14(vcpu, regs, il, iss);
		break;
	case EC_TRAP_LDC_STC_CP14_A32:
		/* LDC/STC CP14 emulation */
		rc = cpu_vcpu_emulate_ldc_stc_cp14(vcpu, regs, il, iss);
		break;
	case EC_SIMD_FPU:
		/* Advanced SIMD and FPU emulation */
		rc = cpu_vcpu_emulate_simd_fp_regs(vcpu, regs, il, iss);
		break;
	case EC_FPEXC_A32:
	case EC_FPEXC_A64:
		/* We don't expect any FP execution faults */
		rc = VMM_EFAIL;
		break;
	case EC_TRAP_MRC_VMRS_CP10_A32:
		/* MRC (or VMRS) to CP10 for MVFR0, MVFR1 or FPSID */
		rc = cpu_vcpu_emulate_vmrs(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MCRR_MRRC_CP14_A32:
		/* MRRC to CP14 emulation */
		rc = cpu_vcpu_emulate_mcrr_mrrc_cp14(vcpu, regs, il, iss);
		break;
	case EC_TRAP_SVC_A32:
	case EC_TRAP_SVC_A64:
		/* We don't expect to get these traps, so error */
		rc = VMM_EFAIL;
		break;
	case EC_TRAP_SMC_A32:
		/* SMC emulation for A32 guest */
		rc = cpu_vcpu_emulate_smc32(vcpu, regs, il, iss);
		break;
	case EC_TRAP_SMC_A64:
		/* SMC emulation for A64 guest */
		rc = cpu_vcpu_emulate_smc64(vcpu, regs, il, iss);
		break;
	case EC_TRAP_HVC_A32:
		/* HVC emulation for A32 guest */
		rc = cpu_vcpu_emulate_hvc32(vcpu, regs, il, iss);
		break;
	case EC_TRAP_HVC_A64:
		/* HVC emulation for A64 guest */
		rc = cpu_vcpu_emulate_hvc64(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MSR_MRS_SYSTEM:
		/* MSR/MRS/SystemRegs emulation */
		rc = cpu_vcpu_emulate_msr_mrs_system(vcpu, regs, il, iss);
		break;
	case EC_TRAP_LWREL_INST_ABORT:
		/* Stage2 instruction abort */
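		/* HPFAR_EL2 provides the page-aligned faulting IPA and
		 * FAR_EL2 supplies the offset within that page.
		 */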
		fipa = (mrs(hpfar_el2) & HPFAR_FIPA_MASK) >> HPFAR_FIPA_SHIFT;
		fipa = fipa << HPFAR_FIPA_PAGE_SHIFT;
		fipa = fipa | (mrs(far_el2) & HPFAR_FIPA_PAGE_MASK);
		rc = cpu_vcpu_inst_abort(vcpu, regs, il, iss, fipa);
		break;
	case EC_TRAP_LWREL_DATA_ABORT:
		/* Stage2 data abort */
		fipa = (mrs(hpfar_el2) & HPFAR_FIPA_MASK) >> HPFAR_FIPA_SHIFT;
		fipa = fipa << HPFAR_FIPA_PAGE_SHIFT;
		fipa = fipa | (mrs(far_el2) & HPFAR_FIPA_PAGE_MASK);
		rc = cpu_vcpu_data_abort(vcpu, regs, il, iss, fipa);
		break;
	case EC_CUREL_INST_ABORT:
	case EC_CUREL_DATA_ABORT:
	case EC_SERROR:
		/* We don't expect to get aborts from EL2, so error */
		rc = VMM_EFAIL;
		break;
	case EC_PC_UNALIGNED:
	case EC_SP_UNALIGNED:
		/* We don't expect to get alignment faults from EL2 */
		rc = VMM_EFAIL;
		break;
	default:
		/* Unhandled or unknown EC value so error */
		rc = VMM_EFAIL;
		break;
	};

	if (rc) {
		vmm_printf("%s: CPU%d VCPU=%s sync failed (error %d)\n", 
			   __func__, vmm_smp_processor_id(), 
			   (vcpu) ? vcpu->name : "(NULL)", rc);
		vmm_printf("%s: ESR=0x%016lx EC=0x%x IL=0x%x ISS=0x%x\n",
			   __func__, esr, ec, il, iss);
		vmm_printf("%s: ELR=0x%016lx FAR=0x%016lx HPFAR=0x%016lx\n",
			   __func__, elr, far, mrs(hpfar_el2));
		if (vmm_manager_vcpu_get_state(vcpu) != VMM_VCPU_STATE_HALTED) {
			cpu_vcpu_halt(vcpu, regs);
		}
	}

	vmm_scheduler_irq_exit(regs);
}
Example 18
int vmm_scheduler_state_change(struct vmm_vcpu *vcpu, u32 new_state)
{
	u64 tstamp;
	int rc = VMM_OK;
	irq_flags_t flags;
	bool preempt = FALSE;
	u32 chcpu = vmm_smp_processor_id(), vhcpu;
	struct vmm_scheduler_ctrl *schedp;
	u32 current_state;

	if (!vcpu) {
		return VMM_EFAIL;
	}

	vmm_write_lock_irqsave_lite(&vcpu->sched_lock, flags);

	vhcpu = vcpu->hcpu;
	schedp = &per_cpu(sched, vhcpu);

	current_state = arch_atomic_read(&vcpu->state);

	switch(new_state) {
	case VMM_VCPU_STATE_UNKNOWN:
		/* Existing VCPU being destroyed */
		rc = vmm_schedalgo_vcpu_cleanup(vcpu);
		break;
	case VMM_VCPU_STATE_RESET:
		if (current_state == VMM_VCPU_STATE_UNKNOWN) {
			/* New VCPU */
			rc = vmm_schedalgo_vcpu_setup(vcpu);
		} else if (current_state != VMM_VCPU_STATE_RESET) {
			/* Existing VCPU */
			/* Make sure VCPU is not in a ready queue */
			if ((schedp->current_vcpu != vcpu) &&
			    (current_state == VMM_VCPU_STATE_READY)) {
				if ((rc = rq_detach(schedp, vcpu))) {
					break;
				}
			}
			/* Make sure current VCPU is preempted */
			if ((schedp->current_vcpu == vcpu) &&
			    (current_state == VMM_VCPU_STATE_RUNNING)) {
				preempt = TRUE;
			}
			vcpu->reset_count++;
			if ((rc = arch_vcpu_init(vcpu))) {
				break;
			}
			if ((rc = vmm_vcpu_irq_init(vcpu))) {
				break;
			}
		} else {
			rc = VMM_EFAIL;
		}
		break;
	case VMM_VCPU_STATE_READY:
		if ((current_state == VMM_VCPU_STATE_RESET) ||
		    (current_state == VMM_VCPU_STATE_PAUSED)) {
			/* Enqueue VCPU to ready queue */
			rc = rq_enqueue(schedp, vcpu);
			if (!rc && (schedp->current_vcpu != vcpu)) {
				preempt = rq_prempt_needed(schedp);
			}
		} else {
			rc = VMM_EFAIL;
		}
		break;
	case VMM_VCPU_STATE_PAUSED:
	case VMM_VCPU_STATE_HALTED:
		if ((current_state == VMM_VCPU_STATE_READY) ||
		    (current_state == VMM_VCPU_STATE_RUNNING)) {
			/* Expire timer event if current VCPU 
			 * is paused or halted 
			 */
			if (schedp->current_vcpu == vcpu) {
				preempt = TRUE;
			} else if (current_state == VMM_VCPU_STATE_READY) {
				/* Make sure VCPU is not in a ready queue */
				rc = rq_detach(schedp, vcpu);
			}
		} else {
			rc = VMM_EFAIL;
		}
		break;
	}

	if (rc == VMM_OK) {
		tstamp = vmm_timer_timestamp();
		switch (current_state) {
		case VMM_VCPU_STATE_READY:
			vcpu->state_ready_nsecs += 
					tstamp - vcpu->state_tstamp;
			break;
		case VMM_VCPU_STATE_RUNNING:
			vcpu->state_running_nsecs += 
					tstamp - vcpu->state_tstamp;
			break;
		case VMM_VCPU_STATE_PAUSED:
			vcpu->state_paused_nsecs += 
					tstamp - vcpu->state_tstamp;
			break;
		case VMM_VCPU_STATE_HALTED:
			vcpu->state_halted_nsecs += 
					tstamp - vcpu->state_tstamp;
			break;
		default:
			break; 
		}
		if (new_state == VMM_VCPU_STATE_RESET) {
			vcpu->state_ready_nsecs = 0;
			vcpu->state_running_nsecs = 0;
			vcpu->state_paused_nsecs = 0;
			vcpu->state_halted_nsecs = 0;
			vcpu->reset_tstamp = tstamp;
		}
		arch_atomic_write(&vcpu->state, new_state);
		vcpu->state_tstamp = tstamp;
	}

	vmm_write_unlock_irqrestore_lite(&vcpu->sched_lock, flags);

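	/* If preemption is needed, force it on the VCPU's host CPU: locally
	 * via orphan preemption or yield-on-IRQ-exit, remotely via a
	 * reschedule IPI.
	 */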
	if (preempt && schedp->current_vcpu) {
		if (chcpu == vhcpu) {
			if (schedp->current_vcpu->is_normal) {
				schedp->yield_on_irq_exit = TRUE;
			} else if (schedp->irq_context) {
				vmm_scheduler_preempt_orphan(schedp->irq_regs);
			} else {
				arch_vcpu_preempt_orphan();
			}
		} else {
			vmm_smp_ipi_async_call(vmm_cpumask_of(vhcpu),
						scheduler_ipi_resched,
						NULL, NULL, NULL);
		}
	}

	return rc;
}
Example 19
static int __cpuinit twd_clockchip_init(struct vmm_devtree_node *node)
{
	int rc;
	u32 ref_cnt_freq;
	virtual_addr_t ref_cnt_addr;
	u32 cpu = vmm_smp_processor_id();
	struct twd_clockchip *cc = &this_cpu(twd_cc);

	if (!twd_base) {
		rc = vmm_devtree_regmap(node, &twd_base, 0);
		if (rc) {
			goto fail;
		}
	}

	if (!twd_ppi_irq) {
		rc = vmm_devtree_irq_get(node, &twd_ppi_irq, 0);
		if (rc) {
			goto fail_regunmap;
		}
	}

	if (!twd_freq_hz) {
		/* First try to find TWD clock */
		if (!twd_clk) {
			twd_clk = of_clk_get(node, 0);
		}
		if (!twd_clk) {
			twd_clk = clk_get_sys("smp_twd", NULL);
		}

		if (twd_clk) {
			/* Use TWD clock to find frequency */
			rc = clk_prepare_enable(twd_clk);
			if (rc) {
				clk_put(twd_clk);
				goto fail_regunmap;
			}
			twd_freq_hz = clk_get_rate(twd_clk);
		} else {
			/* No TWD clock found, hence calibrate */
			rc = vmm_devtree_regmap(node, &ref_cnt_addr, 1);
			if (rc) {
				goto fail_regunmap;
			}
			rc = vmm_devtree_read_u32(node, "ref-counter-freq",
						  &ref_cnt_freq);
			if (rc) {
				vmm_devtree_regunmap(node, ref_cnt_addr, 1);
				goto fail_regunmap;
			}
			twd_caliberate_freq(twd_base, 
					ref_cnt_addr, ref_cnt_freq);
			vmm_devtree_regunmap(node, ref_cnt_addr, 1);
		}
	}

	memset(cc, 0, sizeof(struct twd_clockchip));

	vmm_sprintf(cc->name, "twd/%d", cpu);

	cc->clkchip.name = cc->name;
	cc->clkchip.hirq = twd_ppi_irq;
	cc->clkchip.rating = 350;
	cc->clkchip.cpumask = vmm_cpumask_of(cpu);
	cc->clkchip.features = 
		VMM_CLOCKCHIP_FEAT_PERIODIC | VMM_CLOCKCHIP_FEAT_ONESHOT;
	vmm_clocks_calc_mult_shift(&cc->clkchip.mult, &cc->clkchip.shift, 
				   VMM_NSEC_PER_SEC, twd_freq_hz, 10);
	cc->clkchip.min_delta_ns = vmm_clockchip_delta2ns(0xF, &cc->clkchip);
	cc->clkchip.max_delta_ns = 
			vmm_clockchip_delta2ns(0xFFFFFFFF, &cc->clkchip);
	cc->clkchip.set_mode = &twd_clockchip_set_mode;
	cc->clkchip.set_next_event = &twd_clockchip_set_next_event;
	cc->clkchip.priv = cc;

	if (vmm_smp_is_bootcpu()) {
		/* Register interrupt handler */
		if ((rc = vmm_host_irq_register(twd_ppi_irq, "twd",
						&twd_clockchip_irq_handler, 
						cc))) {
			
			goto fail_regunmap;
		}

		/* Mark interrupt as per-cpu */
		if ((rc = vmm_host_irq_mark_per_cpu(twd_ppi_irq))) {
			goto fail_unreg_irq;
		}
	}

	/* Explicitly enable local timer PPI in GIC 
	 * Note: Local timer requires PPI support hence requires GIC
	 */
	gic_enable_ppi(twd_ppi_irq);

	rc = vmm_clockchip_register(&cc->clkchip);
	if (rc) {
		goto fail_unreg_irq;
	}

	return VMM_OK;

fail_unreg_irq:
	if (vmm_smp_is_bootcpu()) {
		vmm_host_irq_unregister(twd_ppi_irq, cc);
	}
fail_regunmap:
	vmm_devtree_regunmap(node, twd_base, 0);
fail:
	return rc;
}
Example 20
static int __cpuinit generic_timer_clockchip_init(struct vmm_devtree_node *node)
{
	int rc;
	u32 irq[4], num_irqs, val;
	struct vmm_clockchip *cc;

	/* Get and Check generic timer frequency */
	generic_timer_get_freq(node);
	if (generic_timer_hz == 0) {
		return VMM_EFAIL;
	}

	/* Get hypervisor timer irq number */
	irq[GENERIC_HYPERVISOR_TIMER] = vmm_devtree_irq_parse_map(node,
						GENERIC_HYPERVISOR_TIMER);
	if (!irq[GENERIC_HYPERVISOR_TIMER]) {
		return VMM_ENODEV;
	}

	/* Get physical timer irq number */
	irq[GENERIC_PHYSICAL_TIMER] = vmm_devtree_irq_parse_map(node,
						GENERIC_PHYSICAL_TIMER);
	if (!irq[GENERIC_PHYSICAL_TIMER]) {
		return VMM_ENODEV;
	}

	/* Get virtual timer irq number */
	irq[GENERIC_VIRTUAL_TIMER] = vmm_devtree_irq_parse_map(node,
						GENERIC_VIRTUAL_TIMER);
	if (!irq[GENERIC_VIRTUAL_TIMER]) {
		return VMM_ENODEV;
	}

	/* Number of generic timer irqs */
	num_irqs = vmm_devtree_irq_count(node);
	if (!num_irqs) {
		return VMM_EFAIL;
	}

	/* Ensure hypervisor timer is stopped */
	generic_timer_stop();

	/* Create generic hypervisor timer clockchip */
	cc = vmm_zalloc(sizeof(struct vmm_clockchip));
	if (!cc) {
		return VMM_EFAIL;
	}
	cc->name = "gen-hyp-timer";
	cc->hirq = irq[GENERIC_HYPERVISOR_TIMER];
	cc->rating = 400;
	cc->cpumask = vmm_cpumask_of(vmm_smp_processor_id());
	cc->features = VMM_CLOCKCHIP_FEAT_ONESHOT;
	vmm_clocks_calc_mult_shift(&cc->mult, &cc->shift,
				   VMM_NSEC_PER_SEC, generic_timer_hz, 10);
	cc->min_delta_ns = vmm_clockchip_delta2ns(0xF, cc);
	cc->max_delta_ns = vmm_clockchip_delta2ns(0x7FFFFFFF, cc);
	cc->set_mode = &generic_timer_set_mode;
	cc->set_next_event = &generic_timer_set_next_event;
	cc->priv = NULL;

	/* Register hypervisor timer clockchip */
	rc = vmm_clockchip_register(cc);
	if (rc) {
		goto fail_free_cc;
	}

	/* Register irq handler for hypervisor timer */
	rc = vmm_host_irq_register(irq[GENERIC_HYPERVISOR_TIMER],
				   "gen-hyp-timer",
				   &generic_hyp_timer_handler, cc);
	if (rc) {
		goto fail_unreg_cc;
	}

	if (num_irqs > 1) {
		/* Register irq handler for physical timer */
		rc = vmm_host_irq_register(irq[GENERIC_PHYSICAL_TIMER],
					   "gen-phys-timer",
					   &generic_phys_timer_handler,
					   NULL);
		if (rc) {
			goto fail_unreg_htimer;
		}
	}

	if (num_irqs > 2) {
		/* Register irq handler for virtual timer */
		rc = vmm_host_irq_register(irq[GENERIC_VIRTUAL_TIMER],
					   "gen-virt-timer",
					   &generic_virt_timer_handler,
					   NULL);
		if (rc) {
			goto fail_unreg_ptimer;
		}
	}

	if (num_irqs > 1) {
		val = generic_timer_reg_read(GENERIC_TIMER_REG_HCTL);
		val |= GENERIC_TIMER_HCTL_KERN_PCNT_EN;
		val |= GENERIC_TIMER_HCTL_KERN_PTMR_EN;
		generic_timer_reg_write(GENERIC_TIMER_REG_HCTL, val);
	}

	return VMM_OK;

fail_unreg_ptimer:
	if (num_irqs > 1) {
		vmm_host_irq_unregister(irq[GENERIC_PHYSICAL_TIMER],
					&generic_phys_timer_handler);
	}
fail_unreg_htimer:
	vmm_host_irq_unregister(irq[GENERIC_HYPERVISOR_TIMER],
					&generic_hyp_timer_handler);
fail_unreg_cc:
	vmm_clockchip_unregister(cc);
fail_free_cc:
	vmm_free(cc);
	return rc;
}
Example 21
int __cpuinit generic_timer_clockchip_init(void)
{
	int rc;
	u32 irq[3], num_irqs, val;
	struct vmm_clockchip *cc;
	struct vmm_devtree_node *node;

	/* Find generic timer device tree node */
	node = vmm_devtree_find_matching(NULL, generic_timer_match);
	if (!node) {
		return VMM_ENODEV;
	}

	/* Determine generic timer frequency */
	if (generic_timer_hz == 0) {
		rc =  vmm_devtree_clock_frequency(node, &generic_timer_hz);
		if (rc) {
			/* Use the pre-configured counter frequency
			 * in the absence of a DT clock-frequency property
			 */
			generic_timer_hz = 
				generic_timer_reg_read(GENERIC_TIMER_REG_FREQ);
		} else if (generic_timer_freq_writeable()) {
			/* Program the counter frequency as per the dts node */
			generic_timer_reg_write(GENERIC_TIMER_REG_FREQ, 
							generic_timer_hz);
		}
	}
	if (generic_timer_hz == 0) {
		return VMM_EFAIL;
	}

	/* Get hypervisor timer irq number */
	rc = vmm_devtree_irq_get(node, 
				 &irq[GENERIC_HYPERVISOR_TIMER], 
				 GENERIC_HYPERVISOR_TIMER);
	if (rc) {
		return rc;
	}

	/* Get physical timer irq number */
	rc = vmm_devtree_irq_get(node, 
				 &irq[GENERIC_PHYSICAL_TIMER], 
				 GENERIC_PHYSICAL_TIMER);
	if (rc) {
		return rc;
	}

	/* Get virtual timer irq number */
	rc = vmm_devtree_irq_get(node, 
				 &irq[GENERIC_VIRTUAL_TIMER], 
				 GENERIC_VIRTUAL_TIMER);
	if (rc) {
		return rc;
	}

	/* Number of generic timer irqs */
	num_irqs = vmm_devtree_irq_count(node);
	if (!num_irqs) {
		return VMM_EFAIL;
	}

	/* Ensure hypervisor timer is stopped */
	generic_timer_stop();

	/* Create generic hypervisor timer clockchip */
	cc = vmm_zalloc(sizeof(struct vmm_clockchip));
	if (!cc) {
		return VMM_EFAIL;
	}
	cc->name = "gen-hyp-timer";
	cc->hirq = irq[GENERIC_HYPERVISOR_TIMER];
	cc->rating = 400;
	cc->cpumask = vmm_cpumask_of(vmm_smp_processor_id());
	cc->features = VMM_CLOCKCHIP_FEAT_ONESHOT;
	vmm_clocks_calc_mult_shift(&cc->mult, &cc->shift, 
				   VMM_NSEC_PER_SEC, generic_timer_hz, 10);
	cc->min_delta_ns = vmm_clockchip_delta2ns(0xF, cc);
	cc->max_delta_ns = vmm_clockchip_delta2ns(0x7FFFFFFF, cc);
	cc->set_mode = &generic_timer_set_mode;
	cc->set_next_event = &generic_timer_set_next_event;
	cc->priv = NULL;

	/* Register hypervisor timer clockchip */
	rc = vmm_clockchip_register(cc);
	if (rc) {
		goto fail_free_cc;
	}

	if (!vmm_smp_processor_id()) {
		/* Register irq handler for hypervisor timer */
		rc = vmm_host_irq_register(irq[GENERIC_HYPERVISOR_TIMER],
					   "gen-hyp-timer", 
					   &generic_hyp_timer_handler, cc);
		if (rc) {
			goto fail_unreg_cc;
		}

		/* Mark hypervisor timer irq as per-CPU */
		if ((rc = vmm_host_irq_mark_per_cpu(cc->hirq))) {
			goto fail_unreg_htimer;
		}

		if (num_irqs > 1) {
			/* Register irq handler for physical timer */
			rc = vmm_host_irq_register(irq[GENERIC_PHYSICAL_TIMER],
						   "gen-phys-timer",
						   &generic_phys_timer_handler,
						   NULL);
			if (rc) {
				goto fail_unreg_htimer;
			}

			/* Mark physical timer irq as per-CPU */
			rc = vmm_host_irq_mark_per_cpu(
						irq[GENERIC_PHYSICAL_TIMER]);
			if (rc)	{
				goto fail_unreg_ptimer;
			}
		}

		if (num_irqs > 2) {
			/* Register irq handler for virtual timer */
			rc = vmm_host_irq_register(irq[GENERIC_VIRTUAL_TIMER],
						   "gen-virt-timer",
						   &generic_virt_timer_handler,
						   NULL);
			if (rc) {
				goto fail_unreg_ptimer;
			}

			/* Mark virtual timer irq as per-CPU */
			rc = vmm_host_irq_mark_per_cpu(
						irq[GENERIC_VIRTUAL_TIMER]);
			if (rc) {
				goto fail_unreg_vtimer;
			}
		}
	}

	if (num_irqs > 1) {
		val = generic_timer_reg_read(GENERIC_TIMER_REG_HCTL);
		val |= GENERIC_TIMER_HCTL_KERN_PCNT_EN;
		val |= GENERIC_TIMER_HCTL_KERN_PTMR_EN;
		generic_timer_reg_write(GENERIC_TIMER_REG_HCTL, val);
	}

	for (val = 0; val < num_irqs; val++) {
		gic_enable_ppi(irq[val]);
	}

	return VMM_OK;

fail_unreg_vtimer:
	if (!vmm_smp_processor_id() && num_irqs > 2) {
		vmm_host_irq_unregister(irq[GENERIC_VIRTUAL_TIMER],
					&generic_virt_timer_handler);
	}
fail_unreg_ptimer:
	if (!vmm_smp_processor_id() && num_irqs > 1) {
		vmm_host_irq_unregister(irq[GENERIC_PHYSICAL_TIMER],
					&generic_phys_timer_handler);
	}
fail_unreg_htimer:
	if (!vmm_smp_processor_id()) {
		vmm_host_irq_unregister(irq[GENERIC_HYPERVISOR_TIMER],
					&generic_hyp_timer_handler);
	}
fail_unreg_cc:
	vmm_clockchip_unregister(cc);
fail_free_cc:
	vmm_free(cc);
	return rc;
}
Example 22
void vmm_init(void)
{
	int ret;
#if defined(CONFIG_SMP)
	u32 c;
#endif
	u32 cpu = vmm_smp_processor_id();
	struct vmm_work sysinit;

	/* Mark this CPU possible & present */
	vmm_set_cpu_possible(cpu, TRUE);
	vmm_set_cpu_present(cpu, TRUE);

	/* Print version string */
	vmm_printf("\n");
	vmm_printf("%s v%d.%d.%d (%s %s)\n", VMM_NAME, 
		   VMM_VERSION_MAJOR, VMM_VERSION_MINOR, VMM_VERSION_RELEASE,
		   __DATE__, __TIME__);
	vmm_printf("\n");

	/* Initialize host virtual address space */
	vmm_printf("Initialize Host Address Space\n");
	ret = vmm_host_aspace_init();
	if (ret) {
		vmm_hang();
	}

	/* Initialize heap */
	vmm_printf("Initialize Heap Management\n");
	ret = vmm_heap_init();
	if (ret) {
		vmm_hang();
	}

	/* Initialize per-cpu area */
	vmm_printf("Initialize PerCPU Areas\n");
	ret = vmm_percpu_init();
	if (ret) {
		vmm_hang();
	}

	/* Initialize device tree */
	vmm_printf("Initialize Device Tree\n");
	ret = vmm_devtree_init();
	if (ret) {
		vmm_hang();
	}

	/* Initialize host interrupts */
	vmm_printf("Initialize Host IRQ\n");
	ret = vmm_host_irq_init();
	if (ret) {
		vmm_hang();
	}

	/* Initialize CPU early */
	vmm_printf("Initialize CPU Early\n");
	ret = arch_cpu_early_init();
	if (ret) {
		vmm_hang();
	}

	/* Initialize Board early */
	vmm_printf("Initialize Board Early\n");
	ret = arch_board_early_init();
	if (ret) {
		vmm_hang();
	}

	/* Initialize standard input/output */
	vmm_printf("Initialize Standard I/O\n");
	ret = vmm_stdio_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Initialize clocksource manager */
	vmm_printf("Initialize Clocksource Manager\n");
	ret = vmm_clocksource_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Initialize clockchip manager */
	vmm_printf("Initialize Clockchip Manager\n");
	ret = vmm_clockchip_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Initialize hypervisor timer */
	vmm_printf("Initialize Hypervisor Timer\n");
	ret = vmm_timer_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Initialize soft delay */
	vmm_printf("Initialize Soft Delay\n");
	ret = vmm_delay_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Initialize hypervisor manager */
	vmm_printf("Initialize Hypervisor Manager\n");
	ret = vmm_manager_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Initialize hypervisor scheduler */
	vmm_printf("Initialize Hypervisor Scheduler\n");
	ret = vmm_scheduler_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Initialize hypervisor threads */
	vmm_printf("Initialize Hypervisor Threads\n");
	ret = vmm_threads_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

#ifdef CONFIG_PROFILE
	/* Initialize hypervisor profiler */
	vmm_printf("Initialize Hypervisor Profiler\n");
	ret = vmm_profiler_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}
#endif

#if defined(CONFIG_SMP)
	/* Initialize inter-processor interrupts */
	vmm_printf("Initialize Inter Processor Interrupts\n")
	ret = vmm_smp_ipi_init();
	if (ret) {
		vmm_hang();
	}

	/* Initialize secondary CPUs */
	vmm_printf("Initialize Secondary CPUs\n");
	ret = arch_smp_init_cpus();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Prepare secondary CPUs */
	ret = arch_smp_prepare_cpus(vmm_num_possible_cpus());
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Start each present secondary CPUs */
	for_each_present_cpu(c) {
		if (c == cpu) {
			continue;
		}
		ret = arch_smp_start_cpu(c);
		if (ret) {
			vmm_printf("Failed to start CPU%d\n", ret);
		}
	}

	/* Initialize hypervisor load balancer */
	vmm_printf("Initialize Hypervisor Load Balancer\n");
	ret = vmm_loadbal_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}
#endif

	/* Initialize workqueue framework */
	vmm_printf("Initialize Workqueue Framework\n");
	ret = vmm_workqueue_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Initialize wallclock */
	vmm_printf("Initialize Wallclock Subsystem\n");
	ret = vmm_wallclock_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Schedule system initialization work */
	INIT_WORK(&sysinit, &system_init_work);
	vmm_workqueue_schedule_work(NULL, &sysinit);

	/* Start timer (Must be last step) */
	vmm_timer_start();

	/* Wait here till scheduler gets invoked by timer */
	vmm_hang();
}
Example 23
void do_hyp_trap(arch_regs_t *regs)
{
	int rc = VMM_OK;
	u32 hsr, ec, il, iss;
	virtual_addr_t far;
	physical_addr_t fipa = 0;
	struct vmm_vcpu *vcpu;

	hsr = read_hsr();
	ec = (hsr & HSR_EC_MASK) >> HSR_EC_SHIFT;
	il = (hsr & HSR_IL_MASK) >> HSR_IL_SHIFT;
	iss = (hsr & HSR_ISS_MASK) >> HSR_ISS_SHIFT;

	vcpu = vmm_scheduler_current_vcpu();

	/* We don't expect any faults from hypervisor code itself,
	 * so any trap we get from hypervisor mode means something
	 * unexpected has occurred.
	 */
	if ((regs->cpsr & CPSR_MODE_MASK) == CPSR_MODE_HYPERVISOR) {
		vmm_printf("%s: CPU%d unexpected exception\n", 
			   __func__, vmm_smp_processor_id());
		vmm_printf("%s: Current VCPU=%s HSR=0x%08x\n",
			   __func__, (vcpu) ? vcpu->name : "(NULL)", 
			   read_hsr());
		vmm_printf("%s: HPFAR=0x%08x HIFAR=0x%08x HDFAR=0x%08x\n",
			   __func__, read_hpfar(), read_hifar(), read_hdfar());
		cpu_vcpu_dump_user_reg(regs);
		vmm_panic("%s: please reboot ...\n", __func__);
	}

	vmm_scheduler_irq_enter(regs, TRUE);

	switch (ec) {
	case EC_UNKNOWN:
		/* We don't expect to get this trap, so error */
		rc = VMM_EFAIL;
		break;
	case EC_TRAP_WFI_WFE:
		/* WFI emulation */
		rc = cpu_vcpu_emulate_wfi_wfe(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MCR_MRC_CP15:
		/* MCR/MRC CP15 emulation */
		rc = cpu_vcpu_emulate_mcr_mrc_cp15(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MCRR_MRRC_CP15:
		/* MCRR/MRRC CP15 emulation */
		rc = cpu_vcpu_emulate_mcrr_mrrc_cp15(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MCR_MRC_CP14:
		/* MCR/MRC CP14 emulation */
		rc = cpu_vcpu_emulate_mcr_mrc_cp14(vcpu, regs, il, iss);
		break;
	case EC_TRAP_LDC_STC_CP14:
		/* LDC/STC CP14 emulation */
		rc = cpu_vcpu_emulate_ldc_stc_cp14(vcpu, regs, il, iss);
		break;
	case EC_TRAP_CP0_TO_CP13:
		/* CP0 to CP13 emulation */
		rc = cpu_vcpu_emulate_cp0_cp13(vcpu, regs, il, iss);
		break;
	case EC_TRAP_VMRS:
		/* MRC (or VMRS) to CP10 for MVFR0, MVFR1 or FPSID */
		rc = cpu_vcpu_emulate_vmrs(vcpu, regs, il, iss);
		break;
	case EC_TRAP_JAZELLE:
		/* Jazelle emulation */
		rc = cpu_vcpu_emulate_jazelle(vcpu, regs, il, iss);
		break;
	case EC_TRAP_BXJ:
		/* BXJ emulation */
		rc = cpu_vcpu_emulate_bxj(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MRRC_CP14:
		/* MRRC to CP14 emulation */
		rc = cpu_vcpu_emulate_mrrc_cp14(vcpu, regs, il, iss);
		break;
	case EC_TRAP_SVC:
		/* We don't expect to get this trap, so error */
		rc = VMM_EFAIL;
		break;
	case EC_TRAP_HVC:
		/* Hypercall or HVC emulation */
		rc = cpu_vcpu_emulate_hvc(vcpu, regs, il, iss);
		break;
	case EC_TRAP_SMC:
		/* System Monitor Call or SMC emulation */
		rc = cpu_vcpu_emulate_smc(vcpu, regs, il, iss);
		break;
	case EC_TRAP_STAGE2_INST_ABORT:
		/* Stage2 instruction abort */
		far  = read_hifar();
		fipa = (read_hpfar() & HPFAR_FIPA_MASK) >> HPFAR_FIPA_SHIFT;
		fipa = fipa << HPFAR_FIPA_PAGE_SHIFT;
		fipa = fipa | (far & HPFAR_FIPA_PAGE_MASK);
		rc = cpu_vcpu_inst_abort(vcpu, regs, il, iss, far, fipa);
		break;
	case EC_TRAP_STAGE1_INST_ABORT:
		/* We don't expect to get this trap, so error */
		rc = VMM_EFAIL;
		break;
	case EC_TRAP_STAGE2_DATA_ABORT:
		/* Stage2 data abort */
		far  = read_hdfar();
		fipa = (read_hpfar() & HPFAR_FIPA_MASK) >> HPFAR_FIPA_SHIFT;
		fipa = fipa << HPFAR_FIPA_PAGE_SHIFT;
		fipa = fipa | (far & HPFAR_FIPA_PAGE_MASK);
		rc = cpu_vcpu_data_abort(vcpu, regs, il, iss, far, fipa);
		break;
	case EC_TRAP_STAGE1_DATA_ABORT:
		/* We don't expect to get this trap, so error */
		rc = VMM_EFAIL;
		break;
	default:
		/* Unknown EC value so error */
		rc = VMM_EFAIL;
		break;
	};

	if (rc) {
		vmm_printf("\n%s: ec=0x%x, il=0x%x, iss=0x%x,"
			   " fipa=0x%x, error=%d\n", __func__,
			   ec, il, iss, fipa, rc);
		if (vmm_manager_vcpu_get_state(vcpu) != VMM_VCPU_STATE_HALTED) {
			cpu_vcpu_halt(vcpu, regs);
		}
	}

	vmm_scheduler_irq_exit(regs);
}