Example 1
void save_sprs(struct vcpu *v)
{
    v->arch.timebase = mftb();

    v->arch.sprg[0] = mfsprg0();
    v->arch.sprg[1] = mfsprg1();
    v->arch.sprg[2] = mfsprg2();
    v->arch.sprg[3] = mfsprg3();

    v->arch.dar = mfdar();
    v->arch.dsisr = mfdsisr();

    if (v->arch.pmu_enabled) {
        save_pmc_sprs(&(v->arch.perf_sprs));
        v->arch.perf_sprs_stored = 1;
    }

    save_cpu_sprs(v);
}
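
/* For context, a minimal sketch of the matching restore path. The
 * mtsprg0()..mtsprg3(), mtdar(), mtdsisr(), load_pmc_sprs() and
 * load_cpu_sprs() helpers are assumed counterparts of the mf*()/save
 * accessors above (hypothetical names); the timebase is deliberately
 * not written back here. */
void restore_sprs(struct vcpu *v)
{
    mtsprg0(v->arch.sprg[0]);
    mtsprg1(v->arch.sprg[1]);
    mtsprg2(v->arch.sprg[2]);
    mtsprg3(v->arch.sprg[3]);

    mtdar(v->arch.dar);
    mtdsisr(v->arch.dsisr);

    /* Only reload PMU counters that were actually saved. */
    if (v->arch.pmu_enabled && v->arch.perf_sprs_stored)
        load_pmc_sprs(&(v->arch.perf_sprs));

    load_cpu_sprs(v);
}
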
Example 2
sval
xh_kern_pgflt(struct cpu_thread* thr, uval type, struct vexc_save_regs *vr)
{
	struct vm_class *vmc = NULL;
	uval orig_addr;
	struct thread_control_area *tca = get_tca();

	if (type == 1) {
		/* Data fault: the DAR holds the faulting effective address. */
		orig_addr = mfdar();
	} else {
		/* Instruction fault: the faulting address is the saved SRR0. */
		orig_addr = tca->srr0;
	}

	if (thr->vstate.thread_mode & VSTATE_KERN_MODE) {
		vmc = find_kernel_vmc(thr, orig_addr);
	}

	if (!vmc) {
		vmc = find_app_vmc(thr, orig_addr);
	}

	if (!vmc) {
		hprintf("No vm_class for 0x%lx\n", orig_addr);
		breakpoint();

		return insert_debug_exception(thr, V_DEBUG_MEM_FAULT);
	}

	uval addr = ALIGN_DOWN(orig_addr, PGSIZE);
	union ptel pte = { .word = 0 };
	uval la = vmc_xlate(vmc, addr, &pte);
	uval ra;
	uval vsid;

	if (la == INVALID_LOGICAL_ADDRESS) {
		/* If logical address is invalid, and pte is non-zero, then
		 * pte contains physical address
		 */
		if (pte.word == 0) {
			goto reflect;
		}

		ra = pte.bits.rpn << LOG_PGSIZE;
	} else {
		ra = logical_to_physical_address(thr->cpu->os, la, PGSIZE);
	}

	vsid = vmc_class_vsid(thr, vmc, addr);

	pte.bits.rpn = ra >> LOG_PGSIZE;

	sval ret = insert_ea_map(thr, vsid, addr, pte);
	if (ret == H_Success) {
		return vr->reg_gprs[3];
	}

reflect:
	thr->vregs->v_dar = orig_addr;
	thr->vregs->v_dsisr = mfdsisr();

	assert(thr->vregs->exception_vectors[EXC_V_PGFLT],
	       "no pgflt vector defined\n");

	return insert_exception(thr, EXC_V_PGFLT);
}
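
/* Standalone sketch of the address arithmetic used in xh_kern_pgflt(),
 * assuming 4 KiB pages (LOG_PGSIZE == 12); ALIGN_DOWN and the constants are
 * redefined locally (DEMO_ prefix) so the example stands on its own. */
#include <assert.h>
#include <stdint.h>

#define DEMO_LOG_PGSIZE 12
#define DEMO_PGSIZE     ((uint64_t)1 << DEMO_LOG_PGSIZE)
#define DEMO_ALIGN_DOWN(a, sz) ((a) & ~((uint64_t)(sz) - 1))

static void pgflt_arith_demo(void)
{
	uint64_t orig_addr = 0xc0012abcUL;             /* example faulting EA */
	uint64_t addr = DEMO_ALIGN_DOWN(orig_addr, DEMO_PGSIZE);
	uint64_t ra   = 0x12345000UL;                  /* example real address */
	uint64_t rpn  = ra >> DEMO_LOG_PGSIZE;         /* value stored in pte.bits.rpn */

	assert(addr == 0xc0012000UL);
	assert((rpn << DEMO_LOG_PGSIZE) == ra);        /* rpn regenerates the page base */
}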

sval
xh_kern_slb(struct cpu_thread* thread, uval type, struct vexc_save_regs *vr)
{
	struct vm_class *vmc = NULL;
	struct thread_control_area *tca = get_tca();
	uval addr;

	if (type == 1) {
		addr = mfdar();
	} else {
		addr = tca->srr0;
	}

	uval seg_base = ALIGN_DOWN(addr, SEGMENT_SIZE);
	uval lp = LOG_PGSIZE;  /* FIXME: get large page size */
	uval l = 1;
	uval spot;

	if (thread->vstate.thread_mode & VSTATE_KERN_MODE) {
		vmc = find_kernel_vmc(thread, addr);
	}

	if (!vmc) {
		vmc = find_app_vmc(thread, addr);
	}

	if (!vmc) {
		hprintf("No vm_class for 0x%lx\n", addr);
		return insert_debug_exception(thread, V_DEBUG_MEM_FAULT);
	}

	uval vsid = vmc_class_vsid(thread, vmc, addr);

#ifdef FORCE_4K_PAGES
	lp = 12;
	l = 0;
	spot = slb_insert(seg_base, 0, 0, 1, vsid, thread->slb_entries);
#else
	spot = slb_insert(seg_base, 1, SELECT_LG, 1, vsid, thread->slb_entries);
#endif

	return vr->reg_gprs[3];
}
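
/* Sketch of the segment arithmetic behind xh_kern_slb(), assuming classic
 * 256 MiB segments (SEGMENT_SIZE == 1 << 28); everything is defined locally
 * and the DEMO_ names are illustrative only. */
#include <assert.h>
#include <stdint.h>

#define DEMO_LOG_SEGMENT_SIZE 28
#define DEMO_SEGMENT_SIZE     ((uint64_t)1 << DEMO_LOG_SEGMENT_SIZE)

static void slb_arith_demo(void)
{
	uint64_t addr     = 0xd2345678UL;                      /* example faulting EA */
	uint64_t seg_base = addr & ~(DEMO_SEGMENT_SIZE - 1);   /* ALIGN_DOWN(addr, SEGMENT_SIZE) */
	uint64_t esid     = addr >> DEMO_LOG_SEGMENT_SIZE;     /* effective segment id */

	assert(seg_base == 0xd0000000UL);
	assert(esid == 0xdUL);
	assert((esid << DEMO_LOG_SEGMENT_SIZE) == seg_base);
}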

uval
xh_syscall(uval a1, uval a2, uval a3, uval a4, uval a5, uval a6,
	   uval a7, uval a8, uval a9, uval a10)
{
	struct thread_control_area* tca = (struct thread_control_area*)mfr13();
	struct cpu_thread* thread = tca->active_thread;
	hcall_fn_t hcall_fn;
	const hcall_vec_t* vec = (const hcall_vec_t*)hca.hcall_vector;
	thread->return_args = tca->save_area;

	a1 >>= 2;		/* hcall numbers are multiples of 4 */

	if (a1 >= hca.hcall_vector_len &&
	    a1 - 0x1800 < hca.hcall_6000_vector_len) {
		/* 0x6000-range hcalls live in a second table whose first
		 * entry is index 0x1800 (0x6000 >> 2). */
		vec = (const hcall_vec_t*)hca.hcall_6000_vector;
		a1 -= 0x1800;
	}

	hcall_fn = *(const hcall_fn_t*)&vec[a1];
	return hcall_fn(thread, a2, a3, a4, a5, a6, a7, a8, a9, a10);
}
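
/* Sketch of the hcall-number-to-index mapping used in xh_syscall(): hcall
 * numbers are multiples of 4, so ">> 2" turns them into table indices, and
 * the 0x6000-range calls start at index 0x1800 (0x6000 >> 2) of a second
 * table. The table lengths below are made-up illustrative values, not the
 * real hca fields. */
#include <assert.h>
#include <stdint.h>

static void hcall_index_demo(void)
{
	const uint64_t primary_len   = 0x120;   /* stands in for hca.hcall_vector_len */
	const uint64_t secondary_len = 0x40;    /* stands in for hca.hcall_6000_vector_len */

	uint64_t idx = 0x6010 >> 2;             /* hcall 0x6010 */
	assert(idx == 0x1804);
	assert(idx >= primary_len);             /* misses the primary table */
	assert(idx - 0x1800 < secondary_len);   /* index 4 of the 0x6000 table */

	idx = 0x0004 >> 2;                      /* hcall 0x0004 -> primary index 1 */
	assert(idx < primary_len);
}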

extern void insert_dec_exception(void);
extern void insert_ext_exception(void);

inline void
set_v_msr(struct cpu_thread* thr, uval val)
{
	struct thread_control_area *tca = get_tca();

	/* A toggle of MSR_PR means the guest is moving between kernel and
	 * user mode, so switch the vm_class mappings to match. */
	if ((val ^ thr->vregs->v_msr) & MSR_PR) {
		if (val & MSR_PR) {
			vmc_exit_kernel(thr);
			thr->vstate.thread_mode &= ~VSTATE_KERN_MODE;
		} else {
			vmc_enter_kernel(thr);
			thr->vstate.thread_mode |= VSTATE_KERN_MODE;
		}
		tca->vstate = thr->vstate.thread_mode;
	}

	thr->vregs->v_msr = (val & ~(MSR_HV|(MSR_SF>>2))) | MSR_AM;

	/* Nothing more to do unless the guest is (re)enabling interrupts. */
	if (!(val & MSR_EE)) {
		return;
	}
	assert(get_tca()->restore_fn == NULL,
	       "Exception delivery already pending.\n");

	/* A pending virtual external or decrementer interrupt is delivered
	 * on the way back into the guest via the restore hook. */
	if (thr->vstate.thread_mode & VSTATE_PENDING_EXT) {
		get_tca()->restore_fn = insert_ext_exception;
	} else if (thr->vstate.thread_mode & VSTATE_PENDING_DEC) {
		get_tca()->restore_fn = insert_dec_exception;
	}
}

static inline void
mtgpr(struct cpu_thread* thr, uval gpr, uval val)
{
	switch (gpr) {
	case 14 ... 31:
		/* GPRs 14-31 (non-volatile in the PowerPC ABI) are tracked
		 * in the thread structure. */
		thr->reg_gprs[gpr] = val;
		break;
	case 0 ... 13:
		/* GPRs 0-13 (volatile) live in the per-exception save area. */
		get_tca()->save_area->reg_gprs[gpr] = val;
		break;
	}
}

static inline uval
mfgpr(struct cpu_thread* thr, uval gpr)
{
	uval val;

	switch (gpr) {
	case 14 ... 31:
		/* GPRs 14-31 (non-volatile) come from the thread structure. */
		val = thr->reg_gprs[gpr];
		break;
	case 0 ... 13:
		/* GPRs 0-13 (volatile) come from the per-exception save area. */
		val = get_tca()->save_area->reg_gprs[gpr];
		break;
	default:
		/* Out-of-range GPR number: return 0 instead of an
		 * uninitialized value. */
		val = 0;
		break;
	}
	return val;
}
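
/* Usage sketch: the accessors above can be combined to take a flat snapshot
 * of all 32 guest GPRs regardless of where each one is stored. snapshot_gprs()
 * is hypothetical; struct cpu_thread, uval and mfgpr() are the ones defined
 * above. */
static void snapshot_gprs(struct cpu_thread *thr, uval out[32])
{
	uval i;

	for (i = 0; i < 32; ++i) {
		/* mfgpr() routes 0-13 to the exception save area and
		 * 14-31 to the thread structure. */
		out[i] = mfgpr(thr, i);
	}
}
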
int cpu_machinecheck(struct cpu_user_regs *regs)
{
    int recover = 0;
    u32 dsisr = mfdsisr();

    if (regs->msr & MCK_SRR1_RI)
        recover = 1;

    printk("MACHINE CHECK: %s Recoverable\n", recover ? "IS": "NOT");
    if (mck_cpu_stats[mfpir()] != 0)
        printk("While in CI IO\n");

    show_backtrace_regs(regs);

    printk("SRR1: 0x%016lx\n", regs->msr);
    if (regs->msr & MCK_SRR1_INSN_FETCH_UNIT)
        printk("42: Exception caused by Instruction Fetch Unit (IFU)\n"
               "    detection of a hardware uncorrectable error (UE).\n");

    if (regs->msr & MCK_SRR1_LOAD_STORE)
        printk("43: Exception caused by load/store detection of error\n"
               "    (see DSISR)\n");

    switch (regs->msr & MCK_SRR1_CAUSE_MASK) {
    case 0:
        printk("0b00: Likely caused by an asynchronous machine check,\n"
               "      see SCOM Asynchronous Machine Check Register\n");
        cpu_scom_AMCR();
        break;
    case MCK_SRR1_CAUSE_SLB_PAR:
        printk("0b01: Exception caused by an SLB parity error detected\n"
               "      while translating an instruction fetch address.\n");
        break;
    case MCK_SRR1_CAUSE_TLB_PAR:
        printk("0b10: Exception caused by a TLB parity error detected\n"
               "      while translating an instruction fetch address.\n");
        break;
    case MCK_SRR1_CAUSE_UE:
        printk("0b11: Exception caused by a hardware uncorrectable\n"
               "      error (UE) detected while doing a reload of an\n"
               "      instruction-fetch TLB tablewalk.\n");
        break;
    }

    printk("\nDSISR: 0x%08x\n", dsisr);
    if (dsisr & MCK_DSISR_UE)
        printk("16: Exception caused by a UE deferred error\n"
               "    (DAR is undefined).\n");
    
    if (dsisr & MCK_DSISR_UE_TABLE_WALK)
        printk("17: Exception caused by a UE deferred error\n"
               "    during a tablewalk (D-side).\n"); 

    if (dsisr & MCK_DSISR_L1_DCACHE_PAR)
        printk("18: Exception was caused by a software recoverable\n"
               "    parity error in the L1 D-cache.\n");

    if (dsisr & MCK_DSISR_L1_DCACHE_TAG_PAR)
        printk("19: Exception was caused by a software recoverable\n"
               "    parity error in the L1 D-cache tag.\n");

    if (dsisr & MCK_DSISR_D_ERAT_PAR)
        printk("20: Exception was caused by a software recoverable parity\n"
               "    error in the D-ERAT.\n");
        
    if (dsisr & MCK_DSISR_TLB_PAR)
        printk("21: Exception was caused by a software recoverable parity\n"
               "    error in the TLB.\n");

    if (dsisr & MCK_DSISR_SLB_PAR) {
        printk("23: Exception was caused by an SLB parity error (may not be\n"
               "    recoverable). This condition could occur if the\n"
               "    effective segment ID (ESID) fields of two or more SLB\n"
               "    entries contain the same value.\n");
        dump_segments(0);
    }

    return 0; /* for now let's not recover */
}