/*
 *	Routine:	cpu_machine_init
 *	Function:	Per-CPU late initialization: releases the restart-handler
 *			state if this is a secondary CPU, performs platform CPU init,
 *			restores the timebase after a hibernate resume, synchronizes
 *			the timebase with the master, and finally marks this CPU
 *			BootDone|SignalReady (waking any waiter).
 */
void cpu_machine_init(void)
{
	struct per_proc_info *proc_info;
	volatile struct per_proc_info *mproc_info;

	proc_info = getPerProc();
	/* Master CPU's per_proc; (proc_info != mproc_info) below means
	 * "we are a secondary CPU". */
	mproc_info = PerProcTable[master_cpu].ppe_vaddr;

	if (proc_info != mproc_info) {
		/* Secondary CPU has come up: clear the restart-handler busy state
		 * and wake anyone waiting on it. */
		simple_lock(&rht_lock);
		if (rht_state & RHT_WAIT)
			thread_wakeup(&rht_state);
		rht_state &= ~(RHT_BUSY|RHT_WAIT);
		simple_unlock(&rht_lock);
	}

	/* Platform-specific init; second argument is "first boot" (BootDone
	 * not yet set on this CPU). */
	PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));

	if (proc_info->hibernate) {
		uint32_t tbu, tbl;

		/* Read the 64-bit timebase coherently: retry if the upper half
		 * rolled over between the two reads. */
		do {
			tbu = mftbu();
			tbl = mftb();
		} while (mftbu() != tbu);

		proc_info->hibernate = 0;
		hibernate_machine_init();

		// hibernate_machine_init() could take minutes and we don't want timeouts
		// to fire as soon as scheduling starts. Reset timebase so it appears
		// no time has elapsed, as it would for regular sleep.
		/* Write order matters: zero TBL first so TBU can't roll over
		 * mid-update, then restore TBU, then TBL. */
		mttb(0);
		mttbu(tbu);
		mttb(tbl);
	}

	if (proc_info != mproc_info) {
		/* Spin until the master signals its timebase is ready, then
		 * sync ours to it. */
		while (!((mproc_info->cpu_flags) & SignalReady))
			continue;
		cpu_sync_timebase();
	}

	ml_init_interrupt();

	/* Publish BootDone|SignalReady; on a secondary CPU do it under
	 * SignalReadyLock so the wakeup below can't race a waiter. */
	if (proc_info != mproc_info)
		simple_lock(&SignalReadyLock);
	proc_info->cpu_flags |= BootDone|SignalReady;
	if (proc_info != mproc_info) {
		if (proc_info->ppXFlags & SignalReadyWait) {
			/* Clear the wait flag atomically and wake the thread
			 * sleeping on our cpu_flags. */
			(void)hw_atomic_and(&proc_info->ppXFlags,
					    ~SignalReadyWait);
			thread_wakeup(&proc_info->cpu_flags);
		}
		simple_unlock(&SignalReadyLock);
		pmsPark();			/* Timers should be cool now, park the power management stepper */
	}
}
/*
 * Task dispatcher. Basically it may be the same one no matter what
 * scheduling algorithm is used.
 *
 * Runs with the caller's register frame in 'regs'; picks the next ready
 * task (idling with interrupts enabled while none is ready), installs it
 * as SysBase->ThisTask, and switches 'regs' to the new task's saved
 * register frame. Interrupts (MSR_EE) are disabled while the scheduler
 * state is manipulated.
 */
void core_Dispatch(regs_t *regs)
{
    volatile struct ExecBase *SysBase = getSysBase();
    struct Task *task;

    if (SysBase)
    {
        /* Mask external interrupts while we touch scheduler state. */
        wrmsr(rdmsr() & ~MSR_EE);

        /*
         * Is the list of ready tasks empty? Well, increment the idle switch cound and halt CPU.
         * It should be extended by some plugin mechanism which would put CPU and whole machine
         * into some more sophisticated sleep states (ACPI?)
         */
        while (IsListEmpty(&SysBase->TaskReady))
        {
            // SysBase->IdleCount++;
            SysBase->AttnResched |= ARF_AttnSwitch;
            //D(bug("[KRN] TaskReady list empty. Sleeping for a while...\n"));

            /* Sleep almost forever ;) Re-enable interrupts so a wakeup
             * can arrive; POW-based real sleep is still disabled below. */
            wrmsr(rdmsr() | MSR_EE);
            asm volatile("sync");
            // wrmsr(rdmsr() | MSR_POW);
            // asm volatile("isync");

            if (SysBase->SysFlags & SFF_SoftInt)
            {
                core_Cause(SysBase);
            }
        }

        SysBase->DispCount++;

        /* Get the first task from the TaskReady list, and populate it's settings through Sysbase */
        task = (struct Task *)REMHEAD(&SysBase->TaskReady);
        SysBase->ThisTask = task;
        SysBase->Elapsed = SysBase->Quantum;
        /* NOTE(review): 0x2000 is presumably the quantum-elapsed SysFlags
         * bit — confirm against exec headers and use the named constant. */
        SysBase->SysFlags &= ~0x2000;
        task->tc_State = TS_RUN;
        SysBase->IDNestCnt = task->tc_IDNestCnt;

        //D(bug("[KRN] New task = %p (%s)\n", task, task->tc_Node.ln_Name));

        /* Handle tasks's flags */
        if (task->tc_Flags & TF_EXCEPT)
            Exception();

        /* Store the launch time (upper timebase word). */
        GetIntETask(task)->iet_private1 = mftbu();

        if (task->tc_Flags & TF_LAUNCH)
        {
            AROS_UFC1(void, task->tc_Launch,
                      AROS_UFCA(struct ExecBase *, SysBase, A6));
        }

        /* Restore the task's state: from here on 'regs' is the incoming
         * task's saved frame, which the caller will return into. */
        regs = task->tc_UnionETask.tc_ETask->et_RegFrame;

        /* If the new task runs with interrupts enabled (IDNestCnt < 0),
         * set MSR_EE in its saved SRR1 so rfi re-enables them. */
        if (SysBase->IDNestCnt < 0)
            regs->srr1 |= MSR_EE;

        /* Copy the fpu, mmx, xmm state */
#warning FIXME: Change to the lazy saving of the FPU state!!!!
#warning TODO: No FPU support yet!!!!!!! Yay, it sucks! :-D
    }
}
/* XXX Should probably auto-generate instruction decoding for a particular core * from opcode tables in the future. */ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) { u32 inst = vcpu->arch.last_inst; u32 ea; int ra; int rb; int rs; int rt; int sprn; enum emulation_result emulated = EMULATE_DONE; int advance = 1; /* this default type might be overwritten by subcategories */ kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); switch (get_op(inst)) { case OP_TRAP: vcpu->arch.esr |= ESR_PTR; kvmppc_core_queue_program(vcpu); advance = 0; break; case 31: switch (get_xop(inst)) { case OP_31_XOP_LWZX: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); break; case OP_31_XOP_LBZX: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); break; case OP_31_XOP_STWX: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 4, 1); break; case OP_31_XOP_STBX: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 1, 1); break; case OP_31_XOP_STBUX: rs = get_rs(inst); ra = get_ra(inst); rb = get_rb(inst); ea = vcpu->arch.gpr[rb]; if (ra) ea += vcpu->arch.gpr[ra]; emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 1, 1); vcpu->arch.gpr[rs] = ea; break; case OP_31_XOP_LHZX: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); break; case OP_31_XOP_LHZUX: rt = get_rt(inst); ra = get_ra(inst); rb = get_rb(inst); ea = vcpu->arch.gpr[rb]; if (ra) ea += vcpu->arch.gpr[ra]; emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); vcpu->arch.gpr[ra] = ea; break; case OP_31_XOP_MFSPR: sprn = get_sprn(inst); rt = get_rt(inst); switch (sprn) { case SPRN_SRR0: vcpu->arch.gpr[rt] = vcpu->arch.srr0; break; case SPRN_SRR1: vcpu->arch.gpr[rt] = vcpu->arch.srr1; break; case SPRN_PVR: vcpu->arch.gpr[rt] = vcpu->arch.pvr; break; /* Note: mftb and TBRL/TBWL are user-accessible, so * the guest can always access the real TB anyways. 
* In fact, we probably will never see these traps. */ case SPRN_TBWL: vcpu->arch.gpr[rt] = mftbl(); break; case SPRN_TBWU: vcpu->arch.gpr[rt] = mftbu(); break; case SPRN_SPRG0: vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break; case SPRN_SPRG1: vcpu->arch.gpr[rt] = vcpu->arch.sprg1; break; case SPRN_SPRG2: vcpu->arch.gpr[rt] = vcpu->arch.sprg2; break; case SPRN_SPRG3: vcpu->arch.gpr[rt] = vcpu->arch.sprg3; break; /* Note: SPRG4-7 are user-readable, so we don't get * a trap. */ default: emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt); if (emulated == EMULATE_FAIL) { printk("mfspr: unknown spr %x\n", sprn); vcpu->arch.gpr[rt] = 0; } break; } break; case OP_31_XOP_STHX: rs = get_rs(inst); ra = get_ra(inst); rb = get_rb(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 2, 1); break; case OP_31_XOP_STHUX: rs = get_rs(inst); ra = get_ra(inst); rb = get_rb(inst); ea = vcpu->arch.gpr[rb]; if (ra) ea += vcpu->arch.gpr[ra]; emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 2, 1); vcpu->arch.gpr[ra] = ea; break; case OP_31_XOP_MTSPR: sprn = get_sprn(inst); rs = get_rs(inst); switch (sprn) { case SPRN_SRR0: vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break; case SPRN_SRR1: vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break; /* XXX We need to context-switch the timebase for * watchdog and FIT. */ case SPRN_TBWL: break; case SPRN_TBWU: break; case SPRN_DEC: vcpu->arch.dec = vcpu->arch.gpr[rs]; kvmppc_emulate_dec(vcpu); break; case SPRN_SPRG0: vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break; case SPRN_SPRG1: vcpu->arch.sprg1 = vcpu->arch.gpr[rs]; break; case SPRN_SPRG2: vcpu->arch.sprg2 = vcpu->arch.gpr[rs]; break; case SPRN_SPRG3: vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break; default: emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs); if (emulated == EMULATE_FAIL) printk("mtspr: unknown spr %x\n", sprn); break; } break; case OP_31_XOP_DCBI: /* Do nothing. 
The guest is performing dcbi because * hardware DMA is not snooped by the dcache, but * emulated DMA either goes through the dcache as * normal writes, or the host kernel has handled dcache * coherence. */ break; case OP_31_XOP_LWBRX: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); break; case OP_31_XOP_TLBSYNC: break; case OP_31_XOP_STWBRX: rs = get_rs(inst); ra = get_ra(inst); rb = get_rb(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 4, 0); break; case OP_31_XOP_LHBRX: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); break; case OP_31_XOP_STHBRX: rs = get_rs(inst); ra = get_ra(inst); rb = get_rb(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 2, 0); break; default: /* Attempt core-specific emulation below. */ emulated = EMULATE_FAIL; } break; case OP_LWZ: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); break; case OP_LWZU: ra = get_ra(inst); rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; break; case OP_LBZ: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); break; case OP_LBZU: ra = get_ra(inst); rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; break; case OP_STW: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 4, 1); break; case OP_STWU: ra = get_ra(inst); rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 4, 1); vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; break; case OP_STB: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 1, 1); break; case OP_STBU: ra = get_ra(inst); rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 1, 1); vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; break; case OP_LHZ: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); 
break; case OP_LHZU: ra = get_ra(inst); rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; break; case OP_STH: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 2, 1); break; case OP_STHU: ra = get_ra(inst); rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 2, 1); vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; break; default: emulated = EMULATE_FAIL; } if (emulated == EMULATE_FAIL) { emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance); if (emulated == EMULATE_FAIL) { advance = 0; printk(KERN_ERR "Couldn't emulate instruction 0x%08x " "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst)); } } KVMTRACE_3D(PPC_INSTR, vcpu, inst, (int)vcpu->arch.pc, emulated, entryexit); if (advance) vcpu->arch.pc += 4; /* Advance past emulated instruction. */ return emulated; }