int ppc_cpu_gdb_write_register_apple(CPUState *cs, uint8_t *mem_buf, int n)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    int r = ppc_gdb_register_len_apple(n);

    if (!r) {
        return r;
    }

    if (msr_le) {
        /* If cpu is in LE mode, convert memory contents to LE. */
        ppc_gdb_swap_register(mem_buf, n, r);
    }

    if (n < 32) {
        /* gprs */
        env->gpr[n] = ldq_p(mem_buf);
    } else if (n < 64) {
        /* fprs */
        env->fpr[n - 32] = ldfq_p(mem_buf);
    } else {
        switch (n) {
        case 64 + 32:
            env->nip = ldq_p(mem_buf);
            break;
        case 65 + 32:
            ppc_store_msr(env, ldq_p(mem_buf));
            break;
        case 66 + 32:
            {
                uint32_t cr = ldl_p(mem_buf);
                int i;

                for (i = 0; i < 8; i++) {
                    env->crf[i] = (cr >> (32 - ((i + 1) * 4))) & 0xF;
                }
                break;
            }
        case 67 + 32:
            env->lr = ldq_p(mem_buf);
            break;
        case 68 + 32:
            env->ctr = ldq_p(mem_buf);
            break;
        case 69 + 32:
            env->xer = ldq_p(mem_buf);
            break;
        case 70 + 32:
            /* fpscr */
            store_fpscr(env, ldq_p(mem_buf), 0xffffffff);
            break;
        }
    }
    return r;
}
int ppc_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    int r = ppc_gdb_register_len(n);

    if (!r) {
        return r;
    }
    ppc_maybe_bswap_register(env, mem_buf, r);

    if (n < 32) {
        /* gprs */
        env->gpr[n] = ldtul_p(mem_buf);
    } else if (n < 64) {
        /* fprs */
        env->fpr[n - 32] = ldfq_p(mem_buf);
    } else {
        switch (n) {
        case 64:
            env->nip = ldtul_p(mem_buf);
            break;
        case 65:
            ppc_store_msr(env, ldtul_p(mem_buf));
            break;
        case 66:
            {
                uint32_t cr = ldl_p(mem_buf);
                int i;

                for (i = 0; i < 8; i++) {
                    env->crf[i] = (cr >> (32 - ((i + 1) * 4))) & 0xF;
                }
                break;
            }
        case 67:
            env->lr = ldtul_p(mem_buf);
            break;
        case 68:
            env->ctr = ldtul_p(mem_buf);
            break;
        case 69:
            env->xer = ldtul_p(mem_buf);
            break;
        case 70:
            /* fpscr */
            store_fpscr(env, ldtul_p(mem_buf), 0xffffffff);
            break;
        }
    }
    return r;
}
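/*
 * Illustration only (not part of the QEMU sources): both write paths above
 * unpack the single 32-bit CR word sent by GDB into the eight 4-bit
 * condition register fields kept in env->crf[], with CR0 occupying the most
 * significant nibble. A minimal standalone sketch of the two directions,
 * using hypothetical helper names (gdb_pack_cr/gdb_unpack_cr):
 */
#include <stdint.h>

static uint32_t gdb_pack_cr(const uint8_t crf[8])
{
    uint32_t cr = 0;
    int i;

    for (i = 0; i < 8; i++) {
        /* crf[0] lands in bits 31..28, crf[7] in bits 3..0 */
        cr |= (uint32_t)(crf[i] & 0xF) << (32 - ((i + 1) * 4));
    }
    return cr;
}

static void gdb_unpack_cr(uint32_t cr, uint8_t crf[8])
{
    int i;

    for (i = 0; i < 8; i++) {
        /* Same shift as the case 66 loop in ppc_cpu_gdb_write_register() */
        crf[i] = (cr >> (32 - ((i + 1) * 4))) & 0xF;
    }
}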
static int cpu_post_load(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;
    target_ulong msr;

    /*
     * We always ignore the source PVR. The user or management
     * software has to take care of running QEMU in a compatible mode.
     */
    env->spr[SPR_PVR] = env->spr_cb[SPR_PVR].default_value;
    env->lr = env->spr[SPR_LR];
    env->ctr = env->spr[SPR_CTR];
    cpu_write_xer(env, env->spr[SPR_XER]);
#if defined(TARGET_PPC64)
    env->cfar = env->spr[SPR_CFAR];
#endif
    env->spe_fscr = env->spr[SPR_BOOKE_SPEFSCR];

    for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
        env->DBAT[0][i] = env->spr[SPR_DBAT0U + 2*i];
        env->DBAT[1][i] = env->spr[SPR_DBAT0U + 2*i + 1];
        env->IBAT[0][i] = env->spr[SPR_IBAT0U + 2*i];
        env->IBAT[1][i] = env->spr[SPR_IBAT0U + 2*i + 1];
    }
    for (i = 0; (i < 4) && ((i+4) < env->nb_BATs); i++) {
        env->DBAT[0][i+4] = env->spr[SPR_DBAT4U + 2*i];
        env->DBAT[1][i+4] = env->spr[SPR_DBAT4U + 2*i + 1];
        env->IBAT[0][i+4] = env->spr[SPR_IBAT4U + 2*i];
        env->IBAT[1][i+4] = env->spr[SPR_IBAT4U + 2*i + 1];
    }

    if (!env->external_htab) {
        /* Restore htab_base and htab_mask variables */
        ppc_store_sdr1(env, env->spr[SPR_SDR1]);
    }

    /* Invalidate all msr bits except MSR_TGPR/MSR_HVB before restoring */
    msr = env->msr;
    env->msr ^= ~((1ULL << MSR_TGPR) | MSR_HVB);
    ppc_store_msr(env, msr);

    hreg_compute_mem_idx(env);

    return 0;
}
static int cpu_post_load(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;
    target_ulong msr;

    /*
     * If we're operating in compat mode, we should be ok as long as
     * the destination supports the same compatibility mode.
     *
     * Otherwise, however, we require that the destination has exactly
     * the same CPU model as the source.
     */
#if defined(TARGET_PPC64)
    if (cpu->compat_pvr) {
        uint32_t compat_pvr = cpu->compat_pvr;
        Error *local_err = NULL;

        cpu->compat_pvr = 0;
        ppc_set_compat(cpu, compat_pvr, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    } else
#endif
    {
        if (!pvr_match(cpu, env->spr[SPR_PVR])) {
            return -1;
        }
    }

    /*
     * If we're running with KVM HV, there is a chance that the guest
     * is running with KVM HV and its kernel does not have the
     * capability of dealing with a PVR other than this exact host PVR
     * in KVM_SET_SREGS. If that happens, the guest freezes after
     * migration.
     *
     * The function kvmppc_pvr_workaround_required does this verification
     * by first checking if the kernel has the cap, returning true immediately
     * if that is the case. Otherwise, it checks if we're running in KVM PR.
     * If the guest kernel does not have the cap and we're not running KVM-PR
     * (so, it is running KVM-HV), we need to ensure that KVM_SET_SREGS will
     * receive the PVR it expects as a workaround.
     */
#if defined(CONFIG_KVM)
    if (kvmppc_pvr_workaround_required(cpu)) {
        env->spr[SPR_PVR] = env->spr_cb[SPR_PVR].default_value;
    }
#endif

    env->lr = env->spr[SPR_LR];
    env->ctr = env->spr[SPR_CTR];
    cpu_write_xer(env, env->spr[SPR_XER]);
#if defined(TARGET_PPC64)
    env->cfar = env->spr[SPR_CFAR];
#endif
    env->spe_fscr = env->spr[SPR_BOOKE_SPEFSCR];

    for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
        env->DBAT[0][i] = env->spr[SPR_DBAT0U + 2*i];
        env->DBAT[1][i] = env->spr[SPR_DBAT0U + 2*i + 1];
        env->IBAT[0][i] = env->spr[SPR_IBAT0U + 2*i];
        env->IBAT[1][i] = env->spr[SPR_IBAT0U + 2*i + 1];
    }
    for (i = 0; (i < 4) && ((i+4) < env->nb_BATs); i++) {
        env->DBAT[0][i+4] = env->spr[SPR_DBAT4U + 2*i];
        env->DBAT[1][i+4] = env->spr[SPR_DBAT4U + 2*i + 1];
        env->IBAT[0][i+4] = env->spr[SPR_IBAT4U + 2*i];
        env->IBAT[1][i+4] = env->spr[SPR_IBAT4U + 2*i + 1];
    }

    if (!cpu->vhyp) {
        ppc_store_sdr1(env, env->spr[SPR_SDR1]);
    }

    /*
     * Invalidate all supported msr bits except MSR_TGPR/MSR_HVB
     * before restoring.
     */
    msr = env->msr;
    env->msr ^= env->msr_mask & ~((1ULL << MSR_TGPR) | MSR_HVB);
    ppc_store_msr(env, msr);

    hreg_compute_mem_idx(env);

    return 0;
}
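/*
 * Illustration only (not part of the QEMU sources): the "invalidate then
 * restore" idiom at the end of cpu_post_load() works because the MSR store
 * path only reacts to bits that differ from the current env->msr. Flipping
 * every supported bit except MSR_TGPR/MSR_HVB first guarantees that the
 * following ppc_store_msr() sees every bit as changed and recomputes all
 * dependent state for the incoming value. A minimal sketch with toy
 * constants (the real masks live in target/ppc/cpu.h):
 */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t msr_mask  = 0x800000000000FFFFULL;        /* toy "implemented bits" mask */
    uint64_t keep_mask = (1ULL << 15) | (1ULL << 60);  /* stand-ins for MSR_TGPR/MSR_HVB */
    uint64_t saved_msr = 0x0000000000008002ULL;        /* MSR value from the migration stream */
    uint64_t env_msr   = saved_msr;                    /* env->msr after the raw field load */

    /* Flip all maskable bits except the kept ones, as cpu_post_load() does */
    env_msr ^= msr_mask & ~keep_mask;

    /*
     * Every implemented bit outside keep_mask now differs from saved_msr,
     * so a subsequent ppc_store_msr(env, saved_msr) would treat them all
     * as changed and rebuild the state derived from them.
     */
    printf("bits seen as changed: 0x%016" PRIx64 "\n", env_msr ^ saved_msr);
    return 0;
}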