/* NOTE(review): fragment — this leading '}' closes a function that begins
 * before this chunk. */
}

/* Realize hook for the KVM clock device: register a VM state change handler
 * so kvmclock state can be handled around VM stop/start. */
static void kvmclock_realize(DeviceState *dev, Error **errp)
{
    KVMClockState *s = KVM_CLOCK(dev);

    qemu_add_vm_change_state_handler(kvmclock_vm_state_change, s);
}

/* Migration format: a single 64-bit clock value, stream version 1. */
static const VMStateDescription kvmclock_vmsd = {
    .name = "kvmclock",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(clock, KVMClockState),
        VMSTATE_END_OF_LIST()
    }
};

/* QOM class init: wire up realize, mark the device as not user-creatable
 * (no_user), and attach the migration description. */
static void kvmclock_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = kvmclock_realize;
    dc->no_user = 1;
    dc->vmsd = &kvmclock_vmsd;
}

/* NOTE(review): fragment — TypeInfo initializer continues past this chunk. */
static const TypeInfo kvmclock_info = {
    .name = TYPE_KVM_CLOCK,
    /* NOTE(review): fragment — tail of a preceding VMStateDescription
     * (presumably vmstate_pxa2xx_timer0_regs) that begins before this chunk. */
        VMSTATE_UINT32(value, PXA2xxTimer0),
        VMSTATE_END_OF_LIST(),
    },
};

/* Migration state of one PXA2xx "timer 4" style channel: the embedded basic
 * timer struct plus the extra clock/frequency/control registers. */
static const VMStateDescription vmstate_pxa2xx_timer4_regs = {
    .name = "pxa2xx_timer4",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT(tm, PXA2xxTimer4, 1,
                       vmstate_pxa2xx_timer0_regs, PXA2xxTimer0),
        VMSTATE_INT32(oldclock, PXA2xxTimer4),
        VMSTATE_INT32(clock, PXA2xxTimer4),
        VMSTATE_UINT64(lastload, PXA2xxTimer4),
        VMSTATE_UINT32(freq, PXA2xxTimer4),
        VMSTATE_UINT32(control, PXA2xxTimer4),
        VMSTATE_END_OF_LIST(),
    },
};

/* vmstate field_exists-style callback: only migrate the tm4 block when this
 * timer model actually has it (delegates to pxa2xx_timer_has_tm4). */
static bool pxa2xx_timer_has_tm4_test(void *opaque, int version_id)
{
    return pxa2xx_timer_has_tm4(opaque);
}

/* NOTE(review): fragment — this description continues past this chunk. */
static const VMStateDescription vmstate_pxa2xx_timer_regs = {
    .name = "pxa2xx_timer",
    .version_id = 1,
    .minimum_version_id = 1,
/* NOTE(review): fragment — tail of a MemoryRegionOps initializer that begins
 * before this chunk: 32-bit-only accesses, native endianness. */
    .write = imx_gpio_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Migration state: the full i.MX GPIO register file (icr is kept as a single
 * 64-bit value) plus the has_edge_sel capability flag. */
static const VMStateDescription vmstate_imx_gpio = {
    .name = TYPE_IMX_GPIO,
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(dr, IMXGPIOState),
        VMSTATE_UINT32(gdir, IMXGPIOState),
        VMSTATE_UINT32(psr, IMXGPIOState),
        VMSTATE_UINT64(icr, IMXGPIOState),
        VMSTATE_UINT32(imr, IMXGPIOState),
        VMSTATE_UINT32(isr, IMXGPIOState),
        VMSTATE_BOOL(has_edge_sel, IMXGPIOState),
        VMSTATE_UINT32(edge_sel, IMXGPIOState),
        VMSTATE_END_OF_LIST()
    }
};

/* "has-edge-sel" property: whether this GPIO variant has the EDGE_SEL
 * register; defaults to true. */
static Property imx_gpio_properties[] = {
    DEFINE_PROP_BOOL("has-edge-sel", IMXGPIOState, has_edge_sel, true),
    DEFINE_PROP_END_OF_LIST(),
};

/* NOTE(review): fragment — reset handler body continues past this chunk. */
static void imx_gpio_reset(DeviceState *dev)
{
/* NOTE(review): fragment — tail of an instance init function that begins
 * before this chunk: register the GPU/ARM inbound IRQ lines and the outbound
 * IRQ/FIQ sysbus lines. */
    qdev_init_gpio_in_named(DEVICE(s), bcm2835_ic_set_gpu_irq,
                            BCM2835_IC_GPU_IRQ, GPU_IRQS);
    qdev_init_gpio_in_named(DEVICE(s), bcm2835_ic_set_arm_irq,
                            BCM2835_IC_ARM_IRQ, ARM_IRQS);

    sysbus_init_irq(SYS_BUS_DEVICE(s), &s->irq);
    sysbus_init_irq(SYS_BUS_DEVICE(s), &s->fiq);
}

/* Migration state: GPU interrupt level/enable bitmaps (64-bit), ARM
 * level/enable bitmaps (8-bit), and the FIQ enable/select routing. */
static const VMStateDescription vmstate_bcm2835_ic = {
    .name = TYPE_BCM2835_IC,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(gpu_irq_level, BCM2835ICState),
        VMSTATE_UINT64(gpu_irq_enable, BCM2835ICState),
        VMSTATE_UINT8(arm_irq_level, BCM2835ICState),
        VMSTATE_UINT8(arm_irq_enable, BCM2835ICState),
        VMSTATE_BOOL(fiq_enable, BCM2835ICState),
        VMSTATE_UINT8(fiq_select, BCM2835ICState),
        VMSTATE_END_OF_LIST()
    }
};

/* NOTE(review): fragment — class init continues past this chunk. */
static void bcm2835_ic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->reset = bcm2835_ic_reset;
    dc->vmsd = &vmstate_bcm2835_ic;
/* NOTE(review): fragment — tail of the NAND flash VMStateDescription whose
 * .name begins before this chunk; has pre_save/post_load hooks. */
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .pre_save = nand_pre_save,
    .post_load = nand_post_load,
    .fields = (VMStateField[]) {
        /* Control-pin latches and data path state. */
        VMSTATE_UINT8(cle, NANDFlashState),
        VMSTATE_UINT8(ale, NANDFlashState),
        VMSTATE_UINT8(ce, NANDFlashState),
        VMSTATE_UINT8(wp, NANDFlashState),
        VMSTATE_UINT8(gnd, NANDFlashState),
        VMSTATE_BUFFER(io, NANDFlashState),
        VMSTATE_UINT32(ioaddr_vmstate, NANDFlashState),
        VMSTATE_INT32(iolen, NANDFlashState),
        VMSTATE_UINT32(cmd, NANDFlashState),
        VMSTATE_UINT64(addr, NANDFlashState),
        VMSTATE_INT32(addrlen, NANDFlashState),
        VMSTATE_INT32(status, NANDFlashState),
        VMSTATE_INT32(offset, NANDFlashState),
        /* XXX: do we want to save s->storage too? */
        VMSTATE_END_OF_LIST()
    }
};

/* NOTE(review): fragment — init continues past this chunk.  Bus width is
 * taken from the chip-id table in bits (>> 3 converts to bytes) and size in
 * MiB (<< 20 converts to bytes). */
static int nand_device_init(SysBusDevice *dev)
{
    int pagesize;
    NANDFlashState *s = FROM_SYSBUS(NANDFlashState, dev);

    s->buswidth = nand_flash_ids[s->chip_id].width >> 3;
    s->size = nand_flash_ids[s->chip_id].size << 20;
    /* NOTE(review): fragment — tail of vmstate_iplb, which begins before
     * this chunk; the extended-IPLB data travels as a subsection. */
        VMSTATE_UINT16(devno, IplParameterBlock),
        VMSTATE_UINT8_ARRAY(reserved2, IplParameterBlock, 88),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_iplb_extended,
        NULL
    }
};

/* Migration state of the s390 IPL device (stream version 0): compat start
 * addresses, the embedded IPL parameter block, and the boot device ids. */
static const VMStateDescription vmstate_ipl = {
    .name = "ipl",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(compat_start_addr, S390IPLState),
        VMSTATE_UINT64(compat_bios_start_addr, S390IPLState),
        VMSTATE_STRUCT(iplb, S390IPLState, 0, vmstate_iplb, IplParameterBlock),
        VMSTATE_BOOL(iplb_valid, S390IPLState),
        VMSTATE_UINT8(cssid, S390IPLState),
        VMSTATE_UINT8(ssid, S390IPLState),
        VMSTATE_UINT16(devno, S390IPLState),
        VMSTATE_END_OF_LIST()
    }
};

/* Look up the singleton IPL device by resolving its QOM type path. */
static S390IPLState *get_ipl_device(void)
{
    return S390_IPL(object_resolve_path_type("", TYPE_S390_IPL, NULL));
}
#include "hw/hw.h"
#include "hw/boards.h"
#include "qemu/timer.h"
#include "migration/cpu.h"

#ifdef TARGET_SPARC64
/* Migration state of one sparc64 CPU timer: frequency, disable/NPT masks,
 * clock offset and the backing QEMUTimer (by pointer). */
static const VMStateDescription vmstate_cpu_timer = {
    .name = "cpu_timer",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(frequency, CPUTimer),
        VMSTATE_UINT32(disabled, CPUTimer),
        VMSTATE_UINT64(disabled_mask, CPUTimer),
        VMSTATE_UINT32(npt, CPUTimer),
        VMSTATE_UINT64(npt_mask, CPUTimer),
        VMSTATE_INT64(clock_offset, CPUTimer),
        VMSTATE_TIMER_PTR(qtimer, CPUTimer),
        VMSTATE_END_OF_LIST()
    }
};

/* Convenience wrapper: migrate a pointed-to CPUTimer as a struct field. */
#define VMSTATE_CPU_TIMER(_f, _s)                                    \
    VMSTATE_STRUCT_POINTER(_f, _s, vmstate_cpu_timer, CPUTimer)

/* NOTE(review): fragment — field list continues past this chunk. */
static const VMStateDescription vmstate_trap_state = {
    .name = "trap_state",
    .version_id = 1,
    .minimum_version_id = 1,
        /* NOTE(review): fragment — tail of vmstate_rx_pools, which begins
         * before this chunk: an array of pointers to RX buffer pools. */
        VMSTATE_ARRAY_OF_POINTER_TO_STRUCT(rx_pool, VIOsPAPRVLANDevice,
                                           RX_MAX_POOLS, 1,
                                           vmstate_rx_buffer_pool, RxBufPool),
        VMSTATE_END_OF_LIST()
    }
};

/* Migration state of the PAPR virtual LAN device: the generic VIO device
 * state plus the LLAN buffer-list/receive-queue bookkeeping.  RX pools are
 * carried in the vmstate_rx_pools subsection. */
static const VMStateDescription vmstate_spapr_llan = {
    .name = "spapr_llan",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_SPAPR_VIO(sdev, VIOsPAPRVLANDevice),
        /* LLAN state */
        VMSTATE_BOOL(isopen, VIOsPAPRVLANDevice),
        VMSTATE_UINT64(buf_list, VIOsPAPRVLANDevice),
        VMSTATE_UINT32(add_buf_ptr, VIOsPAPRVLANDevice),
        VMSTATE_UINT32(use_buf_ptr, VIOsPAPRVLANDevice),
        VMSTATE_UINT32(rx_bufs, VIOsPAPRVLANDevice),
        VMSTATE_UINT64(rxq_ptr, VIOsPAPRVLANDevice),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_rx_pools,
        NULL
    }
};

/* NOTE(review): fragment — class init continues past this chunk. */
static void spapr_vlan_class_init(ObjectClass *klass, void *data)
{
    /* NOTE(review): fragment — tail of a VMSTATE_SEGMENT-style macro whose
     * opening lines are before this chunk; migrates a SegmentCache struct
     * with a compile-time type check on the field. */
    .size       = sizeof(SegmentCache),                              \
    .vmsd       = &vmstate_segment,                                  \
    .flags      = VMS_STRUCT,                                        \
    .offset     = offsetof(_state, _field)                           \
            + type_check(SegmentCache, typeof_field(_state, _field)) \
}

/* Migrate an array of segment caches. */
#define VMSTATE_SEGMENT_ARRAY(_field, _state, _n)                            \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_segment, SegmentCache)

/* Low 128 bits of a ZMM register, sent as two 64-bit quadwords. */
static const VMStateDescription vmstate_xmm_reg = {
    .name = "xmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

/* Migrate the XMM halves of registers _start .. _start+CPU_NB_REGS-1. */
#define VMSTATE_XMM_REGS(_field, _state, _start)                         \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_xmm_reg, ZMMReg)

/* YMMH format is the same as XMM, but for bits 128-255 */
/* NOTE(review): fragment — field list continues past this chunk. */
static const VMStateDescription vmstate_ymmh_reg = {
    .name = "ymmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
    /* NOTE(review): fragment — tail of a post-load handler that begins
     * before this chunk: chain to the class post_load hook, then succeed. */
    if (c->post_load) {
        c->post_load(s);
    }
    return 0;
}

/* Migration state of the GICv3 ITS register file; pre_save/post_load sync
 * with the in-kernel/implementation state.  MIG_PRI_GICV3_ITS orders it
 * relative to the other GICv3 migration streams. */
static const VMStateDescription vmstate_its = {
    .name = "arm_gicv3_its",
    .pre_save = gicv3_its_pre_save,
    .post_load = gicv3_its_post_load,
    .priority = MIG_PRI_GICV3_ITS,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(ctlr, GICv3ITSState),
        VMSTATE_UINT32(iidr, GICv3ITSState),
        VMSTATE_UINT64(cbaser, GICv3ITSState),
        VMSTATE_UINT64(cwriter, GICv3ITSState),
        VMSTATE_UINT64(creadr, GICv3ITSState),
        VMSTATE_UINT64_ARRAY(baser, GICv3ITSState, 8),
        VMSTATE_END_OF_LIST()
    },
};

/* MMIO read handler: log the guest access and return 0 / MEMTX_OK. */
static MemTxResult gicv3_its_trans_read(void *opaque, hwaddr offset,
                                        uint64_t *data, unsigned size,
                                        MemTxAttrs attrs)
{
    qemu_log_mask(LOG_GUEST_ERROR, "ITS read at offset 0x%"PRIx64"\n", offset);
    *data = 0;
    return MEMTX_OK;
}
    /* NOTE(review): fragment — tail of a function that begins before this
     * chunk, returning success. */
    return 0;
}

/* Subsection predicate: FP registers are currently always sent. */
static inline bool fpu_needed(void *opaque)
{
    /* This looks odd, but we might want to NOT transfer fprs in the future */
    return true;
}

/* NOTE(review): fragment — field list continues past this chunk.  Each
 * entry sends the low 64 bits (.ll) of vregs[n][0]; presumably the FP
 * registers overlay the vector registers — confirm against the CPU state
 * definition. */
static const VMStateDescription vmstate_fpu = {
    .name = "cpu/fpu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = fpu_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.vregs[0][0].ll, S390CPU),
        VMSTATE_UINT64(env.vregs[1][0].ll, S390CPU),
        VMSTATE_UINT64(env.vregs[2][0].ll, S390CPU),
        VMSTATE_UINT64(env.vregs[3][0].ll, S390CPU),
        VMSTATE_UINT64(env.vregs[4][0].ll, S390CPU),
        VMSTATE_UINT64(env.vregs[5][0].ll, S390CPU),
        VMSTATE_UINT64(env.vregs[6][0].ll, S390CPU),
        VMSTATE_UINT64(env.vregs[7][0].ll, S390CPU),
        VMSTATE_UINT64(env.vregs[8][0].ll, S390CPU),
        VMSTATE_UINT64(env.vregs[9][0].ll, S390CPU),
        VMSTATE_UINT64(env.vregs[10][0].ll, S390CPU),
        VMSTATE_UINT64(env.vregs[11][0].ll, S390CPU),
        VMSTATE_UINT64(env.vregs[12][0].ll, S390CPU),
        VMSTATE_UINT64(env.vregs[13][0].ll, S390CPU),
        VMSTATE_UINT64(env.vregs[14][0].ll, S390CPU),
        VMSTATE_UINT64(env.vregs[15][0].ll, S390CPU),
        /* NOTE(review): fragment — tail of a switch inside a function that
         * begins before this chunk. */
        break;
    case TIMER_CONT:
        break;
    }

    cpu_openrisc_timer_update(cpu);
    qemu_cpu_kick(CPU(cpu));
}

/* Migration state of the shared OpenRISC tick timer: counter value and the
 * virtual-clock timestamp of the last update. */
static const VMStateDescription vmstate_or1k_timer = {
    .name = "or1k_timer",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(ttcr, OR1KTimerState),
        VMSTATE_UINT64(last_clk, OR1KTimerState),
        VMSTATE_END_OF_LIST()
    }
};

/* Per-CPU clock setup: create the virtual-clock timer and clear TTMR.
 * The OR1KTimerState itself is a lazily created singleton shared by all
 * CPUs, registered for migration exactly once. */
void cpu_openrisc_clock_init(OpenRISCCPU *cpu)
{
    cpu->env.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &openrisc_timer_cb, cpu);
    cpu->env.ttmr = 0x00000000;

    if (or1k_timer == NULL) {
        or1k_timer = g_new0(OR1KTimerState, 1);
        vmstate_register(NULL, 0, &vmstate_or1k_timer, or1k_timer);
    }
}
/* Called from RCU critical section */
/* IOMMU translate callback: look up the page table entry for @addr and build
 * an IOMMUTLBEntry.  On an invalid PTE, or a write to a read-only page, the
 * bad address is reported via iommu_bad_addr() and a .perm == IOMMU_NONE
 * entry (the initial value of @ret) is returned. */
static IOMMUTLBEntry sun4m_translate_iommu(IOMMUMemoryRegion *iommu,
                                           hwaddr addr,
                                           IOMMUAccessFlags flags)
{
    IOMMUState *is = container_of(iommu, IOMMUState, iommu);
    hwaddr page, pa;
    int is_write = (flags & IOMMU_WO) ? 1 : 0;
    uint32_t pte;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = 0,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    page = addr & IOMMU_PAGE_MASK;
    pte = iommu_page_get_flags(is, page);
    if (!(pte & IOPTE_VALID)) {
        iommu_bad_addr(is, page, is_write);
        return ret;
    }

    pa = iommu_translate_pa(addr, pte);
    if (is_write && !(pte & IOPTE_WRITE)) {
        iommu_bad_addr(is, page, is_write);
        return ret;
    }

    /* Grant RW only when the PTE is writable; otherwise read-only. */
    if (pte & IOPTE_WRITE) {
        ret.perm = IOMMU_RW;
    } else {
        ret.perm = IOMMU_RO;
    }

    ret.iova = page;
    ret.translated_addr = pa;
    ret.addr_mask = ~IOMMU_PAGE_MASK;

    return ret;
}

/* Migration state (stream version 2): the register file and iostart. */
static const VMStateDescription vmstate_iommu = {
    .name = "iommu",
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, IOMMUState, IOMMU_NREGS),
        VMSTATE_UINT64(iostart, IOMMUState),
        VMSTATE_END_OF_LIST()
    }
};

/* Device reset: clear all registers, then load the architected reset
 * values (version in CTRL, arbitration/AFSR/AER defaults, TS mask). */
static void iommu_reset(DeviceState *d)
{
    IOMMUState *s = SUN4M_IOMMU(d);

    memset(s->regs, 0, IOMMU_NREGS * 4);
    s->iostart = 0;
    s->regs[IOMMU_CTRL] = s->version;
    s->regs[IOMMU_ARBEN] = IOMMU_MID;
    s->regs[IOMMU_AFSR] = IOMMU_AFSR_RESV;
    s->regs[IOMMU_AER] = IOMMU_AER_EN_P0_ARB | IOMMU_AER_EN_P1_ARB;
    s->regs[IOMMU_MASK_ID] = IOMMU_TS_MASK;
}

/* Instance init: create the IOMMU memory region and its address space, the
 * IRQ line, and the register MMIO window. */
static void iommu_init(Object *obj)
{
    IOMMUState *s = SUN4M_IOMMU(obj);
    SysBusDevice *dev = SYS_BUS_DEVICE(obj);

    memory_region_init_iommu(&s->iommu, sizeof(s->iommu),
                             TYPE_SUN4M_IOMMU_MEMORY_REGION, OBJECT(dev),
                             "iommu-sun4m", UINT64_MAX);
    address_space_init(&s->iommu_as, MEMORY_REGION(&s->iommu), "iommu-as");
    sysbus_init_irq(dev, &s->irq);
    memory_region_init_io(&s->iomem, obj, &iommu_mem_ops, s, "iommu",
                          IOMMU_NREGS * sizeof(uint32_t));
    sysbus_init_mmio(dev, &s->iomem);
}
/* NOTE(review): fragment — this '}' closes a definition that begins before
 * this chunk. */
}

/* Subsection predicate: only send the SLB shadow when the guest has
 * registered one (non-zero address). */
static bool slb_shadow_needed(void *opaque)
{
    SpaprCpuState *spapr_cpu = opaque;

    return spapr_cpu->slb_shadow_addr != 0;
}

/* Migration subsection for the per-CPU SLB shadow buffer registration. */
static const VMStateDescription vmstate_spapr_cpu_slb_shadow = {
    .name = "spapr_cpu/vpa/slb_shadow",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = slb_shadow_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(slb_shadow_addr, SpaprCpuState),
        VMSTATE_UINT64(slb_shadow_size, SpaprCpuState),
        VMSTATE_END_OF_LIST()
    }
};

/* Subsection predicate: dispatch trace log registered (non-zero address). */
static bool dtl_needed(void *opaque)
{
    SpaprCpuState *spapr_cpu = opaque;

    return spapr_cpu->dtl_addr != 0;
}

/* NOTE(review): fragment — description continues past this chunk. */
static const VMStateDescription vmstate_spapr_cpu_dtl = {
    .name = "spapr_cpu/vpa/dtl",
    .version_id = 1,
    /* NOTE(review): fragment — tail of replay post-load that begins before
     * this chunk. */
    /* If this was a vmstate, saved in recording mode,
       we need to initialize replay data fields. */
    replay_fetch_data_kind();

    return 0;
}

/* Migration state of the record/replay engine: cached clocks, instruction
 * position, pending-data bookkeeping and the log file offset. */
static const VMStateDescription vmstate_replay = {
    .name = "replay",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = replay_pre_save,
    .post_load = replay_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_INT64_ARRAY(cached_clock, ReplayState, REPLAY_CLOCK_COUNT),
        VMSTATE_UINT64(current_step, ReplayState),
        VMSTATE_INT32(instructions_count, ReplayState),
        VMSTATE_UINT32(data_kind, ReplayState),
        VMSTATE_UINT32(has_unread_data, ReplayState),
        VMSTATE_UINT64(file_offset, ReplayState),
        VMSTATE_UINT64(block_request_id, ReplayState),
        VMSTATE_END_OF_LIST()
    },
};

/* Register the global replay state for migration. */
void replay_vmstate_register(void)
{
    vmstate_register(NULL, 0, &vmstate_replay, &replay_state);
}

/* NOTE(review): fragment — body continues past this chunk. */
void replay_vmstate_init(void)
        /* NOTE(review): fragment — interior of the MIPS CPU vmstate field
         * list; the description's header and tail are outside this chunk.
         * Field order is the migration wire format — do not reorder. */
        VMSTATE_UINT32(env.current_fpu, MIPSCPU),
        VMSTATE_INT32(env.error_code, MIPSCPU),
        VMSTATE_UINTTL(env.btarget, MIPSCPU),
        VMSTATE_UINTTL(env.bcond, MIPSCPU),

        /* Remaining CP0 registers */
        VMSTATE_INT32(env.CP0_Index, MIPSCPU),
        VMSTATE_INT32(env.CP0_Random, MIPSCPU),
        VMSTATE_INT32(env.CP0_VPEControl, MIPSCPU),
        VMSTATE_INT32(env.CP0_VPEConf0, MIPSCPU),
        VMSTATE_INT32(env.CP0_VPEConf1, MIPSCPU),
        VMSTATE_UINTTL(env.CP0_YQMask, MIPSCPU),
        VMSTATE_UINTTL(env.CP0_VPESchedule, MIPSCPU),
        VMSTATE_UINTTL(env.CP0_VPEScheFBack, MIPSCPU),
        VMSTATE_INT32(env.CP0_VPEOpt, MIPSCPU),
        VMSTATE_UINT64(env.CP0_EntryLo0, MIPSCPU),
        VMSTATE_UINT64(env.CP0_EntryLo1, MIPSCPU),
        VMSTATE_UINTTL(env.CP0_Context, MIPSCPU),
        VMSTATE_INT32(env.CP0_PageMask, MIPSCPU),
        VMSTATE_INT32(env.CP0_PageGrain, MIPSCPU),
        VMSTATE_INT32(env.CP0_Wired, MIPSCPU),
        VMSTATE_INT32(env.CP0_SRSConf0, MIPSCPU),
        VMSTATE_INT32(env.CP0_SRSConf1, MIPSCPU),
        VMSTATE_INT32(env.CP0_SRSConf2, MIPSCPU),
        VMSTATE_INT32(env.CP0_SRSConf3, MIPSCPU),
        VMSTATE_INT32(env.CP0_SRSConf4, MIPSCPU),
        VMSTATE_INT32(env.CP0_HWREna, MIPSCPU),
        VMSTATE_UINTTL(env.CP0_BadVAddr, MIPSCPU),
        VMSTATE_UINT32(env.CP0_BadInstr, MIPSCPU),
        VMSTATE_UINT32(env.CP0_BadInstrP, MIPSCPU),
        VMSTATE_INT32(env.CP0_Count, MIPSCPU),
    /* NOTE(review): fragment — tail of cpu_post_load that begins before
     * this chunk: resync debug registers after state load. */
    hw_breakpoint_update_all(cpu);
    hw_watchpoint_update_all(cpu);

    return 0;
}

/* NOTE(review): fragment — this ARM CPU vmstate continues past this chunk.
 * Stream version 22; the cpsr is handled with a custom VMStateInfo
 * (vmstate_cpsr) rather than a plain uint32 field. */
const VMStateDescription vmstate_arm_cpu = {
    .name = "cpu",
    .version_id = 22,
    .minimum_version_id = 22,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.regs, ARMCPU, 16),
        VMSTATE_UINT64_ARRAY(env.xregs, ARMCPU, 32),
        VMSTATE_UINT64(env.pc, ARMCPU),
        {
            .name = "cpsr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_cpsr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_UINT32(env.spsr, ARMCPU),
        VMSTATE_UINT64_ARRAY(env.banked_spsr, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.banked_r13, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.banked_r14, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.usr_regs, ARMCPU, 5),
        VMSTATE_UINT32_ARRAY(env.fiq_regs, ARMCPU, 5),
        VMSTATE_UINT64_ARRAY(env.elr_el, ARMCPU, 4),
        /* NOTE(review): fragment — tail of the 6xx TLB vmstate that begins
         * before this chunk: a variable-length array of TLB entries sized
         * by env.nb_tlb, plus the temporary GPRs. */
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlb6, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlb6xx_entry,
                                            ppc6xx_tlb_t),
        VMSTATE_UINTTL_ARRAY(env.tgpr, PowerPCCPU, 4),
        VMSTATE_END_OF_LIST()
    }
};

/* Migration state of one embedded-PowerPC (BookE-style) TLB entry. */
static const VMStateDescription vmstate_tlbemb_entry = {
    .name = "cpu/tlbemb_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT64(RPN, ppcemb_tlb_t),
        VMSTATE_UINTTL(EPN, ppcemb_tlb_t),
        VMSTATE_UINTTL(PID, ppcemb_tlb_t),
        VMSTATE_UINTTL(size, ppcemb_tlb_t),
        VMSTATE_UINT32(prot, ppcemb_tlb_t),
        VMSTATE_UINT32(attr, ppcemb_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

/* NOTE(review): fragment — function body continues past this chunk.
 * Subsection predicate: embedded TLB state is only sent when the CPU has
 * TLBs of the TLB_EMB flavour. */
static bool tlbemb_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_EMB);
    /* NOTE(review): fragment — tail of a VMSTATE_SEGMENT-style macro whose
     * opening lines are before this chunk. */
    .vmsd       = &vmstate_segment,                                  \
    .flags      = VMS_STRUCT,                                        \
    .offset     = offsetof(_state, _field)                           \
            + type_check(SegmentCache, typeof_field(_state, _field)) \
}

/* Migrate an array of segment caches. */
#define VMSTATE_SEGMENT_ARRAY(_field, _state, _n)                            \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_segment, SegmentCache)

/* One 128-bit XMM register, sent as two 64-bit quadwords. */
static const VMStateDescription vmstate_xmm_reg = {
    .name = "xmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT64(XMM_Q(0), XMMReg),
        VMSTATE_UINT64(XMM_Q(1), XMMReg),
        VMSTATE_END_OF_LIST()
    }
};

/* Migrate an array of _n XMM registers. */
#define VMSTATE_XMM_REGS(_field, _state, _n)                                 \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_xmm_reg, XMMReg)

/* YMMH format is the same as XMM */
/* NOTE(review): fragment — field list continues past this chunk. */
static const VMStateDescription vmstate_ymmh_reg = {
    .name = "ymmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
    /* NOTE(review): fragment — tail of the IPL parameter block vmstate
     * whose .name begins before this chunk. */
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_ARRAY(reserved1, IplParameterBlock, 110),
        VMSTATE_UINT16(devno, IplParameterBlock),
        VMSTATE_UINT8_ARRAY(reserved2, IplParameterBlock, 88),
        VMSTATE_END_OF_LIST()
    }
};

/* Migration state of the s390 IPL device (stream version 0): boot start
 * addresses, the embedded IPL parameter block, and the boot device ids. */
static const VMStateDescription vmstate_ipl = {
    .name = "ipl",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(start_addr, S390IPLState),
        VMSTATE_UINT64(bios_start_addr, S390IPLState),
        VMSTATE_STRUCT(iplb, S390IPLState, 0, vmstate_iplb, IplParameterBlock),
        VMSTATE_BOOL(iplb_valid, S390IPLState),
        VMSTATE_UINT8(cssid, S390IPLState),
        VMSTATE_UINT8(ssid, S390IPLState),
        VMSTATE_UINT16(devno, S390IPLState),
        VMSTATE_END_OF_LIST()
    }
};

/* Look up the singleton IPL device by resolving its QOM type path. */
static S390IPLState *get_ipl_device(void)
{
    return S390_IPL(object_resolve_path_type("", TYPE_S390_IPL, NULL));
}
/* NOTE(review): fragment — body of a predicate whose signature is before
 * this chunk; msr_ts presumably expands using env (TM transaction state). */
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return msr_ts;
}

/* Migration subsection for PowerPC transactional-memory checkpoint state
 * (GPRs, VSRs and the checkpointed SPRs); only sent when tm_needed. */
static const VMStateDescription vmstate_tm = {
    .name = "cpu/tm",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .needed = tm_needed,
    .fields = (VMStateField []) {
        VMSTATE_UINTTL_ARRAY(env.tm_gpr, PowerPCCPU, 32),
        VMSTATE_AVR_ARRAY(env.tm_vsr, PowerPCCPU, 64),
        VMSTATE_UINT64(env.tm_cr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_lr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_ctr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_fpscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_amr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_ppr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_vrsave, PowerPCCPU),
        VMSTATE_UINT32(env.tm_vscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_dscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_tar, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    },
};
#endif

/* NOTE(review): fragment — function continues past this chunk. */
static bool sr_needed(void *opaque)
    /* NOTE(review): fragment — tail of a realize/init function that begins
     * before this chunk. */
    sysbus_init_mmio(sbd, &s->iomem);
}

/* Device reset: restore the CPC base register to its power-on value. */
static void mips_gcr_reset(DeviceState *dev)
{
    MIPSGCRState *s = MIPS_GCR(dev);

    update_cpc_base(s, 0);
}

/* Migration state: only the CPC base register needs to be transferred. */
static const VMStateDescription vmstate_mips_gcr = {
    .name = "mips-gcr",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(cpc_base, MIPSGCRState),
        VMSTATE_END_OF_LIST()
    },
};

/* Configurable properties: VP count, GCR revision, and the GCR base
 * address (defaults to GCR_BASE_ADDR). */
static Property mips_gcr_properties[] = {
    DEFINE_PROP_INT32("num-vp", MIPSGCRState, num_vps, 1),
    DEFINE_PROP_INT32("gcr-rev", MIPSGCRState, gcr_rev, 0x800),
    DEFINE_PROP_UINT64("gcr-base", MIPSGCRState, gcr_base, GCR_BASE_ADDR),
    DEFINE_PROP_END_OF_LIST(),
};

/* NOTE(review): fragment — class init continues past this chunk. */
static void mips_gcr_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->props = mips_gcr_properties;