/* Serialize the Syborg RTC device to the migration stream:
 * the host/guest time offset first, then the latched data register. */
static void syborg_rtc_save(QEMUFile *f, void *opaque)
{
    SyborgRTCState *rtc = (SyborgRTCState *)opaque;

    qemu_put_be64(f, rtc->offset);
    qemu_put_be64(f, rtc->data);
}
/* vmstate helper: write a 128-bit Altivec register as two big-endian
 * 64-bit halves, high half (u64[0]) first. */
static void put_avr(QEMUFile *f, void *pv, size_t size)
{
    const ppc_avr_t *reg = pv;

    qemu_put_be64(f, reg->u64[0]);
    qemu_put_be64(f, reg->u64[1]);
}
/* vmstate helper: write one SLB entry as two big-endian 64-bit words,
 * esid first, then vsid. */
static void put_slbe(QEMUFile *f, void *pv, size_t size)
{
    const ppc_slb_t *slb = pv;

    qemu_put_be64(f, slb->esid);
    qemu_put_be64(f, slb->vsid);
}
/*
 * Find one dirty RAM page and write it to the migration stream.
 *
 * Scans at most one full pass over guest RAM, resuming where the
 * previous call stopped (the cursor is kept in a function-local
 * static).  A page whose bytes are all identical is sent compressed
 * as (addr | RAM_SAVE_FLAG_COMPRESS) plus a single byte; otherwise
 * the full page follows (addr | RAM_SAVE_FLAG_PAGE).
 *
 * Returns 1 if a dirty page was found and sent, 0 if none remain
 * in this pass.
 */
static int ram_save_block(QEMUFile *f)
{
    /* Persistent scan cursor: round-robin across calls. */
    static ram_addr_t current_addr = 0;
    ram_addr_t saved_addr = current_addr;
    ram_addr_t addr = 0;
    int found = 0;

    /* 'addr' counts how much of RAM we have examined this call;
     * stop after one full wrap so we never loop forever. */
    while (addr < last_ram_offset) {
        if (cpu_physical_memory_get_dirty(current_addr, MIGRATION_DIRTY_FLAG)) {
            uint8_t *p;

            /* Clear the dirty bit before sending so a concurrent write
             * re-marks the page for a later pass. */
            cpu_physical_memory_reset_dirty(current_addr,
                                            current_addr + TARGET_PAGE_SIZE,
                                            MIGRATION_DIRTY_FLAG);
            p = qemu_get_ram_ptr(current_addr);
            if (is_dup_page(p, *p)) {
                /* Uniform page: one flag word plus the fill byte. */
                qemu_put_be64(f, current_addr | RAM_SAVE_FLAG_COMPRESS);
                qemu_put_byte(f, *p);
            } else {
                qemu_put_be64(f, current_addr | RAM_SAVE_FLAG_PAGE);
                qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
            }
            found = 1;
            break;
        }
        addr += TARGET_PAGE_SIZE;
        /* Advance the cursor with wrap-around at the end of RAM. */
        current_addr = (saved_addr + addr) % last_ram_offset;
    }
    return found;
}
/* vmstate .put handler: serialize one SLB entry as two big-endian
 * 64-bit words (esid, then vsid).  Always succeeds. */
static int put_slbe(QEMUFile *f, void *pv, size_t size,
                    const VMStateField *field, QJSON *vmdesc)
{
    const ppc_slb_t *slb = pv;

    qemu_put_be64(f, slb->esid);
    qemu_put_be64(f, slb->vsid);
    return 0;
}
/* vmstate .put handler: serialize a 128-bit Altivec register as two
 * big-endian 64-bit halves, u64[0] first.  Always succeeds. */
static int put_avr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field, QJSON *vmdesc)
{
    const ppc_avr_t *reg = pv;

    qemu_put_be64(f, reg->u64[0]);
    qemu_put_be64(f, reg->u64[1]);
    return 0;
}
/* Save the virtual-clock bookkeeping.  Saving while the tick counter
 * is live would capture an inconsistent snapshot, so that is a hard
 * error. */
static void timer_save(QEMUFile *f, void *opaque)
{
    TimersState *ts = (TimersState *)opaque;

    if (ts->cpu_ticks_enabled) {
        hw_error("cannot save state if virtual timers are running");
    }

    qemu_put_be64(f, ts->cpu_ticks_prev);
    qemu_put_be64(f, ts->cpu_ticks_offset);
    qemu_put_be64(f, ts->cpu_clock_offset);
}
/* Save the goldfish timer: current time, armed flag, and — when armed —
 * the number of ticks remaining until the alarm fires (stored relative
 * so it survives a host clock change across save/restore). */
static void goldfish_timer_save(QEMUFile* f, void* opaque)
{
    struct timer_state* s = opaque;

    /* in case the kernel is in the middle of a timer read */
    qemu_put_be64(f, s->now_ns);
    qemu_put_byte(f, s->armed);
    if (s->armed) {
        int64_t alarm_ns = s->alarm_low_ns | ((int64_t)s->alarm_high_ns << 32);
        int64_t remaining = ns2tks(alarm_ns) - qemu_get_clock(vm_clock);
        qemu_put_be64(f, remaining);
    }
}
/* Save the goldfish timer (tick-based variant): current time, armed
 * flag, and — when armed — the relative ticks until the alarm, with
 * the nanosecond alarm value converted via muldiv64. */
static void goldfish_timer_save(QEMUFile* f, void* opaque)
{
    struct timer_state* s = opaque;

    /* in case the kernel is in the middle of a timer read */
    qemu_put_be64(f, s->now);
    qemu_put_byte(f, s->armed);
    if (s->armed) {
        int64_t alarm_ns = s->alarm_low | ((int64_t)s->alarm_high << 32);
        int64_t alarm_tks = muldiv64(alarm_ns, ticks_per_sec, 1000000000);
        qemu_put_be64(f, alarm_tks - qemu_get_clock(vm_clock));
    }
}
/*
 * Save guest storage keys to the migration stream.
 *
 * Stream layout: a flag word (start address | S390_SKEYS_SAVE_FLAG_SKEYS),
 * the page count, the raw key bytes, and a terminating EOS word.  On any
 * failure the stream is still filled with (zero) data of the announced
 * length, and the trailing word carries S390_SKEYS_SAVE_FLAG_ERROR so the
 * destination knows to reject it.
 *
 * Fix: error_report() messages must not end with '\n' (QEMU convention;
 * the function appends the newline itself).
 */
static void s390_storage_keys_save(QEMUFile *f, void *opaque)
{
    S390SKeysState *ss = S390_SKEYS(opaque);
    S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss);
    uint64_t pages_left = ram_size / TARGET_PAGE_SIZE;
    uint64_t read_count, eos = S390_SKEYS_SAVE_FLAG_EOS;
    vaddr cur_gfn = 0;
    int error = 0;
    uint8_t *buf;

    if (!skeyclass->skeys_enabled(ss)) {
        goto end_stream;
    }

    buf = g_try_malloc(S390_SKEYS_BUFFER_SIZE);
    if (!buf) {
        error_report("storage key save could not allocate memory");
        goto end_stream;
    }

    /* We only support initial memory. Standby memory is not handled yet. */
    qemu_put_be64(f, (cur_gfn * TARGET_PAGE_SIZE) | S390_SKEYS_SAVE_FLAG_SKEYS);
    qemu_put_be64(f, pages_left);

    while (pages_left) {
        read_count = MIN(pages_left, S390_SKEYS_BUFFER_SIZE);

        if (!error) {
            error = skeyclass->get_skeys(ss, cur_gfn, read_count, buf);
            if (error) {
                /*
                 * If error: we want to fill the stream with valid data instead
                 * of stopping early so we pad the stream with 0x00 values and
                 * use S390_SKEYS_SAVE_FLAG_ERROR to indicate failure to the
                 * reading side.
                 */
                error_report("S390_GET_KEYS error %d", error);
                memset(buf, 0, S390_SKEYS_BUFFER_SIZE);
                eos = S390_SKEYS_SAVE_FLAG_ERROR;
            }
        }

        qemu_put_buffer(f, buf, read_count);
        cur_gfn += read_count;
        pages_left -= read_count;
    }

    g_free(buf);

end_stream:
    qemu_put_be64(f, eos);
}
/* Save the goldfish pipe device: the register block first, then the
 * number of live pipes followed by each pipe's state. */
static void goldfish_pipe_save( QEMUFile* file, void* opaque )
{
    PipeDevice* dev = opaque;
    Pipe* p;
    int pipe_count = 0;

    qemu_put_be32(file, dev->address);
    qemu_put_be32(file, dev->size);
    qemu_put_be32(file, dev->status);
    qemu_put_be32(file, dev->channel);
    qemu_put_be32(file, dev->wakes);
    qemu_put_be64(file, dev->params_addr);

    /* First pass: count pipes so the loader knows how many to expect. */
    for (p = dev->pipes; p != NULL; p = p->next) {
        pipe_count++;
    }
    qemu_put_sbe32(file, pipe_count);

    /* Second pass: serialize each pipe. */
    for (p = dev->pipes; p != NULL; p = p->next) {
        pipe_save(p, file);
    }
}
/* Serialize a single pipe.  Optional fields (service name, args,
 * service-specific state) are preceded by a presence byte so the
 * loader knows whether to read them. */
static void pipe_save( Pipe* pipe, QEMUFile* file )
{
    const int has_service = (pipe->service != NULL);

    /* pipe->service == NULL means we're still using a PipeConnector;
     * a 0 presence byte records that condition, otherwise 1 plus the
     * service name follows. */
    qemu_put_byte(file, has_service ? 1 : 0);
    if (has_service) {
        qemu_put_string(file, pipe->service->name);
    }

    /* Common pipe state. */
    qemu_put_be64(file, pipe->channel);
    qemu_put_byte(file, (int)pipe->wanted);
    qemu_put_byte(file, (int)pipe->closed);

    /* Optional argument string: 1 + args, or 0 when absent. */
    if (pipe->args != NULL) {
        qemu_put_byte(file, 1);
        qemu_put_string(file, pipe->args);
    } else {
        qemu_put_byte(file, 0);
    }

    /* Let the service append its own state, if it implements save. */
    if (pipe->funcs->save) {
        pipe->funcs->save(pipe->opaque, file);
    }
}
/* Save a virtio device: transport config, common device fields, the
 * device config space, then the count of active virtqueues followed by
 * per-queue state.  A queue with vring.num == 0 terminates the list. */
void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    int n = 0;
    int i;

    if (vdev->binding->save_config) {
        vdev->binding->save_config(vdev->binding_opaque, f);
    }

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &vdev->guest_features);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    /* Count the active queues (the first empty vring ends the set). */
    while (n < VIRTIO_PCI_QUEUE_MAX && vdev->vq[n].vring.num != 0) {
        n++;
    }
    qemu_put_be32(f, n);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
        qemu_put_be32(f, vdev->vq[i].vring.num);
        qemu_put_be64(f, vdev->vq[i].pa);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (vdev->binding->save_queue) {
            vdev->binding->save_queue(vdev->binding_opaque, i, f);
        }
    }
}
/* Save the goldfish MMC controller: the DMA buffer address explicitly,
 * then the remaining registers via the generic field descriptor. */
static void goldfish_mmc_save(QEMUFile* f, void* opaque)
{
    struct goldfish_mmc_state* mmc = (struct goldfish_mmc_state*)opaque;

    qemu_put_be64(f, mmc->buffer_address);
    qemu_put_struct(f, goldfish_mmc_fields, mmc);
}
/* Write a timer's expiry time (as reported by timer_expire_time_ns())
 * to the migration stream as a big-endian 64-bit value. */
void timer_put(QEMUFile *f, QEMUTimer *ts)
{
    qemu_put_be64(f, timer_expire_time_ns(ts));
}
/* vmstate .put handler: write a float64 (stored as its raw 64-bit
 * pattern) big-endian.  Always succeeds. */
static int put_float64(QEMUFile *f, void *pv, size_t size,
                       VMStateField *field, QJSON *vmdesc)
{
    const uint64_t *val = pv;

    qemu_put_be64(f, float64_val(*val));
    return 0;
}
/* Save a timer: its expiry time when pending, or all-ones (-1 as
 * uint64_t) as the "not pending" sentinel. */
void qemu_put_timer(QEMUFile *f, QEMUTimer *ts)
{
    uint64_t expire_time = qemu_timer_pending(ts) ? ts->expire_time
                                                  : (uint64_t)-1;
    qemu_put_be64(f, expire_time);
}
/* Save the RTC: CMOS RAM and index register, the seven broken-down
 * calendar fields (sec, min, hour, wday, mday, mon, year — in that
 * stream order), then the periodic/second timers and their deadlines. */
static void rtc_save(QEMUFile *f, void *opaque)
{
    RTCState *s = opaque;
    const int tm_fields[7] = {
        s->current_tm.tm_sec,
        s->current_tm.tm_min,
        s->current_tm.tm_hour,
        s->current_tm.tm_wday,
        s->current_tm.tm_mday,
        s->current_tm.tm_mon,
        s->current_tm.tm_year,
    };
    int i;

    qemu_put_buffer(f, s->cmos_data, 128);
    qemu_put_8s(f, &s->cmos_index);

    for (i = 0; i < 7; i++) {
        qemu_put_be32(f, tm_fields[i]);
    }

    qemu_put_timer(f, s->periodic_timer);
    qemu_put_be64(f, s->next_periodic_time);
    qemu_put_be64(f, s->next_second_time);
    qemu_put_timer(f, s->second_timer);
    qemu_put_timer(f, s->second_timer2);
}
/**
 * save_page_header: Write page header to wire
 *
 * If this is the 1st block, it also writes the block identification
 *
 * Returns: Number of bytes written
 *
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 *          in the lower bits, it contains flags
 */
static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
{
    size_t size = 8;

    qemu_put_be64(f, offset);

    if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
        /* Compute the id length once instead of three strlen() calls. */
        size_t len = strlen(block->idstr);

        qemu_put_byte(f, len);
        qemu_put_buffer(f, (uint8_t *)block->idstr, len);
        size += 1 + len;
    }
    return size;
}
/*
 * Write a RAM block header to the wire: the offset word (with the
 * continue/compress flags OR-ed in) and, unless this continues the
 * previous block, the block id string prefixed by its length.
 *
 * Returns the number of bytes written.
 */
static size_t save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                             int cont, int flag)
{
    size_t size = 8;

    qemu_put_be64(f, offset | cont | flag);

    if (!cont) {
        /* Compute the id length once instead of three strlen() calls. */
        size_t len = strlen(block->idstr);

        qemu_put_byte(f, len);
        qemu_put_buffer(f, (uint8_t *)block->idstr, len);
        size += 1 + len;
    }
    return size;
}
/* Migration setup hook for CMMA storage attributes: switch the device
 * into migration mode (enabling PGSTE dirty tracking) and emit an
 * initial end-of-stream marker.  Returns 0 on success or the error
 * from set_migrationmode(). */
static int cmma_save_setup(QEMUFile *f, void *opaque)
{
    S390StAttribState *sas = S390_STATTRIB(opaque);
    S390StAttribClass *sac = S390_STATTRIB_GET_CLASS(sas);

    /*
     * Signal that we want to start a migration, thus needing PGSTE dirty
     * tracking.
     */
    int res = sac->set_migrationmode(sas, 1);
    if (res != 0) {
        return res;
    }

    qemu_put_be64(f, STATTR_FLAG_EOS);
    return 0;
}
/**
 * Copies the current contents of a disk image into the snapshot file.
 *
 * Stream format: a be64 total byte size, followed by that many raw
 * bytes copied from the backing fd.  On any I/O error the stream is
 * marked failed via qemu_file_set_error() and the copy stops early.
 * Compiled out entirely when ANDROID_QCOW is defined.
 *
 * TODO optimize this using some kind of copy-on-write mechanism for
 *      unchanged disk sections.
 */
static void nand_dev_save_disk_state(QEMUFile *f, nand_dev *dev)
{
#ifndef ANDROID_QCOW
    int buf_size = NAND_DEV_SAVE_DISK_BUF_SIZE;
    uint8_t buffer[NAND_DEV_SAVE_DISK_BUF_SIZE] = {0};
    int ret;
    uint64_t total_copied = 0;

    /* Size of file to restore, hence size of data block following.
     * TODO Work out whether to use lseek64 here. */
    ret = do_lseek(dev->fd, 0, SEEK_END);
    if (ret < 0) {
        XLOG("%s EOF seek failed: %s\n", __FUNCTION__, strerror(errno));
        qemu_file_set_error(f);
        return;
    }
    const uint64_t total_size = ret;
    qemu_put_be64(f, total_size);

    /* copy all data from the stream to the stored image */
    ret = do_lseek(dev->fd, 0, SEEK_SET);
    if (ret < 0) {
        XLOG("%s seek failed: %s\n", __FUNCTION__, strerror(errno));
        qemu_file_set_error(f);
        return;
    }
    do {
        ret = do_read(dev->fd, buffer, buf_size);
        if (ret < 0) {
            XLOG("%s read failed: %s\n", __FUNCTION__, strerror(errno));
            qemu_file_set_error(f);
            return;
        }
        qemu_put_buffer(f, buffer, ret);
        total_copied += ret;
    }
    /* A short read means EOF; also stop once max_size bytes are copied. */
    while (ret == buf_size && total_copied < dev->max_size);

    /* TODO Maybe check that we've written total_size bytes */
#endif
}
/* Save the s390 guest time-of-day clock.  Emits a presence byte first:
 * MISSING (with a warning) when the clock cannot be read, otherwise
 * PRESENT followed by the high byte and low 64 bits of the TOD value. */
void gtod_save(QEMUFile *f, void *opaque)
{
    uint8_t tod_high;
    uint64_t tod_low;
    int r = s390_get_clock(&tod_high, &tod_low);

    if (r) {
        fprintf(stderr, "WARNING: Unable to get guest clock for migration. "
                        "Error code %d. Guest clock will not be migrated "
                        "which could cause the guest to hang.\n", r);
        qemu_put_byte(f, S390_TOD_CLOCK_VALUE_MISSING);
        return;
    }

    qemu_put_byte(f, S390_TOD_CLOCK_VALUE_PRESENT);
    qemu_put_byte(f, tod_high);
    qemu_put_be64(f, tod_low);
}
static void colo_send_message_value(QEMUFile *f, COLOMessage msg, uint64_t value, Error **errp) { Error *local_err = NULL; int ret; colo_send_message(f, msg, &local_err); if (local_err) { error_propagate(errp, local_err); return; } qemu_put_be64(f, value); qemu_fflush(f); ret = qemu_file_get_error(f); if (ret < 0) { error_setg_errno(errp, -ret, "Failed to send value for message:%s", COLOMessage_lookup[msg]); } }
/*
 * Save the complete ARM CPU state to the migration stream.
 *
 * Fixed layout, all values big-endian 32-bit unless noted: core regs
 * r0-r15, CPSR, SPSR, banked SPSR/r13/r14 for the six modes, USR/FIQ
 * shadow regs, the cp15 system registers, the feature word, then
 * feature-gated sections (VFP/VFP3, iwMMXt with 64-bit regs, M-profile,
 * ThumbEE).  The load side must consume fields in exactly this order.
 */
void cpu_save(QEMUFile *f, void *opaque)
{
    int i;
    CPUARMState *env = (CPUARMState *)opaque;

    /* Core registers r0-r15. */
    for (i = 0; i < 16; i++) {
        qemu_put_be32(f, env->regs[i]);
    }
    qemu_put_be32(f, cpsr_read(env));
    qemu_put_be32(f, env->spsr);
    /* Banked registers for the six CPU modes. */
    for (i = 0; i < 6; i++) {
        qemu_put_be32(f, env->banked_spsr[i]);
        qemu_put_be32(f, env->banked_r13[i]);
        qemu_put_be32(f, env->banked_r14[i]);
    }
    /* USR and FIQ shadow registers (r8-r12). */
    for (i = 0; i < 5; i++) {
        qemu_put_be32(f, env->usr_regs[i]);
        qemu_put_be32(f, env->fiq_regs[i]);
    }
    /* cp15 system control registers. */
    qemu_put_be32(f, env->cp15.c0_cpuid);
    qemu_put_be32(f, env->cp15.c0_cachetype);
    qemu_put_be32(f, env->cp15.c0_cssel);
    qemu_put_be32(f, env->cp15.c1_sys);
    qemu_put_be32(f, env->cp15.c1_coproc);
    qemu_put_be32(f, env->cp15.c1_xscaleauxcr);
    qemu_put_be32(f, env->cp15.c1_scr);
    qemu_put_be32(f, env->cp15.c2_base0);
    qemu_put_be32(f, env->cp15.c2_base1);
    qemu_put_be32(f, env->cp15.c2_control);
    qemu_put_be32(f, env->cp15.c2_mask);
    qemu_put_be32(f, env->cp15.c2_base_mask);
    qemu_put_be32(f, env->cp15.c2_data);
    qemu_put_be32(f, env->cp15.c2_insn);
    qemu_put_be32(f, env->cp15.c3);
    qemu_put_be32(f, env->cp15.c5_insn);
    qemu_put_be32(f, env->cp15.c5_data);
    for (i = 0; i < 8; i++) {
        qemu_put_be32(f, env->cp15.c6_region[i]);
    }
    qemu_put_be32(f, env->cp15.c6_insn);
    qemu_put_be32(f, env->cp15.c6_data);
    qemu_put_be32(f, env->cp15.c7_par);
    qemu_put_be32(f, env->cp15.c9_insn);
    qemu_put_be32(f, env->cp15.c9_data);
    qemu_put_be32(f, env->cp15.c9_pmcr);
    qemu_put_be32(f, env->cp15.c9_pmcnten);
    qemu_put_be32(f, env->cp15.c9_pmovsr);
    qemu_put_be32(f, env->cp15.c9_pmxevtyper);
    qemu_put_be32(f, env->cp15.c9_pmuserenr);
    qemu_put_be32(f, env->cp15.c9_pminten);
    qemu_put_be32(f, env->cp15.c13_fcse);
    qemu_put_be32(f, env->cp15.c13_context);
    qemu_put_be32(f, env->cp15.c13_tls1);
    qemu_put_be32(f, env->cp15.c13_tls2);
    qemu_put_be32(f, env->cp15.c13_tls3);
    qemu_put_be32(f, env->cp15.c15_cpar);
    qemu_put_be32(f, env->cp15.c15_power_control);
    qemu_put_be32(f, env->cp15.c15_diagnostic);
    qemu_put_be32(f, env->cp15.c15_power_diagnostic);
    qemu_put_be32(f, env->features);

    if (arm_feature(env, ARM_FEATURE_VFP)) {
        /* Doubles d0-d15, each split into upper/lower 32-bit halves. */
        for (i = 0; i < 16; i++) {
            CPU_DoubleU u;
            u.d = env->vfp.regs[i];
            qemu_put_be32(f, u.l.upper);
            qemu_put_be32(f, u.l.lower);
        }
        for (i = 0; i < 16; i++) {
            qemu_put_be32(f, env->vfp.xregs[i]);
        }
        /* TODO: Should use proper FPSCR access functions. */
        qemu_put_be32(f, env->vfp.vec_len);
        qemu_put_be32(f, env->vfp.vec_stride);
        if (arm_feature(env, ARM_FEATURE_VFP3)) {
            /* VFP3 adds d16-d31. */
            for (i = 16; i < 32; i++) {
                CPU_DoubleU u;
                u.d = env->vfp.regs[i];
                qemu_put_be32(f, u.l.upper);
                qemu_put_be32(f, u.l.lower);
            }
        }
    }

    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        for (i = 0; i < 16; i++) {
            qemu_put_be64(f, env->iwmmxt.regs[i]);
        }
        for (i = 0; i < 16; i++) {
            qemu_put_be32(f, env->iwmmxt.cregs[i]);
        }
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        qemu_put_be32(f, env->v7m.other_sp);
        qemu_put_be32(f, env->v7m.vecbase);
        qemu_put_be32(f, env->v7m.basepri);
        qemu_put_be32(f, env->v7m.control);
        qemu_put_be32(f, env->v7m.current_sp);
        qemu_put_be32(f, env->v7m.exception);
    }

    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        qemu_put_be32(f, env->teecr);
        qemu_put_be32(f, env->teehbr);
    }
}
/*
 * Register an I/O adapter with the in-kernel FLIC device.
 * Returns 0 on success (or when the kernel lacks KVM_CAP_IRQ_ROUTING,
 * in which case there is nothing to do), -errno on ioctl failure.
 */
static int kvm_s390_register_io_adapter(S390FLICState *fs, uint32_t id,
                                        uint8_t isc, bool swap,
                                        bool is_maskable)
{
    struct kvm_s390_io_adapter adapter = {
        .id = id,
        .isc = isc,
        .maskable = is_maskable,
        .swap = swap,
    };
    KVMS390FLICState *flic = KVM_S390_FLIC(fs);
    int r;
    struct kvm_device_attr attr = {
        .group = KVM_DEV_FLIC_ADAPTER_REGISTER,
        /* the kernel ABI passes the payload by user-space address */
        .addr = (uint64_t)&adapter,
    };

    if (!kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING)) {
        /* nothing to do */
        return 0;
    }

    r = ioctl(flic->fd, KVM_SET_DEVICE_ATTR, &attr);

    return r ? -errno : 0;
}

/*
 * Map or unmap (per @do_map) guest memory at @map_addr for the adapter
 * identified by @id.  Same return convention as the register helper.
 */
static int kvm_s390_io_adapter_map(S390FLICState *fs, uint32_t id,
                                   uint64_t map_addr, bool do_map)
{
    struct kvm_s390_io_adapter_req req = {
        .id = id,
        .type = do_map ? KVM_S390_IO_ADAPTER_MAP : KVM_S390_IO_ADAPTER_UNMAP,
        .addr = map_addr,
    };
    struct kvm_device_attr attr = {
        .group = KVM_DEV_FLIC_ADAPTER_MODIFY,
        .addr = (uint64_t)&req,
    };
    KVMS390FLICState *flic = KVM_S390_FLIC(fs);
    int r;

    if (!kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING)) {
        /* nothing to do */
        return 0;
    }

    r = ioctl(flic->fd, KVM_SET_DEVICE_ATTR, &attr);

    return r ? -errno : 0;
}

/*
 * Add one irqchip route per requested route, advancing the adapter's
 * indicator offset for each.  On failure, already-added routes are
 * released.  The passed-in adapter struct is restored either way.
 */
static int kvm_s390_add_adapter_routes(S390FLICState *fs,
                                       AdapterRoutes *routes)
{
    int ret, i;
    uint64_t ind_offset = routes->adapter.ind_offset;

    for (i = 0; i < routes->num_routes; i++) {
        ret = kvm_irqchip_add_adapter_route(kvm_state, &routes->adapter);
        if (ret < 0) {
            goto out_undo;
        }
        routes->gsi[i] = ret;
        routes->adapter.ind_offset++;
    }
    kvm_irqchip_commit_routes(kvm_state);

    /* Restore passed-in structure to original state. */
    routes->adapter.ind_offset = ind_offset;
    return 0;
out_undo:
    /* Roll back the routes added so far. */
    while (--i >= 0) {
        kvm_irqchip_release_virq(kvm_state, routes->gsi[i]);
        routes->gsi[i] = -1;
    }
    routes->adapter.ind_offset = ind_offset;
    return ret;
}

/* Release every route previously added; -1 marks a slot as freed. */
static void kvm_s390_release_adapter_routes(S390FLICState *fs,
                                            AdapterRoutes *routes)
{
    int i;

    for (i = 0; i < routes->num_routes; i++) {
        if (routes->gsi[i] >= 0) {
            kvm_irqchip_release_virq(kvm_state, routes->gsi[i]);
            routes->gsi[i] = -1;
        }
    }
}

/**
 * kvm_flic_save - Save pending floating interrupts
 * @f: QEMUFile containing migration state
 * @opaque: pointer to flic device state
 *
 * Note: Pass buf and len to kernel. Start with one page and
 * increase until buffer is sufficient or maximum size is
 * reached
 */
static void kvm_flic_save(QEMUFile *f, void *opaque)
{
    KVMS390FLICState *flic = opaque;
    int len = FLIC_SAVE_INITIAL_SIZE;
    void *buf;
    int count;

    flic_disable_wait_pfault((struct KVMS390FLICState *) opaque);

    buf = g_try_malloc0(len);
    if (!buf) {
        /* Storing FLIC_FAILED into the count field here will cause the
         * target system to fail when attempting to load irqs from the
         * migration state */
        error_report("flic: couldn't allocate memory");
        qemu_put_be64(f, FLIC_FAILED);
        return;
    }

    count = __get_all_irqs(flic, &buf, len);
    if (count < 0) {
        error_report("flic: couldn't retrieve irqs from kernel, rc %d",
                     count);
        /* Storing FLIC_FAILED into the count field here will cause the
         * target system to fail when attempting to load irqs from the
         * migration state */
        qemu_put_be64(f, FLIC_FAILED);
    } else {
        qemu_put_be64(f, count);
        qemu_put_buffer(f, (uint8_t *) buf,
                        count * sizeof(struct kvm_s390_irq));
    }
    g_free(buf);
}
/*
 * Save the x86 CPU state to the migration stream.
 *
 * Order is fixed and must match the loader: GP regs, eip/eflags,
 * hflags, FPU state (with a format tag telling the loader whether
 * 80-bit or 64-bit FP images follow), segment descriptors, control
 * and debug registers, A20 mask, SSE state, 64-bit MSRs (when built
 * for x86_64), SVM state and MTRRs.
 */
void cpu_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;
    uint16_t fptag, fpus, fpuc, fpregs_format;
    uint32_t hflags;
    int32_t a20_mask;
    int i;

    /* General-purpose registers, then instruction pointer and flags. */
    for (i = 0; i < CPU_NB_REGS; i++)
        qemu_put_betls(f, &env->regs[i]);
    qemu_put_betls(f, &env->eip);
    qemu_put_betls(f, &env->eflags);
    hflags = env->hflags; /* XXX: suppress most of the redundant hflags */
    qemu_put_be32s(f, &hflags);

    /* FPU: reconstruct fpus (with top-of-stack folded in) and the
     * one-bit-per-register tag word before writing. */
    fpuc = env->fpuc;
    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 0; i < 8; i++) {
        fptag |= ((!env->fptags[i]) << i);
    }
    qemu_put_be16s(f, &fpuc);
    qemu_put_be16s(f, &fpus);
    qemu_put_be16s(f, &fptag);

    /* Tag the stream with the FP register image format so a host with
     * a different long-double representation can detect the mismatch. */
#ifdef USE_X86LDOUBLE
    fpregs_format = 0;
#else
    fpregs_format = 1;
#endif
    qemu_put_be16s(f, &fpregs_format);

    for (i = 0; i < 8; i++) {
#ifdef USE_X86LDOUBLE
        {
            uint64_t mant;
            uint16_t exp;
            /* we save the real CPU data (in case of MMX usage only 'mant'
               contains the MMX register) */
            cpu_get_fp80(&mant, &exp, env->fpregs[i].d);
            qemu_put_be64(f, mant);
            qemu_put_be16(f, exp);
        }
#else
        /* if we use doubles for float emulation, we save the doubles to
           avoid losing information in case of MMX usage. It can give
           problems if the image is restored on a CPU where long
           doubles are used instead. */
        qemu_put_be64(f, env->fpregs[i].mmx.MMX_Q(0));
#endif
    }

    /* Segment registers and descriptor tables. */
    for (i = 0; i < 6; i++)
        cpu_put_seg(f, &env->segs[i]);
    cpu_put_seg(f, &env->ldt);
    cpu_put_seg(f, &env->tr);
    cpu_put_seg(f, &env->gdt);
    cpu_put_seg(f, &env->idt);

    qemu_put_be32s(f, &env->sysenter_cs);
    qemu_put_betls(f, &env->sysenter_esp);
    qemu_put_betls(f, &env->sysenter_eip);
    qemu_put_betls(f, &env->cr[0]);
    qemu_put_betls(f, &env->cr[2]);
    qemu_put_betls(f, &env->cr[3]);
    qemu_put_betls(f, &env->cr[4]);
    for (i = 0; i < 8; i++)
        qemu_put_betls(f, &env->dr[i]);

    /* MMU */
    a20_mask = (int32_t) env->a20_mask;
    qemu_put_sbe32s(f, &a20_mask);

    /* XMM */
    qemu_put_be32s(f, &env->mxcsr);
    for (i = 0; i < CPU_NB_REGS; i++) {
        qemu_put_be64s(f, &env->xmm_regs[i].XMM_Q(0));
        qemu_put_be64s(f, &env->xmm_regs[i].XMM_Q(1));
    }

#ifdef TARGET_X86_64
    /* 64-bit mode MSRs. */
    qemu_put_be64s(f, &env->efer);
    qemu_put_be64s(f, &env->star);
    qemu_put_be64s(f, &env->lstar);
    qemu_put_be64s(f, &env->cstar);
    qemu_put_be64s(f, &env->fmask);
    qemu_put_be64s(f, &env->kernelgsbase);
#endif

    qemu_put_be32s(f, &env->smbase);
    qemu_put_be64s(f, &env->pat);
    qemu_put_be32s(f, &env->hflags2);

    /* SVM (hardware virtualization) state. */
    qemu_put_be64s(f, &env->vm_hsave);
    qemu_put_be64s(f, &env->vm_vmcb);
    qemu_put_be64s(f, &env->tsc_offset);
    qemu_put_be64s(f, &env->intercept);
    qemu_put_be16s(f, &env->intercept_cr_read);
    qemu_put_be16s(f, &env->intercept_cr_write);
    qemu_put_be16s(f, &env->intercept_dr_read);
    qemu_put_be16s(f, &env->intercept_dr_write);
    qemu_put_be32s(f, &env->intercept_exceptions);
    qemu_put_8s(f, &env->v_tpr);

    /* MTRRs */
    for (i = 0; i < 11; i++)
        qemu_put_be64s(f, &env->mtrr_fixed[i]);
    qemu_put_be64s(f, &env->mtrr_deftype);
    for (i = 0; i < 8; i++) {
        qemu_put_be64s(f, &env->mtrr_var[i].base);
        qemu_put_be64s(f, &env->mtrr_var[i].mask);
    }
}
/*
 * Save the x86 CPU state to the migration stream (KVM-aware variant).
 *
 * When KVM is active, registers and MP state are synced from the
 * kernel first.  The stream layout is the same as the TCG-only
 * version, extended at the end with KVM-related state: the single
 * pending IRQ number (-1 for none), MP state, TSC, and MCE banks.
 */
void cpu_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;
    uint16_t fptag, fpus, fpuc, fpregs_format;
    uint32_t hflags;
    int32_t a20_mask;
    int32_t pending_irq;
    int i, bit;

    /* Pull the authoritative register state out of the kernel before
     * serializing it. */
    if (kvm_enabled()) {
        kvm_save_registers(env);
        kvm_arch_save_mpstate(env);
    }

    /* General-purpose registers, then instruction pointer and flags. */
    for (i = 0; i < CPU_NB_REGS; i++)
        qemu_put_betls(f, &env->regs[i]);
    qemu_put_betls(f, &env->eip);
    qemu_put_betls(f, &env->eflags);
    hflags = env->hflags; /* XXX: suppress most of the redundant hflags */
    qemu_put_be32s(f, &hflags);

    /* FPU: reconstruct fpus (with top-of-stack folded in) and the
     * one-bit-per-register tag word before writing. */
    fpuc = env->fpuc;
    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 0; i < 8; i++) {
        fptag |= ((!env->fptags[i]) << i);
    }
    qemu_put_be16s(f, &fpuc);
    qemu_put_be16s(f, &fpus);
    qemu_put_be16s(f, &fptag);

    /* Tag the stream with the FP register image format so a host with
     * a different long-double representation can detect the mismatch. */
#ifdef USE_X86LDOUBLE
    fpregs_format = 0;
#else
    fpregs_format = 1;
#endif
    qemu_put_be16s(f, &fpregs_format);

    for (i = 0; i < 8; i++) {
#ifdef USE_X86LDOUBLE
        {
            uint64_t mant;
            uint16_t exp;
            /* we save the real CPU data (in case of MMX usage only 'mant'
               contains the MMX register) */
            cpu_get_fp80(&mant, &exp, env->fpregs[i].d);
            qemu_put_be64(f, mant);
            qemu_put_be16(f, exp);
        }
#else
        /* if we use doubles for float emulation, we save the doubles to
           avoid losing information in case of MMX usage. It can give
           problems if the image is restored on a CPU where long
           doubles are used instead. */
        qemu_put_be64(f, env->fpregs[i].mmx.MMX_Q(0));
#endif
    }

    /* Segment registers and descriptor tables. */
    for (i = 0; i < 6; i++)
        cpu_put_seg(f, &env->segs[i]);
    cpu_put_seg(f, &env->ldt);
    cpu_put_seg(f, &env->tr);
    cpu_put_seg(f, &env->gdt);
    cpu_put_seg(f, &env->idt);

    qemu_put_be32s(f, &env->sysenter_cs);
    qemu_put_betls(f, &env->sysenter_esp);
    qemu_put_betls(f, &env->sysenter_eip);
    qemu_put_betls(f, &env->cr[0]);
    qemu_put_betls(f, &env->cr[2]);
    qemu_put_betls(f, &env->cr[3]);
    qemu_put_betls(f, &env->cr[4]);
    for (i = 0; i < 8; i++)
        qemu_put_betls(f, &env->dr[i]);

    /* MMU */
    a20_mask = (int32_t) env->a20_mask;
    qemu_put_sbe32s(f, &a20_mask);

    /* XMM */
    qemu_put_be32s(f, &env->mxcsr);
    for (i = 0; i < CPU_NB_REGS; i++) {
        qemu_put_be64s(f, &env->xmm_regs[i].XMM_Q(0));
        qemu_put_be64s(f, &env->xmm_regs[i].XMM_Q(1));
    }

#ifdef TARGET_X86_64
    /* 64-bit mode MSRs. */
    qemu_put_be64s(f, &env->efer);
    qemu_put_be64s(f, &env->star);
    qemu_put_be64s(f, &env->lstar);
    qemu_put_be64s(f, &env->cstar);
    qemu_put_be64s(f, &env->fmask);
    qemu_put_be64s(f, &env->kernelgsbase);
#endif

    qemu_put_be32s(f, &env->smbase);
    qemu_put_be64s(f, &env->pat);
    qemu_put_be32s(f, &env->hflags2);

    /* SVM (hardware virtualization) state. */
    qemu_put_be64s(f, &env->vm_hsave);
    qemu_put_be64s(f, &env->vm_vmcb);
    qemu_put_be64s(f, &env->tsc_offset);
    qemu_put_be64s(f, &env->intercept);
    qemu_put_be16s(f, &env->intercept_cr_read);
    qemu_put_be16s(f, &env->intercept_cr_write);
    qemu_put_be16s(f, &env->intercept_dr_read);
    qemu_put_be16s(f, &env->intercept_dr_write);
    qemu_put_be32s(f, &env->intercept_exceptions);
    qemu_put_8s(f, &env->v_tpr);

    /* MTRRs */
    for (i = 0; i < 11; i++)
        qemu_put_be64s(f, &env->mtrr_fixed[i]);
    qemu_put_be64s(f, &env->mtrr_deftype);
    for (i = 0; i < 8; i++) {
        qemu_put_be64s(f, &env->mtrr_var[i].base);
        qemu_put_be64s(f, &env->mtrr_var[i].mask);
    }

    /* KVM-related states */

    /* There can only be one pending IRQ set in the bitmap at a time, so
       try to find it and save its number instead (-1 for none). */
    pending_irq = -1;
    for (i = 0; i < ARRAY_SIZE(env->interrupt_bitmap); i++) {
        if (env->interrupt_bitmap[i]) {
            bit = ctz64(env->interrupt_bitmap[i]);
            pending_irq = i * 64 + bit;
            break;
        }
    }
    qemu_put_sbe32s(f, &pending_irq);
    qemu_put_be32s(f, &env->mp_state);
    qemu_put_be64s(f, &env->tsc);

    /* MCE */
    qemu_put_be64s(f, &env->mcg_cap);
    if (env->mcg_cap && !kvm_enabled()) {
        qemu_put_be64s(f, &env->mcg_status);
        qemu_put_be64s(f, &env->mcg_ctl);
        /* Low byte of mcg_cap is the number of MCE banks; each bank is
         * four consecutive 64-bit values. */
        for (i = 0; i < (env->mcg_cap & 0xff); i++) {
            qemu_put_be64s(f, &env->mce_banks[4*i]);
            qemu_put_be64s(f, &env->mce_banks[4*i + 1]);
            qemu_put_be64s(f, &env->mce_banks[4*i + 2]);
            qemu_put_be64s(f, &env->mce_banks[4*i + 3]);
        }
    }
}
/*
 * Save the PowerPC CPU state to the migration stream.
 *
 * Fixed layout: GPRs (plus high halves on 32-bit builds), link/count
 * registers, CR fields, XER/MSR, temporary GPRs, FPRs (raw 64-bit
 * patterns), FPSCR, MMU/BAT/TLB state, all 1024 SPRs, Altivec/SPE
 * status, and exception vectors.  Order must match cpu_load.
 */
void cpu_save(QEMUFile *f, void *opaque)
{
    CPUPPCState *env = (CPUPPCState *)opaque;
    unsigned int i, j;
    uint32_t fpscr;

    /* General-purpose registers. */
    for (i = 0; i < 32; i++)
        qemu_put_betls(f, &env->gpr[i]);
#if !defined(TARGET_PPC64)
    /* High 32-bit halves only exist on 32-bit builds. */
    for (i = 0; i < 32; i++)
        qemu_put_betls(f, &env->gprh[i]);
#endif
    qemu_put_betls(f, &env->lr);
    qemu_put_betls(f, &env->ctr);
    for (i = 0; i < 8; i++)
        qemu_put_be32s(f, &env->crf[i]);
    qemu_put_betls(f, &env->xer);
    qemu_put_betls(f, &env->reserve_addr);
    qemu_put_betls(f, &env->msr);
    for (i = 0; i < 4; i++)
        qemu_put_betls(f, &env->tgpr[i]);

    /* Floating-point registers: write the raw 64-bit bit pattern via a
     * union to avoid any value conversion. */
    for (i = 0; i < 32; i++) {
        union {
            float64 d;
            uint64_t l;
        } u;
        u.d = env->fpr[i];
        qemu_put_be64(f, u.l);
    }
    fpscr = env->fpscr;
    qemu_put_be32s(f, &fpscr);
    qemu_put_sbe32s(f, &env->access_type);

#if defined(TARGET_PPC64)
    qemu_put_betls(f, &env->asr);
    qemu_put_sbe32s(f, &env->slb_nr);
#endif

    /* MMU state: SDR1, segment registers, BATs. */
    qemu_put_betls(f, &env->spr[SPR_SDR1]);
    for (i = 0; i < 32; i++)
        qemu_put_betls(f, &env->sr[i]);
    for (i = 0; i < 2; i++)
        for (j = 0; j < 8; j++)
            qemu_put_betls(f, &env->DBAT[i][j]);
    for (i = 0; i < 2; i++)
        for (j = 0; j < 8; j++)
            qemu_put_betls(f, &env->IBAT[i][j]);

    /* Software TLB geometry and contents. */
    qemu_put_sbe32s(f, &env->nb_tlb);
    qemu_put_sbe32s(f, &env->tlb_per_way);
    qemu_put_sbe32s(f, &env->nb_ways);
    qemu_put_sbe32s(f, &env->last_way);
    qemu_put_sbe32s(f, &env->id_tlbs);
    qemu_put_sbe32s(f, &env->nb_pids);
    if (env->tlb.tlb6) {
        // XXX assumes 6xx
        for (i = 0; i < env->nb_tlb; i++) {
            qemu_put_betls(f, &env->tlb.tlb6[i].pte0);
            qemu_put_betls(f, &env->tlb.tlb6[i].pte1);
            qemu_put_betls(f, &env->tlb.tlb6[i].EPN);
        }
    }
    for (i = 0; i < 4; i++)
        qemu_put_betls(f, &env->pb[i]);

    /* Entire SPR file. */
    for (i = 0; i < 1024; i++)
        qemu_put_betls(f, &env->spr[i]);

    qemu_put_be32s(f, &env->vscr);
    qemu_put_be64s(f, &env->spe_acc);
    qemu_put_be32s(f, &env->spe_fscr);
    qemu_put_betls(f, &env->msr_mask);
    qemu_put_be32s(f, &env->flags);
    qemu_put_sbe32s(f, &env->error_code);
    qemu_put_be32s(f, &env->pending_interrupts);
    qemu_put_be32s(f, &env->irq_input_state);

    /* Exception machinery. */
    for (i = 0; i < POWERPC_EXCP_NB; i++)
        qemu_put_betls(f, &env->excp_vectors[i]);
    qemu_put_betls(f, &env->excp_prefix);
    qemu_put_betls(f, &env->hreset_excp_prefix);
    qemu_put_betls(f, &env->ivor_mask);
    qemu_put_betls(f, &env->ivpr_mask);
    qemu_put_betls(f, &env->hreset_vector);
    qemu_put_betls(f, &env->nip);
    qemu_put_betls(f, &env->hflags);
    qemu_put_betls(f, &env->hflags_nmsr);
    qemu_put_sbe32s(f, &env->mmu_idx);
    /* Trailing zero word — presumably reserved/padding for stream
     * compatibility; TODO confirm against the matching cpu_load. */
    qemu_put_sbe32(f, 0);
}
/* vmstate helper: write the Alpha FPCR, reconstructed from the split
 * internal representation by cpu_alpha_load_fpcr(). */
static void put_fpcr(QEMUFile *f, void *opaque, size_t size)
{
    qemu_put_be64(f, cpu_alpha_load_fpcr((CPUAlphaState *)opaque));
}