int hvm_save(struct domain *d, hvm_domain_context_t *h)
{
    char *c;
    struct hvm_save_header hdr;
    struct hvm_save_end end;
    hvm_save_handler handler;
    uint16_t i;

    hdr.magic = HVM_FILE_MAGIC;
    hdr.version = HVM_FILE_VERSION;

    /* Save xen changeset */
    c = strrchr(xen_changeset(), ':');
    if ( c )
        hdr.changeset = simple_strtoll(c, NULL, 16);
    else
        hdr.changeset = -1ULL; /* Unknown */

    arch_hvm_save(d, &hdr);

    if ( hvm_save_entry(HEADER, 0, h, &hdr) != 0 )
    {
        gdprintk(XENLOG_ERR, "HVM save: failed to write header\n");
        return -EFAULT;
    }

    /* Save all available kinds of state */
    for ( i = 0; i <= HVM_SAVE_CODE_MAX; i++ )
    {
        handler = hvm_sr_handlers[i].save;
        if ( handler != NULL )
        {
            gdprintk(XENLOG_INFO, "HVM save: %s\n", hvm_sr_handlers[i].name);
            if ( handler(d, h) != 0 )
            {
                gdprintk(XENLOG_ERR,
                         "HVM save: failed to save type %"PRIu16"\n", i);
                return -EFAULT;
            }
        }
    }

    /* Save an end-of-file marker */
    if ( hvm_save_entry(END, 0, h, &end) != 0 )
    {
        /* Run out of data */
        gdprintk(XENLOG_ERR, "HVM save: no room for end marker.\n");
        return -EFAULT;
    }

    /* Save macros should not have let us overrun */
    ASSERT(h->cur <= h->size);
    return 0;
}
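/*
 * The loop above walks hvm_sr_handlers[], which is populated at boot via
 * HVM_REGISTER_SAVE_RESTORE (see the viridian/VMCE registrations further
 * down). A minimal sketch of wiring up the RTC handler from the next
 * snippet, assuming a matching rtc_load() exists:
 *
 *   HVM_REGISTER_SAVE_RESTORE(RTC, rtc_save, rtc_load, 1, HVMSR_PER_DOM);
 */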
/* Save RTC hardware state */
static int rtc_save(struct domain *d, hvm_domain_context_t *h)
{
    RTCState *s = domain_vrtc(d);
    int rc;

    spin_lock(&s->lock);
    rc = hvm_save_entry(RTC, 0, h, &s->hw);
    spin_unlock(&s->lock);

    return rc;
}
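/*
 * A minimal sketch of the matching load side, assuming the usual
 * hvm_load_entry() API; the in-tree rtc_load() additionally re-derives
 * timer state after restoring the register block:
 */
static int rtc_load(struct domain *d, hvm_domain_context_t *h)
{
    RTCState *s = domain_vrtc(d);

    spin_lock(&s->lock);

    /* Restore the hardware register block under the same lock used by
     * rtc_save() above. */
    if ( hvm_load_entry(RTC, h, &s->hw) != 0 )
    {
        spin_unlock(&s->lock);
        return -EINVAL;
    }

    spin_unlock(&s->lock);
    return 0;
}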
static int viridian_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_viridian_context ctxt;

    if ( !is_viridian_domain(d) )
        return 0;

    ctxt.hypercall_gpa = d->arch.hvm_domain.viridian.hypercall_gpa.raw;
    ctxt.guest_os_id = d->arch.hvm_domain.viridian.guest_os_id.raw;

    return (hvm_save_entry(VIRIDIAN, 0, h, &ctxt) != 0);
}
static int viridian_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
{
    struct vcpu *v;

    if ( !is_viridian_domain(d) )
        return 0;

    for_each_vcpu( d, v ) {
        struct hvm_viridian_vcpu_context ctxt;

        ctxt.apic_assist = v->arch.hvm_vcpu.viridian.apic_assist.raw;

        if ( hvm_save_entry(VIRIDIAN_VCPU, v->vcpu_id, h, &ctxt) != 0 )
            return 1;
    }

    return 0;
}
static int vmce_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
{
    struct vcpu *v;
    int err = 0;

    for_each_vcpu( d, v ) {
        struct hvm_vmce_vcpu ctxt = {
            .caps = v->arch.vmce.mcg_cap,
            .mci_ctl2_bank0 = v->arch.vmce.bank[0].mci_ctl2,
            .mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2
        };

        err = hvm_save_entry(VMCE_VCPU, v->vcpu_id, h, &ctxt);
        if ( err )
            break;
    }

    return err;
}

static int vmce_load_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
{
    unsigned int vcpuid = hvm_load_instance(h);
    struct vcpu *v;
    struct hvm_vmce_vcpu ctxt;
    int err;

    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
    {
        dprintk(XENLOG_G_ERR, "HVM restore: dom%d has no vcpu%u\n",
                d->domain_id, vcpuid);
        err = -EINVAL;
    }
    else
        err = hvm_load_entry_zeroextend(VMCE_VCPU, h, &ctxt);

    return err ?: vmce_restore_vcpu(v, &ctxt);
}
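/*
 * For completeness: the pair above is registered as a per-vcpu record
 * type, so the save stream carries one VMCE_VCPU record per vcpu:
 *
 *   HVM_REGISTER_SAVE_RESTORE(VMCE_VCPU, vmce_save_vcpu_ctxt,
 *                             vmce_load_vcpu_ctxt, 1, HVMSR_PER_VCPU);
 */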
static int viridian_save_domain_ctxt(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_viridian_domain_context ctxt = {
        .time_ref_count = d->arch.hvm_domain.viridian.time_ref_count.val,
        .hypercall_gpa = d->arch.hvm_domain.viridian.hypercall_gpa.raw,
        .guest_os_id = d->arch.hvm_domain.viridian.guest_os_id.raw,
        .reference_tsc = d->arch.hvm_domain.viridian.reference_tsc.raw,
    };

    if ( !is_viridian_domain(d) )
        return 0;

    return (hvm_save_entry(VIRIDIAN_DOMAIN, 0, h, &ctxt) != 0);
}

static int viridian_load_domain_ctxt(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_viridian_domain_context ctxt;

    if ( hvm_load_entry_zeroextend(VIRIDIAN_DOMAIN, h, &ctxt) != 0 )
        return -EINVAL;

    d->arch.hvm_domain.viridian.time_ref_count.val = ctxt.time_ref_count;
    d->arch.hvm_domain.viridian.hypercall_gpa.raw = ctxt.hypercall_gpa;
    d->arch.hvm_domain.viridian.guest_os_id.raw = ctxt.guest_os_id;
    d->arch.hvm_domain.viridian.reference_tsc.raw = ctxt.reference_tsc;

    if ( d->arch.hvm_domain.viridian.reference_tsc.fields.enabled )
        update_reference_tsc(d, 0);

    return 0;
}

HVM_REGISTER_SAVE_RESTORE(VIRIDIAN_DOMAIN, viridian_save_domain_ctxt,
                          viridian_load_domain_ctxt, 1, HVMSR_PER_DOM);

static int viridian_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
{
    struct vcpu *v;

    if ( !is_viridian_domain(d) )
        return 0;

    for_each_vcpu( d, v ) {
        struct hvm_viridian_vcpu_context ctxt = {
            .apic_assist_msr = v->arch.hvm_vcpu.viridian.apic_assist.msr.raw,
            .apic_assist_vector =
                v->arch.hvm_vcpu.viridian.apic_assist.vector,
        };

        if ( hvm_save_entry(VIRIDIAN_VCPU, v->vcpu_id, h, &ctxt) != 0 )
            return 1;
    }

    return 0;
}

static int viridian_load_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
{
    int vcpuid;
    struct vcpu *v;
    struct hvm_viridian_vcpu_context ctxt;

    vcpuid = hvm_load_instance(h);
    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
    {
        dprintk(XENLOG_G_ERR, "HVM restore: dom%d has no vcpu%u\n",
                d->domain_id, vcpuid);
        return -EINVAL;
    }

    if ( hvm_load_entry_zeroextend(VIRIDIAN_VCPU, h, &ctxt) != 0 )
        return -EINVAL;

    if ( memcmp(&ctxt._pad, zero_page, sizeof(ctxt._pad)) )
        return -EINVAL;

    v->arch.hvm_vcpu.viridian.apic_assist.msr.raw = ctxt.apic_assist_msr;
    if ( v->arch.hvm_vcpu.viridian.apic_assist.msr.fields.enabled )
        initialize_apic_assist(v);
    v->arch.hvm_vcpu.viridian.apic_assist.vector = ctxt.apic_assist_vector;

    return 0;
}

HVM_REGISTER_SAVE_RESTORE(VIRIDIAN_VCPU, viridian_save_vcpu_ctxt,
                          viridian_load_vcpu_ctxt, 1, HVMSR_PER_VCPU);
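/*
 * Note on the registration macro, as far as the snippets here show: the
 * fourth argument is the number of records of this type contributed per
 * instance (used when sizing the save buffer), and the final flag tells
 * the save machinery whether the handler emits one record for the whole
 * domain (HVMSR_PER_DOM) or one per vcpu (HVMSR_PER_VCPU), which is why
 * the per-vcpu handlers above loop with for_each_vcpu().
 */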