void domain_wake_from_s3(struct domain *d) { unsigned long s_state = 0; int handle; if (!d) return; if (!d->is_in_s3) return; if (host_pmop_in_progress()) { info("NOT resuming domain %d from S3 - host power operation in progress"); return; } info("Resuming domain %d from S3", d->domid); if (xc_handle != NULL) { xc_get_hvm_param(xc_handle, d->domid, HVM_PARAM_ACPI_S_STATE, &s_state); if (s_state == 3) xc_set_hvm_param(xc_handle, d->domid, HVM_PARAM_ACPI_S_STATE, 0); d->is_in_s3 = 0; d->sstate = 5; send_wakeup(d); } else { error("Failed to open xen control interface"); } // Waking up a PVM from S3 will trigger the PVM guest driver to re-initialize // the graphic device. Therefore, we might as well switch directly to it since // it is displayable until we find a way to recover the device once put in S3. if (d->is_pvm) { switcher_switch(d, 0, 0); } }
/*
 * Program the HVM info table and HVM parameters for a freshly built domain.
 *
 * Maps the guest page holding the hvm_info_table (HVM_INFO_PFN), fills in
 * ACPI/APIC enables, vCPU count and the vcpu_online bitmap (one bit per
 * vCPU, LSB-first within each byte), recomputes the table's 8-bit
 * two's-complement checksum, then unmaps the page and programs the
 * store/console PFNs and event channels plus feature parameters.
 *
 * Returns 0 on success, -1 if the HVM info page cannot be mapped.
 *
 * NOTE(review): return values of the xc_get/set_hvm_param calls are not
 * checked (pre-existing behavior).  Also note f.vcpus_current is compared
 * against the signed loop index 'i' — presumably always small enough that
 * this is safe; confirm the struct flags field types.
 */
static int hvm_build_set_params(xc_interface *xch, int domid,
                                int store_evtchn, unsigned long *store_mfn,
                                int console_evtchn, unsigned long *console_mfn,
                                struct flags f)
{
    struct hvm_info_table *va_hvm;
    uint8_t *va_map, sum;
    int i;

    /* Map the single guest page containing the HVM info table. */
    va_map = xc_map_foreign_range(xch, domid, XC_PAGE_SIZE,
                                  PROT_READ | PROT_WRITE, HVM_INFO_PFN);
    if (va_map == NULL)
        return -1;

    va_hvm = (struct hvm_info_table *)(va_map + HVM_INFO_OFFSET);
    va_hvm->acpi_enabled = f.acpi;
    va_hvm->apic_mode = f.apic;
    va_hvm->nr_vcpus = f.vcpus;

    /* Rebuild the online-vCPU bitmap from scratch. */
    memset(va_hvm->vcpu_online, 0, sizeof(va_hvm->vcpu_online));
    for (i = 0; i < f.vcpus_current; i++)
        va_hvm->vcpu_online[i/8] |= 1 << (i % 8);

    /* S3/S4 enables exist only on Xen versions whose hvm_info_table has
     * the corresponding fields. */
#if defined(HVM_INFO_TABLE_HAS_S4_ENABLED)
    va_hvm->s4_enabled = f.acpi_s4;
#endif
#if defined(HVM_INFO_TABLE_HAS_S3_ENABLED)
    va_hvm->s3_enabled = f.acpi_s3;
#endif

    /* Checksum over the whole table must sum to zero; zero the field first
     * so it does not contribute to its own sum. */
    va_hvm->checksum = 0;
    for (i = 0, sum = 0; i < va_hvm->length; i++)
        sum += ((uint8_t *) va_hvm)[i];
    va_hvm->checksum = -sum;

    munmap(va_map, XC_PAGE_SIZE);

    /* Report the store PFN back to the caller, then program features and
     * event channels. */
    xc_get_hvm_param(xch, domid, HVM_PARAM_STORE_PFN, store_mfn);
    xc_set_hvm_param(xch, domid, HVM_PARAM_PAE_ENABLED, f.pae);
#ifdef HVM_PARAM_VIRIDIAN
    xc_set_hvm_param(xch, domid, HVM_PARAM_VIRIDIAN, f.viridian);
#endif
    xc_set_hvm_param(xch, domid, HVM_PARAM_STORE_EVTCHN, store_evtchn);
#ifndef XEN_UNSTABLE
    xc_set_hvm_param(xch, domid, HVM_PARAM_NX_ENABLED, f.nx);
    xc_get_hvm_param(xch, domid, HVM_PARAM_CONSOLE_PFN, console_mfn);
    xc_set_hvm_param(xch, domid, HVM_PARAM_CONSOLE_EVTCHN, console_evtchn);
#endif

    return 0;
}
/*
 * Program the HVM info table and HVM parameters for a freshly built domain.
 *
 * Variant that uses the file-scope 'xch' handle and 'domid' (neither is a
 * parameter here, so both must be defined elsewhere in this file).  Fills
 * in APIC mode, vCPU count and the vcpu_online bitmap (one bit per vCPU,
 * LSB-first within each byte), recomputes the table's 8-bit
 * two's-complement checksum, then programs store/console PFNs, event
 * channels and feature parameters.
 *
 * Returns -1 if the HVM info page cannot be mapped; otherwise 0, or the
 * result of xc_domain_set_cores_per_socket when that support is compiled in.
 *
 * NOTE(review): return values of the individual xc_get/set_hvm_param calls
 * are not checked (pre-existing behavior).
 */
static int hvm_build_set_params(int store_evtchn, unsigned long *store_mfn,
                                int console_evtchn, unsigned long *console_mfn,
                                struct flags f)
{
    struct hvm_info_table *va_hvm;
    uint8_t *va_map, sum;
    uint32_t i;
    int rc = 0;

    /* Map the single guest page containing the HVM info table. */
    va_map = xc_map_foreign_range(xch, domid, XC_PAGE_SIZE,
                                  PROT_READ | PROT_WRITE, HVM_INFO_PFN);
    if (va_map == NULL)
        return -1;

    va_hvm = (struct hvm_info_table *)(va_map + HVM_INFO_OFFSET);
    va_hvm->apic_mode = f.apic;
    va_hvm->nr_vcpus = f.vcpus;

    /* Rebuild the online-vCPU bitmap from scratch. */
    memset(va_hvm->vcpu_online, 0, sizeof(va_hvm->vcpu_online));
    for (i = 0; i < f.vcpus_current; i++)
        va_hvm->vcpu_online[i/8] |= 1 << (i % 8);

    /* Checksum over the whole table must sum to zero; zero the field first
     * so it does not contribute to its own sum. */
    va_hvm->checksum = 0;
    for (i = 0, sum = 0; i < va_hvm->length; i++)
        sum += ((uint8_t *) va_hvm)[i];
    va_hvm->checksum = -sum;

    munmap(va_map, XC_PAGE_SIZE);

    /* Report the store PFN back to the caller, then program features and
     * event channels. */
    xc_get_hvm_param(xch, domid, HVM_PARAM_STORE_PFN, store_mfn);
    xc_set_hvm_param(xch, domid, HVM_PARAM_PAE_ENABLED, f.pae);
    if (f.viridian)
        hvm_set_viridian_features(&f);
    xc_set_hvm_param(xch, domid, HVM_PARAM_STORE_EVTCHN, store_evtchn);
    xc_set_hvm_param(xch, domid, HVM_PARAM_HPET_ENABLED, f.hpet);
    xc_set_hvm_param(xch, domid, HVM_PARAM_NESTEDHVM, f.nested_hvm);
    xc_get_hvm_param(xch, domid, HVM_PARAM_CONSOLE_PFN, console_mfn);
    xc_set_hvm_param(xch, domid, HVM_PARAM_CONSOLE_EVTCHN, console_evtchn);

    /* Turn a guest triple fault into a crash instead of a reboot. */
    xc_set_hvm_param(xch, domid, HVM_PARAM_TRIPLE_FAULT_REASON, SHUTDOWN_crash);

#ifdef HAVE_CORES_PER_SOCKET
    if ( f.cores_per_socket > 0 )
        rc = xc_domain_set_cores_per_socket(xch, domid, f.cores_per_socket);
#endif

    return rc;
}
static int set_genid(void) { uint64_t paddr = 0; void *vaddr; char *genid_val_str; char *end; uint64_t genid[2]; int rc = -1; xc_get_hvm_param(xch, domid, HVM_PARAM_VM_GENERATION_ID_ADDR, &paddr); if (paddr == 0) return 0; genid_val_str = xenstore_gets("platform/generation-id"); if ( !genid_val_str ) return 0; errno = 0; genid[0] = strtoull(genid_val_str, &end, 0); genid[1] = 0; if ( end && end[0] == ':' ) genid[1] = strtoull(end+1, NULL, 0); if ( errno ) { xg_err("strtoull of '%s' failed: %s\n", genid_val_str, strerror(errno)); goto out; } else if ( genid[0] == 0 || genid[1] == 0 ) { xg_err("'%s' is not a valid generation id\n", genid_val_str); goto out; } vaddr = xc_map_foreign_range(xch, domid, XC_PAGE_SIZE, PROT_READ | PROT_WRITE, paddr >> XC_PAGE_SHIFT); if (vaddr == NULL) { xg_err("Failed to map VM generation ID page: %s\n", strerror(errno)); goto out; } memcpy(vaddr + (paddr & ~XC_PAGE_MASK), genid, 2 * sizeof(*genid)); munmap(vaddr, XC_PAGE_SIZE); /* * FIXME: Inject ACPI Notify event. */ xg_info("Wrote generation ID %"PRId64":%"PRId64" at 0x%"PRIx64"\n", genid[0], genid[1], paddr); rc = 0; out: free(genid_val_str); return rc; }
/*
 * OCaml FFI stub: return the ACPI S-state of a domain.
 *
 * Reads HVM_PARAM_ACPI_S_STATE for the given domid through the xenctrl
 * handle and returns it to OCaml as an int.  On a non-zero xc return,
 * calls failwith_xc (presumably raises an OCaml exception and does not
 * return — confirm against the binding's helpers).
 */
CAMLprim value stub_xenctrlext_domain_get_acpi_s_state(value xch, value domid)
{
    CAMLparam2(xch, domid);
    unsigned long v;
    int ret;

    /* _H/_D unwrap the OCaml handle and domid values (defined elsewhere). */
    ret = xc_get_hvm_param(_H(xch), _D(domid), HVM_PARAM_ACPI_S_STATE, &v);
    if (ret != 0)
        failwith_xc(_H(xch));

    CAMLreturn(Val_int(v));
}
/*
 * Set vCPU0's EAX to 1 in the saved guest context.
 *
 * Used on resume so the guest's suspend call returns 1 instead of 0
 * (NOTE(review): presumably signalling "cancelled/resumed" per the Xen
 * suspend protocol — confirm against the hypercall ABI).
 *
 * For HVM guests without PV drivers (no callback IRQ set) there is no
 * suspend return code to modify, so this is a successful no-op.  The
 * guest address width must be established first (host width for HVM,
 * probed for PV) because SET_FIELD's layout depends on it via 'dinfo'.
 *
 * Returns 0 on success, -1 or the failing xc_* return code on error.
 */
static int modify_returncode(int xc_handle, uint32_t domid)
{
    vcpu_guest_context_any_t ctxt;
    xc_dominfo_t info;
    xen_capabilities_info_t caps;
    struct domain_info_context _dinfo = {};
    struct domain_info_context *dinfo = &_dinfo;
    int rc;

    if ( xc_domain_getinfo(xc_handle, domid, 1, &info) != 1 )
    {
        PERROR("Could not get domain info");
        return -1;
    }

    if ( info.hvm )
    {
        /* HVM guests without PV drivers have no return code to modify. */
        unsigned long irq = 0;
        xc_get_hvm_param(xc_handle, domid, HVM_PARAM_CALLBACK_IRQ, &irq);
        if ( !irq )
            return 0;

        /* HVM guests have host address width. */
        if ( xc_version(xc_handle, XENVER_capabilities, &caps) != 0 )
        {
            PERROR("Could not get Xen capabilities\n");
            return -1;
        }
        dinfo->guest_width = strstr(caps, "x86_64") ? 8 : 4;
    }
    else
    {
        /* Probe PV guest address width. */
        dinfo->guest_width = pv_guest_width(xc_handle, domid);
        if ( dinfo->guest_width < 0 )
            return -1;
    }

    /* Rewrite EAX in vCPU0's context and push it back. */
    if ( (rc = xc_vcpu_getcontext(xc_handle, domid, 0, &ctxt)) != 0 )
        return rc;
    SET_FIELD(&ctxt, user_regs.eax, 1);
    if ( (rc = xc_vcpu_setcontext(xc_handle, domid, 0, &ctxt)) != 0 )
        return rc;

    return 0;
}
/*
 * Open a checkpoint session to guest 'domid'.
 *
 * Acquires the xenctrl, xenstore and event-channel handles, classifies
 * the domain (PV, PV-on-HVM, or plain HVM by whether a callback IRQ is
 * set) and installs the shutdown watch.  For PV guests a fast suspend
 * event channel is attempted, falling back to xenstore signalling.
 *
 * Returns 0 on success; -1 on failure with s->errstr describing the
 * problem (already-acquired handles are released via checkpoint_close).
 * PV-on-HVM guests are rejected.
 */
int checkpoint_open(checkpoint_state* s, unsigned int domid)
{
    xc_dominfo_t dominfo;
    unsigned long callback_irq;

    s->domid = domid;

    s->xch = xc_interface_open();
    if (s->xch < 0) {
        s->errstr = "could not open control interface (are you root?)";
        return -1;
    }

    s->xsh = xs_daemon_open();
    if (!s->xsh) {
        checkpoint_close(s);
        s->errstr = "could not open xenstore handle";
        return -1;
    }

    s->xce = xc_evtchn_open();
    if (s->xce < 0) {
        checkpoint_close(s);
        s->errstr = "could not open event channel handle";
        return -1;
    }

    if (xc_domain_getinfo(s->xch, s->domid, 1, &dominfo) < 0) {
        checkpoint_close(s);
        s->errstr = "could not get domain info";
        return -1;
    }

    if (!dominfo.hvm) {
        s->domtype = dt_pv;
    } else {
        /* A callback IRQ means PV drivers are present: PV-on-HVM. */
        if (xc_get_hvm_param(s->xch, s->domid, HVM_PARAM_CALLBACK_IRQ,
                             &callback_irq)) {
            checkpoint_close(s);
            s->errstr = "could not get HVM callback IRQ";
            return -1;
        }
        s->domtype = callback_irq ? dt_pvhvm : dt_hvm;
    }

    if (setup_shutdown_watch(s) < 0) {
        checkpoint_close(s);
        return -1;
    }

    switch (s->domtype) {
    case dt_pv:
        /* Best-effort: a missing suspend channel is only a slowdown. */
        if (setup_suspend_evtchn(s) < 0) {
            fprintf(stderr, "WARNING: suspend event channel unavailable, "
                    "falling back to slow xenstore signalling\n");
        }
        break;
    case dt_pvhvm:
        checkpoint_close(s);
        s->errstr = "PV-on-HVM is unsupported";
        return -1;
    default:
        break;
    }

    return 0;
}