static int alloc_magic_pages(struct xc_dom_image *dom)
{
    int rc, i;
    xen_pfn_t p2m[NR_MAGIC_PAGES];

    DOMPRINTF_CALLED(dom->xch);

    /* Place the magic pages directly after the guest's RAM. */
    for ( i = 0; i < NR_MAGIC_PAGES; i++ )
        p2m[i] = dom->rambase_pfn + dom->total_pages + i;

    rc = xc_domain_populate_physmap_exact(
            dom->xch, dom->guest_domid, NR_MAGIC_PAGES, 0, 0, p2m);
    if ( rc < 0 )
        return rc;

    dom->console_pfn = dom->rambase_pfn + dom->total_pages
        + CONSOLE_PFN_OFFSET;
    dom->xenstore_pfn = dom->rambase_pfn + dom->total_pages
        + XENSTORE_PFN_OFFSET;

    xc_clear_domain_page(dom->xch, dom->guest_domid, dom->console_pfn);
    xc_clear_domain_page(dom->xch, dom->guest_domid, dom->xenstore_pfn);
    xc_set_hvm_param(dom->xch, dom->guest_domid, HVM_PARAM_CONSOLE_PFN,
            dom->console_pfn);
    xc_set_hvm_param(dom->xch, dom->guest_domid, HVM_PARAM_STORE_PFN,
            dom->xenstore_pfn);

    /* allocated by toolstack */
    xc_set_hvm_param(dom->xch, dom->guest_domid, HVM_PARAM_CONSOLE_EVTCHN,
            dom->console_evtchn);
    xc_set_hvm_param(dom->xch, dom->guest_domid, HVM_PARAM_STORE_EVTCHN,
            dom->xenstore_evtchn);

    return 0;
}
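/*
 * For illustration only: a minimal sketch of the constants assumed by
 * alloc_magic_pages() above. The real definitions live in the surrounding
 * source and the exact values here are assumptions. The magic region is
 * appended directly after guest RAM, and each offset indexes a page
 * within it.
 */
#define NR_MAGIC_PAGES      2
#define CONSOLE_PFN_OFFSET  0
#define XENSTORE_PFN_OFFSET 1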
void domain_wake_from_s3(struct domain *d)
{
    unsigned long s_state = 0;

    if (!d)
        return;
    if (!d->is_in_s3)
        return;

    if (host_pmop_in_progress()) {
        info("NOT resuming domain %d from S3 - host power operation in progress",
             d->domid);
        return;
    }

    info("Resuming domain %d from S3", d->domid);

    if (xc_handle != NULL) {
        xc_get_hvm_param(xc_handle, d->domid, HVM_PARAM_ACPI_S_STATE,
                         &s_state);
        if (s_state == 3)
            xc_set_hvm_param(xc_handle, d->domid, HVM_PARAM_ACPI_S_STATE, 0);
        d->is_in_s3 = 0;
        d->sstate = 5;
        send_wakeup(d);
    } else {
        error("Failed to open xen control interface");
    }

    /*
     * Waking a PVM from S3 triggers the PVM guest driver to re-initialize
     * the graphics device, so we might as well switch directly to it, at
     * least until we find a way to recover the device once it has been put
     * in S3.
     */
    if (d->is_pvm) {
        switcher_switch(d, 0, 0);
    }
}
static int hvm_build_set_params(xc_interface *xch, int domid,
                                int store_evtchn, unsigned long *store_mfn,
                                int console_evtchn,
                                unsigned long *console_mfn, struct flags f)
{
    struct hvm_info_table *va_hvm;
    uint8_t *va_map, sum;
    int i;

    va_map = xc_map_foreign_range(xch, domid, XC_PAGE_SIZE,
                                  PROT_READ | PROT_WRITE, HVM_INFO_PFN);
    if (va_map == NULL)
        return -1;

    va_hvm = (struct hvm_info_table *)(va_map + HVM_INFO_OFFSET);
    va_hvm->acpi_enabled = f.acpi;
    va_hvm->apic_mode = f.apic;
    va_hvm->nr_vcpus = f.vcpus;
    memset(va_hvm->vcpu_online, 0, sizeof(va_hvm->vcpu_online));
    for (i = 0; i < f.vcpus_current; i++)
        va_hvm->vcpu_online[i / 8] |= 1 << (i % 8);
#if defined(HVM_INFO_TABLE_HAS_S4_ENABLED)
    va_hvm->s4_enabled = f.acpi_s4;
#endif
#if defined(HVM_INFO_TABLE_HAS_S3_ENABLED)
    va_hvm->s3_enabled = f.acpi_s3;
#endif

    /* Recompute the checksum so the byte-sum of the table is zero. */
    va_hvm->checksum = 0;
    for (i = 0, sum = 0; i < va_hvm->length; i++)
        sum += ((uint8_t *)va_hvm)[i];
    va_hvm->checksum = -sum;

    munmap(va_map, XC_PAGE_SIZE);

    xc_get_hvm_param(xch, domid, HVM_PARAM_STORE_PFN, store_mfn);
    xc_set_hvm_param(xch, domid, HVM_PARAM_PAE_ENABLED, f.pae);
#ifdef HVM_PARAM_VIRIDIAN
    xc_set_hvm_param(xch, domid, HVM_PARAM_VIRIDIAN, f.viridian);
#endif
    xc_set_hvm_param(xch, domid, HVM_PARAM_STORE_EVTCHN, store_evtchn);
#ifndef XEN_UNSTABLE
    xc_set_hvm_param(xch, domid, HVM_PARAM_NX_ENABLED, f.nx);
    xc_get_hvm_param(xch, domid, HVM_PARAM_CONSOLE_PFN, console_mfn);
    xc_set_hvm_param(xch, domid, HVM_PARAM_CONSOLE_EVTCHN, console_evtchn);
#endif

    return 0;
}
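/*
 * Sketch (not part of the original source): the checksum convention used in
 * hvm_build_set_params() makes the byte-sum of the whole hvm_info_table
 * equal zero mod 256, so a consumer can validate the table like this.
 */
static int hvm_info_checksum_ok(const struct hvm_info_table *t)
{
    const uint8_t *p = (const uint8_t *)t;
    uint8_t sum = 0;
    uint32_t i;

    for (i = 0; i < t->length; i++)
        sum += p[i];

    return sum == 0;
}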
static void hvm_set_viridian_features(struct flags *f)
{
    uint64_t feature_mask = HVMPV_base_freq;

    xg_info("viridian base\n");

    if (f->viridian_time_ref_count) {
        xg_info("+ time_ref_count\n");
        feature_mask |= HVMPV_time_ref_count;
    }
    if (f->viridian_reference_tsc) {
        xg_info("+ viridian_reference_tsc\n");
        feature_mask |= HVMPV_reference_tsc;
    }

    xc_set_hvm_param(xch, domid, HVM_PARAM_VIRIDIAN, feature_mask);
}
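/*
 * Aside (an assumption based on Xen's public hvm/params.h, not stated in
 * the original source): HVM_PARAM_VIRIDIAN was originally a boolean; newer
 * Xen interprets it as a feature mask, with HVMPV_base_freq at bit 0 so
 * that the legacy value 1 keeps its old meaning.
 */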
static int hvm_build_set_params(int store_evtchn, unsigned long *store_mfn,
                                int console_evtchn,
                                unsigned long *console_mfn, struct flags f)
{
    struct hvm_info_table *va_hvm;
    uint8_t *va_map, sum;
    uint32_t i;
    int rc = 0;

    va_map = xc_map_foreign_range(xch, domid, XC_PAGE_SIZE,
                                  PROT_READ | PROT_WRITE, HVM_INFO_PFN);
    if (va_map == NULL)
        return -1;

    va_hvm = (struct hvm_info_table *)(va_map + HVM_INFO_OFFSET);
    va_hvm->apic_mode = f.apic;
    va_hvm->nr_vcpus = f.vcpus;
    memset(va_hvm->vcpu_online, 0, sizeof(va_hvm->vcpu_online));
    for (i = 0; i < f.vcpus_current; i++)
        va_hvm->vcpu_online[i / 8] |= 1 << (i % 8);

    /* Recompute the checksum so the byte-sum of the table is zero. */
    va_hvm->checksum = 0;
    for (i = 0, sum = 0; i < va_hvm->length; i++)
        sum += ((uint8_t *)va_hvm)[i];
    va_hvm->checksum = -sum;

    munmap(va_map, XC_PAGE_SIZE);

    xc_get_hvm_param(xch, domid, HVM_PARAM_STORE_PFN, store_mfn);
    xc_set_hvm_param(xch, domid, HVM_PARAM_PAE_ENABLED, f.pae);
    if (f.viridian)
        hvm_set_viridian_features(&f);
    xc_set_hvm_param(xch, domid, HVM_PARAM_STORE_EVTCHN, store_evtchn);
    xc_set_hvm_param(xch, domid, HVM_PARAM_HPET_ENABLED, f.hpet);
    xc_set_hvm_param(xch, domid, HVM_PARAM_NESTEDHVM, f.nested_hvm);
    xc_get_hvm_param(xch, domid, HVM_PARAM_CONSOLE_PFN, console_mfn);
    xc_set_hvm_param(xch, domid, HVM_PARAM_CONSOLE_EVTCHN, console_evtchn);
    xc_set_hvm_param(xch, domid, HVM_PARAM_TRIPLE_FAULT_REASON,
                     SHUTDOWN_crash);

#ifdef HAVE_CORES_PER_SOCKET
    if ( f.cores_per_socket > 0 )
        rc = xc_domain_set_cores_per_socket(xch, domid, f.cores_per_socket);
#endif

    return rc;
}
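/*
 * Aside (based on Xen's documented behaviour, not the original source): the
 * default HVM_PARAM_TRIPLE_FAULT_REASON is SHUTDOWN_reboot; setting it to
 * SHUTDOWN_crash above means a triple-faulting guest is reported as crashed
 * rather than silently rebooted, which preserves its state for debugging.
 */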
CAMLprim value stub_xc_domain_restore(value handle, value fd, value domid,
                                      value store_evtchn, value store_domid,
                                      value console_evtchn,
                                      value console_domid, value hvm,
                                      value no_incr_generationid)
{
    CAMLparam5(handle, fd, domid, store_evtchn, store_domid);
    CAMLxparam4(console_evtchn, console_domid, hvm, no_incr_generationid);
    CAMLlocal1(result);
    unsigned long store_mfn, console_mfn;
    domid_t c_store_domid, c_console_domid;
    unsigned long c_vm_generationid_addr;
    char c_vm_generationid_addr_s[32];
    unsigned int c_store_evtchn, c_console_evtchn;
    int r;
    size_t size, written;
    struct flags f;

    get_flags(&f, _D(domid));

    c_store_evtchn = Int_val(store_evtchn);
    c_store_domid = Int_val(store_domid);
    c_console_evtchn = Int_val(console_evtchn);
    c_console_domid = Int_val(console_domid);

#ifdef HVM_PARAM_VIRIDIAN
    xc_set_hvm_param(_H(handle), _D(domid), HVM_PARAM_VIRIDIAN, f.viridian);
#endif
    configure_vcpus(_H(handle), _D(domid), f);

    caml_enter_blocking_section();
    r = xc_domain_restore(_H(handle), Int_val(fd), _D(domid),
                          c_store_evtchn, &store_mfn,
#ifdef XENGUEST_4_2
                          c_store_domid,
#endif
                          c_console_evtchn, &console_mfn,
#ifdef XENGUEST_4_2
                          c_console_domid,
#endif
                          Bool_val(hvm), f.pae, 0 /* superpages */
#ifdef XENGUEST_4_2
                          , Bool_val(no_incr_generationid),
                          &c_vm_generationid_addr,
                          NULL /* restore_callbacks */
#endif
        );
    if (!r) {
        /* Leave room so a NUL terminator is guaranteed at the end. */
        size = sizeof(c_vm_generationid_addr_s) - 1;
        written = snprintf(c_vm_generationid_addr_s, size, "0x%lx",
                           c_vm_generationid_addr);
        if (written < size)
            r = xenstore_puts(_D(domid), c_vm_generationid_addr_s,
                              GENERATION_ID_ADDRESS);
        else {
            syslog(LOG_ERR | LOG_DAEMON, "Failed to write %s (%zu >= %zu)",
                   GENERATION_ID_ADDRESS, written, size);
            r = 1;
        }
    }
    caml_leave_blocking_section();
    if (r)
        failwith_oss_xc(_H(handle), "xc_domain_restore");

    result = caml_alloc_tuple(2);
    Store_field(result, 0, caml_copy_nativeint(store_mfn));
    Store_field(result, 1, caml_copy_nativeint(console_mfn));
    CAMLreturn(result);
}
int stub_xc_domain_restore(int fd, int store_evtchn, int console_evtchn,
                           int hvm, unsigned long *store_mfn,
                           unsigned long *console_mfn)
{
    int r = 0;
    struct flags f;

    get_flags(&f);

    if ( hvm )
    {
        /*
         * We have to do this even in the domain restore case, as XenServers
         * prior to 6.0.2 did not create a viridian save record.
         */
        if (f.viridian)
            hvm_set_viridian_features(&f);

        xc_set_hvm_param(xch, domid, HVM_PARAM_HPET_ENABLED, f.hpet);

#ifdef HAVE_CORES_PER_SOCKET
        if ( f.cores_per_socket > 0 )
            r = xc_domain_set_cores_per_socket(xch, domid,
                                               f.cores_per_socket);
#endif
        if ( r )
            failwith_oss_xc("xc_domain_set_cores_per_socket");
    }

    configure_vcpus(f);

    r = xc_domain_restore(xch, fd, domid, store_evtchn, store_mfn, 0,
                          console_evtchn, console_mfn, 0, hvm, f.pae, 0, 0,
                          NULL);
    if ( r )
        failwith_oss_xc("xc_domain_restore");

    /*
     * The legacy -> migration v2 code in XenServer 6.5 didn't combine the
     * out-of-band HVM_PARAM_PAE_ENABLED into the converted stream, and
     * xenguest didn't set it, as the v2 restore code was expected to.
     *
     * This causes xc_cpuid_apply_policy() to hide the PAE bit from the
     * domain cpuid policy, which went unnoticed (and without incident,
     * despite being a guest-visible change) until Xen-4.5 became stricter
     * with its checks for when a guest writes to %cr4.
     *
     * The correct value is still available out-of-band, so clobber the
     * result from the stream, in case the stream is from XenServer 6.5 and
     * is a VM which hasn't rebooted and has a bad HVM PARAM in the v2
     * stream.
     */
    if ( hvm )
        xc_set_hvm_param(xch, domid, HVM_PARAM_PAE_ENABLED, f.pae);

    r = construct_cpuid_policy(&f, hvm);
    if ( r )
        failwith_oss_xc("construct_cpuid_policy");

    free_flags(&f);

    if ( hvm )
    {
        r = set_genid();
        if (r)
            exit(1);
    }

    return 0;
}
static int xcext_domain_set_timer_mode(xc_interface *xch, unsigned int domid,
                                       int mode)
{
    return xc_set_hvm_param(xch, domid, HVM_PARAM_TIMER_MODE,
                            (unsigned long)mode);
}
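/*
 * Usage sketch (not from the original source): the mode values come from
 * Xen's public hvm/params.h, e.g. HVMPTM_delay_for_missed_ticks or
 * HVMPTM_no_missed_ticks_pending. A hypothetical caller might select a
 * policy like so.
 */
static int example_set_default_timer_mode(xc_interface *xch,
                                          unsigned int domid)
{
    /* Delay virtual time when ticks are missed (a common default). */
    return xcext_domain_set_timer_mode(xch, domid,
                                       HVMPTM_delay_for_missed_ticks);
}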
static int xcext_domain_send_s3resume(xc_interface *xch, unsigned int domid)
{
    return xc_set_hvm_param(xch, domid, HVM_PARAM_ACPI_S_STATE, 0);
}
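/*
 * Illustrative caller (a hypothetical helper, mirroring the pattern in
 * domain_wake_from_s3() above): only kick the domain if it actually
 * reports being in S3.
 */
static int example_maybe_s3resume(xc_interface *xch, unsigned int domid)
{
    unsigned long s_state = 0;

    if (xc_get_hvm_param(xch, domid, HVM_PARAM_ACPI_S_STATE, &s_state))
        return -1;
    if (s_state != 3)
        return 0; /* not suspended to RAM; nothing to do */
    return xcext_domain_send_s3resume(xch, domid);
}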
static int xc_ia64_hvm_recv_context(int xc_handle, int io_fd, uint32_t dom,
                                    unsigned long shared_info_frame,
                                    struct xen_ia64_p2m_table *p2m_table,
                                    unsigned int store_evtchn,
                                    unsigned long *store_mfn,
                                    unsigned int console_evtchn,
                                    unsigned long *console_mfn)
{
    int rc = -1;
    xc_dominfo_t info;
    unsigned int i;

    /* cpumap */
    uint64_t *vcpumap = NULL;

    /* HVM: magic frames for ioreqs and xenstore comms */
    const int hvm_params[] = {
        HVM_PARAM_STORE_PFN,
        HVM_PARAM_IOREQ_PFN,
        HVM_PARAM_BUFIOREQ_PFN,
        HVM_PARAM_BUFPIOREQ_PFN,
    };
    const int NR_PARAMS = sizeof(hvm_params) / sizeof(hvm_params[0]);
    /* store_pfn, ioreq_pfn, bufioreq_pfn, bufpioreq_pfn */
    uint64_t magic_pfns[NR_PARAMS];

    /* HVM: a buffer for holding the HVM context */
    uint64_t rec_size = 0;
    uint8_t *hvm_buf = NULL;

    /* Read shared info. */
    if (xc_ia64_recv_shared_info(xc_handle, io_fd, dom, shared_info_frame,
                                 NULL))
        goto out;

    /* vcpu map */
    if (xc_domain_getinfo(xc_handle, dom, 1, &info) != 1) {
        ERROR("Could not get domain info");
        goto out;
    }
    if (xc_ia64_recv_vcpumap(&info, io_fd, &vcpumap))
        goto out;

    /* vcpu context */
    for (i = 0; i <= info.max_vcpu_id; i++) {
        /* A copy of the CPU context of the guest. */
        vcpu_guest_context_any_t ctxt_any;

        if (!__test_bit(i, vcpumap))
            continue;

        if (xc_ia64_recv_vcpu_context(xc_handle, io_fd, dom, i, &ctxt_any))
            goto out;

        /* The system context of the vcpu is received as HVM context. */
    }

    /* Set HVM-specific parameters */
    if (read_exact(io_fd, magic_pfns, sizeof(magic_pfns))) {
        ERROR("error reading magic page addresses");
        goto out;
    }

    /* These comms pages need to be zeroed at the start of day */
    for (i = 0; i < NR_PARAMS; i++) {
        rc = xc_clear_domain_page(xc_handle, dom, magic_pfns[i]);
        if (rc != 0) {
            ERROR("error zeroing magic pages: %i", rc);
            goto out;
        }
        rc = xc_set_hvm_param(xc_handle, dom, hvm_params[i], magic_pfns[i]);
        if (rc != 0) {
            ERROR("error setting HVM params: %i", rc);
            goto out;
        }
    }
    rc = xc_set_hvm_param(xc_handle, dom,
                          HVM_PARAM_STORE_EVTCHN, store_evtchn);
    if (rc != 0) {
        ERROR("error setting HVM params: %i", rc);
        goto out;
    }
    rc = -1;
    *store_mfn = magic_pfns[0];

    /* Read HVM context */
    if (read_exact(io_fd, &rec_size, sizeof(rec_size))) {
        ERROR("error reading HVM context size");
        goto out;
    }

    hvm_buf = malloc(rec_size);
    if (hvm_buf == NULL) {
        ERROR("memory alloc for hvm context buffer failed");
        errno = ENOMEM;
        goto out;
    }

    if (read_exact(io_fd, hvm_buf, rec_size)) {
        ERROR("error loading the HVM context");
        goto out;
    }

    rc = xc_domain_hvm_setcontext(xc_handle, dom, hvm_buf, rec_size);
    if (rc != 0) {
        ERROR("error setting the HVM context");
        goto out;
    }

    rc = 0;

 out:
    free(vcpumap);
    free(hvm_buf);
    return rc;
}
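/*
 * For reference (derived from the reads in xc_ia64_hvm_recv_context()
 * above, not from separate documentation): after the shared-info page, the
 * incoming stream is consumed in this order:
 *
 *   1. vcpu map                 (xc_ia64_recv_vcpumap)
 *   2. one vcpu context per online vcpu
 *   3. magic_pfns[4]            (store, ioreq, bufioreq, bufpioreq)
 *   4. rec_size                 (uint64_t, HVM context length in bytes)
 *   5. HVM context blob         (rec_size bytes)
 */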