/*
 * Free memory previously handed out by the osenv allocator.
 *
 * Only the OSENV_AUTO_SIZE flag is honoured: auto-sized blocks carry
 * their own size and go through free_flags(); everything else must be
 * released with the caller-supplied size via sfree_flags().
 */
void osenv_mem_free(void *block, osenv_memflags_t flags, oskit_size_t size)
{
	/* All flags other than AUTO_SIZE are intentionally ignored. */
	if (!(flags & OSENV_AUTO_SIZE))
		sfree_flags(block, size);
	else
		free_flags(block);
}
int stub_xc_linux_build(int c_mem_max_mib, int mem_start_mib, const char *image_name, const char *ramdisk_name, const char *cmdline, const char *features, int flags, int store_evtchn, int store_domid, int console_evtchn, int console_domid, unsigned long *store_mfn, unsigned long *console_mfn, char *protocol) { int r; struct xc_dom_image *dom; struct flags f; get_flags(&f); xc_dom_loginit(xch); dom = xc_dom_allocate(xch, cmdline, features); if (!dom) failwith_oss_xc("xc_dom_allocate"); /* The default image size limits are too large. */ xc_dom_kernel_max_size(dom, get_image_max_size("kernel")); xc_dom_ramdisk_max_size(dom, get_image_max_size("ramdisk")); configure_vcpus(f); configure_tsc(f); r = xc_dom_linux_build(xch, dom, domid, mem_start_mib, image_name, ramdisk_name, flags, store_evtchn, store_mfn, console_evtchn, console_mfn); if ( r ) failwith_oss_xc("xc_dom_linux_build"); r = construct_cpuid_policy(&f, false); if ( r ) failwith_oss_xc("construct_cpuid_policy"); r = xc_dom_gnttab_seed(xch, domid, *console_mfn, *store_mfn, console_domid, store_domid); if ( r ) failwith_oss_xc("xc_dom_gnttab_seed"); strncpy(protocol, xc_domain_get_native_protocol(xch, domid), 64); free_flags(&f); xc_dom_release(dom); return 0; }
/*
 * Restore a domain's memory image from the stream on fd and reconnect its
 * xenstore/console rings.
 *
 * hvm selects the HVM restore path, which additionally re-applies several
 * out-of-band HVM params (viridian, HPET, PAE) that older streams did not
 * carry correctly.  Returns 0 on success; libxc failures are reported via
 * failwith_oss_xc(), which does not return here.
 */
int stub_xc_domain_restore(int fd, int store_evtchn, int console_evtchn,
                           int hvm,
                           unsigned long *store_mfn, unsigned long *console_mfn)
{
    int r = 0;
    struct flags f;

    get_flags(&f);

    if ( hvm )
    {
        /*
         * We have to do this even in the domain restore case as XenServers
         * prior to 6.0.2 did not create a viridian save record.
         */
        if (f.viridian)
            hvm_set_viridian_features(&f);

        xc_set_hvm_param(xch, domid, HVM_PARAM_HPET_ENABLED, f.hpet);
#ifdef HAVE_CORES_PER_SOCKET
        if ( f.cores_per_socket > 0 )
            r = xc_domain_set_cores_per_socket(xch, domid,
                                               f.cores_per_socket);
#endif
        /* r is still 0 when the ifdef'd call above was compiled out. */
        if ( r )
            failwith_oss_xc("xc_domain_set_cores_per_socket");
    }

    configure_vcpus(f);

    r = xc_domain_restore(xch, fd, domid, store_evtchn, store_mfn, 0,
                          console_evtchn, console_mfn, 0, hvm, f.pae,
                          0, 0, NULL);
    if ( r )
        failwith_oss_xc("xc_domain_restore");

    /*
     * The legacy -> migration v2 code in XenServer 6.5 didn't combine the
     * out-of-band HVM_PARAM_PAE_ENABLED into the converted stream, and
     * xenguest didn't set it, as the v2 restore code was expected to.
     *
     * This causes xc_cpuid_apply_policy() to hide the PAE bit from the
     * domain cpuid policy, which went unnoticed (and without incident,
     * despite being a guest-visible change) until Xen-4.5 became stricter
     * with its checks for when a guest writes to %cr4.
     *
     * The correct value is still available out-of-band, so clobber the
     * result from the stream, in case the stream is from XenServer 6.5 and
     * is a VM which hasn't rebooted and has a bad HVM PARAM in the v2
     * stream.
     */
    if ( hvm )
        xc_set_hvm_param(xch, domid, HVM_PARAM_PAE_ENABLED, f.pae);

    /* Must run after the PAE fixup above so the policy sees the true value. */
    r = construct_cpuid_policy(&f, hvm);
    if ( r )
        failwith_oss_xc("construct_cpuid_policy");

    free_flags(&f);

    if ( hvm )
    {
        /* NOTE(review): presumably regenerates the VM generation ID after
         * migration; set_genid() is defined elsewhere — confirm. */
        r = set_genid();
        if (r)
            exit(1);
    }

    return 0;
}
int stub_xc_hvm_build_with_mem(uint64_t max_mem_mib, uint64_t max_start_mib, const char *image) { uint64_t lowmem_end, highmem_start, highmem_end, mmio_start; struct xc_hvm_build_args args = { .mem_size = max_mem_mib << 20, .mem_target = max_start_mib << 20, .mmio_size = HVM_BELOW_4G_MMIO_LENGTH, .image_file_name = image, }; unsigned int i, j, nr = 0; struct e820entry *e820; int rc; unsigned int nr_rdm_entries[MAX_RMRR_DEVICES] = {0}; unsigned int nr_rmrr_devs = 0; struct xen_reserved_device_memory *xrdm[MAX_RMRR_DEVICES] = {0}; unsigned long rmrr_overlapped_ram = 0; char *s; if ( pci_passthrough_sbdf_list ) { s = strtok(pci_passthrough_sbdf_list,","); while ( s != NULL ) { unsigned int seg, bus, device, func; xg_info("Getting RMRRs for device '%s'\n",s); if ( parse_pci_sbdf(s, &seg, &bus, &device, &func) ) { if ( !get_rdm(seg, bus, (device << 3) + func, &nr_rdm_entries[nr_rmrr_devs], &xrdm[nr_rmrr_devs]) ) nr_rmrr_devs++; } if ( nr_rmrr_devs == MAX_RMRR_DEVICES ) { xg_err("Error: hit limit of %d RMRR devices for domain\n", MAX_RMRR_DEVICES); exit(1); } s = strtok (NULL, ","); } } e820 = malloc(sizeof(*e820) * E820MAX); if (!e820) return -ENOMEM; lowmem_end = args.mem_size; highmem_end = highmem_start = 1ull << 32; mmio_start = highmem_start - args.mmio_size; if ( lowmem_end > mmio_start ) { highmem_end = (1ull << 32) + (lowmem_end - mmio_start); lowmem_end = mmio_start; } args.lowmem_end = lowmem_end; args.highmem_end = highmem_end; args.mmio_start = mmio_start; /* Leave low 1MB to HVMLoader... 
*/ e820[nr].addr = 0x100000u; e820[nr].size = args.lowmem_end - 0x100000u; e820[nr].type = E820_RAM; nr++; /* RDM mapping */ for (i = 0; i < nr_rmrr_devs; i++) { for (j = 0; j < nr_rdm_entries[i]; j++) { e820[nr].addr = xrdm[i][j].start_pfn << XC_PAGE_SHIFT; e820[nr].size = xrdm[i][j].nr_pages << XC_PAGE_SHIFT; e820[nr].type = E820_RESERVED; xg_info("Adding RMRR 0x%lx size 0x%lx\n", e820[nr].addr, e820[nr].size); if ( e820[nr].addr < args.lowmem_end ) { rmrr_overlapped_ram += ( args.lowmem_end - e820[nr].addr ); args.lowmem_end = e820[nr].addr; } nr++; } } e820[0].size -= rmrr_overlapped_ram; args.highmem_end += rmrr_overlapped_ram; args.mmio_size += rmrr_overlapped_ram; args.mmio_start -= rmrr_overlapped_ram; for (i = 0; i < nr_rmrr_devs; i++) { free(xrdm[i]); } if ( args.highmem_end > highmem_start ) { e820[nr].addr = highmem_start; e820[nr].size = args.highmem_end - e820[nr].addr; e820[nr].type = E820_RAM; nr++; } rc = xc_hvm_build(xch, domid, &args); if (!rc) rc = xc_domain_set_memory_map(xch, domid, e820, nr); free(e820); return rc; } int stub_xc_hvm_build(int mem_max_mib, int mem_start_mib, const char *image_name, int store_evtchn, int store_domid, int console_evtchn, int console_domid, unsigned long *store_mfn, unsigned long *console_mfn) { int r; struct flags f; get_flags(&f); configure_vcpus(f); configure_tsc(f); r = stub_xc_hvm_build_with_mem(mem_max_mib, mem_start_mib, image_name); if ( r ) failwith_oss_xc("hvm_build"); r = hvm_build_set_params(store_evtchn, store_mfn, console_evtchn, console_mfn, f); if ( r ) failwith_oss_xc("hvm_build_params"); r = construct_cpuid_policy(&f, true); if ( r ) failwith_oss_xc("construct_cpuid_policy"); r = xc_dom_gnttab_hvm_seed(xch, domid, *console_mfn, *store_mfn, console_domid, store_domid); if ( r ) failwith_oss_xc("xc_dom_gnttab_hvm_seed"); free_flags(&f); return 0; }
/*
 * Override of libc-style free(): chunks handed out by this allocator must
 * be released through the flags-aware deallocator, so route every free()
 * call to free_flags().
 */
void free(void *chunk)
{
    free_flags(chunk);
}