/*
 * Build the guest-visible EFI-style memory map for an ia64 domain and
 * advertise its location through the boot parameter area.
 *
 * The memmap page is placed immediately below the start_info page and
 * filled with a single EFI_CONVENTIONAL_MEMORY descriptor covering
 * [0, total_pages).  The memmap page's pfn and page count are then
 * stored into the xen_ia64_boot_param structure that lives just after
 * the start_info_t header in the start_info page.
 *
 * Returns 0 on success, -1 if either foreign mapping fails.
 */
static int ia64_setup_memmap(struct xc_dom_image *dom) {
    unsigned int page_size = XC_DOM_PAGE_SIZE(dom);
    unsigned long memmap_info_num_pages;
    unsigned long memmap_info_pfn;
    xen_ia64_memmap_info_t* memmap_info;
    unsigned int num_mds;
    efi_memory_desc_t *md;
    char* start_info;
    struct xen_ia64_boot_param* bp;

    /* setup memmap page */
    memmap_info_num_pages = 1;
    /* the memmap page sits directly below the start_info page */
    memmap_info_pfn = dom->start_info_pfn - 1;
    xc_dom_printf("%s: memmap: mfn 0x%" PRIpfn " pages 0x%lx\n",
                  __FUNCTION__, memmap_info_pfn, memmap_info_num_pages);
    memmap_info = xc_map_foreign_range(dom->guest_xc, dom->guest_domid,
                                       page_size * memmap_info_num_pages,
                                       PROT_READ | PROT_WRITE,
                                       memmap_info_pfn);
    if (NULL == memmap_info)
        return -1;
    /* [0, total_pages) */
    memmap_info->efi_memdesc_size = sizeof(md[0]);
    memmap_info->efi_memdesc_version = EFI_MEMORY_DESCRIPTOR_VERSION;
    num_mds = 0;
    md = (efi_memory_desc_t*)&memmap_info->memdesc;
    /* single descriptor: all of guest RAM, write-back conventional memory */
    md[num_mds].type = EFI_CONVENTIONAL_MEMORY;
    md[num_mds].pad = 0;
    md[num_mds].phys_addr = 0;
    md[num_mds].virt_addr = 0;
    /* convert Xen page count into EFI (4k) page count */
    md[num_mds].num_pages = dom->total_pages << (PAGE_SHIFT - EFI_PAGE_SHIFT);
    md[num_mds].attribute = EFI_MEMORY_WB;
    num_mds++;
    memmap_info->efi_memmap_size = num_mds * sizeof(md[0]);
    munmap(memmap_info, page_size * memmap_info_num_pages);
    /*
     * Sanity check: all descriptors must fit inside the mapped page(s).
     * NOTE(review): this fires only after the writes above have already
     * happened; with num_mds == 1 it can never trip, but a future author
     * adding descriptors should hoist this check before the writes.
     */
    assert(num_mds <=
           (page_size * memmap_info_num_pages -
            offsetof(typeof(*memmap_info), memdesc))/sizeof(*md));

    /*
     * kludge: we need to pass memmap_info page's pfn and other magic pages
     * somehow.
     * we use xen_ia64_boot_param::efi_memmap::{efi_memmap, efi_memmap_size}
     * for this purpose
     */
    start_info = xc_map_foreign_range(dom->guest_xc, dom->guest_domid,
                                      page_size,
                                      PROT_READ | PROT_WRITE,
                                      dom->start_info_pfn);
    if (NULL == start_info)
        return -1;
    /* boot params live immediately after the start_info_t header */
    bp = (struct xen_ia64_boot_param*)(start_info + sizeof(start_info_t));
    memset(bp, 0, sizeof(*bp));
    XEN_IA64_MEMMAP_INFO_NUM_PAGES(bp) = memmap_info_num_pages;
    XEN_IA64_MEMMAP_INFO_PFN(bp) = memmap_info_pfn;
    munmap(start_info, page_size);
    return 0;
}
/*
 * Carve out a new guest segment called "name".
 *
 * If "start" is non-zero, pad the allocation cursor up to that virtual
 * address first; otherwise the segment begins at the current end of the
 * allocated region.  The segment is rounded up to whole pages, accounted
 * against the domain's page budget, then mapped and zero-filled.
 *
 * Returns 0 on success, -1 on padding/budget/mapping failure.
 */
int xc_dom_alloc_segment(struct xc_dom_image *dom, struct xc_dom_seg *seg,
                         char *name, xen_vaddr_t start, xen_vaddr_t size)
{
    unsigned int page_size = XC_DOM_PAGE_SIZE(dom);
    xen_pfn_t nr_pages;
    void *map;

    /* honour an explicit start address by padding up to it */
    if ( start && xc_dom_alloc_pad(dom, start) )
        return -1;

    nr_pages = (size + page_size - 1) / page_size;
    start = dom->virt_alloc_end;

    seg->pfn = dom->pfn_alloc_end;
    seg->pages = nr_pages;

    /* account the pages before touching them */
    if ( xc_dom_chk_alloc_pages(dom, name, nr_pages) )
        return -1;

    /* map the new segment and zero-fill it */
    map = xc_dom_seg_to_ptr(dom, seg);
    if ( map == NULL )
        return -1;
    memset(map, 0, nr_pages * page_size);

    seg->vstart = start;
    seg->vend = dom->virt_alloc_end;

    DOMPRINTF("%-20s: %-12s : 0x%" PRIx64 " -> 0x%" PRIx64
              " (pfn 0x%" PRIpfn " + 0x%" PRIpfn " pages)",
              __FUNCTION__, name, seg->vstart, seg->vend,
              seg->pfn, nr_pages);

    return 0;
}
/*
 * Account "pages" newly requested guest pages for segment "name".
 *
 * Rejects the request (with a panic message) if it would exceed the
 * domain's total_pages budget; on success advances pfn_alloc_end and
 * virt_alloc_end and invokes the optional per-domain allocate hook.
 *
 * Returns 0 on success, -1 when the budget would be exceeded.
 */
static int xc_dom_chk_alloc_pages(struct xc_dom_image *dom, char *name,
                                  xen_pfn_t pages)
{
    unsigned int page_size = XC_DOM_PAGE_SIZE(dom);
    xen_pfn_t allocated = dom->pfn_alloc_end - dom->rambase_pfn;

    /* three separate comparisons so no sum can wrap around */
    if ( pages > dom->total_pages ||
         allocated > dom->total_pages ||
         pages > dom->total_pages - allocated )
    {
        xc_dom_panic(dom->xch, XC_OUT_OF_MEMORY,
                     "%s: segment %s too large (0x%"PRIpfn" > "
                     "0x%"PRIpfn" - 0x%"PRIpfn" pages)", __FUNCTION__,
                     name, pages, dom->total_pages, allocated);
        return -1;
    }

    dom->pfn_alloc_end += pages;
    dom->virt_alloc_end += pages * page_size;

    if ( dom->allocate )
        dom->allocate(dom);

    return 0;
}
/*
 * Allocate a guest segment "name" of "size" bytes.
 *
 * A zero "start" means "place it at the current end of the allocated
 * region"; an explicit start must be page aligned and must not lie
 * below the current allocation cursor.  The segment's pfn is derived
 * from its virtual offset against parms.virt_base, offset by
 * rambase_pfn, and the request is validated against total_pages.
 * Finally the segment is mapped and zero-filled.
 *
 * Returns 0 on success, -1 on any validation or mapping failure.
 */
int xc_dom_alloc_segment(struct xc_dom_image *dom, struct xc_dom_seg *seg,
                         char *name, xen_vaddr_t start, xen_vaddr_t size)
{
    unsigned int page_size = XC_DOM_PAGE_SIZE(dom);
    xen_pfn_t nr_pages = (size + page_size - 1) / page_size;
    xen_pfn_t base_pfn;
    void *map;

    if ( start == 0 )
        start = dom->virt_alloc_end;

    if ( start & (page_size - 1) )
    {
        xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
                     "%s: segment start isn't page aligned (0x%" PRIx64 ")",
                     __FUNCTION__, start);
        return -1;
    }
    if ( start < dom->virt_alloc_end )
    {
        xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
                     "%s: segment start too low (0x%" PRIx64 " < 0x%" PRIx64
                     ")", __FUNCTION__, start, dom->virt_alloc_end);
        return -1;
    }

    seg->vstart = start;
    base_pfn = (seg->vstart - dom->parms.virt_base) / page_size;
    seg->pfn = base_pfn + dom->rambase_pfn;

    /* three separate comparisons so no sum can wrap around */
    if ( nr_pages > dom->total_pages ||
         base_pfn > dom->total_pages ||
         nr_pages > dom->total_pages - base_pfn )
    {
        xc_dom_panic(dom->xch, XC_OUT_OF_MEMORY,
                     "%s: segment %s too large (0x%"PRIpfn" > "
                     "0x%"PRIpfn" - 0x%"PRIpfn" pages)", __FUNCTION__,
                     name, nr_pages, dom->total_pages, base_pfn);
        return -1;
    }

    seg->vend = start + nr_pages * page_size;
    dom->virt_alloc_end = seg->vend;
    if ( dom->allocate )
        dom->allocate(dom, dom->virt_alloc_end);

    DOMPRINTF("%-20s: %-12s : 0x%" PRIx64 " -> 0x%" PRIx64
              " (pfn 0x%" PRIpfn " + 0x%" PRIpfn " pages)",
              __FUNCTION__, name, seg->vstart, seg->vend,
              seg->pfn, nr_pages);

    /* map the segment and zero-fill it */
    map = xc_dom_seg_to_ptr(dom, seg);
    if ( map == NULL )
        return -1;
    memset(map, 0, nr_pages * page_size);

    return 0;
}
int xc_dom_alloc_page(struct xc_dom_image *dom, char *name) { unsigned int page_size = XC_DOM_PAGE_SIZE(dom); xen_vaddr_t start; xen_pfn_t pfn; start = dom->virt_alloc_end; dom->virt_alloc_end += page_size; if (dom->allocate) dom->allocate(dom, dom->virt_alloc_end); pfn = (start - dom->parms.virt_base) / page_size; xc_dom_printf("%-20s: %-12s : 0x%" PRIx64 " (pfn 0x%" PRIpfn ")\n", __FUNCTION__, name, start, pfn); return pfn; }
/*
 * Allocate a guest segment "name" of "size" bytes.
 *
 * A zero "start" places the segment at the current end of the allocated
 * region; an explicit start must be page aligned and not lie below the
 * allocation cursor.  On success the segment is mapped and zero-filled.
 *
 * Returns 0 on success, -1 on alignment/ordering/mapping failure.
 */
int xc_dom_alloc_segment(struct xc_dom_image *dom, struct xc_dom_seg *seg,
                         char *name, xen_vaddr_t start, xen_vaddr_t size)
{
    unsigned int page_size = XC_DOM_PAGE_SIZE(dom);
    xen_pfn_t nr_pages = (size + page_size - 1) / page_size;
    void *map;

    if ( start == 0 )
        start = dom->virt_alloc_end;

    if ( start & (page_size - 1) )
    {
        xc_dom_panic(XC_INTERNAL_ERROR,
                     "%s: segment start isn't page aligned (0x%" PRIx64 ")\n",
                     __FUNCTION__, start);
        return -1;
    }
    if ( start < dom->virt_alloc_end )
    {
        xc_dom_panic(XC_INTERNAL_ERROR,
                     "%s: segment start too low (0x%" PRIx64 " < 0x%" PRIx64
                     ")\n", __FUNCTION__, start, dom->virt_alloc_end);
        return -1;
    }

    seg->vstart = start;
    seg->vend = start + nr_pages * page_size;
    seg->pfn = (seg->vstart - dom->parms.virt_base) / page_size;
    dom->virt_alloc_end = seg->vend;
    if ( dom->allocate )
        dom->allocate(dom, dom->virt_alloc_end);

    xc_dom_printf("%-20s: %-12s : 0x%" PRIx64 " -> 0x%" PRIx64
                  " (pfn 0x%" PRIpfn " + 0x%" PRIpfn " pages)\n",
                  __FUNCTION__, name, seg->vstart, seg->vend,
                  seg->pfn, nr_pages);

    /* map the segment and zero-fill it */
    map = xc_dom_seg_to_ptr(dom, seg);
    if ( map == NULL )
        return -1;
    memset(map, 0, nr_pages * page_size);

    return 0;
}
/*
 * Late boot setup: map the domain's shared_info frame and let the
 * arch hooks populate it.
 *
 * Returns 0 on success, -1 if the foreign mapping fails.
 */
int arch_setup_bootlate(struct xc_dom_image *dom)
{
    unsigned int page_size = XC_DOM_PAGE_SIZE(dom);
    shared_info_t *si;

    xc_dom_printf("%s: shared_info: mfn 0x%" PRIpfn "\n",
                  __FUNCTION__, dom->shared_info_mfn);

    si = xc_map_foreign_range(dom->guest_xc, dom->guest_domid, page_size,
                              PROT_READ | PROT_WRITE, dom->shared_info_mfn);
    if ( si == NULL )
        return -1;

    /* delegate the actual contents to the arch-specific hook */
    dom->arch_hooks->shared_info(dom, si);

    munmap(si, page_size);
    return 0;
}
/*
 * Pad the allocation cursor up to "boundary" by accounting the gap as
 * a "padding" segment.  The boundary must be page aligned and must not
 * lie below the current end of the allocated region.
 *
 * Returns 0 on success, -1 on a bad boundary or budget overflow.
 */
static int xc_dom_alloc_pad(struct xc_dom_image *dom, xen_vaddr_t boundary)
{
    unsigned int page_size = XC_DOM_PAGE_SIZE(dom);
    xen_pfn_t nr_pad;

    if ( boundary & (page_size - 1) )
    {
        xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
                     "%s: segment boundary isn't page aligned (0x%" PRIx64
                     ")", __FUNCTION__, boundary);
        return -1;
    }
    if ( boundary < dom->virt_alloc_end )
    {
        xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
                     "%s: segment boundary too low (0x%" PRIx64
                     " < 0x%" PRIx64 ")", __FUNCTION__,
                     boundary, dom->virt_alloc_end);
        return -1;
    }

    nr_pad = (boundary - dom->virt_alloc_end) / page_size;
    return xc_dom_chk_alloc_pages(dom, "padding", nr_pad);
}
/*
 * Assemble the domain image: load the kernel, unpack/copy the ramdisk,
 * allocate the arch magic pages, page tables and (optionally) a boot
 * stack.
 *
 * Fix: the original dereferenced the ramdisk mapping returned by
 * xc_dom_seg_to_ptr() (via memcpy/gunzip) without checking it for NULL,
 * unlike every other mapping in this file.  A failed mapping now panics
 * and bails out instead of crashing.
 *
 * Returns 0 on success, -1 on any failure.
 */
int xc_dom_build_image(struct xc_dom_image *dom)
{
    unsigned int page_size;

    xc_dom_printf("%s: called\n", __FUNCTION__);

    /* check for arch hooks */
    if ( dom->arch_hooks == NULL )
    {
        xc_dom_panic(XC_INTERNAL_ERROR, "%s: arch hooks not set\n",
                     __FUNCTION__);
        goto err;
    }
    page_size = XC_DOM_PAGE_SIZE(dom);

    /* load kernel */
    if ( xc_dom_alloc_segment(dom, &dom->kernel_seg, "kernel",
                              dom->kernel_seg.vstart,
                              dom->kernel_seg.vend -
                              dom->kernel_seg.vstart) != 0 )
        goto err;
    if ( dom->kernel_loader->loader(dom) != 0 )
        goto err;

    /* load ramdisk */
    if ( dom->ramdisk_blob )
    {
        size_t unziplen, ramdisklen;
        void *ramdiskmap;

        /* non-zero unziplen means the blob is gzip'ed */
        unziplen = xc_dom_check_gzip(dom->ramdisk_blob, dom->ramdisk_size);
        ramdisklen = unziplen ? unziplen : dom->ramdisk_size;
        if ( xc_dom_alloc_segment(dom, &dom->ramdisk_seg, "ramdisk", 0,
                                  ramdisklen) != 0 )
            goto err;
        ramdiskmap = xc_dom_seg_to_ptr(dom, &dom->ramdisk_seg);
        if ( ramdiskmap == NULL )
        {
            /* bug fix: was dereferenced unchecked below */
            xc_dom_panic(XC_INTERNAL_ERROR,
                         "%s: xc_dom_seg_to_ptr(dom, &dom->ramdisk_seg) => NULL\n",
                         __FUNCTION__);
            goto err;
        }
        if ( unziplen )
        {
            if ( xc_dom_do_gunzip(dom->ramdisk_blob, dom->ramdisk_size,
                                  ramdiskmap, ramdisklen) == -1 )
                goto err;
        }
        else
            memcpy(ramdiskmap, dom->ramdisk_blob, dom->ramdisk_size);
    }

    /* allocate other pages */
    if ( dom->arch_hooks->alloc_magic_pages(dom) != 0 )
        goto err;
    if ( dom->arch_hooks->count_pgtables )
    {
        dom->arch_hooks->count_pgtables(dom);
        if ( (dom->pgtables > 0) &&
             (xc_dom_alloc_segment(dom, &dom->pgtables_seg, "page tables", 0,
                                   dom->pgtables * page_size) != 0) )
            goto err;
    }
    if ( dom->alloc_bootstack )
        dom->bootstack_pfn = xc_dom_alloc_page(dom, "boot stack");

    xc_dom_printf("%-20s: virt_alloc_end : 0x%" PRIx64 "\n",
                  __FUNCTION__, dom->virt_alloc_end);
    xc_dom_printf("%-20s: virt_pgtab_end : 0x%" PRIx64 "\n",
                  __FUNCTION__, dom->virt_pgtab_end);
    return 0;

 err:
    return -1;
}
/*
 * Construct the guest e820 memory map for a domain and push it to the
 * hypervisor via xc_domain_set_memory_map().
 *
 * The map is assembled from, in order:
 *   - one lowmem RAM entry (always present),
 *   - one E820_RESERVED entry per valid RDM (reserved device memory) range,
 *   - one E820_ACPI entry per loaded ACPI module,
 *   - one highmem RAM entry above 4GiB, if dom->highmem_end exceeds 4GiB.
 *
 * Returns 0 on success, ERROR_INVAL if the map would exceed E820MAX
 * entries, ERROR_FAIL if the hypercall fails.
 */
int libxl__arch_domain_construct_memmap(libxl__gc *gc,
                                        libxl_domain_config *d_config,
                                        uint32_t domid,
                                        struct xc_dom_image *dom)
{
    int rc = 0;
    unsigned int nr = 0, i;
    /* We always own at least one lowmem entry. */
    unsigned int e820_entries = 1;
    struct e820entry *e820 = NULL;
    /* RAM above the 4GiB boundary, 0 if the domain has none */
    uint64_t highmem_size =
        dom->highmem_end ? dom->highmem_end - (1ull << 32) : 0;
    /* with a device model, lowmem starts above the reserved region */
    uint32_t lowmem_start =
        dom->device_model ? GUEST_LOW_MEM_START_DEFAULT : 0;
    unsigned page_size = XC_DOM_PAGE_SIZE(dom);

    /* First pass: count how many entries we will need. */

    /* Add all rdm entries. */
    for (i = 0; i < d_config->num_rdms; i++)
        if (d_config->rdms[i].policy != LIBXL_RDM_RESERVE_POLICY_INVALID)
            e820_entries++;

    /* If we should have a highmem range. */
    if (highmem_size)
        e820_entries++;

    for (i = 0; i < MAX_ACPI_MODULES; i++)
        if (dom->acpi_modules[i].length)
            e820_entries++;

    if (e820_entries >= E820MAX) {
        LOGD(ERROR, domid, "Ooops! Too many entries in the memory map!");
        rc = ERROR_INVAL;
        goto out;
    }

    /* gc-scoped allocation: freed with the gc, no explicit free here */
    e820 = libxl__malloc(gc, sizeof(struct e820entry) * e820_entries);

    /* Second pass: fill the entries in the same order they were counted. */

    /* Low memory */
    e820[nr].addr = lowmem_start;
    e820[nr].size = dom->lowmem_end - lowmem_start;
    e820[nr].type = E820_RAM;
    nr++;

    /* RDM mapping */
    for (i = 0; i < d_config->num_rdms; i++) {
        if (d_config->rdms[i].policy == LIBXL_RDM_RESERVE_POLICY_INVALID)
            continue;

        e820[nr].addr = d_config->rdms[i].start;
        e820[nr].size = d_config->rdms[i].size;
        e820[nr].type = E820_RESERVED;
        nr++;
    }

    for (i = 0; i < MAX_ACPI_MODULES; i++) {
        if (dom->acpi_modules[i].length) {
            /* align the entry down to a page boundary and extend the
             * size by the bytes dropped from the address */
            e820[nr].addr = dom->acpi_modules[i].guest_addr_out &
                ~(page_size - 1);
            e820[nr].size = dom->acpi_modules[i].length +
                (dom->acpi_modules[i].guest_addr_out & (page_size - 1));
            e820[nr].type = E820_ACPI;
            nr++;
        }
    }

    /* High memory (always the last entry, so nr is not bumped) */
    if (highmem_size) {
        e820[nr].addr = ((uint64_t)1 << 32);
        e820[nr].size = highmem_size;
        e820[nr].type = E820_RAM;
    }

    if (xc_domain_set_memory_map(CTX->xch, domid, e820, e820_entries) != 0) {
        rc = ERROR_FAIL;
        goto out;
    }

out:
    return rc;
}