/*
 * Resolve the dom0 memory size from the dom0_nrpages / dom0_min_nrpages /
 * dom0_max_nrpages settings and the memory actually available.  The three
 * globals are normalised in place (negative "all minus N" specifications
 * become absolute page counts) and the final, clamped page count is
 * returned.
 */
static unsigned long __init compute_dom0_nr_pages(void)
{
    unsigned long avail = avail_domheap_pages() + initial_images_nrpages();

    /*
     * No explicit dom0 allocation given: take everything, minus a reserve
     * of 1/16th of available memory (clamped to at most 128MB) for things
     * like DMA buffers.  The reserve is stored negated so the "negative
     * means all-memory-minus-amount" rule below produces the final value.
     */
    if ( dom0_nrpages == 0 )
    {
        dom0_nrpages = avail;
        dom0_nrpages /= 16;
        dom0_nrpages = min(dom0_nrpages, 128L << (20 - PAGE_SHIFT));
        dom0_nrpages = -dom0_nrpages;
    }

    /* Negative memory specification means "all memory - specified amount". */
    if ( dom0_nrpages < 0 )
        dom0_nrpages += avail;
    if ( dom0_min_nrpages < 0 )
        dom0_min_nrpages += avail;
    if ( dom0_max_nrpages < 0 )
        dom0_max_nrpages += avail;

    /* Clamp dom0 memory according to min/max limits and available memory. */
    if ( dom0_nrpages < dom0_min_nrpages )
        dom0_nrpages = dom0_min_nrpages;
    if ( dom0_nrpages > dom0_max_nrpages )
        dom0_nrpages = dom0_max_nrpages;
    if ( dom0_nrpages > (long)avail )
        dom0_nrpages = (long)avail;

    return dom0_nrpages;
}
/*
 * Work out how many memory pages dom0 should get, from the configured
 * dom0_nrpages / dom0_min_nrpages / dom0_max_nrpages values and the memory
 * available on the nodes in dom0_nodes.  Also sets d->max_pages as a side
 * effect.  Returns the resolved page count.
 */
unsigned long __init dom0_compute_nr_pages(
    struct domain *d, struct elf_dom_parms *parms,
    unsigned long initrd_len)
{
    nodeid_t node;
    unsigned long avail = 0, nr_pages, min_pages, max_pages;
    bool_t need_paging;

    /* Sum free domheap memory plus initial-image pages across dom0's nodes. */
    for_each_node_mask ( node, dom0_nodes )
        avail += avail_domheap_pages_region(node, 0, 0) +
                 initial_images_nrpages(node);

    /* Reserve memory for further dom0 vcpu-struct allocations... */
    avail -= (d->max_vcpus - 1UL)
             << get_order_from_bytes(sizeof(struct vcpu));
    /* ...and compat_l4's, if needed. */
    if ( is_pv_32bit_domain(d) )
        avail -= d->max_vcpus - 1;

    /* Reserve memory for iommu_dom0_init() (rough estimate). */
    if ( iommu_enabled )
    {
        unsigned int s;

        for ( s = 9; s < BITS_PER_LONG; s += 9 )
            avail -= max_pdx >> s;
    }

    /* Paging structures are only needed when HAP tables aren't shared. */
    need_paging = is_hvm_domain(d) &&
        (!iommu_hap_pt_share || !paging_mode_hap(d));
    /*
     * Runs at most twice: a first pass may subtract the shadow/HAP
     * reservation from avail, then the second pass recomputes and breaks.
     */
    for ( ; ; need_paging = 0 )
    {
        nr_pages = dom0_nrpages;
        min_pages = dom0_min_nrpages;
        max_pages = dom0_max_nrpages;

        /*
         * If allocation isn't specified, reserve 1/16th of available memory
         * for things like DMA buffers. This reservation is clamped to a
         * maximum of 128MB.
         *
         * The reservation is negated (as an unsigned value) so the
         * "negative specification" rule below turns it into avail - reserve.
         */
        if ( nr_pages == 0 )
            nr_pages = -min(avail / 16, 128UL << (20 - PAGE_SHIFT));

        /* Negative specification means "all memory - specified amount". */
        if ( (long)nr_pages < 0 ) nr_pages += avail;
        if ( (long)min_pages < 0 ) min_pages += avail;
        if ( (long)max_pages < 0 ) max_pages += avail;

        /* Clamp according to min/max limits and available memory. */
        nr_pages = max(nr_pages, min_pages);
        nr_pages = min(nr_pages, max_pages);
        nr_pages = min(nr_pages, avail);

        if ( !need_paging )
            break;

        /* Reserve memory for shadow or HAP. */
        avail -= dom0_paging_pages(d, nr_pages);
    }

    if ( is_pv_domain(d) &&
         (parms->p2m_base == UNSET_ADDR) &&
         (dom0_nrpages <= 0) &&
         ((dom0_min_nrpages <= 0) || (nr_pages > min_pages)) )
    {
        /*
         * Legacy Linux kernels (i.e. such without a XEN_ELFNOTE_INIT_P2M
         * note) require that there is enough virtual space beyond the initial
         * allocation to set up their initial page tables. This space is
         * roughly the same size as the p2m table, so make sure the initial
         * allocation doesn't consume more than about half the space that's
         * available between params.virt_base and the address space end.
         */
        unsigned long vstart, vend, end;
        /* Guest pointer width: 4 bytes for 32-bit PV, else native long. */
        size_t sizeof_long = is_pv_32bit_domain(d) ? sizeof(int)
                                                   : sizeof(long);

        vstart = parms->virt_base;
        vend = round_pgup(parms->virt_kend);
        if ( !parms->unmapped_initrd )
            vend += round_pgup(initrd_len);
        /* End of initial allocation plus a same-sized p2m table estimate. */
        end = vend + nr_pages * sizeof_long;

        if ( end > vstart )
            end += end - vstart;
        /*
         * Clip if the doubled span wrapped (end <= vstart) or overruns the
         * guest's addressable range (only checkable when the guest pointer
         * is narrower than our unsigned long).
         */
        if ( end <= vstart ||
             (sizeof_long < sizeof(end) && end > (1UL << (8 * sizeof_long))) )
        {
            end = sizeof_long >= sizeof(end) ? 0 : 1UL << (8 * sizeof_long);
            nr_pages = (end - vend) / (2 * sizeof_long);
            /* Honour an explicitly configured minimum even when clipping. */
            if ( dom0_min_nrpages > 0 && nr_pages < min_pages )
                nr_pages = min_pages;
            printk("Dom0 memory clipped to %lu pages\n", nr_pages);
        }
    }

    /* d->max_pages is narrower than unsigned long, hence the UINT_MAX cap. */
    d->max_pages = min_t(unsigned long, max_pages, UINT_MAX);

    return nr_pages;
}
/*
 * Older variant: compute dom0's page allocation from the dom0_nrpages /
 * dom0_min_nrpages / dom0_max_nrpages settings and total available memory.
 * Also sets d->max_pages as a side effect.  Returns the resolved page count.
 */
static unsigned long __init compute_dom0_nr_pages(
    struct domain *d, struct elf_dom_parms *parms, unsigned long initrd_len)
{
    unsigned long avail = avail_domheap_pages() + initial_images_nrpages();
    unsigned long nr_pages = dom0_nrpages;
    unsigned long min_pages = dom0_min_nrpages;
    unsigned long max_pages = dom0_max_nrpages;

    /* Reserve memory for further dom0 vcpu-struct allocations... */
    avail -= (opt_dom0_max_vcpus - 1UL)
             << get_order_from_bytes(sizeof(struct vcpu));
    /* ...and compat_l4's, if needed. */
    if ( is_pv_32on64_domain(d) )
        avail -= opt_dom0_max_vcpus - 1;

    /* Reserve memory for iommu_dom0_init() (rough estimate). */
    if ( iommu_enabled )
    {
        unsigned int s;

        for ( s = 9; s < BITS_PER_LONG; s += 9 )
            avail -= max_pdx >> s;
    }

    /*
     * If domain 0 allocation isn't specified, reserve 1/16th of available
     * memory for things like DMA buffers. This reservation is clamped to
     * a maximum of 128MB.
     *
     * The reservation is negated (as an unsigned value) so the "negative
     * specification" rule below turns it into avail - reserve.
     */
    if ( nr_pages == 0 )
        nr_pages = -min(avail / 16, 128UL << (20 - PAGE_SHIFT));

    /* Negative memory specification means "all memory - specified amount". */
    if ( (long)nr_pages < 0 ) nr_pages += avail;
    if ( (long)min_pages < 0 ) min_pages += avail;
    if ( (long)max_pages < 0 ) max_pages += avail;

    /* Clamp dom0 memory according to min/max limits and available memory. */
    nr_pages = max(nr_pages, min_pages);
    nr_pages = min(nr_pages, max_pages);
    nr_pages = min(nr_pages, avail);

#ifdef __x86_64__
    if ( (parms->p2m_base == UNSET_ADDR) &&
         (dom0_nrpages <= 0) &&
         ((dom0_min_nrpages <= 0) || (nr_pages > min_pages)) )
    {
        /*
         * Legacy Linux kernels (i.e. such without a XEN_ELFNOTE_INIT_P2M
         * note) require that there is enough virtual space beyond the initial
         * allocation to set up their initial page tables. This space is
         * roughly the same size as the p2m table, so make sure the initial
         * allocation doesn't consume more than about half the space that's
         * available between params.virt_base and the address space end.
         */
        unsigned long vstart, vend, end;
        /* Guest pointer width: 4 bytes for 32-bit PV, else native long. */
        size_t sizeof_long = is_pv_32bit_domain(d) ? sizeof(int)
                                                   : sizeof(long);

        vstart = parms->virt_base;
        vend = round_pgup(parms->virt_kend);
        /* No MOD_START_PFN note: the initrd sits right after the kernel. */
        if ( !parms->elf_notes[XEN_ELFNOTE_MOD_START_PFN].data.num )
            vend += round_pgup(initrd_len);
        /* End of initial allocation plus a same-sized p2m table estimate. */
        end = vend + nr_pages * sizeof_long;

        if ( end > vstart )
            end += end - vstart;
        /*
         * Clip if the doubled span wrapped (end <= vstart) or overruns the
         * guest's addressable range (only checkable when the guest pointer
         * is narrower than our unsigned long).
         */
        if ( end <= vstart ||
             (sizeof_long < sizeof(end) && end > (1UL << (8 * sizeof_long))) )
        {
            end = sizeof_long >= sizeof(end) ? 0 : 1UL << (8 * sizeof_long);
            nr_pages = (end - vend) / (2 * sizeof_long);
            /* Honour an explicitly configured minimum even when clipping. */
            if ( dom0_min_nrpages > 0 && nr_pages < min_pages )
                nr_pages = min_pages;
            printk("Dom0 memory clipped to %lu pages\n", nr_pages);
        }
    }
#endif

    /* d->max_pages is narrower than unsigned long, hence the UINT_MAX cap. */
    d->max_pages = min_t(unsigned long, max_pages, UINT_MAX);

    return nr_pages;
}
if ( (max_pages & (max_pages-1)) != 0 ) order--; while ( (page = alloc_domheap_pages(d, order, 0)) == NULL ) if ( order-- == 0 ) break; return page; } static unsigned long __init compute_dom0_nr_pages( #ifdef __x86_64__ unsigned long vstart, unsigned long vend, size_t sizeof_long) #else void) #endif { unsigned long avail = avail_domheap_pages() + initial_images_nrpages(); unsigned long nr_pages = dom0_nrpages; unsigned long min_pages = dom0_min_nrpages; unsigned long max_pages = dom0_max_nrpages; /* * If domain 0 allocation isn't specified, reserve 1/16th of available * memory for things like DMA buffers. This reservation is clamped to * a maximum of 128MB. */ if ( nr_pages == 0 ) nr_pages = -min(avail / 16, 128UL << (20 - PAGE_SHIFT)); /* Negative memory specification means "all memory - specified amount". */ if ( (long)nr_pages < 0 ) nr_pages += avail; if ( (long)min_pages < 0 ) min_pages += avail;