Example #1
/*
 * Initialise allocator, placing addresses [@min,@max] in free pool.
 * @min and @max are PHYSICAL addresses.
 */
static void init_page_allocator(unsigned long min, unsigned long max)
{
    int i;
    unsigned long range, bitmap_size;
    chunk_head_t *ch;
    chunk_tail_t *ct;
    for ( i = 0; i < FREELIST_SIZE; i++ )
    {
        free_head[i]       = &free_tail[i];
        free_tail[i].pprev = &free_head[i];
        free_tail[i].next  = NULL;
    }

    min = round_pgup  (min);
    max = round_pgdown(max);

    /* Allocate space for the allocation bitmap. */
    bitmap_size  = (max+1) >> (PAGE_SHIFT+3);
    bitmap_size  = round_pgup(bitmap_size);
    alloc_bitmap = (unsigned long *)to_virt(min);
    min         += bitmap_size;
    range        = max - min;

    /* All allocated by default. */
    memset(alloc_bitmap, ~0, bitmap_size);
    /* Free up the memory we've been given to play with. */
    map_free(PHYS_PFN(min), range>>PAGE_SHIFT);

    /* The buddy lists are addressed in high memory. */
    min = (unsigned long) to_virt(min);
    max = (unsigned long) to_virt(max);

    while ( range != 0 )
    {
        /*
         * Next chunk is limited by alignment of min, but also
         * must not be bigger than remaining range.
         */
        for ( i = PAGE_SHIFT; (1UL<<(i+1)) <= range; i++ )
            if ( min & (1UL<<i) ) break;

        /* Carve the next chunk off the front of the remaining range. */
        ch = (chunk_head_t *)min;
        min   += (1UL<<i);
        range -= (1UL<<i);
        ct = (chunk_tail_t *)min-1;
        i -= PAGE_SHIFT;

        /* Link the chunk at the head of the order-i free list. */
        ch->level       = i;
        ch->next        = free_head[i];
        ch->pprev       = &free_head[i];
        ch->next->pprev = &ch->next;
        free_head[i]    = ch;
        ct->level       = i;
    }
}
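
All of the examples on this page lean on the same page-rounding helpers. Their definitions are not part of these listings; a minimal sketch of the usual mini-os/Xen form, assuming 4 KiB pages, is:

/* Sketch only: typical page-rounding helpers, assuming 4 KiB pages. */
#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

#define round_pgup(p)   (((p) + (PAGE_SIZE - 1)) & PAGE_MASK)
#define round_pgdown(p) ((p) & PAGE_MASK)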
Example #2
static void init_page_allocator(unsigned long min, unsigned long max)
{
    mm.min_phys = round_pgup(min);
    mm.max_phys = round_pgdown(max);

    mm.bitmap_size = (mm.max_phys + 1) >> (PAGE_SHIFT + 3);
    mm.bitmap_size = round_pgup(mm.bitmap_size);
    mm.bitmap = (uint64_t *)to_virt(mm.min_phys);
    mm.min_phys += mm.bitmap_size;
    memset(mm.bitmap, ~0, mm.bitmap_size);

    mm.num_pages = (mm.max_phys - mm.min_phys) >> PAGE_SHIFT;
    bitmap_free(PHYS_PFN(mm.min_phys), mm.num_pages);

    printk("go_mm: page allocator manages %lu free pages\n", mm.num_pages);
}
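
The bitmap sizing line packs one allocation bit per page, eight pages per byte, hence the shift by PAGE_SHIFT + 3. A standalone check of the arithmetic with hypothetical numbers (1 GiB of physical memory, 4 KiB pages):

#include <stdio.h>

#define PAGE_SHIFT 12UL

int main(void)
{
    /* Hypothetical: manage physical memory up to 1 GiB. */
    unsigned long max_phys = (1UL << 30) - 1;

    /* One bit per page, 8 bits per byte => shift by PAGE_SHIFT + 3. */
    unsigned long bitmap_size = (max_phys + 1) >> (PAGE_SHIFT + 3);

    /* Prints: 262144 pages need a 32768-byte bitmap (8 bitmap pages). */
    printf("%lu pages need a %lu-byte bitmap\n",
           (max_phys + 1) >> PAGE_SHIFT, bitmap_size);
    return 0;
}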
Example #3
#endif
    }

    domain_set_alloc_bitsize(d);

    /*
     * Why do we need this? The number of page-table frames depends on the 
     * size of the bootstrap address space. But the size of the address space 
     * depends on the number of page-table frames (since each one is mapped 
     * read-only). We have a pair of simultaneous equations in two unknowns, 
     * which we solve by exhaustive search.
     */
    v_start          = parms.virt_base;
    vkern_start      = parms.virt_kstart;
    vkern_end        = parms.virt_kend;
    vinitrd_start    = round_pgup(vkern_end);
    vinitrd_end      = vinitrd_start + initrd_len;
    vphysmap_start   = round_pgup(vinitrd_end);
    vphysmap_end     = vphysmap_start + (nr_pages * (!is_pv_32on64_domain(d) ?
                                                     sizeof(unsigned long) :
                                                     sizeof(unsigned int)));
    vstartinfo_start = round_pgup(vphysmap_end);
    vstartinfo_end   = (vstartinfo_start +
                        sizeof(struct start_info) +
                        sizeof(struct dom0_vga_console_info));
    vpt_start        = round_pgup(vstartinfo_end);
    for ( nr_pt_pages = 2; ; nr_pt_pages++ )
    {
        vpt_end          = vpt_start + (nr_pt_pages * PAGE_SIZE);
        vstack_start     = vpt_end;
        vstack_end       = vstack_start + PAGE_SIZE;
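
The extract above is cut off inside the search loop; in the full builder the body ends by breaking once nr_pt_pages frames are enough to map everything up to vstack_end. A self-contained sketch of that fixed-point search, using a made-up pt_frames_needed() cost model rather than the real x86 page-table math:

#include <stdio.h>

#define PAGE_SHIFT 12UL
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Hypothetical model: one frame per 2 MiB mapped (512 PTEs x 4 KiB),
 * plus one frame of overhead for the upper levels. */
static unsigned long pt_frames_needed(unsigned long bytes)
{
    return 1 + (bytes + (1UL << 21) - 1) / (1UL << 21);
}

int main(void)
{
    unsigned long v_start   = 0xC0000000UL;            /* hypothetical */
    unsigned long vpt_start = v_start + (64UL << 20);  /* image, initrd, ... */
    unsigned long nr_pt_pages, vpt_end, vstack_end;

    for ( nr_pt_pages = 2; ; nr_pt_pages++ )
    {
        vpt_end    = vpt_start + nr_pt_pages * PAGE_SIZE;
        vstack_end = vpt_end + PAGE_SIZE;              /* one stack page */
        /* Stop once nr_pt_pages frames cover [v_start, vstack_end). */
        if ( pt_frames_needed(vstack_end - v_start) <= nr_pt_pages )
            break;
    }
    printf("fixed point: nr_pt_pages = %lu\n", nr_pt_pages);
    return 0;
}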
Example #4
static unsigned long __init compute_dom0_nr_pages(
    struct domain *d, struct elf_dom_parms *parms, unsigned long initrd_len)
{
    unsigned long avail = avail_domheap_pages() + initial_images_nrpages();
    unsigned long nr_pages = dom0_nrpages;
    unsigned long min_pages = dom0_min_nrpages;
    unsigned long max_pages = dom0_max_nrpages;

    /* Reserve memory for further dom0 vcpu-struct allocations... */
    avail -= (opt_dom0_max_vcpus - 1UL)
             << get_order_from_bytes(sizeof(struct vcpu));
    /* ...and compat_l4's, if needed. */
    if ( is_pv_32on64_domain(d) )
        avail -= opt_dom0_max_vcpus - 1;

    /* Reserve memory for iommu_dom0_init() (rough estimate). */
    if ( iommu_enabled )
    {
        unsigned int s;

        for ( s = 9; s < BITS_PER_LONG; s += 9 )
            avail -= max_pdx >> s;
    }

    /*
     * If domain 0 allocation isn't specified, reserve 1/16th of available
     * memory for things like DMA buffers. This reservation is clamped to 
     * a maximum of 128MB.
     */
    if ( nr_pages == 0 )
        nr_pages = -min(avail / 16, 128UL << (20 - PAGE_SHIFT));

    /* Negative memory specification means "all memory - specified amount". */
    if ( (long)nr_pages  < 0 ) nr_pages  += avail;
    if ( (long)min_pages < 0 ) min_pages += avail;
    if ( (long)max_pages < 0 ) max_pages += avail;

    /* Clamp dom0 memory according to min/max limits and available memory. */
    nr_pages = max(nr_pages, min_pages);
    nr_pages = min(nr_pages, max_pages);
    nr_pages = min(nr_pages, avail);

#ifdef __x86_64__
    if ( (parms->p2m_base == UNSET_ADDR) && (dom0_nrpages <= 0) &&
         ((dom0_min_nrpages <= 0) || (nr_pages > min_pages)) )
    {
        /*
         * Legacy Linux kernels (i.e. such without a XEN_ELFNOTE_INIT_P2M
         * note) require that there is enough virtual space beyond the initial
         * allocation to set up their initial page tables. This space is
         * roughly the same size as the p2m table, so make sure the initial
         * allocation doesn't consume more than about half the space that's
         * available between params.virt_base and the address space end.
         */
        unsigned long vstart, vend, end;
        size_t sizeof_long = is_pv_32bit_domain(d) ? sizeof(int) : sizeof(long);

        vstart = parms->virt_base;
        vend = round_pgup(parms->virt_kend);
        if ( !parms->elf_notes[XEN_ELFNOTE_MOD_START_PFN].data.num )
            vend += round_pgup(initrd_len);
        end = vend + nr_pages * sizeof_long;

        if ( end > vstart )
            end += end - vstart;
        if ( end <= vstart ||
             (sizeof_long < sizeof(end) && end > (1UL << (8 * sizeof_long))) )
        {
            end = sizeof_long >= sizeof(end) ? 0 : 1UL << (8 * sizeof_long);
            nr_pages = (end - vend) / (2 * sizeof_long);
            if ( dom0_min_nrpages > 0 && nr_pages < min_pages )
                nr_pages = min_pages;
            printk("Dom0 memory clipped to %lu pages\n", nr_pages);
        }
    }
#endif

    d->max_pages = min_t(unsigned long, max_pages, UINT_MAX);

    return nr_pages;
}
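
The unary minus in nr_pages = -min(avail / 16, ...) is deliberate: it encodes the default reservation as a negative specification, so the block right below it converts it into "all memory minus the reservation". A standalone demonstration with hypothetical numbers (4 GiB available, no explicit dom0 allocation):

#include <stdio.h>

#define PAGE_SHIFT 12UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
    return a < b ? a : b;
}

int main(void)
{
    /* Hypothetical: 4 GiB of available memory in 4 KiB pages. */
    unsigned long avail = 4UL << (30 - PAGE_SHIFT);
    unsigned long nr_pages = 0;

    /* Encode "all memory minus min(avail/16, 128 MB)" as a negative count. */
    if ( nr_pages == 0 )
        nr_pages = -min_ul(avail / 16, 128UL << (20 - PAGE_SHIFT));

    /* Negative specification means "all memory - specified amount". */
    if ( (long)nr_pages < 0 )
        nr_pages += avail;

    /* Prints: dom0 gets 1015808 of 1048576 pages. */
    printf("dom0 gets %lu of %lu pages\n", nr_pages, avail);
    return 0;
}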
Example #5
int construct_guest_dom(struct domain *d,
			unsigned long guest_size,
			unsigned long image_start, unsigned long image_size,
			unsigned long initrd_start, unsigned long initrd_size,
			char *cmdline)
{
	char    *p = NULL;
	int     i;
	int     rc;

	unsigned long nr_pages;
	unsigned long nr_pt_pages;
	unsigned long map_track;
	unsigned long phys_offset;

	struct page_info *page = NULL; 
	struct start_info *si  = NULL;
	struct domain_setup_info dsi;
	struct vcpu *v         = NULL;

	uint32_t domain_features_supported[XENFEAT_NR_SUBMAPS] = { 0 };
	uint32_t domain_features_required[XENFEAT_NR_SUBMAPS] = { 0 };

	BUG_ON(d == NULL);

	BUG_ON(d->domain_id <= 0);
	BUG_ON(d->vcpu[0] == NULL);

	v = d->vcpu[0];

	printk("Image Start = 0x%x\n", image_start);

	/* Guest partition should be aligned to 1MB boundary */
	ASSERT((guest_size & 0xFFFFF) == 0);

	BUG_ON(test_bit(_VCPUF_initialised, &v->vcpu_flags));

	write_ptbase(idle_domain->vcpu[0]);

	memset(&dsi, 0, sizeof(struct domain_setup_info));

	dsi.image_addr = image_start;
	dsi.image_len  = image_size;

	printk("*** LOADING DOMAIN : %d ***\n", (int)d->domain_id);

	d->max_pages = ~0U;

	rc = parseelfimage(&dsi);
	if (rc != 0) {
		local_irq_enable();
		return rc;
	}

	if (dsi.xen_section_string == NULL) {
		printk("Not a Xen-ELF image: '__xen_guest' section not found.\n");
		local_irq_enable();
		return -EINVAL;
	}

	if ((p = strstr(dsi.xen_section_string, "FEATURES=")) != NULL) {
		parse_features(p + strlen("FEATURES="),
			domain_features_supported,
			domain_features_required);

		printk("Guest kernel supports features = { %08x }.\n",
			domain_features_supported[0]);
		printk("Guest kernel requires features = { %08x }.\n",
                        domain_features_required[0]);

		if (domain_features_required[0]) {
			printk("Guest kernel requires an unsupported hypervisor feature.\n");
			local_irq_enable();
			return -EINVAL;
		}
	}

	page = (struct page_info *)pages_u_alloc(d,
					get_order_from_bytes(guest_size),
					~ALLOC_DOM_DMA);
	if (page == NULL) {
		printk("Not enough RAM for domain %d allocation.\n", d->domain_id);
		return -ENOMEM;
	}

	dsi.p_start = page_to_phys(page);
	dsi.p_end   = dsi.p_start + guest_size;
	printk("Guest physical: 0x%x-0x%x\n", dsi.p_start, dsi.p_end);

	dsi.v_start &= (~(0xFFFFF));

	nr_pt_pages = build_guest_tables(v, &dsi);

	write_ptbase(current);

	rc = inspect_guest_tables(v);
	if (!rc) {
		panic("Wrong guest table found\n");
	}

	nr_pages = guest_size >> PAGE_SHIFT;

	if (d->tot_pages < nr_pages)
		printk(" (%lu pages to be allocated)", nr_pages - d->tot_pages);

	for (i = 0; i < MAX_VIRT_CPUS; i++)
		d->shared_info->vcpu_info[i].evtchn_upcall_mask = 1;

	for (i = 1; i < num_online_cpus(); i++)
		(void)alloc_vcpu(d, i, i);

	write_ptbase(v);

	phys_offset = v->arch.guest_pstart - v->arch.guest_vstart;
	dsi.image_addr -= phys_offset;

	/* Copy the OS image and free temporary buffer. */
	(void)loadelfimage(&dsi);

	map_track = round_pgup((unsigned long)(v->arch.guest_vtable) + (PAGE_SIZE * nr_pt_pages));
	si = (start_info_t *)map_track;
	memset(si, 0, PAGE_SIZE);

	si->nr_pages	 = nr_pages;
#if 0
	si->shared_info  = virt_to_phys(d->shared_info);
#endif
	si->shared_info  = d->shared_info;
	si->flags        = 0;
	si->pt_base      = (unsigned long)v->arch.guest_vtable;
	si->nr_pt_frames = nr_pt_pages;
	si->mfn_list     = NULL;
	si->min_mfn      = dsi.p_start >> PAGE_SHIFT;

	map_track += PAGE_SIZE;

	if (initrd_size != 0) {
		si->mod_start = map_track;
		si->mod_len = initrd_size;

		printk("Initrd len 0x%lx, start at 0x%lx\n", si->mod_len, si->mod_start);

		memcpy((void *)map_track, (const void *)(initrd_start - phys_offset), initrd_size);

		map_track = round_pgup(map_track + initrd_size);
	}

	memset((void *)map_track, 0, PAGE_SIZE * 2);

	si->store_mfn = (map_track + phys_offset) >> PAGE_SHIFT;
	si->store_evtchn = d->store_port;
	
	map_track += PAGE_SIZE;

	si->console_mfn = (map_track + phys_offset) >> PAGE_SHIFT;
	si->console_evtchn = d->console_port;

	map_track += PAGE_SIZE;

	d->console_mfn = si->console_mfn;
	d->store_mfn = si->store_mfn;

	memset(si->cmd_line, 0, sizeof(si->cmd_line));
	if (cmdline != NULL)
		strncpy((char *)si->cmd_line, cmdline, sizeof(si->cmd_line)-1);

#if 0
	/* setup shared info table which is specified each domain */
	rc = setup_shared_info_mapping(d, NULL);

	if (rc != 0) {
		return rc;
	}
#endif
	write_ptbase(current);

	//init_domain_time(d);

	set_bit(_VCPUF_initialised, &v->vcpu_flags);

	new_thread(v, dsi.v_kernentry, map_track + PAGE_SIZE, (unsigned long)si);

	i = 0;

	BUG_ON(i != 0);

	return 0;
}
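
For orientation, the virtual layout that construct_guest_dom() assembles via the map_track cursor, as a comment sketch (positions follow the code above; addresses are illustrative only):

/*
 *   v->arch.guest_vtable            guest page tables (nr_pt_pages frames)
 *   map_track = round_pgup(...)     start_info page (si)
 *   map_track += PAGE_SIZE          initrd, if any (rounded up to a page)
 *   map_track                       xenstore ring page  -> si->store_mfn
 *   map_track += PAGE_SIZE          console ring page   -> si->console_mfn
 *   map_track += PAGE_SIZE          boot stack passed to new_thread()
 */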
Example #6
unsigned long __init dom0_compute_nr_pages(
    struct domain *d, struct elf_dom_parms *parms, unsigned long initrd_len)
{
    nodeid_t node;
    unsigned long avail = 0, nr_pages, min_pages, max_pages;
    bool_t need_paging;

    for_each_node_mask ( node, dom0_nodes )
        avail += avail_domheap_pages_region(node, 0, 0) +
                 initial_images_nrpages(node);

    /* Reserve memory for further dom0 vcpu-struct allocations... */
    avail -= (d->max_vcpus - 1UL)
             << get_order_from_bytes(sizeof(struct vcpu));
    /* ...and compat_l4's, if needed. */
    if ( is_pv_32bit_domain(d) )
        avail -= d->max_vcpus - 1;

    /* Reserve memory for iommu_dom0_init() (rough estimate). */
    if ( iommu_enabled )
    {
        unsigned int s;

        for ( s = 9; s < BITS_PER_LONG; s += 9 )
            avail -= max_pdx >> s;
    }

    need_paging = is_hvm_domain(d) &&
        (!iommu_hap_pt_share || !paging_mode_hap(d));
    for ( ; ; need_paging = 0 )
    {
        nr_pages = dom0_nrpages;
        min_pages = dom0_min_nrpages;
        max_pages = dom0_max_nrpages;

        /*
         * If allocation isn't specified, reserve 1/16th of available memory
         * for things like DMA buffers. This reservation is clamped to a
         * maximum of 128MB.
         */
        if ( nr_pages == 0 )
            nr_pages = -min(avail / 16, 128UL << (20 - PAGE_SHIFT));

        /* Negative specification means "all memory - specified amount". */
        if ( (long)nr_pages  < 0 ) nr_pages  += avail;
        if ( (long)min_pages < 0 ) min_pages += avail;
        if ( (long)max_pages < 0 ) max_pages += avail;

        /* Clamp according to min/max limits and available memory. */
        nr_pages = max(nr_pages, min_pages);
        nr_pages = min(nr_pages, max_pages);
        nr_pages = min(nr_pages, avail);

        if ( !need_paging )
            break;

        /* Reserve memory for shadow or HAP. */
        avail -= dom0_paging_pages(d, nr_pages);
    }

    if ( is_pv_domain(d) &&
         (parms->p2m_base == UNSET_ADDR) && (dom0_nrpages <= 0) &&
         ((dom0_min_nrpages <= 0) || (nr_pages > min_pages)) )
    {
        /*
         * Legacy Linux kernels (i.e. such without a XEN_ELFNOTE_INIT_P2M
         * note) require that there is enough virtual space beyond the initial
         * allocation to set up their initial page tables. This space is
         * roughly the same size as the p2m table, so make sure the initial
         * allocation doesn't consume more than about half the space that's
         * available between params.virt_base and the address space end.
         */
        unsigned long vstart, vend, end;
        size_t sizeof_long = is_pv_32bit_domain(d) ? sizeof(int) : sizeof(long);

        vstart = parms->virt_base;
        vend = round_pgup(parms->virt_kend);
        if ( !parms->unmapped_initrd )
            vend += round_pgup(initrd_len);
        end = vend + nr_pages * sizeof_long;

        if ( end > vstart )
            end += end - vstart;
        if ( end <= vstart ||
             (sizeof_long < sizeof(end) && end > (1UL << (8 * sizeof_long))) )
        {
            end = sizeof_long >= sizeof(end) ? 0 : 1UL << (8 * sizeof_long);
            nr_pages = (end - vend) / (2 * sizeof_long);
            if ( dom0_min_nrpages > 0 && nr_pages < min_pages )
                nr_pages = min_pages;
            printk("Dom0 memory clipped to %lu pages\n", nr_pages);
        }
    }

    d->max_pages = min_t(unsigned long, max_pages, UINT_MAX);

    return nr_pages;
}
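
The for ( ; ; need_paging = 0 ) construct runs the sizing at most twice: the first pass computes nr_pages, deducts the shadow/HAP reservation from avail, and the second pass recomputes against the smaller pool. A stripped-down sketch with a hypothetical paging-cost model:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model: paging structures cost about 1/256 of guest memory. */
static unsigned long paging_pages(unsigned long nr_pages)
{
    return nr_pages / 256;
}

int main(void)
{
    unsigned long avail = 1UL << 20;   /* hypothetical: 1M pages free */
    unsigned long nr_pages = 0;
    bool need_paging = true;           /* as if HVM without shared HAP */

    for ( ; ; need_paging = false )
    {
        /* Same default as above: all memory minus a 1/16th reservation. */
        nr_pages = avail - avail / 16;

        if ( !need_paging )
            break;

        /* First pass only: reserve shadow/HAP memory, then size again. */
        avail -= paging_pages(nr_pages);
    }

    printf("nr_pages = %lu with %lu pages left available\n", nr_pages, avail);
    return 0;
}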