Example No. 1
File: vdso.c  Project: Artox/linux
static int __init vdso_init(void)
{
	int i;
	struct page **vdso_pagelist;

	if (memcmp(&vdso_start, "\177ELF", 4)) {
		pr_err("vDSO is not a valid ELF object!\n");
		return -EINVAL;
	}

	vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
	pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
		vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data);

	/* Allocate the vDSO pagelist, plus a page for the data. */
	vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		return -ENOMEM;

	/* Grab the vDSO data page. */
	vdso_pagelist[0] = pfn_to_page(PHYS_PFN(__pa(vdso_data)));

	/* Grab the vDSO code pages. */
	for (i = 0; i < vdso_pages; i++)
		vdso_pagelist[i + 1] = pfn_to_page(PHYS_PFN(__pa(&vdso_start)) + i);

	vdso_spec[0].pages = &vdso_pagelist[0];
	vdso_spec[1].pages = &vdso_pagelist[1];

	return 0;
}
Example No. 2
static unsigned long init_altmap_reserve(resource_size_t base)
{
	unsigned long reserve = PHYS_PFN(SZ_8K);
	unsigned long base_pfn = PHYS_PFN(base);

	reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
	return reserve;
}
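Note: a stand-alone sketch of the arithmetic above may help. It assumes 4 KiB pages (PAGE_SHIFT == 12) and 128 MiB memory sections (32768 pages); the macro definitions below are illustrative stand-ins, not the kernel's headers.

/* Illustrative sketch (not kernel code): reserve 2 pages for the 8 KiB
 * info block, plus the distance from the section start back up to base. */
#include <assert.h>

#define PAGE_SHIFT        12UL                      /* assuming 4 KiB pages */
#define SZ_8K             0x2000UL
#define PHYS_PFN(x)       ((unsigned long)((x) >> PAGE_SHIFT))
#define PAGES_PER_SECTION 32768UL                   /* assuming 128 MiB sections */
#define SECTION_ALIGN_DOWN(pfn) ((pfn) & ~(PAGES_PER_SECTION - 1))

static unsigned long example_reserve(unsigned long base)
{
	unsigned long reserve = PHYS_PFN(SZ_8K);        /* 2 pages */
	unsigned long base_pfn = PHYS_PFN(base);

	return reserve + (base_pfn - SECTION_ALIGN_DOWN(base_pfn));
}

int main(void)
{
	/* base sits 16 MiB (4096 pages) into its section: 2 + 4096 pages */
	assert(example_reserve(0x101000000UL) == 4098);
	return 0;
}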
Example No. 3
void __ref vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long page_order = get_order(page_size);
	unsigned long alt_start = ~0, alt_end = ~0;
	unsigned long base_pfn;

	start = _ALIGN_DOWN(start, page_size);
	if (altmap) {
		alt_start = altmap->base_pfn;
		alt_end = altmap->base_pfn + altmap->reserve +
			  altmap->free + altmap->alloc + altmap->align;
	}

	pr_debug("vmemmap_free %lx...%lx\n", start, end);

	for (; start < end; start += page_size) {
		unsigned long nr_pages, addr;
		struct page *section_base;
		struct page *page;

		/*
		 * The section has already been marked as invalid, so if
		 * vmemmap_populated() returns true some other section still
		 * uses this page; skip it.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		addr = vmemmap_list_free(start);
		if (!addr)
			continue;

		page = pfn_to_page(addr >> PAGE_SHIFT);
		section_base = pfn_to_page(vmemmap_section_start(start));
		nr_pages = 1 << page_order;
		base_pfn = PHYS_PFN(addr);

		if (base_pfn >= alt_start && base_pfn < alt_end) {
			vmem_altmap_free(altmap, nr_pages);
		} else if (PageReserved(page)) {
			/* allocated from bootmem */
			if (page_size < PAGE_SIZE) {
				/*
				 * this shouldn't happen, but if it is
				 * the case, leave the memory there
				 */
				WARN_ON_ONCE(1);
			} else {
				while (nr_pages--)
					free_reserved_page(page++);
			}
		} else {
			free_pages((unsigned long)(__va(addr)), page_order);
		}

		vmemmap_remove_mapping(start, page_size);
	}
}
Example No. 4
static void *try_ram_remap(resource_size_t offset, size_t size)
{
	unsigned long pfn = PHYS_PFN(offset);

	/* In the simple case just return the existing linear address */
	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)))
		return __va(offset);
	return NULL; /* fallback to arch_memremap_wb */
}
Example No. 5
/*
 * Initialise allocator, placing addresses [@min,@max] in free pool.
 * @min and @max are PHYSICAL addresses.
 */
static void init_page_allocator(unsigned long min, unsigned long max)
{
    int i;
    unsigned long range, bitmap_size;
    chunk_head_t *ch;
    chunk_tail_t *ct;
    for ( i = 0; i < FREELIST_SIZE; i++ )
    {
        free_head[i]       = &free_tail[i];
        free_tail[i].pprev = &free_head[i];
        free_tail[i].next  = NULL;
    }

    min = round_pgup  (min);
    max = round_pgdown(max);

    /* Allocate space for the allocation bitmap. */
    bitmap_size  = (max+1) >> (PAGE_SHIFT+3);
    bitmap_size  = round_pgup(bitmap_size);
    alloc_bitmap = (unsigned long *)to_virt(min);
    min         += bitmap_size;
    range        = max - min;

    /* All allocated by default. */
    memset(alloc_bitmap, ~0, bitmap_size);
    /* Free up the memory we've been given to play with. */
    map_free(PHYS_PFN(min), range>>PAGE_SHIFT);

    /* The buddy lists are addressed in high memory. */
    min = (unsigned long) to_virt(min);
    max = (unsigned long) to_virt(max);

    while ( range != 0 )
    {
        /*
         * Next chunk is limited by alignment of min, but also
         * must not be bigger than remaining range.
         */
        for ( i = PAGE_SHIFT; (1UL<<(i+1)) <= range; i++ )
            if ( min & (1UL<<i) ) break;


        ch = (chunk_head_t *)min;
        min   += (1UL<<i);
        range -= (1UL<<i);
        ct = (chunk_tail_t *)min-1;
        i -= PAGE_SHIFT;
        ch->level       = i;
        ch->next        = free_head[i];
        ch->pprev       = &free_head[i];
        ch->next->pprev = &ch->next;
        free_head[i]    = ch;
        ct->level       = i;
    }
}
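The interesting step above is how each iteration picks its chunk size: the largest power of two that both fits in the remaining range and respects the alignment of min. Below is a small stand-alone sketch of just that selection, assuming PAGE_SHIFT == 12; it is illustrative, not Mini-OS code.

/* Illustrative sketch: pick the chunk order as in the loop above. */
#include <stdio.h>

#define PAGE_SHIFT 12

static int chunk_order(unsigned long min, unsigned long range)
{
	int i;

	/* largest 2^i bytes that is <= range and allowed by min's alignment */
	for (i = PAGE_SHIFT; (1UL << (i + 1)) <= range; i++)
		if (min & (1UL << i))
			break;
	return i - PAGE_SHIFT;      /* order in pages, as stored in ch->level */
}

int main(void)
{
	/* min aligned to 64 KiB with 1 MiB remaining: first chunk is order 4 (64 KiB) */
	printf("%d\n", chunk_order(0x110000UL, 0x100000UL));
	return 0;
}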
Example No. 6
static void hwpoison_clear(struct pmem_device *pmem,
		phys_addr_t phys, unsigned int len)
{
	unsigned long pfn_start, pfn_end, pfn;

	/* only pmem in the linear map supports HWPoison */
	if (is_vmalloc_addr(pmem->virt_addr))
		return;

	pfn_start = PHYS_PFN(phys);
	pfn_end = pfn_start + PHYS_PFN(len);
	for (pfn = pfn_start; pfn < pfn_end; pfn++) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * Note, no need to hold a get_dev_pagemap() reference
		 * here since we're in the driver I/O path and
		 * outstanding I/O requests pin the dev_pagemap.
		 */
		if (test_and_clear_pmem_poison(page))
			clear_mce_nospec(pfn);
	}
}
Example No. 7
/* Allocate 2^@order contiguous pages. Returns a VIRTUAL address. */
unsigned long alloc_pages(int order)
{
    int i;
    chunk_head_t *alloc_ch, *spare_ch;
    chunk_tail_t            *spare_ct;


    /* Find smallest order which can satisfy the request. */
    for ( i = order; i < FREELIST_SIZE; i++ ) {
        if ( !FREELIST_EMPTY(free_head[i]) )
            break;
    }

    if ( i == FREELIST_SIZE ) goto no_memory;
 
    /* Unlink a chunk. */
    alloc_ch = free_head[i];
    free_head[i] = alloc_ch->next;
    alloc_ch->next->pprev = alloc_ch->pprev;

    /* We may have to break the chunk a number of times. */
    while ( i != order )
    {
        /* Split into two equal parts. */
        i--;
        spare_ch = (chunk_head_t *)((char *)alloc_ch + (1UL<<(i+PAGE_SHIFT)));
        spare_ct = (chunk_tail_t *)((char *)spare_ch + (1UL<<(i+PAGE_SHIFT)))-1;

        /* Create new header for spare chunk. */
        spare_ch->level = i;
        spare_ch->next  = free_head[i];
        spare_ch->pprev = &free_head[i];
        spare_ct->level = i;

        /* Link in the spare chunk. */
        spare_ch->next->pprev = &spare_ch->next;
        free_head[i] = spare_ch;
    }
    
    map_alloc(PHYS_PFN(to_phys(alloc_ch)), 1UL<<order);

    return((unsigned long)alloc_ch);

 no_memory:

    printk("Cannot handle page request order %d!\n", order);

    return 0;
}
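A minimal usage sketch for the allocator above, assuming init_page_allocator() has already seeded the free lists; grab_four_pages() is an illustrative name.

/* Minimal usage sketch: alloc_pages() returns a virtual address, or 0 on failure. */
static unsigned long grab_four_pages(void)
{
	/* order 2 => 2^2 = 4 contiguous pages */
	unsigned long va = alloc_pages(2);

	if (va == 0)
		printk("out of contiguous pages\n");
	return va;
}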
Example No. 8
File: fadump.c  Project: VizXu/linux
/*
 * Returns 1, if there are no holes in boot memory area,
 * 0 otherwise.
 */
static int is_boot_memory_area_contiguous(void)
{
	struct memblock_region *reg;
	unsigned long tstart, tend;
	unsigned long start_pfn = PHYS_PFN(RMA_START);
	unsigned long end_pfn = PHYS_PFN(RMA_START + fw_dump.boot_memory_size);
	unsigned int ret = 0;

	for_each_memblock(memory, reg) {
		tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
		tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
		if (tstart < tend) {
			/* Memory hole from start_pfn to tstart */
			if (tstart > start_pfn)
				break;

			if (tend == end_pfn) {
				ret = 1;
				break;
			}

			start_pfn = tend + 1;
		}
	}

	return ret;
}
Example No. 9
static void init_page_allocator(unsigned long min, unsigned long max)
{
    mm.min_phys = round_pgup(min);
    mm.max_phys = round_pgdown(max);

    mm.bitmap_size = (mm.max_phys + 1) >> (PAGE_SHIFT + 3);
    mm.bitmap_size = round_pgup(mm.bitmap_size);
    mm.bitmap = (uint64_t *)to_virt(mm.min_phys);
    mm.min_phys += mm.bitmap_size;
    memset(mm.bitmap, ~0, mm.bitmap_size);

    mm.num_pages = (mm.max_phys - mm.min_phys) >> PAGE_SHIFT;
    bitmap_free(PHYS_PFN(mm.min_phys), mm.num_pages);

    printk("go_mm: page allocator manages %lu free pages\n", mm.num_pages);
}
Example No. 10
int kexec_allocate(struct xc_dom_image *dom, xen_vaddr_t up_to)
{
    unsigned long new_allocated = (up_to - dom->parms.virt_base) / PAGE_SIZE;
    unsigned long i;

    pages = realloc(pages, new_allocated * sizeof(*pages));
    pages_mfns = realloc(pages_mfns, new_allocated * sizeof(*pages_mfns));
    pages_moved2pfns = realloc(pages_moved2pfns, new_allocated * sizeof(*pages_moved2pfns));
    for (i = allocated; i < new_allocated; i++) {
        /* Exchange old page of PFN i with a newly allocated page.  */
        xen_pfn_t old_mfn = dom->p2m_host[i];
        xen_pfn_t new_pfn;
        xen_pfn_t new_mfn;

        pages[i] = alloc_page();
        memset((void*) pages[i], 0, PAGE_SIZE);
        new_pfn = PHYS_PFN(to_phys(pages[i]));
        pages_mfns[i] = new_mfn = pfn_to_mfn(new_pfn);

        /*
         * If the PFN of the newly allocated page (new_pfn) is less than the
         * currently requested PFN (i), look up the relevant PFN/MFN pair:
         * dom->p2m_host[new_pfn] no longer contains the proper MFN because
         * the original page with new_pfn was moved earlier to a different
         * location.
         */
        for (; new_pfn < i; new_pfn = pages_moved2pfns[new_pfn]);

        /* Store destination PFN of currently requested page. */
        pages_moved2pfns[i] = new_pfn;

        /* Put old page at new PFN */
        dom->p2m_host[new_pfn] = old_mfn;

        /* Put new page at PFN i */
        dom->p2m_host[i] = new_mfn;
    }

    allocated = new_allocated;

    return 0;
}
Example No. 11
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page;

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
		page = vaddr;
	} else if (platform_vaddr_uncached(vaddr)) {
		page = virt_to_page(platform_vaddr_to_cached(vaddr));
	} else {
#ifdef CONFIG_MMU
		dma_common_free_remap(vaddr, size, VM_MAP);
#endif
		page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));
	}

	if (!dma_release_from_contiguous(dev, page, count))
		__free_pages(page, get_order(size));
}
Example No. 12
static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
		struct resource *res, struct vmem_altmap *altmap)
{
	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
	u64 offset = le64_to_cpu(pfn_sb->dataoff);
	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
	u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	resource_size_t base = nsio->res.start + start_pad;
	struct vmem_altmap __altmap = {
		.base_pfn = init_altmap_base(base),
		.reserve = init_altmap_reserve(base),
	};

	memcpy(res, &nsio->res, sizeof(*res));
	res->start += start_pad;
	res->end -= end_trunc;

	if (nd_pfn->mode == PFN_MODE_RAM) {
		if (offset < SZ_8K)
			return ERR_PTR(-EINVAL);
		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
		altmap = NULL;
	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
		nd_pfn->npfns = PFN_SECTION_ALIGN_UP((resource_size(res)
					- offset) / PAGE_SIZE);
		if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
			dev_info(&nd_pfn->dev,
					"number of pfns truncated from %lld to %ld\n",
					le64_to_cpu(nd_pfn->pfn_sb->npfns),
					nd_pfn->npfns);
		memcpy(altmap, &__altmap, sizeof(*altmap));
		altmap->free = PHYS_PFN(offset - SZ_8K);
		altmap->alloc = 0;
	} else
		return ERR_PTR(-ENXIO);

	return altmap;
}
Example No. 13
static void devm_memremap_pages_release(struct device *dev, void *data)
{
	struct page_map *page_map = data;
	struct resource *res = &page_map->res;
	resource_size_t align_start, align_size;
	struct dev_pagemap *pgmap = &page_map->pgmap;

	if (percpu_ref_tryget_live(pgmap->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(pgmap->ref);
	}

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(resource_size(res), SECTION_SIZE);
	mem_hotplug_begin();
	arch_remove_memory(align_start, align_size);
	mem_hotplug_done();
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
	pgmap_radix_release(res);
	dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
			"%s: failed to free all reserved pages\n", __func__);
}
Example No. 14
static int handle_cow(unsigned long addr) {
	pgentry_t *tab = (pgentry_t *)start_info.pt_base, page;
	unsigned long new_page;
	int rc;

	page = tab[l4_table_offset(addr)];
	if (!(page & _PAGE_PRESENT))
		return 0;
	tab = pte_to_virt(page);

	page = tab[l3_table_offset(addr)];
	if (!(page & _PAGE_PRESENT))
		return 0;
	tab = pte_to_virt(page);

	page = tab[l2_table_offset(addr)];
	if (!(page & _PAGE_PRESENT))
		return 0;
	tab = pte_to_virt(page);

	page = tab[l1_table_offset(addr)];
	if (!(page & _PAGE_PRESENT))
		return 0;
	/* Only support CoW for the zero page. */
	if (PHYS_PFN(page) != mfn_zero)
		return 0;

	new_page = alloc_pages(0);
	memset((void *)new_page, 0, PAGE_SIZE);

	rc = HYPERVISOR_update_va_mapping(addr & PAGE_MASK,
					  __pte(virt_to_mach(new_page) | L1_PROT),
					  UVMF_INVLPG);
	if (!rc)
		return 1;

	printk("Map zero page to %lx failed: %d.\n", addr, rc);
	return 0;
}
Example No. 15
/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @res: "host memory" address range
 * @ref: a live per-cpu reference count
 * @altmap: optional descriptor for allocating the memmap from @res
 *
 * Notes:
 * 1/ @ref must be 'live' on entry and 'dead' before devm_memunmap_pages() time
 *    (or devm release event).
 *
 * 2/ @res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct resource *res,
		struct percpu_ref *ref, struct vmem_altmap *altmap)
{
	resource_size_t key, align_start, align_size, align_end;
	pgprot_t pgprot = PAGE_KERNEL;
	struct dev_pagemap *pgmap;
	struct page_map *page_map;
	int error, nid, is_ram;
	unsigned long pfn;

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
	is_ram = region_intersects(align_start, align_size,
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, res);
		return ERR_PTR(-ENXIO);
	}

	if (is_ram == REGION_INTERSECTS)
		return __va(res->start);

	if (!ref)
		return ERR_PTR(-EINVAL);

	page_map = devres_alloc_node(devm_memremap_pages_release,
			sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
	if (!page_map)
		return ERR_PTR(-ENOMEM);
	pgmap = &page_map->pgmap;

	memcpy(&page_map->res, res, sizeof(*res));

	pgmap->dev = dev;
	if (altmap) {
		memcpy(&page_map->altmap, altmap, sizeof(*altmap));
		pgmap->altmap = &page_map->altmap;
	}
	pgmap->ref = ref;
	pgmap->res = &page_map->res;

	mutex_lock(&pgmap_lock);
	error = 0;
	align_end = align_start + align_size - 1;
	for (key = align_start; key <= align_end; key += SECTION_SIZE) {
		struct dev_pagemap *dup;

		rcu_read_lock();
		dup = find_dev_pagemap(key);
		rcu_read_unlock();
		if (dup) {
			dev_err(dev, "%s: %pr collides with mapping for %s\n",
					__func__, res, dev_name(dup->dev));
			error = -EBUSY;
			break;
		}
		error = radix_tree_insert(&pgmap_radix, key >> PA_SECTION_SHIFT,
				page_map);
		if (error) {
			dev_err(dev, "%s: failed: %d\n", __func__, error);
			break;
		}
	}
	mutex_unlock(&pgmap_lock);
	if (error)
		goto err_radix;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
			align_size);
	if (error)
		goto err_pfn_remap;

	mem_hotplug_begin();
	error = arch_add_memory(nid, align_start, align_size, true);
	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	for_each_device_pfn(pfn, page_map) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * ZONE_DEVICE pages union ->lru with a ->pgmap back
		 * pointer.  It is a bug if a ZONE_DEVICE page is ever
		 * freed or placed on a driver-private list.  Seed the
		 * storage with LIST_POISON* values.
		 */
		list_del(&page->lru);
		page->pgmap = pgmap;
	}
	devres_add(dev, page_map);
	return __va(res->start);

 err_add_memory:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 err_pfn_remap:
 err_radix:
	pgmap_radix_release(res);
	devres_free(page_map);
	return ERR_PTR(error);
}
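A hedged caller sketch for the interface above (this older variant takes an explicit percpu_ref). my_release() and my_setup() are illustrative names, not kernel API; the percpu_ref is assumed to live at least as long as the mapping.

/* Sketch of a driver calling the devm_memremap_pages() shown above. */
static void my_release(struct percpu_ref *ref)
{
	/* invoked once all references on the ZONE_DEVICE pages are dropped */
}

static void *my_setup(struct device *dev, struct resource *res,
		      struct percpu_ref *ref)
{
	if (percpu_ref_init(ref, my_release, 0, GFP_KERNEL))
		return ERR_PTR(-ENOMEM);

	/* altmap == NULL: the memmap is allocated from regular memory */
	return devm_memremap_pages(dev, res, ref, NULL);
}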
Example No. 16
void kexec(void *kernel, long kernel_size, void *module, long module_size, char *cmdline, unsigned long flags)
{
    struct xc_dom_image *dom;
    int rc;
    domid_t domid = DOMID_SELF;
    xen_pfn_t pfn;
    xc_interface *xc_handle;
    unsigned long i;
    void *seg;
    xen_pfn_t boot_page_mfn = virt_to_mfn(&_boot_page);
    char features[] = "";
    struct mmu_update *m2p_updates;
    unsigned long nr_m2p_updates;

    DEBUG("booting with cmdline %s\n", cmdline);
    xc_handle = xc_interface_open(0,0,0);

    dom = xc_dom_allocate(xc_handle, cmdline, features);
    dom->allocate = kexec_allocate;

    /* We are using guest owned memory, therefore no limits. */
    xc_dom_kernel_max_size(dom, 0);
    xc_dom_ramdisk_max_size(dom, 0);

    dom->kernel_blob = kernel;
    dom->kernel_size = kernel_size;

    dom->ramdisk_blob = module;
    dom->ramdisk_size = module_size;

    dom->flags = flags;
    dom->console_evtchn = start_info.console.domU.evtchn;
    dom->xenstore_evtchn = start_info.store_evtchn;

    tpm_hash2pcr(dom, cmdline);

    if ( (rc = xc_dom_boot_xen_init(dom, xc_handle, domid)) != 0 ) {
        grub_printf("xc_dom_boot_xen_init returned %d\n", rc);
        errnum = ERR_BOOT_FAILURE;
        goto out;
    }
    if ( (rc = xc_dom_parse_image(dom)) != 0 ) {
        grub_printf("xc_dom_parse_image returned %d\n", rc);
        errnum = ERR_BOOT_FAILURE;
        goto out;
    }

#ifdef __i386__
    if (strcmp(dom->guest_type, "xen-3.0-x86_32p")) {
        grub_printf("can only boot x86 32 PAE kernels, not %s\n", dom->guest_type);
        errnum = ERR_EXEC_FORMAT;
        goto out;
    }
#endif
#ifdef __x86_64__
    if (strcmp(dom->guest_type, "xen-3.0-x86_64")) {
        grub_printf("can only boot x86 64 kernels, not %s\n", dom->guest_type);
        errnum = ERR_EXEC_FORMAT;
        goto out;
    }
#endif

    /* equivalent of xc_dom_mem_init */
    dom->arch_hooks = xc_dom_find_arch_hooks(xc_handle, dom->guest_type);
    dom->total_pages = start_info.nr_pages;

    /* equivalent of arch_setup_meminit */

    /* setup initial p2m */
    dom->p2m_host = malloc(sizeof(*dom->p2m_host) * dom->total_pages);

    /* Start with our current P2M */
    for (i = 0; i < dom->total_pages; i++)
        dom->p2m_host[i] = pfn_to_mfn(i);

    if ( (rc = xc_dom_build_image(dom)) != 0 ) {
        grub_printf("xc_dom_build_image returned %d\n", rc);
        errnum = ERR_BOOT_FAILURE;
        goto out;
    }

    /* copy hypercall page */
    /* TODO: domctl instead, but requires privileges */
    if (dom->parms.virt_hypercall != -1) {
        pfn = PHYS_PFN(dom->parms.virt_hypercall - dom->parms.virt_base);
        memcpy((void *) pages[pfn], hypercall_page, PAGE_SIZE);
    }

    /* Equivalent of xc_dom_boot_image */
    dom->shared_info_mfn = PHYS_PFN(start_info.shared_info);

    if (!xc_dom_compat_check(dom)) {
        grub_printf("xc_dom_compat_check failed\n");
        errnum = ERR_EXEC_FORMAT;
        goto out;
    }

    /* Move current console, xenstore and boot MFNs to the allocated place */
    do_exchange(dom, dom->console_pfn, start_info.console.domU.mfn);
    do_exchange(dom, dom->xenstore_pfn, start_info.store_mfn);
    DEBUG("virt base at %llx\n", dom->parms.virt_base);
    DEBUG("bootstack_pfn %lx\n", dom->bootstack_pfn);
    _boot_target = dom->parms.virt_base + PFN_PHYS(dom->bootstack_pfn);
    DEBUG("_boot_target %lx\n", _boot_target);
    do_exchange(dom, PHYS_PFN(_boot_target - dom->parms.virt_base),
            virt_to_mfn(&_boot_page));

    /* Make sure the bootstrap page table does not RW-map any of our current
     * page table frames */
    kexec_allocate(dom, dom->virt_pgtab_end);

    if ( (rc = xc_dom_update_guest_p2m(dom))) {
        grub_printf("xc_dom_update_guest_p2m returned %d\n", rc);
        errnum = ERR_BOOT_FAILURE;
        goto out;
    }

    if ( dom->arch_hooks->setup_pgtables )
        if ( (rc = dom->arch_hooks->setup_pgtables(dom))) {
            grub_printf("setup_pgtables returned %d\n", rc);
            errnum = ERR_BOOT_FAILURE;
            goto out;
        }

    /* start info page */
#undef start_info
    if ( dom->arch_hooks->start_info )
        dom->arch_hooks->start_info(dom);
#define start_info (start_info_union.start_info)

    xc_dom_log_memory_footprint(dom);

    /* Unmap libxc's projection of the boot page table */
    seg = xc_dom_seg_to_ptr(dom, &dom->pgtables_seg);
    munmap(seg, dom->pgtables_seg.vend - dom->pgtables_seg.vstart);

    /* Unmap day0 pages to avoid having a r/w mapping of the future page table */
    for (pfn = 0; pfn < allocated; pfn++)
        munmap((void*) pages[pfn], PAGE_SIZE);

    /* Pin the boot page table base */
    if ( (rc = pin_table(dom->xch,
#ifdef __i386__
                MMUEXT_PIN_L3_TABLE,
#endif
#ifdef __x86_64__
                MMUEXT_PIN_L4_TABLE,
#endif
                xc_dom_p2m_host(dom, dom->pgtables_seg.pfn),
                dom->guest_domid)) != 0 ) {
        grub_printf("pin_table(%lx) returned %d\n", xc_dom_p2m_host(dom,
                    dom->pgtables_seg.pfn), rc);
        errnum = ERR_BOOT_FAILURE;
        goto out_remap;
    }

    /* We populate the Mini-OS page table here so that boot.S can just call
     * update_va_mapping to project itself there.  */
    need_pgt(_boot_target);
    DEBUG("day0 pages %lx\n", allocated);
    DEBUG("boot target page %lx\n", _boot_target);
    DEBUG("boot page %p\n", &_boot_page);
    DEBUG("boot page mfn %lx\n", boot_page_mfn);
    _boot_page_entry = PFN_PHYS(boot_page_mfn) | L1_PROT;
    DEBUG("boot page entry %llx\n", _boot_page_entry);
    _boot_oldpdmfn = virt_to_mfn(start_info.pt_base);
    DEBUG("boot old pd mfn %lx\n", _boot_oldpdmfn);
    DEBUG("boot pd virt %lx\n", dom->pgtables_seg.vstart);
    _boot_pdmfn = dom->p2m_host[PHYS_PFN(dom->pgtables_seg.vstart - dom->parms.virt_base)];
    DEBUG("boot pd mfn %lx\n", _boot_pdmfn);
    _boot_stack = _boot_target + PAGE_SIZE;
    DEBUG("boot stack %lx\n", _boot_stack);
    _boot_start_info = dom->parms.virt_base + PFN_PHYS(dom->start_info_pfn);
    DEBUG("boot start info %lx\n", _boot_start_info);
    _boot_start = dom->parms.virt_entry;
    DEBUG("boot start %lx\n", _boot_start);

    /* Keep only useful entries */
    for (nr_m2p_updates = pfn = 0; pfn < start_info.nr_pages; pfn++)
        if (dom->p2m_host[pfn] != pfn_to_mfn(pfn))
            nr_m2p_updates++;

    m2p_updates = malloc(sizeof(*m2p_updates) * nr_m2p_updates);
    for (i = pfn = 0; pfn < start_info.nr_pages; pfn++)
        if (dom->p2m_host[pfn] != pfn_to_mfn(pfn)) {
            m2p_updates[i].ptr = PFN_PHYS(dom->p2m_host[pfn]) | MMU_MACHPHYS_UPDATE;
            m2p_updates[i].val = pfn;
            i++;
        }

    for (i = 0; i < blk_nb; i++)
        shutdown_blkfront(blk_dev[i]);
    if (net_dev)
        shutdown_netfront(net_dev);
    if (kbd_dev)
        shutdown_kbdfront(kbd_dev);
    stop_kernel();

    /* Update M2P */
    if ((rc = HYPERVISOR_mmu_update(m2p_updates, nr_m2p_updates, NULL, DOMID_SELF)) < 0) {
        xprintk("Could not update M2P\n");
        ASSERT(0);
    }

    xprintk("go!\n");

    /* Jump to trampoline boot page */
    _boot();

    ASSERT(0);

out_remap:
    for (pfn = 0; pfn < allocated; pfn++)
        do_map_frames(pages[pfn], &pages_mfns[pfn], 1, 0, 0, DOMID_SELF, 0, L1_PROT);
out:
    xc_dom_release(dom);
    for (pfn = 0; pfn < allocated; pfn++)
        free_page((void*)pages[pfn]);
    free(pages);
    free(pages_mfns);
    pages = NULL;
    pages_mfns = NULL;
    allocated = 0;
    xc_interface_close(xc_handle);
}
Example No. 17
/*
 * We hotplug memory at section granularity, pad the reserved area from
 * the previous section base to the namespace base address.
 */
static unsigned long init_altmap_base(resource_size_t base)
{
	unsigned long base_pfn = PHYS_PFN(base);

	return PFN_SECTION_ALIGN_DOWN(base_pfn);
}