Example #1
void guest_physmap_remove_page(struct domain *d,
                               unsigned long gpfn,
                               unsigned long mfn, unsigned int page_order)
{
    apply_p2m_changes(d, REMOVE,
                      pfn_to_paddr(gpfn),
                      pfn_to_paddr(gpfn + (1<<page_order)),
                      pfn_to_paddr(mfn), MATTR_MEM, p2m_invalid);
}
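
Every example on this page feeds a frame number into pfn_to_paddr() to obtain a byte address. In the Xen tree, where most of these snippets originate, the helper is a plain shift by PAGE_SHIFT. A minimal sketch of the conversion pair, assuming 4 KiB pages (PAGE_SHIFT == 12) as on the Xen/ARM port:

/* Minimal sketch of the conversion helpers the examples rely on.
 * Assumption: 4 KiB pages, i.e. PAGE_SHIFT == 12, as in the Xen/ARM port. */
#define PAGE_SHIFT 12

typedef unsigned long long paddr_t;

#define pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)        /* frame number -> byte address */
#define paddr_to_pfn(pa)  ((unsigned long)((pa) >> PAGE_SHIFT)) /* byte address -> frame number */

So in Example #1 above, pfn_to_paddr(gpfn) and pfn_to_paddr(gpfn + (1 << page_order)) delimit the guest-physical byte range covered by 2^page_order contiguous pages, which apply_p2m_changes() then unmaps.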
Example #2
int relinquish_p2m_mapping(struct domain *d)
{
    struct p2m_domain *p2m = &d->arch.p2m;

    return apply_p2m_changes(d, RELINQUISH,
                              pfn_to_paddr(p2m->lowest_mapped_gfn),
                              pfn_to_paddr(p2m->max_mapped_gfn),
                              pfn_to_paddr(INVALID_MFN),
                              MATTR_MEM, p2m_invalid);
}
Example #3
int guest_physmap_add_entry(struct domain *d,
                            unsigned long gpfn,
                            unsigned long mfn,
                            unsigned long page_order,
                            p2m_type_t t)
{
    return apply_p2m_changes(d, INSERT,
                             pfn_to_paddr(gpfn),
                             pfn_to_paddr(gpfn + (1 << page_order)),
                             pfn_to_paddr(mfn), MATTR_MEM, t);
}
Example #4
int p2m_cache_flush(struct domain *d, xen_pfn_t start_mfn, xen_pfn_t end_mfn)
{
    struct p2m_domain *p2m = &d->arch.p2m;

    start_mfn = MAX(start_mfn, p2m->lowest_mapped_gfn);
    end_mfn = MIN(end_mfn, p2m->max_mapped_gfn);

    return apply_p2m_changes(d, CACHEFLUSH,
                             pfn_to_paddr(start_mfn),
                             pfn_to_paddr(end_mfn),
                             pfn_to_paddr(INVALID_MFN),
                             MATTR_MEM, p2m_invalid);
}
Example #5
File: x86.c Project: karibou/progit
unsigned long long
vtop_x86_remap(unsigned long vaddr)
{
	int i;
	for (i = 0; i < max_numnodes; ++i)
		if (vaddr >= remap_start_vaddr[i] &&
		    vaddr < remap_end_vaddr[i])
			return pfn_to_paddr(remap_start_pfn[i]) +
				vaddr - remap_start_vaddr[i];
	return NOT_PADDR;
}
Example #6
unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn)
{
    paddr_t p = p2m_lookup(d, pfn_to_paddr(gpfn), NULL);
    return p >> PAGE_SHIFT;
}
Example #7
File: x86_64.c Project: karibou/progit
int
get_machdep_info_x86_64(void)
{
	int i, j, mfns[MAX_X86_64_FRAMES];
	unsigned long frame_mfn[MAX_X86_64_FRAMES];
	unsigned long buf[MFNS_PER_FRAME];

	info->section_size_bits = _SECTION_SIZE_BITS;

	if (!is_xen_memory())
		return TRUE;

	/*
	 * Get the information for translating domain-0's physical
	 * address into machine address.
	 */
	if (!readmem(MADDR_XEN, pfn_to_paddr(get_xen_p2m_mfn()),
					 &frame_mfn, PAGESIZE())) {
		ERRMSG("Can't get p2m_mfn.\n");
		return FALSE;
	}

	/*
	 * Count the number of p2m frames.
	 */
	for (i = 0; i < MAX_X86_64_FRAMES; i++) {
		mfns[i] = 0;
		if (!frame_mfn[i])
			break;

		if (!readmem(MADDR_XEN, pfn_to_paddr(frame_mfn[i]), &buf,
		    PAGESIZE())) {
			ERRMSG("Can't get frame_mfn[%d].\n", i);
			return FALSE;
		}
		for (j = 0; j < MFNS_PER_FRAME; j++) {
			if (!buf[j])
				break;

			mfns[i]++;
		}
		info->p2m_frames += mfns[i];
	}
	info->p2m_mfn_frame_list
	    = malloc(sizeof(unsigned long) * info->p2m_frames);
	if (info->p2m_mfn_frame_list == NULL) {
		ERRMSG("Can't allocate memory for p2m_mfn_frame_list. %s\n",
		    strerror(errno));
		return FALSE;
	}

	/*
	 * Get p2m_mfn_frame_list.
	 */
	for (i = 0; i < MAX_X86_64_FRAMES; i++) {
		if (!frame_mfn[i])
			break;

		if (!readmem(MADDR_XEN, pfn_to_paddr(frame_mfn[i]),
		    &info->p2m_mfn_frame_list[i * MFNS_PER_FRAME],
		    mfns[i] * sizeof(unsigned long))) {
			ERRMSG("Can't get p2m_mfn_frame_list.\n");
			return FALSE;
		}
		if (mfns[i] != MFNS_PER_FRAME)
			break;
	}
	return TRUE;
}
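
Once all p2m frames have been read, translating a dom0 pseudo-physical frame to a machine address is essentially an array lookup followed by the usual shift. A hedged sketch, under the assumption that the per-frame buffers have been concatenated into one flat p2m array (a hypothetical layout for illustration, not the tool's own data structure):

/* Hypothetical flat p2m table, for illustration only: entry i holds the
 * machine frame backing dom0 pseudo-physical frame i, or 0 if unmapped. */
static unsigned long long ptom_sketch(const unsigned long *p2m,
				      unsigned long nr_entries,
				      unsigned long pfn,
				      unsigned long offset)
{
	if (pfn >= nr_entries || p2m[pfn] == 0)
		return 0;                           /* unmapped frame */
	return pfn_to_paddr(p2m[pfn]) + offset;     /* machine address */
}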
Example #8
File: setup.c Project: codercold/xen-4.4
static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
{
    paddr_t ram_start, ram_end, ram_size;
    paddr_t contig_start, contig_end;
    paddr_t s, e;
    unsigned long ram_pages;
    unsigned long heap_pages, xenheap_pages, domheap_pages;
    unsigned long dtb_pages;
    unsigned long boot_mfn_start, boot_mfn_end;
    int i;
    void *fdt;

    if ( !early_info.mem.nr_banks )
        early_panic("No memory bank");

    /*
     * We are going to accumulate two regions here.
     *
     * The first is the bounds of the initial memory region which is
     * contiguous with the first bank. For simplicity the xenheap is
     * always allocated from this region.
     *
     * The second is the complete bounds of the regions containing RAM
     * (i.e. from the lowest RAM address to the highest), which
     * includes any holes.
     *
     * We also track the number of actual RAM pages (i.e. not counting
     * the holes).
     */
    ram_size  = early_info.mem.bank[0].size;

    contig_start = ram_start = early_info.mem.bank[0].start;
    contig_end   = ram_end = ram_start + ram_size;

    for ( i = 1; i < early_info.mem.nr_banks; i++ )
    {
        paddr_t bank_start = early_info.mem.bank[i].start;
        paddr_t bank_size = early_info.mem.bank[i].size;
        paddr_t bank_end = bank_start + bank_size;

        paddr_t new_ram_size = ram_size + bank_size;
        paddr_t new_ram_start = min(ram_start,bank_start);
        paddr_t new_ram_end = max(ram_end,bank_end);

        /*
         * If the new bank is contiguous with the initial contiguous
         * region then incorporate it into the contiguous region.
         *
         * Otherwise we allow non-contiguous regions so long as at
         * least half of the total RAM region actually contains
         * RAM. We actually fudge this slightly and require that
         * adding the current bank does not cause us to violate this
         * restriction.
         *
         * This restriction ensures that the frametable (which is not
         * currently sparse) does not consume all available RAM.
         */
        if ( bank_start == contig_end )
            contig_end = bank_end;
        else if ( bank_end == contig_start )
            contig_start = bank_start;
        else if ( 2 * new_ram_size < new_ram_end - new_ram_start )
            /* Would create memory map which is too sparse, so stop here. */
            break;

        ram_size = new_ram_size;
        ram_start = new_ram_start;
        ram_end = new_ram_end;
    }

    if ( i != early_info.mem.nr_banks )
    {
        early_printk("WARNING: only using %d out of %d memory banks\n",
                     i, early_info.mem.nr_banks);
        early_info.mem.nr_banks = i;
    }

    total_pages = ram_pages = ram_size >> PAGE_SHIFT;

    /*
     * Locate the xenheap using these constraints:
     *
     *  - must be 32 MiB aligned
     *  - must not include Xen itself or the boot modules
     *  - must be at most 1/8 the total RAM in the system
     *  - must be at least 128M
     *
     * We try to allocate the largest xenheap possible within these
     * constraints.
     */
    heap_pages = ram_pages;
    xenheap_pages = (heap_pages/8 + 0x1fffUL) & ~0x1fffUL;
    xenheap_pages = max(xenheap_pages, 128UL<<(20-PAGE_SHIFT));

    do
    {
        /* xenheap is always in the initial contiguous region */
        e = consider_modules(contig_start, contig_end,
                             pfn_to_paddr(xenheap_pages),
                             32<<20, 0);
        if ( e )
            break;

        xenheap_pages >>= 1;
    } while ( xenheap_pages > 128<<(20-PAGE_SHIFT) );

    if ( ! e )
        early_panic("Not not enough space for xenheap");

    domheap_pages = heap_pages - xenheap_pages;

    early_printk("Xen heap: %"PRIpaddr"-%"PRIpaddr" (%lu pages)\n",
                 e - (pfn_to_paddr(xenheap_pages)), e,
                 xenheap_pages);
    early_printk("Dom heap: %lu pages\n", domheap_pages);

    setup_xenheap_mappings((e >> PAGE_SHIFT) - xenheap_pages, xenheap_pages);

    /*
     * Need a single mapped page for populating bootmem_region_list
     * and enough mapped pages for copying the DTB.
     */
    dtb_pages = (dtb_size + PAGE_SIZE-1) >> PAGE_SHIFT;
    boot_mfn_start = xenheap_mfn_end - dtb_pages - 1;
    boot_mfn_end = xenheap_mfn_end;

    init_boot_pages(pfn_to_paddr(boot_mfn_start), pfn_to_paddr(boot_mfn_end));

    /* Copy the DTB. */
    fdt = mfn_to_virt(alloc_boot_pages(dtb_pages, 1));
    copy_from_paddr(fdt, dtb_paddr, dtb_size, BUFFERABLE);
    device_tree_flattened = fdt;

    /* Add non-xenheap memory */
    for ( i = 0; i < early_info.mem.nr_banks; i++ )
    {
        paddr_t bank_start = early_info.mem.bank[i].start;
        paddr_t bank_end = bank_start + early_info.mem.bank[i].size;

        s = bank_start;
        while ( s < bank_end )
        {
            paddr_t n = bank_end;

            e = next_module(s, &n);

            if ( e == ~(paddr_t)0 )
            {
                e = n = ram_end;
            }

            /*
             * The module extends into a RAM bank other than the one
             * we are dealing with here, so clip it to this bank.
             */
            if ( e > bank_end )
                e = bank_end;

            /* Avoid the xenheap */
            if ( s < pfn_to_paddr(xenheap_mfn_start+xenheap_pages)
                 && pfn_to_paddr(xenheap_mfn_start) < e )
            {
                e = pfn_to_paddr(xenheap_mfn_start);
                n = pfn_to_paddr(xenheap_mfn_start+xenheap_pages);
            }

            dt_unreserved_regions(s, e, init_boot_pages, 0);

            s = n;
        }
    }

    /* Frame table covers all of RAM region, including holes */
    setup_frametable_mappings(ram_start, ram_end);
    max_page = PFN_DOWN(ram_end);

    /* Add xenheap memory that was not already added to the boot
       allocator. */
    init_xenheap_pages(pfn_to_paddr(xenheap_mfn_start),
                       pfn_to_paddr(boot_mfn_start));

    end_boot_allocator();
}
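
The xenheap sizing in this example takes 1/8 of RAM, rounds it up to a 32 MiB boundary (0x2000 four-KiB pages, hence the & ~0x1fffUL), and enforces a 128 MiB floor before the consider_modules() loop starts halving it. A small standalone sketch of just that arithmetic, using an illustrative 2 GiB RAM figure rather than values from a real board:

#include <stdio.h>

/* Illustrative recomputation of the xenheap sizing above:
 * 1/8 of RAM, rounded up to 32 MiB, with a 128 MiB minimum. */
#define PAGE_SHIFT 12UL

int main(void)
{
    unsigned long ram_pages = (2UL << 30) >> PAGE_SHIFT;           /* assume 2 GiB of RAM */
    unsigned long xenheap_pages = (ram_pages / 8 + 0x1fffUL) & ~0x1fffUL;

    if ( xenheap_pages < (128UL << (20 - PAGE_SHIFT)) )
        xenheap_pages = 128UL << (20 - PAGE_SHIFT);

    printf("xenheap: %lu pages (%lu MiB)\n",
           xenheap_pages, xenheap_pages >> (20 - PAGE_SHIFT));
    return 0;
}

For 2 GiB of RAM this prints 65536 pages (256 MiB); the 128 MiB floor only matters below 1 GiB of RAM.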
Example #9
File: setup.c Project: caomw/xen
static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
{
    paddr_t ram_start, ram_end, ram_size;
    paddr_t s, e;
    unsigned long ram_pages;
    unsigned long heap_pages, xenheap_pages, domheap_pages;
    unsigned long dtb_pages;
    unsigned long boot_mfn_start, boot_mfn_end;
    int i;
    void *fdt;

    if ( !bootinfo.mem.nr_banks )
        panic("No memory bank");

    init_pdx();

    ram_start = bootinfo.mem.bank[0].start;
    ram_size  = bootinfo.mem.bank[0].size;
    ram_end   = ram_start + ram_size;

    for ( i = 1; i < bootinfo.mem.nr_banks; i++ )
    {
        paddr_t bank_start = bootinfo.mem.bank[i].start;
        paddr_t bank_size = bootinfo.mem.bank[i].size;
        paddr_t bank_end = bank_start + bank_size;

        ram_size  = ram_size + bank_size;
        ram_start = min(ram_start,bank_start);
        ram_end   = max(ram_end,bank_end);
    }

    total_pages = ram_pages = ram_size >> PAGE_SHIFT;

    /*
     * If the user has not requested otherwise via the command line
     * then locate the xenheap using these constraints:
     *
     *  - must be 32 MiB aligned
     *  - must not include Xen itself or the boot modules
     *  - must be at most 1GB or 1/32 the total RAM in the system if less
     *  - must be at least 32M
     *
     * We try to allocate the largest xenheap possible within these
     * constraints.
     */
    heap_pages = ram_pages;
    if ( opt_xenheap_megabytes )
        xenheap_pages = opt_xenheap_megabytes << (20-PAGE_SHIFT);
    else
    {
        xenheap_pages = (heap_pages/32 + 0x1fffUL) & ~0x1fffUL;
        xenheap_pages = max(xenheap_pages, 32UL<<(20-PAGE_SHIFT));
        xenheap_pages = min(xenheap_pages, 1UL<<(30-PAGE_SHIFT));
    }

    do
    {
        e = consider_modules(ram_start, ram_end,
                             pfn_to_paddr(xenheap_pages),
                             32<<20, 0);
        if ( e )
            break;

        xenheap_pages >>= 1;
    } while ( !opt_xenheap_megabytes && xenheap_pages > 32<<(20-PAGE_SHIFT) );

    if ( ! e )
        panic("Not not enough space for xenheap");

    domheap_pages = heap_pages - xenheap_pages;

    printk("Xen heap: %"PRIpaddr"-%"PRIpaddr" (%lu pages%s)\n",
           e - (pfn_to_paddr(xenheap_pages)), e, xenheap_pages,
           opt_xenheap_megabytes ? ", from command-line" : "");
    printk("Dom heap: %lu pages\n", domheap_pages);

    setup_xenheap_mappings((e >> PAGE_SHIFT) - xenheap_pages, xenheap_pages);

    /*
     * Need a single mapped page for populating bootmem_region_list
     * and enough mapped pages for copying the DTB.
     */
    dtb_pages = (dtb_size + PAGE_SIZE-1) >> PAGE_SHIFT;
    boot_mfn_start = xenheap_mfn_end - dtb_pages - 1;
    boot_mfn_end = xenheap_mfn_end;

    init_boot_pages(pfn_to_paddr(boot_mfn_start), pfn_to_paddr(boot_mfn_end));

    /* Copy the DTB. */
    fdt = mfn_to_virt(alloc_boot_pages(dtb_pages, 1));
    copy_from_paddr(fdt, dtb_paddr, dtb_size);
    device_tree_flattened = fdt;

    /* Add non-xenheap memory */
    for ( i = 0; i < bootinfo.mem.nr_banks; i++ )
    {
        paddr_t bank_start = bootinfo.mem.bank[i].start;
        paddr_t bank_end = bank_start + bootinfo.mem.bank[i].size;

        s = bank_start;
        while ( s < bank_end )
        {
            paddr_t n = bank_end;

            e = next_module(s, &n);

            if ( e == ~(paddr_t)0 )
            {
                e = n = ram_end;
            }

            /*
             * The module extends into a RAM bank other than the one
             * we are dealing with here, so clip it to this bank.
             */
            if ( e > bank_end )
                e = bank_end;

            /* Avoid the xenheap */
            if ( s < pfn_to_paddr(xenheap_mfn_start+xenheap_pages)
                 && pfn_to_paddr(xenheap_mfn_start) < e )
            {
                e = pfn_to_paddr(xenheap_mfn_start);
                n = pfn_to_paddr(xenheap_mfn_start+xenheap_pages);
            }

            dt_unreserved_regions(s, e, init_boot_pages, 0);

            s = n;
        }
    }

    /* Frame table covers all of RAM region, including holes */
    setup_frametable_mappings(ram_start, ram_end);
    max_page = PFN_DOWN(ram_end);

    /* Add xenheap memory that was not already added to the boot
       allocator. */
    init_xenheap_pages(pfn_to_paddr(xenheap_mfn_start),
                       pfn_to_paddr(boot_mfn_start));
}