Example #1
File: p2m.c Project: bibn115/RT-Xen
int p2m_alloc_table(struct domain *d)
{
    struct p2m_domain *p2m = &d->arch.p2m;
    struct page_info *page;
    void *p;

    /* First level P2M is 2 consecutive pages */
    page = alloc_domheap_pages(NULL, 1, 0);
    if ( page == NULL )
        return -ENOMEM;

    spin_lock(&p2m->lock);

    page_list_add(page, &p2m->pages);

    /* Clear both first level pages */
    p = __map_domain_page(page);
    clear_page(p);
    unmap_domain_page(p);

    p = __map_domain_page(page + 1);
    clear_page(p);
    unmap_domain_page(p);

    p2m->first_level = page;

    d->arch.vttbr = page_to_maddr(p2m->first_level)
        | ((uint64_t)p2m->vmid & 0xff) << 48;

    spin_unlock(&p2m->lock);

    return 0;
}
Example #2
File: p2m.c Project: Marshalzxy/xen
int p2m_alloc_table(struct domain *d)
{
    struct p2m_domain *p2m = &d->arch.p2m;
    struct page_info *page;
    void *p;

    page = alloc_domheap_pages(NULL, P2M_FIRST_ORDER, 0);
    if ( page == NULL )
        return -ENOMEM;

    spin_lock(&p2m->lock);

    /* Clear both first level pages */
    p = __map_domain_page(page);
    clear_page(p);
    unmap_domain_page(p);

    p = __map_domain_page(page + 1);
    clear_page(p);
    unmap_domain_page(p);

    p2m->first_level = page;

    d->arch.vttbr = page_to_maddr(p2m->first_level)
        | ((uint64_t)p2m->vmid & 0xff) << 48;

    /* Make sure that all TLBs corresponding to the new VMID are flushed
     * before using it
     */
    flush_tlb_domain(d);

    spin_unlock(&p2m->lock);

    return 0;
}
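Examples #1 and #2 are two revisions of the same Xen/ARM function: the later one sizes the allocation with P2M_FIRST_ORDER instead of a bare order of 1 and adds flush_tlb_domain(), so no stale TLB entries tagged with the (possibly reused) VMID survive into the new table. Both rely on the same map-clear-unmap idiom, since domheap pages are not guaranteed to be permanently mapped. A minimal sketch of that idiom, assuming Xen's __map_domain_page()/unmap_domain_page() pair; clear_domheap_pages() is a hypothetical helper, not a Xen API:

/* Hypothetical helper: zero 'nr' consecutive domheap pages through
 * short-lived mappings. */
static void clear_domheap_pages(struct page_info *page, unsigned int nr)
{
    unsigned int i;

    for ( i = 0; i < nr; i++ )
    {
        void *p = __map_domain_page(page + i); /* map one page at a time */
        clear_page(p);                         /* zero PAGE_SIZE bytes */
        unmap_domain_page(p);                  /* drop the mapping */
    }
}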
Example #3
/*
 * clear_user_page
 * @to: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void clear_user_page(void *to, unsigned long address, struct page *page)
{
	__set_bit(PG_mapped, &page->flags);
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		clear_page(to);
	else {
		pgprot_t pgprot = __pgprot(_PAGE_PRESENT | 
					   _PAGE_RW | _PAGE_CACHABLE |
					   _PAGE_DIRTY | _PAGE_ACCESSED | 
					   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
		unsigned long phys_addr = PHYSADDR(to);
		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
		pgd_t *dir = pgd_offset_k(p3_addr);
		pmd_t *pmd = pmd_offset(dir, p3_addr);
		pte_t *pte = pte_offset_kernel(pmd, p3_addr);
		pte_t entry;
		unsigned long flags;

		entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
		down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
		set_pte(pte, entry);
		local_irq_save(flags);
		__flush_tlb_page(get_asid(), p3_addr);
		local_irq_restore(flags);
		update_mmu_cache(NULL, p3_addr, entry);
		__clear_user_page((void *)p3_addr, to);
		pte_clear(&init_mm, p3_addr, pte);
		up(&p3map_sem[(address & CACHE_ALIAS)>>12]);
	}
}
Example #4
File: dax.c Project: LBSHackathon/linux
int dax_clear_blocks(struct inode *inode, sector_t block, long size)
{
	struct block_device *bdev = inode->i_sb->s_bdev;
	sector_t sector = block << (inode->i_blkbits - 9);

	might_sleep();
	do {
		void *addr;
		unsigned long pfn;
		long count;

		count = bdev_direct_access(bdev, sector, &addr, &pfn, size);
		if (count < 0)
			return count;
		BUG_ON(size < count);
		while (count > 0) {
			unsigned pgsz = PAGE_SIZE - offset_in_page(addr);
			if (pgsz > count)
				pgsz = count;
			if (pgsz < PAGE_SIZE)
				memset(addr, 0, pgsz);
			else
				clear_page(addr);
			addr += pgsz;
			size -= pgsz;
			count -= pgsz;
			BUG_ON(pgsz & 511);
			sector += pgsz / 512;
			cond_resched();
		}
	} while (size);

	return 0;
}
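The inner loop zeroes whole, aligned pages with clear_page() and falls back to memset() for the partial page at either end, because clear_page() is a page-sized fast path that assumes a page-aligned destination. The same split works outside the kernel; a minimal user-space sketch, with MY_PAGE_SIZE and my_clear_page() as stand-ins for PAGE_SIZE and clear_page():

#include <stdint.h>
#include <string.h>

#define MY_PAGE_SIZE 4096UL              /* stand-in for PAGE_SIZE */

static void my_clear_page(void *p)       /* stand-in for clear_page() */
{
	memset(p, 0, MY_PAGE_SIZE);
}

/* Zero 'len' bytes at 'addr', taking the full-page fast path whenever
 * the cursor is page aligned and at least one page remains. */
static void zero_range(char *addr, unsigned long len)
{
	while (len) {
		unsigned long chunk = MY_PAGE_SIZE -
				      ((uintptr_t)addr & (MY_PAGE_SIZE - 1));
		if (chunk > len)
			chunk = len;
		if (chunk < MY_PAGE_SIZE)
			memset(addr, 0, chunk);  /* partial page */
		else
			my_clear_page(addr);     /* aligned full page */
		addr += chunk;
		len -= chunk;
	}
}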
Example #5
File: pg-sh4.c Project: ivucica/linux
/*
 * clear_user_page
 * @to: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void clear_user_page(void *to, unsigned long address, struct page *page)
{
	__set_bit(PG_mapped, &page->flags);
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		clear_page(to);
	else {
		unsigned long phys_addr = PHYSADDR(to);
		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
		pgd_t *pgd = pgd_offset_k(p3_addr);
		pud_t *pud = pud_offset(pgd, p3_addr);
		pmd_t *pmd = pmd_offset(pud, p3_addr);
		pte_t *pte = pte_offset_kernel(pmd, p3_addr);
		pte_t entry;
		unsigned long flags;

		entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
		mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
		set_pte(pte, entry);
		local_irq_save(flags);
		__flush_tlb_page(get_asid(), p3_addr);
		local_irq_restore(flags);
		update_mmu_cache(NULL, p3_addr, entry);
		__clear_user_page((void *)p3_addr, to);
		pte_clear(&init_mm, p3_addr, pte);
		mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
	}
}
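Examples #3 and #5 are the same SH-4 routine before and after the kernel moved from semaphores (down/up) to a mutex and from a three-level to a four-level (pgd/pud/pmd/pte) walk. The first decision in both is the cache-colour test: with a virtually indexed cache, clearing the page through its kernel alias is safe only when that alias lands in the same cache colour as the user mapping. A minimal sketch of the test, with a hypothetical helper name:

/* Hypothetical helper: non-zero when kernel address 'to' and user
 * address 'address' fall in different cache colours, i.e. when a plain
 * clear_page(to) could leave stale lines in the user's colour. */
static inline int cache_colour_mismatch(void *to, unsigned long address)
{
	return ((address ^ (unsigned long)to) & CACHE_ALIAS) != 0;
}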
Example #6
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = LZO_E_OK;
	size_t clen = PAGE_SIZE;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (meta->table[index].size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
						mem, &clen);
	zs_unmap_object(meta->mem_pool, handle);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
		return ret;
	}

	return 0;
}
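The early return handles pages that were never stored because they were entirely zero (ZRAM_ZERO): reading one back is just clear_page() on the destination buffer. The matching store-side test is a word-at-a-time scan; a minimal sketch of that idea, not zram's exact code:

/* Sketch of the store-side counterpart: report whether a page holds
 * only zero bytes, scanning one long at a time. */
static int page_is_zero_filled(const void *mem)
{
	const unsigned long *p = mem;
	unsigned int i;

	for (i = 0; i < PAGE_SIZE / sizeof(*p); i++)
		if (p[i])
			return 0;
	return 1;
}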
Example #7
static int __init init_vdso(void)
{
	struct mips_vdso *vdso;

	vdso_page = alloc_page(GFP_KERNEL);
	if (!vdso_page)
		panic("Cannot allocate vdso");

	vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL);
	if (!vdso)
		panic("Cannot map vdso");
	clear_page(vdso);

	install_trampoline(vdso->rt_signal_trampoline, __NR_rt_sigreturn);
#ifdef CONFIG_32BIT
	install_trampoline(vdso->signal_trampoline, __NR_sigreturn);
#else
	install_trampoline(vdso->n32_rt_signal_trampoline,
			   __NR_N32_rt_sigreturn);
	install_trampoline(vdso->o32_signal_trampoline, __NR_O32_sigreturn);
	install_trampoline(vdso->o32_rt_signal_trampoline,
			   __NR_O32_rt_sigreturn);
#endif

	vunmap(vdso);

	return 0;
}
Example #8
static void shmedia_mapioaddr(unsigned long pa, unsigned long va)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	unsigned long flags = 1; /* 1 = CB0-1 device */

	DEBUG_IOREMAP(("shmedia_mapiopage pa %08lx va %08lx\n",  pa, va));

	pgdp = pgd_offset_k(va);
	if (pgd_none(*pgdp)) {
		pmdp = alloc_bootmem_low_pages(PTRS_PER_PMD * sizeof(pmd_t));
		if (pmdp == NULL) panic("No memory for pmd\n");
		memset(pmdp, 0, PTRS_PER_PMD * sizeof(pmd_t));
		set_pgd(pgdp, __pgd((unsigned long)pmdp | _KERNPG_TABLE));
	}

	pmdp = pmd_offset(pgdp, va);
	if (pmd_none(*pmdp)) {
		ptep = alloc_bootmem_low_pages(PTRS_PER_PTE * sizeof(pte_t));
		if (ptep == NULL) panic("No memory for pte\n");
		clear_page((void *)ptep);
		set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
	}

	ptep = pte_offset(pmdp, va);
	set_pte(ptep, mk_pte_phys(pa, __pgprot(_PAGE_PRESENT |
			_PAGE_READ | _PAGE_WRITE | 
			_PAGE_DIRTY | _PAGE_ACCESSED |_PAGE_SHARED | flags)));
}
Example #9
File: ioremap_64.c Project: mobilipia/iods
static void shmedia_unmapioaddr(unsigned long vaddr)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(vaddr);
	if (pgd_none(*pgdp) || pgd_bad(*pgdp))
		return;

	pudp = pud_offset(pgdp, vaddr);
	if (pud_none(*pudp) || pud_bad(*pudp))
		return;

	pmdp = pmd_offset(pudp, vaddr);
	if (pmd_none(*pmdp) || pmd_bad(*pmdp))
		return;

	ptep = pte_offset_kernel(pmdp, vaddr);

	if (pte_none(*ptep) || !pte_present(*ptep))
		return;

	clear_page((void *)ptep);
	pte_clear(&init_mm, vaddr, ptep);
}
Example #10
File: mmu.c Project: 0x7f454c46/linux
/**
 * kvm_mips_walk_pgd() - Walk page table with optional allocation.
 * @pgd:	Page directory pointer.
 * @addr:	Address to index page table using.
 * @cache:	MMU page cache to allocate new page tables from, or NULL.
 *
 * Walk the page tables pointed to by @pgd to find the PTE corresponding to the
 * address @addr. If page tables don't exist for @addr, they will be created
 * from the MMU cache if @cache is not NULL.
 *
 * Returns:	Pointer to pte_t corresponding to @addr.
 *		NULL if a page table doesn't exist for @addr and !@cache.
 *		NULL if a page table allocation failed.
 */
static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
				unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pgd += pgd_index(addr);
	if (pgd_none(*pgd)) {
		/* Not used on MIPS yet */
		BUG();
		return NULL;
	}
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		pmd_t *new_pmd;

		if (!cache)
			return NULL;
		new_pmd = mmu_memory_cache_alloc(cache);
		pmd_init((unsigned long)new_pmd,
			 (unsigned long)invalid_pte_table);
		pud_populate(NULL, pud, new_pmd);
	}
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		pte_t *new_pte;

		if (!cache)
			return NULL;
		new_pte = mmu_memory_cache_alloc(cache);
		clear_page(new_pte);
		pmd_populate_kernel(NULL, pmd, new_pte);
	}
	return pte_offset(pmd, addr);
}
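A caller passes a pre-filled cache when it intends to install a mapping, or NULL to merely probe. A hypothetical call site, where kvm_pgd, cache, gpa, pfn and prot are all assumed to exist:

/* Hypothetical call site: with a cache, missing intermediate tables are
 * allocated on the way down; with cache == NULL this only probes. */
pte_t *ptep = kvm_mips_walk_pgd(kvm_pgd, cache, gpa);
if (ptep)
	set_pte(ptep, pfn_pte(pfn, prot));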
Example #11
File: p2m.c Project: bibn115/RT-Xen
/* Allocate a new page table page and hook it in via the given entry */
static int p2m_create_table(struct domain *d,
                            lpae_t *entry)
{
    struct p2m_domain *p2m = &d->arch.p2m;
    struct page_info *page;
    void *p;
    lpae_t pte;

    BUG_ON(entry->p2m.valid);

    page = alloc_domheap_page(NULL, 0);
    if ( page == NULL )
        return -ENOMEM;

    page_list_add(page, &p2m->pages);

    p = __map_domain_page(page);
    clear_page(p);
    unmap_domain_page(p);

    pte = mfn_to_p2m_entry(page_to_mfn(page), MATTR_MEM);

    write_pte(entry, pte);

    return 0;
}
Example #12
/*
 * Clear the user page.  We need to deal with the aliasing issues,
 * so remap the kernel page into the same cache colour as the user
 * page.
 */
void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
{
	unsigned int offset = DCACHE_COLOUR(vaddr);
	unsigned long to = to_address + (offset << PAGE_SHIFT);

	/*
	 * Discard data in the kernel mapping for the new page
	 * FIXME: needs this MCRR to be supported.
	 */
	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
	   :
	   : "r" (kaddr),
	     "r" ((unsigned long)kaddr + PAGE_SIZE - L1_CACHE_BYTES)
	   : "cc");

	/*
	 * Now clear the page using the same cache colour as
	 * the pages ultimate destination.
	 */
	spin_lock(&v6_lock);

	set_pte(to_pte + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, to_pgprot));
	flush_tlb_kernel_page(to);
	clear_page((void *)to);

	spin_unlock(&v6_lock);
}
Example #13
File: apic.c Project: CrazyXen/XEN_CODE
void __init init_apic_mappings(void)
{
    unsigned long apic_phys;

    if ( x2apic_enabled )
        goto __next;
    /*
     * If no local APIC can be found then set up a fake all
     * zeroes page to simulate the local APIC and another
     * one for the IO-APIC.
     */
    if (!smp_found_config && detect_init_APIC()) {
        apic_phys = __pa(alloc_xenheap_page());
        clear_page(__va(apic_phys));
    } else
        apic_phys = mp_lapic_addr;

    set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
    apic_printk(APIC_VERBOSE, "mapped APIC to %08lx (%08lx)\n", APIC_BASE,
                apic_phys);

__next:
    /*
     * Fetch the APIC ID of the BSP in case we have a
     * default configuration (or the MP table is broken).
     */
    if (boot_cpu_physical_apicid == -1U)
        boot_cpu_physical_apicid = get_apic_id();
    x86_cpu_to_apicid[0] = get_apic_id();

    init_ioapic_mappings();
}
Example #14
File: pg-sh7705.c Project: 274914765/C
/*
 * clear_user_page
 * @to: P1 address
 * @address: U0 address to be mapped
 */
void clear_user_page(void *to, unsigned long address, struct page *pg)
{
    struct page *page = virt_to_page(to);

    __set_bit(PG_mapped, &page->flags);
    if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) {
        clear_page(to);
        __flush_wback_region(to, PAGE_SIZE);
    } else {
        __flush_purge_virtual_region(to,
                         (void *)(address & 0xfffff000),
                         PAGE_SIZE);
        clear_page(to);
        __flush_wback_region(to, PAGE_SIZE);
    }
}
Example #15
File: pgtable.c Project: 274914765/C
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
    pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
    if (pte)
        clear_page(pte);
    return pte;
}
Example #16
File: domain.c Project: Marshalzxy/xen
int arch_domain_create(struct domain *d, unsigned int domcr_flags)
{
    int rc;

    d->arch.relmem = RELMEM_not_started;

    /* Idle domains do not need this setup */
    if ( is_idle_domain(d) )
        return 0;

    if ( (rc = p2m_init(d)) != 0 )
        goto fail;

    rc = -ENOMEM;
    if ( (d->shared_info = alloc_xenheap_pages(0, 0)) == NULL )
        goto fail;

    /* Default the virtual ID to match the physical */
    d->arch.vpidr = boot_cpu_data.midr.bits;

    clear_page(d->shared_info);
    share_xen_page_with_guest(
        virt_to_page(d->shared_info), d, XENSHARE_writable);

    if ( (rc = p2m_alloc_table(d)) != 0 )
        goto fail;

    if ( (rc = gicv_setup(d)) != 0 )
        goto fail;

    if ( (rc = domain_vgic_init(d)) != 0 )
        goto fail;

    if ( (rc = domain_vtimer_init(d)) != 0 )
        goto fail;

    if ( d->domain_id )
        d->arch.evtchn_irq = GUEST_EVTCHN_PPI;
    else
        d->arch.evtchn_irq = platform_dom0_evtchn_ppi();

    /*
     * Virtual UART is only used by linux early printk and decompress code.
     * Only use it for the hardware domain because the linux kernel may not
     * support multi-platform.
     */
    if ( is_hardware_domain(d) && (rc = domain_vuart_init(d)) )
        goto fail;

    if ( (rc = iommu_domain_init(d)) != 0 )
        goto fail;

    return 0;

fail:
    d->is_dying = DOMDYING_dead;
    arch_domain_destroy(d);

    return rc;
}
Example #17
File: pgtable.c Project: sarnobat/knoppix
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *ret;

	if ((ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGDIR_ORDER)) != NULL)
		clear_page(ret);
	return ret;
}
Example #18
pte_t __bad_page(void)
{
	extern char empty_bad_page[PAGE_SIZE];
	unsigned long page = (unsigned long)empty_bad_page;

	clear_page(page);
	return pte_mkdirty(mk_pte(page, PAGE_SHARED));
}
Example #19
File: p2m.c Project: Fantu/Xen
/*
 * Allocate a new page table page and hook it in via the given entry.
 * apply_one_level relies on this returning 0 on success
 * and -ve on failure.
 *
 * If the existing entry is present then it must be a mapping and not
 * a table and it will be shattered into the next level down.
 *
 * level_shift is the number of bits at the level we want to create.
 */
static int p2m_create_table(struct domain *d, lpae_t *entry,
                            int level_shift, bool_t flush_cache)
{
    struct p2m_domain *p2m = &d->arch.p2m;
    struct page_info *page;
    lpae_t *p;
    lpae_t pte;
    int splitting = p2m_valid(*entry);

    BUG_ON(p2m_table(*entry));

    page = alloc_domheap_page(NULL, 0);
    if ( page == NULL )
        return -ENOMEM;

    page_list_add(page, &p2m->pages);

    p = __map_domain_page(page);
    if ( splitting )
    {
        p2m_type_t t = entry->p2m.type;
        unsigned long base_pfn = entry->p2m.base;
        int i;

        /*
         * We are either splitting a first level 1G page into 512 second level
         * 2M pages, or a second level 2M page into 512 third level 4K pages.
         */
         for ( i = 0; i < LPAE_ENTRIES; i++ )
         {
             pte = mfn_to_p2m_entry(base_pfn + (i<<(level_shift-LPAE_SHIFT)),
                                    MATTR_MEM, t, p2m->default_access);

             /*
              * First and second level super pages set p2m.table = 0, but
              * third level entries set table = 1.
              */
             if ( level_shift - LPAE_SHIFT )
                 pte.p2m.table = 0;

             write_pte(&p[i], pte);
         }
    }
    else
        clear_page(p);

    if ( flush_cache )
        clean_dcache_va_range(p, PAGE_SIZE);

    unmap_domain_page(p);

    pte = mfn_to_p2m_entry(page_to_mfn(page), MATTR_MEM, p2m_invalid,
                           p2m->default_access);

    p2m_write_pte(entry, pte, flush_cache);

    return 0;
}
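The pfn arithmetic in the shattering loop only works if level_shift arrives already reduced by PAGE_SHIFT, which the guard on level_shift - LPAE_SHIFT corroborates: shattering a 1 GB first-level entry gives level_shift = 30 - 12 = 18, so entry i maps base_pfn + (i << 9), a 2 MB stride; shattering a 2 MB second-level entry gives level_shift = 21 - 12 = 9, a stride of one 4 KB frame, and level_shift - LPAE_SHIFT == 0 then correctly leaves p2m.table = 1 on the new third-level entries.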
Example #20
File: domain.c Project: Marshalzxy/xen
struct vcpu *alloc_vcpu_struct(void)
{
    struct vcpu *v;
    BUILD_BUG_ON(sizeof(*v) > PAGE_SIZE);
    v = alloc_xenheap_pages(0, 0);
    if ( v != NULL )
        clear_page(v);
    return v;
}
Example #21
File: www.c Project: 21moons/contiki
/*-----------------------------------------------------------------------------------*/
static void
start_loading(void)
{
    loading = 1;
    x = y = 0;
    pagey = 0;
    webpageptr = webpage;

    clear_page();
}
Example #22
File: cache.c Project: 24hours/linux
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page);

	clear_page(kaddr);

	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_purge_region(kaddr, PAGE_SIZE);

	kunmap_atomic(kaddr);
}
Example #23
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
Example #24
static __init void *alloc_low_page(void)
{
	unsigned long pfn = pgt_buf_end++;
	void *adr;

	if (pfn >= pgt_buf_top)
		panic("alloc_low_page: ran out of memory");

	adr = __va(pfn * PAGE_SIZE);
	clear_page(adr);
	return adr;
}
Example #25
int
ext2_clear_xip_target(struct inode *inode, sector_t block)
{
	void *kaddr;
	unsigned long pfn;
	int rc;

	rc = __inode_direct_access(inode, block, &kaddr, &pfn);
	if (!rc)
		clear_page(kaddr);
	return rc;
}
Example #26
File: domain.c Project: Marshalzxy/xen
struct domain *alloc_domain_struct(void)
{
    struct domain *d;
    BUILD_BUG_ON(sizeof(*d) > PAGE_SIZE);
    d = alloc_xenheap_pages(0, 0);
    if ( d == NULL )
        return NULL;

    clear_page(d);
    d->arch.grant_table_gpfn = xmalloc_array(xen_pfn_t, max_nr_grant_frames);
    return d;
}
Example #27
File: pgtable_32.c Project: AK101111/linux
__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte;

	if (slab_is_available()) {
		pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
	} else {
		pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
		if (pte)
			clear_page(pte);
	}
	return pte;
}
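Examples #15 and #27 show the same allocation done two ways: #15 zeroes the page by hand with clear_page(), while #27 lets __GFP_ZERO do it inside the allocator and keeps the explicit clear_page() only for the early memblock path, where GFP flags do not apply. A sketch of the equivalence, with my_ prefixes marking hypothetical names:

/* Sketch: both helpers return an equally zeroed page table page. */
static pte_t *my_pte_alloc_zeroed_by_flag(void)
{
	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}

static pte_t *my_pte_alloc_zeroed_by_hand(void)
{
	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL);

	if (pte)
		clear_page(pte);	/* same result as __GFP_ZERO */
	return pte;
}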
Example #28
static pte_t * __init kernel_page_table(void)
{
	pte_t *ptablep;

	ptablep = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);

	clear_page(ptablep);
	__flush_page_to_ram(ptablep);
	flush_tlb_kernel_page(ptablep);
	nocache_page(ptablep);

	return ptablep;
}
Example #29
File: mm.c Project: chain78/none
/* get a free page by physical address */
void *get_free_page(void){
    while(1){
        foreach(i,CONST_MEM >> 12,KMEM >> 12){
            if(mmap[i] == 0){
                mmap[i]++;
                /* clear the page */
                clear_page((i << 12));
                return (void*)(i << 12);
            }
        }
    }
    return NULL;
}
Example #30
static inline int
get_maptrack_handle(
    struct grant_table *lgt)
{
    int                   i;
    grant_handle_t        handle;
    struct grant_mapping *new_mt;
    unsigned int          new_mt_limit, nr_frames;

    if ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
    {
        spin_lock(&lgt->lock);

        if ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
        {
            nr_frames = nr_maptrack_frames(lgt);
            if ( nr_frames >= max_nr_maptrack_frames() )
            {
                spin_unlock(&lgt->lock);
                return -1;
            }

            new_mt = alloc_xenheap_page();
            if ( new_mt == NULL )
            {
                spin_unlock(&lgt->lock);
                return -1;
            }

            clear_page(new_mt);

            new_mt_limit = lgt->maptrack_limit + MAPTRACK_PER_PAGE;

            for ( i = lgt->maptrack_limit; i < new_mt_limit; i++ )
            {
                new_mt[i % MAPTRACK_PER_PAGE].ref = i+1;
                new_mt[i % MAPTRACK_PER_PAGE].flags = 0;
            }

            lgt->maptrack[nr_frames] = new_mt;
            lgt->maptrack_limit      = new_mt_limit;

            gdprintk(XENLOG_INFO,
                     "Increased maptrack size to %u frames.\n", nr_frames + 1);
            handle = __get_maptrack_handle(lgt);
        }

        spin_unlock(&lgt->lock);
    }
    return handle;
}
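The initialisation loop threads the freshly cleared page into a free list: entry i's ref field holds the next free handle, i+1, so allocation is a constant-time pop. A sketch of the matching pop, assuming the list head lives in a maptrack_head field (field names are illustrative, not necessarily Xen's):

/* Sketch: pop the next free maptrack handle off the threaded list. */
static int maptrack_pop(struct grant_table *lgt)
{
    int h = lgt->maptrack_head;

    if ( h == lgt->maptrack_limit )
        return -1;                              /* free list exhausted */
    lgt->maptrack_head = lgt->maptrack[h / MAPTRACK_PER_PAGE]
                                      [h % MAPTRACK_PER_PAGE].ref;
    return h;
}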