Esempio n. 1
0
/*===========================================================================*
 *			     pt_map_in_range		     		     *
 *===========================================================================*/
PUBLIC int pt_map_in_range(struct vmproc *src_vmp, struct vmproc *dst_vmp,
	vir_bytes start, vir_bytes end)
{
/* Transfer all the mappings from the pt of the source process to the pt of
 * the destination process in the range specified. Returns OK.
 */
	int pde, pte;
	vir_bytes viraddr;
	pt_t *pt, *dst_pt;

	pt = &src_vmp->vm_pt;
	dst_pt = &dst_vmp->vm_pt;

	/* An 'end' of 0 means: up to the top of the data address space. */
	end = end ? end : VM_DATATOP;
	assert(start % I386_PAGE_SIZE == 0);
	assert(end % I386_PAGE_SIZE == 0);
	assert(I386_VM_PDE(start) >= proc_pde && start <= end);
	assert(I386_VM_PDE(end) < I386_VM_DIR_ENTRIES);

#if LU_DEBUG
	printf("VM: pt_map_in_range: src = %d, dst = %d\n",
		src_vmp->vm_endpoint, dst_vmp->vm_endpoint);
	printf("VM: pt_map_in_range: transferring from 0x%08x (pde %d pte %d) to 0x%08x (pde %d pte %d)\n",
		start, I386_VM_PDE(start), I386_VM_PTE(start),
		end, I386_VM_PDE(end), I386_VM_PTE(end));
#endif

	/* Scan all page-table entries in the (inclusive) range. The
	 * VM_DATATOP checks stop the walk before 'viraddr' can wrap past
	 * the top of the address space.
	 */
	for(viraddr = start; viraddr <= end; viraddr += I386_PAGE_SIZE) {
		pde = I386_VM_PDE(viraddr);
		if(!(pt->pt_dir[pde] & I386_VM_PRESENT)) {
			/* No page table here in the source; skip. */
			if(viraddr == VM_DATATOP) break;
			continue;
		}
		pte = I386_VM_PTE(viraddr);
		if(!(pt->pt_pt[pde][pte] & I386_VM_PRESENT)) {
			/* Page not mapped in the source; skip. */
			if(viraddr == VM_DATATOP) break;
			continue;
		}

		/* Transfer the mapping. NOTE(review): assumes the
		 * destination's page tables covering this range already
		 * exist (e.g. via pt_ptalloc_in_range) -- confirm at the
		 * call sites.
		 */
		dst_pt->pt_pt[pde][pte] = pt->pt_pt[pde][pte];

		if(viraddr == VM_DATATOP) break;
	}

	return OK;
}
Esempio n. 2
0
/*===========================================================================*
 *				findhole		     		     *
 *===========================================================================*/
PRIVATE u32_t findhole(pt_t *pt, u32_t vmin, u32_t vmax)
{
/* Find a space in the virtual address space of pagetable 'pt',
 * between page-aligned BYTE offsets vmin and vmax, to fit
 * a page in. Return byte offset, or NO_MEM if the range is full.
 */
	u32_t curv;
	int pde, try_restart;
	static u32_t lastv = 0;	/* where the previous search left off */

	/* Input sanity check. */
	assert(vmin + I386_PAGE_SIZE >= vmin);	/* no wraparound */
	assert(vmax >= vmin + I386_PAGE_SIZE);	/* room for >= one page */
	assert((vmin % I386_PAGE_SIZE) == 0);
	assert((vmax % I386_PAGE_SIZE) == 0);

#if SANITYCHECKS
	/* With sanity checks on, start at a random page in the range so
	 * address reuse patterns vary between runs.
	 */
	curv = ((u32_t) random()) % ((vmax - vmin)/I386_PAGE_SIZE);
	curv *= I386_PAGE_SIZE;
	curv += vmin;
#else
	/* Resume where the previous search stopped, if still in range. */
	curv = lastv;
	if(curv < vmin || curv >= vmax)
		curv = vmin;
#endif
	try_restart = 1;

	/* Scan forward for a free page, wrapping back to vmin once. */
	while(curv < vmax) {
		int pte;

		assert(curv >= vmin);
		assert(curv < vmax);

		pde = I386_VM_PDE(curv);
		pte = I386_VM_PTE(curv);

		/* Free if the whole page table is absent, or the page
		 * table entry itself is not present.
		 */
		if(!(pt->pt_dir[pde] & I386_VM_PRESENT) ||
		   !(pt->pt_pt[pde][pte] & I386_VM_PRESENT)) {
			lastv = curv;
			return curv;
		}

		curv += I386_PAGE_SIZE;

		if(curv >= vmax && try_restart) {
			curv = vmin;
			try_restart = 0;
		}
	}

	printf("VM: out of virtual address space in vm\n");

	return NO_MEM;
}
Esempio n. 3
0
/*===========================================================================*
 *			    pt_ptalloc_in_range		     		     *
 *===========================================================================*/
PUBLIC int pt_ptalloc_in_range(pt_t *pt, vir_bytes start, vir_bytes end,
	u32_t flags, int verify)
{
/* Make sure a page table exists for every page-directory slot covering the
 * given range, allocating them as needed. A 'start' of 0 means proc_pde;
 * an 'end' of 0 means the last directory entry. In verify mode a missing
 * page table is an error (EFAULT) instead of a reason to allocate.
 */
	int pde, first_pde, last_pde;

	first_pde = start ? I386_VM_PDE(start) : proc_pde;
	last_pde = end ? I386_VM_PDE(end) : I386_VM_DIR_ENTRIES - 1;
	assert(first_pde >= 0);
	assert(last_pde < I386_VM_DIR_ENTRIES);

	/* Walk every page-directory entry in the range. */
	for(pde = first_pde; pde <= last_pde; pde++) {
		int r;

		assert(!(pt->pt_dir[pde] & I386_VM_BIGPAGE));
		if(pt->pt_dir[pde] & I386_VM_PRESENT) {
			/* Page table already in place; nothing to do. */
			continue;
		}
		if(verify) {
			printf("pt_ptalloc_in_range: no pde %d\n", pde);
			return EFAULT;
		}
		assert(!pt->pt_dir[pde]);
		if((r=pt_ptalloc(pt, pde, flags)) != OK) {
			/* Couldn't do (complete) mapping.
			 * Don't bother freeing any previously
			 * allocated page tables, they're
			 * still writable, don't point to nonsense,
			 * and pt_ptalloc leaves the directory
			 * and other data in a consistent state.
			 */
			printf("pt_ptalloc_in_range: pt_ptalloc failed\n");
			return r;
		}
		assert(pt->pt_dir[pde] & I386_VM_PRESENT);
	}

	return OK;
}
Esempio n. 4
0
/*===========================================================================*
 *				vm_addrok		     		     *
 *===========================================================================*/
/* Check whether the given virtual address is mapped -- and writable, when
 * 'writeflag' is set -- in VM's own page table. Returns 1 if the address
 * is usable, 0 (with a diagnostic) otherwise.
 *
 * Fix: the original was truncated -- it fell off the end without the final
 * 'return 1;' or closing brace, and the last printf's argument list was
 * shared across #if branches. Completed and untangled per architecture.
 */
int vm_addrok(void *vir, int writeflag)
{
    pt_t *pt = &vmprocess->vm_pt;
    int pde, pte;
    vir_bytes v = (vir_bytes) vir;

#if defined(__i386__)
    pde = I386_VM_PDE(v);
    pte = I386_VM_PTE(v);
#elif defined(__arm__)
    pde = ARM_VM_PDE(v);
    pte = ARM_VM_PTE(v);
#endif

    /* The page directory entry must be present. */
    if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) {
        printf("addr not ok: missing pde %d\n", pde);
        return 0;
    }

#if defined(__i386__)
    /* On i386, the directory entry carries its own write-permission bit. */
    if(writeflag &&
            !(pt->pt_dir[pde] & ARCH_VM_PTE_RW)) {
        printf("addr not ok: pde %d present but pde unwritable\n", pde);
        return 0;
    }

#endif
    /* The page table entry must be present, too. */
    if(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) {
        printf("addr not ok: missing pde %d / pte %d\n",
               pde, pte);
        return 0;
    }

#if defined(__i386__)
    if(writeflag &&
            !(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RW)) {
        printf("addr not ok: pde %d / pte %d present but unwritable\n",
               pde, pte);
        return 0;
    }
#elif defined(__arm__)
    /* ARM uses a read-only bit instead of a read/write bit. */
    if(!writeflag &&
            !(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RO)) {
        printf("addr not ok: pde %d / pte %d present but writable\n",
               pde, pte);
        return 0;
    }
#endif

    /* All checks passed. */
    return 1;
}
Esempio n. 5
0
void pg_map(phys_bytes phys, vir_bytes vaddr, vir_bytes vaddr_end,
	kinfo_t *cbi)
{
/* Map the virtual range [vaddr, vaddr_end) onto consecutive physical pages
 * starting at 'phys', or onto freshly allocated pages when phys is
 * PG_ALLOCATEME. Page tables are created lazily, one per directory slot,
 * and remembered across calls in static state.
 */
	static int cur_pde = -1;	/* highest pde with a table so far */
	static u32_t *ptable = NULL;	/* page table backing cur_pde */
	int pde, pte;

	assert(kernel_may_alloc);

	if(phys == PG_ALLOCATEME) {
		assert(!(vaddr % I386_PAGE_SIZE));
	} else  {
		/* Virtual and physical in-page offsets must agree. */
		assert((vaddr % I386_PAGE_SIZE) == (phys % I386_PAGE_SIZE));
		vaddr = pg_rounddown(vaddr);
		phys = pg_rounddown(phys);
	}
	assert(vaddr < kern_vir_start);

	for(; vaddr < vaddr_end; vaddr += I386_PAGE_SIZE) {
		phys_bytes source;

		assert(!(vaddr % I386_PAGE_SIZE));
		if(phys == PG_ALLOCATEME) {
			source = pg_alloc_page(cbi);
		} else {
			assert(!(phys % I386_PAGE_SIZE));
			source = phys;
			phys += I386_PAGE_SIZE;
		}
		assert(!(source % I386_PAGE_SIZE));

		pde = I386_VM_PDE(vaddr);
		pte = I386_VM_PTE(vaddr);

		if(cur_pde < pde) {
			/* Crossed into a new directory slot: give it a
			 * fresh page table.
			 */
			phys_bytes ph;
			ptable = alloc_pagetable(&ph);
			pagedir[pde] = (ph & I386_VM_ADDR_MASK)
		                | I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;
			cur_pde = pde;
		}
		assert(ptable);
		ptable[pte] = (source & I386_VM_ADDR_MASK) |
			I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;
	}
}
Esempio n. 6
0
/*===========================================================================*
 *				vm_addrok		     		     *
 *===========================================================================*/
PUBLIC int vm_addrok(void *vir, int writeflag)
{
/* Check whether the given address is mapped -- and writable, when
 * 'writeflag' is set -- in VM's own page table. Returns 1 if the address
 * is usable, 0 (with a diagnostic) otherwise.
 * (The previous comment here, about marking a vm_allocpage() page
 * unwritable, described a different function and was wrong.)
 */
	pt_t *pt = &vmprocess->vm_pt;
	int pde, pte;
	vir_bytes v = arch_vir2map(vmprocess, (vir_bytes) vir);

	/* No PT yet? Don't bother looking. */
	if(!(vmprocess->vm_flags & VMF_HASPT)) {
		return 1;
	}

	pde = I386_VM_PDE(v);
	pte = I386_VM_PTE(v);

	/* The page directory entry must be present... */
	if(!(pt->pt_dir[pde] & I386_VM_PRESENT)) {
		printf("addr not ok: missing pde %d\n", pde);
		return 0;
	}

	/* ...and writable at the directory level for write access. */
	if(writeflag &&
		!(pt->pt_dir[pde] & I386_VM_WRITE)) {
		printf("addr not ok: pde %d present but pde unwritable\n", pde);
		return 0;
	}

	/* The page table entry must be present... */
	if(!(pt->pt_pt[pde][pte] & I386_VM_PRESENT)) {
		printf("addr not ok: missing pde %d / pte %d\n",
			pde, pte);
		return 0;
	}

	/* ...and writable for write access. */
	if(writeflag &&
		!(pt->pt_pt[pde][pte] & I386_VM_WRITE)) {
		printf("addr not ok: pde %d / pte %d present but unwritable\n",
			pde, pte);
		return 0;
	}

	return 1;
}
Esempio n. 7
0
/*===========================================================================*
 *				pt_checkrange		     		     *
 *===========================================================================*/
PUBLIC int pt_checkrange(pt_t *pt, vir_bytes v,  size_t bytes,
	int write)
{
/* Check that every page in [v, v+bytes) is mapped in 'pt' -- and writable,
 * when 'write' is set. Returns OK, or EFAULT on the first unusable page.
 */
	int p, pages;

	assert(!(bytes % I386_PAGE_SIZE));

	pages = bytes / I386_PAGE_SIZE;

	for(p = 0; p < pages; p++) {
		int pde = I386_VM_PDE(v);
		int pte = I386_VM_PTE(v);

		assert(!(v % I386_PAGE_SIZE));
		assert(pte >= 0 && pte < I386_VM_PT_ENTRIES);
		assert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);

		/* Page table has to be there. */
		if(!(pt->pt_dir[pde] & I386_VM_PRESENT))
			return EFAULT;

		/* A present directory entry implies a valid page table
		 * pointer on our side.
		 */
		assert(pt->pt_pt[pde]);

		if(!(pt->pt_pt[pde][pte] & I386_VM_PRESENT)) {
			return EFAULT;
		}

		if(write && !(pt->pt_pt[pde][pte] & I386_VM_WRITE)) {
			return EFAULT;
		}

		v += I386_PAGE_SIZE;
	}

	return OK;
}
Esempio n. 8
0
/* This function sets up a mapping from within the kernel's address
 * space to any other area of memory, either straight physical
 * memory (pr == NULL) or a process view of memory, in 4MB windows.
 * I.e., it maps in 4MB chunks of virtual (or physical) address space
 * to 4MB chunks of kernel virtual address space.
 *
 * It recognizes pr already being in memory as a special case (no
 * mapping required).
 *
 * The target (i.e. in-kernel) mapping area is one of the freepdes[]
 * VM has earlier already told the kernel about that is available. It is
 * identified as the 'pde' parameter. This value can be chosen freely
 * by the caller, as long as it is in range (i.e. 0 or higher and corresonds
 * to a known freepde slot). It is up to the caller to keep track of which
 * freepde's are in use, and to determine which ones are free to use.
 *
 * The logical number supplied by the caller is translated into an actual
 * pde number to be used, and a pointer to it (linear address) is returned
 * for actual use by phys_copy or phys_memset.
 */
static phys_bytes createpde(
	const struct proc *pr,	/* Requested process, NULL for physical. */
	const phys_bytes linaddr,/* Address after segment translation. */
	phys_bytes *bytes,	/* Size of chunk, function may truncate it. */
	int free_pde_idx,	/* index of the free slot to use */
	int *changed		/* If mapping is made, this is set to 1. */
	)
{
	u32_t pdeval;
	phys_bytes offset;
	int pde;

	/* Translate the caller's logical slot number into a real pde. */
	assert(free_pde_idx >= 0 && free_pde_idx < nfreepdes);
	pde = freepdes[free_pde_idx];
	assert(pde >= 0 && pde < 1024);

	if(pr && ((pr == get_cpulocal_var(ptproc)) || !HASPT(pr))) {
		/* Process memory is requested, and
		 * it's a process that is already in current page table, or
		 * a process that is in every page table.
		 * Therefore linaddr is valid directly, with the requested
		 * size.
		 */
		return linaddr;
	}

	if(pr) {
		/* Requested address is in a process that is not currently
		 * accessible directly. Grab the PDE entry of that process'
		 * page table that corresponds to the requested address.
		 */
		assert(pr->p_seg.p_cr3_v);
		pdeval = pr->p_seg.p_cr3_v[I386_VM_PDE(linaddr)];
	} else {
		/* Requested address is physical. Make up the PDE entry. */
		pdeval = (linaddr & I386_VM_ADDR_MASK_4MB) | 
			I386_VM_BIGPAGE | I386_VM_PRESENT | 
			I386_VM_WRITE | I386_VM_USER;
	}

	/* Write the pde value that we need into a pde that the kernel
	 * can access, into the currently loaded page table so it becomes
	 * visible.
	 */
	assert(get_cpulocal_var(ptproc)->p_seg.p_cr3_v);
	if(get_cpulocal_var(ptproc)->p_seg.p_cr3_v[pde] != pdeval) {
		/* Only touch the slot (and report the change, so the caller
		 * knows a TLB flush may be needed) if the value differs.
		 */
		get_cpulocal_var(ptproc)->p_seg.p_cr3_v[pde] = pdeval;
		*changed = 1;
	}

	/* Memory is now available, but only the 4MB window of virtual
	 * address space that we have mapped; calculate how much of
	 * the requested range is visible and return that in *bytes,
	 * if that is less than the requested range.
	 */
	offset = linaddr & I386_VM_OFFSET_MASK_4MB; /* Offset in 4MB window. */
	*bytes = MIN(*bytes, I386_BIG_PAGE_SIZE - offset); 

	/* Return the linear address of the start of the new mapping. */
	return I386_BIG_PAGE_SIZE*pde + offset;
}
Esempio n. 9
0
/*===========================================================================*
 *				pt_writemap		     		     *
 *===========================================================================*/
PUBLIC int pt_writemap(struct vmproc * vmp,
			pt_t *pt,
			vir_bytes v,
			phys_bytes physaddr,
			size_t bytes,
			u32_t flags,
			u32_t writemapflags)
{
/* Write mapping into page table. Allocate a new page table if necessary.
 * With WMF_VERIFY, check the existing entries against the expected values
 * instead of writing. Returns OK or an error (e.g. EFAULT).
 */
	int p, pages;
	int verify = 0;
	int ret = OK;

	/* FIXME
	 * don't do it everytime, stop the process only on the first change and
	 * resume the execution on the last change. Do in a wrapper of this
	 * function
	 */
	if (vmp && vmp->vm_endpoint != NONE && vmp->vm_endpoint != VM_PROC_NR &&
			!(vmp->vm_flags & VMF_EXITING))
		sys_vmctl(vmp->vm_endpoint, VMCTL_VMINHIBIT_SET, 0);

	if(writemapflags & WMF_VERIFY)
		verify = 1;

	assert(!(bytes % I386_PAGE_SIZE));
	assert(!(flags & ~(PTF_ALLFLAGS)));

	pages = bytes / I386_PAGE_SIZE;

	/* MAP_NONE means to clear the mapping. It doesn't matter
	 * what's actually written into the PTE if I386_VM_PRESENT
	 * isn't on, so we can just write MAP_NONE into it.
	 */
	assert(physaddr == MAP_NONE || (flags & I386_VM_PRESENT));
	assert(physaddr != MAP_NONE || !flags);

	/* First make sure all the necessary page tables are allocated,
	 * before we start writing in any of them, because it's a pain
	 * to undo our work properly.
	 */
	ret = pt_ptalloc_in_range(pt, v, v + I386_PAGE_SIZE*pages, flags, verify);
	if(ret != OK) {
		goto resume_exit;
	}

	/* Now write in them. */
	for(p = 0; p < pages; p++) {
		u32_t entry;
		int pde = I386_VM_PDE(v);
		int pte = I386_VM_PTE(v);

		assert(!(v % I386_PAGE_SIZE));
		assert(pte >= 0 && pte < I386_VM_PT_ENTRIES);
		assert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);

		/* Page table has to be there. */
		assert(pt->pt_dir[pde] & I386_VM_PRESENT);

		/* Make sure page directory entry for this page table
		 * is marked present and page table entry is available.
		 */
		assert((pt->pt_dir[pde] & I386_VM_PRESENT));
		assert(pt->pt_pt[pde]);

#if SANITYCHECKS
		/* We don't expect to overwrite a page. */
		if(!(writemapflags & (WMF_OVERWRITE|WMF_VERIFY)))
			assert(!(pt->pt_pt[pde][pte] & I386_VM_PRESENT));
#endif
		/* For flags-only updates or frees, keep the current
		 * physical address in the entry.
		 */
		if(writemapflags & (WMF_WRITEFLAGSONLY|WMF_FREE)) {
			physaddr = pt->pt_pt[pde][pte] & I386_VM_ADDR_MASK;
		}

		if(writemapflags & WMF_FREE) {
			free_mem(ABS2CLICK(physaddr), 1);
		}

		/* Entry we will write. */
		entry = (physaddr & I386_VM_ADDR_MASK) | flags;

		if(verify) {
			u32_t maskedentry;
			maskedentry = pt->pt_pt[pde][pte];
			/* Hardware-set bits are not our business here. */
			maskedentry &= ~(I386_VM_ACC|I386_VM_DIRTY);
			/* Verify pagetable entry. */
			if(entry & I386_VM_WRITE) {
				/* If we expect a writable page, allow a readonly page. */
				maskedentry |= I386_VM_WRITE;
			}
			if(maskedentry != entry) {
				printf("pt_writemap: mismatch: ");
				if((entry & I386_VM_ADDR_MASK) !=
					(maskedentry & I386_VM_ADDR_MASK)) {
					printf("pt_writemap: physaddr mismatch (0x%lx, 0x%lx); ", entry, maskedentry);
				} else printf("phys ok; ");
				printf(" flags: found %s; ",
					ptestr(pt->pt_pt[pde][pte]));
				printf(" masked %s; ",
					ptestr(maskedentry));
				printf(" expected %s\n", ptestr(entry));
				ret = EFAULT;
				goto resume_exit;
			}
		} else {
			/* Write pagetable entry. */
#if SANITYCHECKS
			assert(vm_addrok(pt->pt_pt[pde], 1));
#endif
			pt->pt_pt[pde][pte] = entry;
		}

		physaddr += I386_PAGE_SIZE;
		v += I386_PAGE_SIZE;
	}

resume_exit:

	/* Let the target process run again, mirroring the inhibit above. */
	if (vmp && vmp->vm_endpoint != NONE && vmp->vm_endpoint != VM_PROC_NR &&
			!(vmp->vm_flags & VMF_EXITING))
		sys_vmctl(vmp->vm_endpoint, VMCTL_VMINHIBIT_CLEAR, 0);

	return ret;
}
Esempio n. 10
0
/*===========================================================================*
 *				pt_writemap		     		     *
 *===========================================================================*/
PUBLIC int pt_writemap(pt_t *pt, vir_bytes v, phys_bytes physaddr,
                       size_t bytes, u32_t flags, u32_t writemapflags)
{
    /* Write mapping into page table. Allocate a new page table if necessary.
     * With WMF_VERIFY, check existing entries instead of writing.
     * Returns OK or EFAULT.
     */
    int p, pages, pdecheck;
    int finalpde;
    int verify = 0;

    if(writemapflags & WMF_VERIFY)
        verify = 1;

    vm_assert(!(bytes % I386_PAGE_SIZE));
    vm_assert(!(flags & ~(PTF_ALLFLAGS)));

    pages = bytes / I386_PAGE_SIZE;

    /* Empty range: nothing to allocate, write, or verify. */
    if(pages == 0)
        return OK;

    /* MAP_NONE means to clear the mapping. It doesn't matter
     * what's actually written into the PTE if I386_VM_PRESENT
     * isn't on, so we can just write MAP_NONE into it.
     */
#if SANITYCHECKS
    if(physaddr != MAP_NONE && !(flags & I386_VM_PRESENT)) {
        vm_panic("pt_writemap: writing dir with !P\n", NO_NUM);
    }
    if(physaddr == MAP_NONE && flags) {
        vm_panic("pt_writemap: writing 0 with flags\n", NO_NUM);
    }
#endif

    /* PDE of the last page in the range. The end address is exclusive,
     * hence the -1: the previous expression took the PDE one PAST the
     * range, allocating a spurious extra page table whenever the range
     * ended exactly on a 4MB boundary.
     */
    finalpde = I386_VM_PDE(v + I386_PAGE_SIZE * pages - 1);

    /* First make sure all the necessary page tables are allocated,
     * before we start writing in any of them, because it's a pain
     * to undo our work properly. Walk the range in page-directory-entry
     * sized leaps.
     */
    for(pdecheck = I386_VM_PDE(v); pdecheck <= finalpde; pdecheck++) {
        vm_assert(pdecheck >= 0 && pdecheck < I386_VM_DIR_ENTRIES);
        if(pt->pt_dir[pdecheck] & I386_VM_BIGPAGE) {
            printf("pt_writemap: trying to write 0x%lx into 0x%lx\n",
                   physaddr, v);
            vm_panic("pt_writemap: BIGPAGE found", NO_NUM);
        }
        if(!(pt->pt_dir[pdecheck] & I386_VM_PRESENT)) {
            int r;
            if(verify) {
                printf("pt_writemap verify: no pde %d\n", pdecheck);
                return EFAULT;
            }
            vm_assert(!pt->pt_dir[pdecheck]);
            if((r=pt_ptalloc(pt, pdecheck, flags)) != OK) {
                /* Couldn't do (complete) mapping.
                 * Don't bother freeing any previously
                 * allocated page tables, they're
                 * still writable, don't point to nonsense,
                 * and pt_ptalloc leaves the directory
                 * and other data in a consistent state.
                 */
                /* (Previously the pde argument had no matching %d.) */
                printf("pt_writemap: pt_ptalloc failed for pde %d\n", pdecheck);
                return r;
            }
        }
        vm_assert(pt->pt_dir[pdecheck] & I386_VM_PRESENT);
    }

    /* Now write in them. */
    for(p = 0; p < pages; p++) {
        u32_t entry;
        int pde = I386_VM_PDE(v);
        int pte = I386_VM_PTE(v);

        vm_assert(!(v % I386_PAGE_SIZE));
        vm_assert(pte >= 0 && pte < I386_VM_PT_ENTRIES);
        vm_assert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);

        /* Page table has to be there. */
        vm_assert(pt->pt_dir[pde] & I386_VM_PRESENT);

        /* Make sure page directory entry for this page table
         * is marked present and page table entry is available.
         */
        vm_assert((pt->pt_dir[pde] & I386_VM_PRESENT) && pt->pt_pt[pde]);

#if SANITYCHECKS
        /* We don't expect to overwrite a page. */
        if(!(writemapflags & (WMF_OVERWRITE|WMF_VERIFY)))
            vm_assert(!(pt->pt_pt[pde][pte] & I386_VM_PRESENT));
#endif
        /* For flags-only updates or frees, keep the entry's current
         * physical address.
         */
        if(writemapflags & (WMF_WRITEFLAGSONLY|WMF_FREE)) {
            physaddr = pt->pt_pt[pde][pte] & I386_VM_ADDR_MASK;
        }

        if(writemapflags & WMF_FREE) {
            FREE_MEM(ABS2CLICK(physaddr), 1);
        }

        /* Entry we will write. */
        entry = (physaddr & I386_VM_ADDR_MASK) | flags;

        if(verify) {
            u32_t maskedentry;
            maskedentry = pt->pt_pt[pde][pte];
            /* Ignore hardware-maintained accessed/dirty bits. */
            maskedentry &= ~(I386_VM_ACC|I386_VM_DIRTY);
            /* Verify pagetable entry. */
            if(maskedentry != entry) {
                printf("pt_writemap: 0x%lx found, masked 0x%lx, 0x%lx expected\n",
                       pt->pt_pt[pde][pte], maskedentry, entry);
                return EFAULT;
            }
        } else {
            /* Write pagetable entry. */
            pt->pt_pt[pde][pte] = entry;
        }

        physaddr += I386_PAGE_SIZE;
        v += I386_PAGE_SIZE;
    }

    return OK;
}
Esempio n. 11
0
/*===========================================================================*
 *				findhole		     		     *
 *===========================================================================*/
static u32_t findhole(int pages)
{
    /* Find a space in the virtual address space of VM. */
    /* NOTE(review): the loop below interleaves #if defined(__i386__) and
     * #elif defined(__arm__) regions whose braces pair up ACROSS
     * preprocessor branches. On i386 a single free page is sought; on ARM
     * a run of 'pages' contiguous free pages is sought. Read each
     * architecture's branch as a separate function body before editing.
     */
    u32_t curv;
    int pde = 0, try_restart;
    static u32_t lastv = 0;	/* resume point of the previous search */
    pt_t *pt = &vmprocess->vm_pt;
    vir_bytes vmin, vmax;
#if defined(__arm__)
    u32_t holev;
#endif

    vmin = (vir_bytes) (&_end); /* marks end of VM BSS */
    vmin += 1024*1024*1024;	/* reserve 1GB virtual address space for VM heap */
    vmin &= ARCH_VM_ADDR_MASK;
    vmax = VM_STACKTOP;

    /* Input sanity check. */
    assert(vmin + VM_PAGE_SIZE >= vmin);
    assert(vmax >= vmin + VM_PAGE_SIZE);
    assert((vmin % VM_PAGE_SIZE) == 0);
    assert((vmax % VM_PAGE_SIZE) == 0);
#if defined(__arm__)
    assert(pages > 0);
#endif

#if SANITYCHECKS
    /* With sanity checks on, start at a random page in the range so
     * address reuse patterns vary between runs.
     */
    curv = ((u32_t) random()) % ((vmax - vmin)/VM_PAGE_SIZE);
    curv *= VM_PAGE_SIZE;
    curv += vmin;
#else
    /* Resume where the previous search stopped, if still in range. */
    curv = lastv;
    if(curv < vmin || curv >= vmax)
        curv = vmin;
#endif
    try_restart = 1;

    /* Start looking for a free page starting at vmin. */
    while(curv < vmax) {
        int pte;
#if defined(__arm__)
        int i, nohole;
#endif

        assert(curv >= vmin);
        assert(curv < vmax);

#if defined(__i386__)
        pde = I386_VM_PDE(curv);
        pte = I386_VM_PTE(curv);
#elif defined(__arm__)
        holev = curv; /* the candidate hole */
        nohole = 0;
        for (i = 0; i < pages && !nohole; ++i) {
            if(curv >= vmax) {
                break;
            }
#endif

#if defined(__i386__)
        if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) ||
                !(pt->pt_pt[pde][pte] & ARCH_VM_PAGE_PRESENT)) {
#elif defined(__arm__)
        pde = ARM_VM_PDE(curv);
        pte = ARM_VM_PTE(curv);

        /* if page present, no hole */
        if((pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) &&
                (pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT))
            nohole = 1;

        /* if not contiguous, no hole */
        if (curv != holev + i * VM_PAGE_SIZE)
            nohole = 1;

        curv+=VM_PAGE_SIZE;
    }

    /* there's a large enough hole */
    if (!nohole && i == pages) {
#endif
            lastv = curv;
#if defined(__i386__)
            return curv;
#elif defined(__arm__)
            return holev;
#endif
        }

#if defined(__i386__)
        curv+=VM_PAGE_SIZE;

#elif defined(__arm__)
        /* Reset curv */
#endif
        /* Wrap back to vmin once, then give up when vmax is reached. */
        if(curv >= vmax && try_restart) {
            curv = vmin;
            try_restart = 0;
        }
    }

    printf("VM: out of virtual address space in vm\n");

    return NO_MEM;
}

/*===========================================================================*
 *				vm_freepages		     		     *
 *===========================================================================*/
void vm_freepages(vir_bytes vir, int pages)
{
    /* Unmap and free 'pages' pages of VM's own memory starting at 'vir'.
     * Static pages are never freed.
     */
    int wr;

    assert(!(vir % VM_PAGE_SIZE));

    if(is_staticaddr(vir)) {
        printf("VM: not freeing static page\n");
        return;
    }

    /* Clear the mappings and release the underlying memory in one go. */
    wr = pt_writemap(vmprocess, &vmprocess->vm_pt, vir, MAP_NONE,
                     pages*VM_PAGE_SIZE, 0, WMF_OVERWRITE | WMF_FREE);
    if(wr != OK)
        panic("vm_freepages: pt_writemap failed");

    vm_self_pages--;

#if SANITYCHECKS
    /* If SANITYCHECKS are on, flush tlb so accessing freed pages is
     * always trapped, also if not in tlb.
     */
    if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
        panic("VMCTL_FLUSHTLB failed");
    }
#endif
}