Code Example #1
/*===========================================================================*
 *				vm_freepages		     		     *
 *===========================================================================*/
void vm_freepages(vir_bytes vir, int pages)
{
    assert(!(vir % VM_PAGE_SIZE));

    if(is_staticaddr(vir)) {
        printf("VM: not freeing static page\n");
        return;
    }

    if(pt_writemap(vmprocess, &vmprocess->vm_pt, vir,
                   MAP_NONE, pages*VM_PAGE_SIZE, 0,
                   WMF_OVERWRITE | WMF_FREE) != OK)
        panic("vm_freepages: pt_writemap failed");

    vm_self_pages--;

#if SANITYCHECKS
    /* If SANITYCHECKS are on, flush the TLB so that accessing the freed
     * pages is always trapped, whether or not a stale translation is
     * still cached in the TLB.
     */
    if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
        panic("VMCTL_FLUSHTLB failed");
    }
#endif
}
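
To make the calling convention concrete, here is a minimal, hypothetical sketch of how VM-internal code pairs this routine with vm_allocpages from the examples below. It assumes the declarations from these listings (vm_allocpages, vm_freepages, vir_bytes, phys_bytes, VMP_PAGEDIR) are in scope; the wrapper name and the use of VMP_PAGEDIR as the accounting category are illustrative choices, not taken from the MINIX sources.

/* Hypothetical round trip: allocate one page for VM's own use, then free
 * it again.  VMP_PAGEDIR is used only because it is the one VMP_* category
 * visible in these listings; any valid category below VMP_CATEGORIES works.
 */
static void allocpages_roundtrip_example(void)
{
    phys_bytes phys;
    void *v;

    if (!(v = vm_allocpages(&phys, VMP_PAGEDIR, 1))) {
        printf("example: vm_allocpages failed\n");
        return;
    }

    /* ... use the page at virtual address v (physical address phys) ... */

    /* vm_freepages expects a page-aligned virtual address and the same
     * page count that was allocated. */
    vm_freepages((vir_bytes) v, 1);
}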
Code Example #2
/*===========================================================================*
 *                              vm_allocpages                               *
 *===========================================================================*/
void *vm_allocpages(phys_bytes *phys, int reason, int pages)
{
    /* Allocate a page for use by VM itself. */
    phys_bytes newpage;
    static int level = 0;
    void *ret;
    u32_t mem_flags = 0;

    assert(reason >= 0 && reason < VMP_CATEGORIES);

    assert(pages > 0);

    level++;

    assert(level >= 1);
    assert(level <= 2);

    if((level > 1) || !pt_init_done) {
        void *s;

        if(pages == 1) s=vm_getsparepage(phys);
        else if(pages == 4) s=vm_getsparepagedir(phys);
        else panic("%d pages", pages);

        level--;
        if(!s) {
            util_stacktrace();
            printf("VM: warning: out of spare pages\n");
        }
        if(!is_staticaddr(s)) vm_self_pages++;
        return s;
    }

#if defined(__arm__)
    if (reason == VMP_PAGEDIR) {
        mem_flags |= PAF_ALIGN16K;
    }
#endif

    /* Allocate memory for VM's own use. As VM is trusted,
     * we don't have to pre-clear it.
     */
    if((newpage = alloc_mem(pages, mem_flags)) == NO_MEM) {
        level--;
        printf("VM: vm_allocpage: alloc_mem failed\n");
        return NULL;
    }

    *phys = CLICK2ABS(newpage);

    if(!(ret = vm_mappages(*phys, pages))) {
        level--;
        printf("VM: vm_allocpage: vm_mappages failed\n");
        return NULL;
    }

    level--;
    vm_self_pages++;

    return ret;
}
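
The interesting mechanism here is the static level counter: vm_allocpages tolerates one level of re-entry (the asserts allow level up to 2), and if it is entered again while already allocating, or before its own page table is set up, it serves the request from the spare-page pool instead of recursing into the normal path. The following standalone model (not MINIX code; all names are illustrative) reproduces that guard with a small pre-reserved pool.

/* Standalone model of the re-entrancy guard above: a static level counter
 * detects that the allocator has been entered again while already
 * allocating, and the nested call is served from a pre-reserved spare
 * pool, mirroring the level > 1 branch that falls back to
 * vm_getsparepage().
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE   4096
#define SPARE_SLOTS 4

static char spare[SPARE_SLOTS][PAGE_SIZE];
static int spare_used;

static void *alloc_page(void);

/* Stand-in for the mapping step, which itself needs a bookkeeping page
 * and therefore re-enters the allocator. */
static void *map_page(void)
{
    return alloc_page();
}

static void *alloc_page(void)
{
    static int level = 0;
    void *p;

    level++;
    assert(level >= 1 && level <= 2);   /* at most one nested call */

    if (level > 1) {
        /* Re-entered: hand out a spare page, never recurse further. */
        p = (spare_used < SPARE_SLOTS) ? spare[spare_used++] : NULL;
        level--;
        return p;
    }

    /* Normal path: the mapping step may call back into alloc_page(),
     * which then takes the spare-pool branch above. */
    p = map_page();
    level--;
    return p;
}

int main(void)
{
    printf("page %p, spares used: %d\n", alloc_page(), spare_used);
    return 0;
}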
Code Example #3
/*===========================================================================*
 *                              vm_allocpages                               *
 *===========================================================================*/
void *vm_allocpages(phys_bytes *phys, int reason, int pages)
{
    /* Allocate a page for use by VM itself. */
    phys_bytes newpage;
    vir_bytes loc;
    pt_t *pt;
    int r;
    static int level = 0;
    void *ret;
    u32_t mem_flags = 0;

    pt = &vmprocess->vm_pt;
    assert(reason >= 0 && reason < VMP_CATEGORIES);

    assert(pages > 0);

    level++;

    assert(level >= 1);
    assert(level <= 2);

    if((level > 1) || !pt_init_done) {
        void *s;

        if(pages == 1) s=vm_getsparepage(phys);
        else if(pages == 4) s=vm_getsparepagedir(phys);
        else panic("%d pages", pages);

        level--;
        if(!s) {
            util_stacktrace();
            printf("VM: warning: out of spare pages\n");
        }
        if(!is_staticaddr(s)) vm_self_pages++;
        return s;
    }

#if defined(__arm__)
    if (reason == VMP_PAGEDIR) {
        mem_flags |= PAF_ALIGN16K;
    }
#endif

    /* VM does have a pagetable, so get a page and map it in there.
     * Where in our virtual address space can we put it?
     */
    loc = findhole(pages);
    if(loc == NO_MEM) {
        level--;
        printf("VM: vm_allocpage: findhole failed\n");
        return NULL;
    }

    /* Allocate memory for VM's own use. As VM is trusted,
     * we don't have to pre-clear it.
     */
    if((newpage = alloc_mem(pages, mem_flags)) == NO_MEM) {
        level--;
        printf("VM: vm_allocpage: alloc_mem failed\n");
        return NULL;
    }

    *phys = CLICK2ABS(newpage);

    /* Map this page into our address space. */
    if((r=pt_writemap(vmprocess, pt, loc, *phys, VM_PAGE_SIZE*pages,
                      ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW
#if defined(__arm__)
                      | ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE
#endif
                      , 0)) != OK) {
        free_mem(newpage, pages);
        printf("vm_allocpage writemap failed\n");
        level--;
        return NULL;
    }

    if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
        panic("VMCTL_FLUSHTLB failed: %d", r);
    }

    level--;

    /* Return user-space-ready pointer to it. */
    ret = (void *) loc;

    vm_self_pages++;
    return ret;
}
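
Compared with the previous variant, which hands the mapping step to vm_mappages, this version chooses a destination itself with findhole (listed next), writes the mapping with pt_writemap, and flushes the TLB. The ARM-specific branch is worth a note: a first-level ARM page table is 16 KiB, so a page directory is requested as four pages and, via the VMP_PAGEDIR category, alloc_mem is asked for 16 KiB alignment. A hedged sketch of such a call, assuming the declarations above are in scope (the wrapper name is hypothetical, not a MINIX function):

/* Hypothetical wrapper: allocate an ARM page directory through
 * vm_allocpages().  The VMP_PAGEDIR category makes alloc_mem() apply
 * PAF_ALIGN16K on ARM, so the four-page (16 KiB) block comes back
 * suitably aligned for use as a first-level page table.
 */
static void *alloc_pagedir_example(phys_bytes *pd_phys)
{
    void *pd = vm_allocpages(pd_phys, VMP_PAGEDIR, 4);

    if (!pd)
        printf("example: could not allocate a 16 KiB page directory\n");
    return pd;
}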
Code Example #4
/*===========================================================================*
 *				findhole		     		     *
 *===========================================================================*/
static u32_t findhole(int pages)
{
    /* Find a space in the virtual address space of VM. */
    u32_t curv;
    int pde = 0, try_restart;
    static u32_t lastv = 0;
    pt_t *pt = &vmprocess->vm_pt;
    vir_bytes vmin, vmax;
#if defined(__arm__)
    u32_t holev;
#endif

    vmin = (vir_bytes) (&_end); /* marks end of VM BSS */
    vmin += 1024*1024*1024;	/* reserve 1GB virtual address space for VM heap */
    vmin &= ARCH_VM_ADDR_MASK;
    vmax = VM_STACKTOP;

    /* Input sanity check. */
    assert(vmin + VM_PAGE_SIZE >= vmin);
    assert(vmax >= vmin + VM_PAGE_SIZE);
    assert((vmin % VM_PAGE_SIZE) == 0);
    assert((vmax % VM_PAGE_SIZE) == 0);
#if defined(__arm__)
    assert(pages > 0);
#endif

#if SANITYCHECKS
    curv = ((u32_t) random()) % ((vmax - vmin)/VM_PAGE_SIZE);
    curv *= VM_PAGE_SIZE;
    curv += vmin;
#else
    curv = lastv;
    if(curv < vmin || curv >= vmax)
        curv = vmin;
#endif
    try_restart = 1;

    /* Start looking for a free page starting at vmin. */
    while(curv < vmax) {
        int pte;
#if defined(__arm__)
        int i, nohole;
#endif

        assert(curv >= vmin);
        assert(curv < vmax);

#if defined(__i386__)
        pde = I386_VM_PDE(curv);
        pte = I386_VM_PTE(curv);
#elif defined(__arm__)
        holev = curv; /* the candidate hole */
        nohole = 0;
        for (i = 0; i < pages && !nohole; ++i) {
            if(curv >= vmax) {
                break;
            }
#endif

#if defined(__i386__)
        if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) ||
                !(pt->pt_pt[pde][pte] & ARCH_VM_PAGE_PRESENT)) {
#elif defined(__arm__)
            pde = ARM_VM_PDE(curv);
            pte = ARM_VM_PTE(curv);

            /* if page present, no hole */
            if((pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) &&
                    (pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT))
                nohole = 1;

            /* if not contiguous, no hole */
            if (curv != holev + i * VM_PAGE_SIZE)
                nohole = 1;

            curv+=VM_PAGE_SIZE;
        }

        /* there's a large enough hole */
        if (!nohole && i == pages) {
#endif
            lastv = curv;
#if defined(__i386__)
            return curv;
#elif defined(__arm__)
            return holev;
#endif
        }

#if defined(__i386__)
        curv+=VM_PAGE_SIZE;

#elif defined(__arm__)
        /* Reset curv */
#endif
        if(curv >= vmax && try_restart) {
            curv = vmin;
            try_restart = 0;
        }
    }

    printf("VM: out of virtual address space in vm\n");

    return NO_MEM;
}
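
findhole walks VM's own page table one candidate page at a time, remembers where the last search succeeded (lastv), and allows a single wrap-around restart (try_restart) before giving up. The following self-contained model reproduces that scan over a flat array of page-used flags instead of a two-level page table; the names and sizes are illustrative only.

/* Simplified model of findhole(): look for `pages` contiguous free slots,
 * resuming from the previous position and restarting once from the
 * bottom, like the lastv / try_restart logic above.
 */
#include <stdio.h>

#define NPAGES  32
#define NO_SLOT (-1)

static int find_hole(const int used[NPAGES], int pages)
{
    static int last = 0;            /* resume where the last search ended */
    int cur = last, try_restart = 1;

    while (cur < NPAGES) {
        int i;

        /* Is [cur, cur + pages) entirely in range and free? */
        for (i = 0; i < pages && cur + i < NPAGES && !used[cur + i]; i++)
            ;
        if (i == pages) {
            last = cur + pages;
            return cur;
        }

        cur++;
        if (cur >= NPAGES && try_restart) {   /* one wrap-around restart */
            cur = 0;
            try_restart = 0;
        }
    }
    return NO_SLOT;
}

int main(void)
{
    int used[NPAGES] = { 0 };

    used[2] = used[3] = 1;          /* pages 2 and 3 are already mapped */
    printf("4-page hole at slot %d\n", find_hole(used, 4));   /* prints 4 */
    return 0;
}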
