Example #1
/*===========================================================================*
 *				pt_ptmap		     		     *
 *===========================================================================*/
PUBLIC int pt_ptmap(struct vmproc *src_vmp, struct vmproc *dst_vmp)
{
/* Transfer the mappings to the page dir and page tables from the source
 * process to the destination process. Make sure all the mappings are above
 * the stack, so as not to corrupt valid mappings in the data segment of the
 * destination process.
 */
	int pde, r;
	phys_bytes physaddr;
	vir_bytes viraddr;
	pt_t *pt;

	assert(src_vmp->vm_stacktop == dst_vmp->vm_stacktop);
	pt = &src_vmp->vm_pt;

#if LU_DEBUG
	printf("VM: pt_ptmap: src = %d, dst = %d\n",
		src_vmp->vm_endpoint, dst_vmp->vm_endpoint);
#endif

	/* Transfer mapping to the page directory. */
	assert((vir_bytes) pt->pt_dir >= src_vmp->vm_stacktop);
	viraddr = arch_vir2map(src_vmp, (vir_bytes) pt->pt_dir);
	physaddr = pt->pt_dir_phys & I386_VM_ADDR_MASK;
	if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, I386_PAGE_SIZE,
		I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE,
		WMF_OVERWRITE)) != OK) {
		return r;
	}
#if LU_DEBUG
	printf("VM: pt_ptmap: transferred mapping to page dir: 0x%08x (0x%08x)\n",
		viraddr, physaddr);
#endif

	/* Scan all non-reserved page-directory entries. */
	for(pde=proc_pde; pde < I386_VM_DIR_ENTRIES; pde++) {
		if(!(pt->pt_dir[pde] & I386_VM_PRESENT)) {
			continue;
		}

		/* Transfer mapping to the page table. */
		assert((vir_bytes) pt->pt_pt[pde] >= src_vmp->vm_stacktop);
		viraddr = arch_vir2map(src_vmp, (vir_bytes) pt->pt_pt[pde]);
		physaddr = pt->pt_dir[pde] & I386_VM_ADDR_MASK;
		if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, I386_PAGE_SIZE,
			I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE,
			WMF_OVERWRITE)) != OK) {
			return r;
		}
	}
#if LU_DEBUG
	printf("VM: pt_ptmap: transferred mappings to page tables, pde range %d - %d\n",
		proc_pde, I386_VM_DIR_ENTRIES - 1);
#endif

	return OK;
}
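
A minimal, hypothetical caller sketch (not part of the listing above): the names example_transfer_pagetables, old_vmp and new_vmp are placeholders, and the call simply propagates the source's page-table pages into the destination's address space before state is handed over.

static void example_transfer_pagetables(struct vmproc *old_vmp,
	struct vmproc *new_vmp)
{
	int r;

	/* Make the source's page directory and page tables addressable
	 * from the destination process.
	 */
	if((r = pt_ptmap(old_vmp, new_vmp)) != OK)
		panic("example: pt_ptmap failed: %d", r);
}
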
Example #2
/*===========================================================================*
 *				vm_pagelock		     		     *
 *===========================================================================*/
void vm_pagelock(void *vir, int lockflag)
{
    /* Mark a page allocated by vm_allocpage() unwritable, i.e. only for VM. */
    vir_bytes m = (vir_bytes) vir;
    int r;
    u32_t flags = ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER;
    pt_t *pt;

    pt = &vmprocess->vm_pt;

    assert(!(m % VM_PAGE_SIZE));

    if(!lockflag)
        flags |= ARCH_VM_PTE_RW;
#if defined(__arm__)
    else
        flags |= ARCH_VM_PTE_RO;
    flags |= ARM_VM_PTE_WT;
#endif

    /* Update flags. */
    if((r=pt_writemap(vmprocess, pt, m, 0, VM_PAGE_SIZE,
                      flags, WMF_OVERWRITE | WMF_WRITEFLAGSONLY)) != OK) {
        panic("vm_lockpage: pt_writemap failed");
    }

    if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
        panic("VMCTL_FLUSHTLB failed: %d", r);
    }

    return;
}
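
A minimal usage sketch, not taken from the source: VM allocates a page for its own bookkeeping with vm_allocpage() (shown in a later listing), fills it while the mapping is still writable, then uses vm_pagelock() to write-protect it. The function name example_lock_own_page is hypothetical.

static void example_lock_own_page(void)
{
    phys_bytes ph;
    u32_t *page;

    /* Allocate a page for VM's own use and initialize it while writable. */
    if(!(page = vm_allocpage(&ph, VMP_PAGETABLE)))
        panic("example: out of spare pages");
    memset(page, 0, VM_PAGE_SIZE);

    vm_pagelock(page, 1);   /* lock: read-only for VM from now on */

    /* ... before the next modification ... */
    vm_pagelock(page, 0);   /* unlock: writable again */
}
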
Example #3
void *vm_mappages(phys_bytes p, int pages)
{
    vir_bytes loc;
    int r;
    pt_t *pt = &vmprocess->vm_pt;

    /* Where in our virtual address space can we put it? */
    loc = findhole(pages);
    if(loc == NO_MEM) {
        printf("vm_mappages: findhole failed\n");
        return NULL;
    }

    /* Map this page into our address space. */
    if((r=pt_writemap(vmprocess, pt, loc, p, VM_PAGE_SIZE*pages,
                      ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW
#if defined(__arm__)
                      | ARM_VM_PTE_WT
#endif
                      , 0)) != OK) {
        printf("vm_mappages writemap failed\n");
        return NULL;
    }

    if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
        panic("VMCTL_FLUSHTLB failed: %d", r);
    }

    assert(loc);

    return (void *) loc;
}
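
A hedged usage sketch (not from the source): map two pages of a known physical region into VM's own address space through vm_mappages() and access the memory via the returned pointer. The address in dev_phys and the function name are placeholders.

static void example_map_physical_range(void)
{
    phys_bytes dev_phys = 0x44000000;   /* placeholder physical address */
    u32_t *v;

    if(!(v = vm_mappages(dev_phys, 2)))
        panic("example: vm_mappages failed");

    /* Both pages are now addressable through v. */
    printf("first word: 0x%08x\n", v[0]);
}
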
Example #4
/*===========================================================================*
 *				vm_freepages		     		     *
 *===========================================================================*/
void vm_freepages(vir_bytes vir, int pages)
{
    assert(!(vir % VM_PAGE_SIZE));

    if(is_staticaddr(vir)) {
        printf("VM: not freeing static page\n");
        return;
    }

    if(pt_writemap(vmprocess, &vmprocess->vm_pt, vir,
                   MAP_NONE, pages*VM_PAGE_SIZE, 0,
                   WMF_OVERWRITE | WMF_FREE) != OK)
        panic("vm_freepages: pt_writemap failed");

    vm_self_pages--;

#if SANITYCHECKS
    /* If SANITYCHECKS are on, flush the TLB so that accessing freed pages
     * always traps, even if a stale translation is still cached.
     */
    if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
        panic("VMCTL_FLUSHTLB failed");
    }
#endif
}
Example #5
int _brk(void *addr)
{
	vir_bytes target = roundup((vir_bytes)addr, VM_PAGE_SIZE), v;
	extern char _end;
	extern char *_brksize;
	static vir_bytes prevbrk = (vir_bytes) &_end;
	struct vmproc *vmprocess = &vmproc[VM_PROC_NR];

	for(v = roundup(prevbrk, VM_PAGE_SIZE); v < target;
		v += VM_PAGE_SIZE) {
		phys_bytes mem, newpage = alloc_mem(1, 0);
		if(newpage == NO_MEM) return -1;
		mem = CLICK2ABS(newpage);
		if(pt_writemap(vmprocess, &vmprocess->vm_pt,
			v, mem, VM_PAGE_SIZE,
			  ARCH_VM_PTE_PRESENT
			| ARCH_VM_PTE_USER
			| ARCH_VM_PTE_RW
#if defined(__arm__)
			| ARM_VM_PTE_WB
#endif
			, 0) != OK) {
			free_mem(newpage, 1);
			return -1;
		}
		prevbrk = v + VM_PAGE_SIZE;
	}

	_brksize = (char *) addr;

	if(sys_vmctl(SELF, VMCTL_FLUSHTLB, 0) != OK)
		panic("flushtlb failed");

	return 0;
}
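
A minimal sketch, assuming _brksize tracks the current program break exactly as in the listing above: grow VM's own heap by one page. The function name example_grow_heap is hypothetical.

static int example_grow_heap(void)
{
	extern char *_brksize;

	/* Extend the heap by one page past the current break. */
	if(_brk(_brksize + VM_PAGE_SIZE) != 0)
		return -1;
	return 0;
}
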
Example #6
/*===========================================================================*
 *				vm_pagelock		     		     *
 *===========================================================================*/
PUBLIC void vm_pagelock(void *vir, int lockflag)
{
/* Mark a page allocated by vm_allocpage() unwritable, i.e. only for VM. */
	vir_bytes m;
	int r;
	u32_t flags = I386_VM_PRESENT | I386_VM_USER;
	pt_t *pt;

	pt = &vmprocess->vm_pt;
	m = arch_vir2map(vmprocess, (vir_bytes) vir);

	assert(!(m % I386_PAGE_SIZE));

	if(!lockflag)
		flags |= I386_VM_WRITE;

	/* Update flags. */
	if((r=pt_writemap(vmprocess, pt, m, 0, I386_PAGE_SIZE,
		flags, WMF_OVERWRITE | WMF_WRITEFLAGSONLY)) != OK) {
		panic("vm_lockpage: pt_writemap failed");
	}

	if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
		panic("VMCTL_FLUSHTLB failed: %d", r);
	}

	return;
}
Example #7
/*===========================================================================*
 *				vm_freepages		     		     *
 *===========================================================================*/
PRIVATE void vm_freepages(vir_bytes vir, vir_bytes phys, int pages, int reason)
{
    vm_assert(reason >= 0 && reason < VMP_CATEGORIES);
    if(vir >= vmp->vm_stacktop) {
        vm_assert(!(vir % I386_PAGE_SIZE));
        vm_assert(!(phys % I386_PAGE_SIZE));
        FREE_MEM(ABS2CLICK(phys), pages);
        if(pt_writemap(&vmp->vm_pt, arch_vir2map(vmp, vir),
                       MAP_NONE, pages*I386_PAGE_SIZE, 0, WMF_OVERWRITE) != OK)
            vm_panic("vm_freepages: pt_writemap failed",
                     NO_NUM);
    } else {
        printf("VM: vm_freepages not freeing VM heap pages (%d)\n",
               pages);
    }
}
Example #8
/*===========================================================================*
 *                     munmap_lin (used for overrides for VM)                *
 *===========================================================================*/
PRIVATE int munmap_lin(vir_bytes addr, size_t len)
{
	if(addr % VM_PAGE_SIZE) {
		printf("munmap_lin: offset not page aligned\n");
		return EFAULT;
	}

	if(len % VM_PAGE_SIZE) {
		printf("munmap_lin: len not page aligned\n");
		return EFAULT;
	}

	if(pt_writemap(NULL, &vmproc[VM_PROC_NR].vm_pt, addr, MAP_NONE, len, 0,
		WMF_OVERWRITE | WMF_FREE) != OK) {
		printf("munmap_lin: pt_writemap failed\n");
		return EFAULT;
	}

	return OK;
}
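
A hedged caller sketch: munmap_lin() rejects unaligned input, so a caller holding an arbitrary range would page-align it first. The addresses and the function name example_unmap_range are placeholders; roundup() is the same macro used in the _brk() listing.

static int example_unmap_range(void)
{
	vir_bytes addr = 0x10000123;	/* placeholder: unaligned start */
	size_t len = 5000;		/* placeholder: unaligned length */
	vir_bytes start = addr - (addr % VM_PAGE_SIZE);
	size_t bytes = roundup(len + (addr - start), VM_PAGE_SIZE);

	return munmap_lin(start, bytes);
}
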
Example #9
/*===========================================================================*
 *				pt_mapkernel		     		     *
 *===========================================================================*/
PUBLIC int pt_mapkernel(pt_t *pt)
{
	int r, i;

        /* Any i386 page table needs to map in the kernel address space. */
        assert(vmproc[VMP_SYSTEM].vm_flags & VMF_INUSE);

	if(bigpage_ok) {
		int pde;
		for(pde = 0; pde <= id_map_high_pde; pde++) {
			phys_bytes addr;
			addr = pde * I386_BIG_PAGE_SIZE;
			assert((addr & I386_VM_ADDR_MASK) == addr);
			pt->pt_dir[pde] = addr | I386_VM_PRESENT |
				I386_VM_BIGPAGE | I386_VM_USER |
				I386_VM_WRITE | global_bit;
		}
	} else {
		panic("VM: pt_mapkernel: no bigpage");
	}

	if(pagedir_pde >= 0) {
		/* Kernel also wants to know about all page directories. */
		pt->pt_dir[pagedir_pde] = pagedir_pde_val;
	}

	for(i = 0; i < kernmappings; i++) {
		if(pt_writemap(NULL, pt,
			kern_mappings[i].lin_addr,
			kern_mappings[i].phys_addr,
			kern_mappings[i].len,
			kern_mappings[i].flags, 0) != OK) {
			panic("pt_mapkernel: pt_writemap failed");
		}
	}

	return OK;
}
Example #10
/*===========================================================================*
 *				vm_freepages		     		     *
 *===========================================================================*/
PRIVATE void vm_freepages(vir_bytes vir, vir_bytes phys, int pages, int reason)
{
	assert(reason >= 0 && reason < VMP_CATEGORIES);
	if(vir >= vmprocess->vm_stacktop) {
		assert(!(vir % I386_PAGE_SIZE)); 
		assert(!(phys % I386_PAGE_SIZE)); 
		free_mem(ABS2CLICK(phys), pages);
		if(pt_writemap(vmprocess, &vmprocess->vm_pt, arch_vir2map(vmprocess, vir),
			MAP_NONE, pages*I386_PAGE_SIZE, 0, WMF_OVERWRITE) != OK)
			panic("vm_freepages: pt_writemap failed");
	} else {
		printf("VM: vm_freepages not freeing VM heap pages (%d)\n",
			pages);
	}

#if SANITYCHECKS
	/* If SANITYCHECKS are on, flush the TLB so that accessing freed pages
	 * always traps, even if a stale translation is still cached.
	 */
	if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
		panic("VMCTL_FLUSHTLB failed");
	}
#endif
}
Example #11
/*===========================================================================*
 *				pt_ptmap		     		     *
 *===========================================================================*/
int pt_ptmap(struct vmproc *src_vmp, struct vmproc *dst_vmp)
{
    /* Transfer the mappings to the page dir and page tables from the source
     * process to the destination process. Make sure all the mappings are
     * above the stack, so as not to corrupt valid mappings in the data
     * segment of the destination process.
     */
    int pde, r;
    phys_bytes physaddr;
    vir_bytes viraddr;
    pt_t *pt;

    pt = &src_vmp->vm_pt;

#if LU_DEBUG
    printf("VM: pt_ptmap: src = %d, dst = %d\n",
           src_vmp->vm_endpoint, dst_vmp->vm_endpoint);
#endif

    /* Transfer mapping to the page directory. */
    viraddr = (vir_bytes) pt->pt_dir;
    physaddr = pt->pt_dir_phys & ARCH_VM_ADDR_MASK;
#if defined(__i386__)
    if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, VM_PAGE_SIZE,
                      ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW,
#elif defined(__arm__)
    if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, ARCH_PAGEDIR_SIZE,
                      ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER |
                      ARM_VM_PTE_WT,
#endif
                      WMF_OVERWRITE)) != OK) {
        return r;
    }
#if LU_DEBUG
    printf("VM: pt_ptmap: transferred mapping to page dir: 0x%08x (0x%08x)\n",
           viraddr, physaddr);
#endif

    /* Scan all non-reserved page-directory entries. */
    for(pde=0; pde < ARCH_VM_DIR_ENTRIES; pde++) {
        if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) {
            continue;
        }

        /* Transfer mapping to the page table. */
        viraddr = (vir_bytes) pt->pt_pt[pde];
#if defined(__i386__)
        physaddr = pt->pt_dir[pde] & ARCH_VM_ADDR_MASK;
#elif defined(__arm__)
        physaddr = pt->pt_dir[pde] & ARCH_VM_PDE_MASK;
#endif
        if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, VM_PAGE_SIZE,
                          ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW
#ifdef __arm__
                          | ARM_VM_PTE_WT
#endif
                          ,
                          WMF_OVERWRITE)) != OK) {
            return r;
        }
    }

    return OK;
}
Example #12
/*===========================================================================*
 *				pt_mapkernel		     		     *
 *===========================================================================*/
int pt_mapkernel(pt_t *pt)
{
    int i;
    int kern_pde = kern_start_pde;
    phys_bytes addr, mapped = 0;

    /* Any page table needs to map in the kernel address space. */
    assert(bigpage_ok);
    assert(kern_pde >= 0);

    /* pt_init() has made sure this is ok. */
    addr = kern_mb_mod->mod_start;

    /* Actually mapping in kernel */
    while(mapped < kern_size) {
#if defined(__i386__)
        pt->pt_dir[kern_pde] = addr | ARCH_VM_PDE_PRESENT |
                               ARCH_VM_BIGPAGE | ARCH_VM_PTE_RW | global_bit;
#elif defined(__arm__)
        pt->pt_dir[kern_pde] = (addr & ARCH_VM_PDE_MASK)
                               | ARM_VM_SECTION
                               | ARM_VM_SECTION_DOMAIN
                               | ARM_VM_SECTION_WT
                               | ARM_VM_SECTION_SUPER;
#endif
        kern_pde++;
        mapped += ARCH_BIG_PAGE_SIZE;
        addr += ARCH_BIG_PAGE_SIZE;
    }

    /* Kernel also wants to know about all page directories. */
    {
        int pd;
        for(pd = 0; pd < MAX_PAGEDIR_PDES; pd++) {
            struct pdm *pdm = &pagedir_mappings[pd];

            assert(pdm->pdeno > 0);
            assert(pdm->pdeno > kern_pde);
            pt->pt_dir[pdm->pdeno] = pdm->val;
        }
    }

    /* Kernel also wants various mappings of its own. */
    for(i = 0; i < kernmappings; i++) {
        int r;
#if defined(__arm__)

#ifdef DM37XX
//FIXME this special case will be removed once we have non 1:1 mapping
#define XXX 0x48000000
#endif
#ifdef AM335X
#define XXX 0x44000000
#endif
        if(kern_mappings[i].phys_addr == XXX) {
            addr = kern_mappings[i].phys_addr;
            assert(!(kern_mappings[i].len % ARCH_BIG_PAGE_SIZE));
            for(mapped = 0; mapped < kern_mappings[i].len;
                    mapped += ARCH_BIG_PAGE_SIZE) {
                int map_pde = addr / ARCH_BIG_PAGE_SIZE;
                assert(!(addr % ARCH_BIG_PAGE_SIZE));
                assert(addr == (addr & ARCH_VM_PDE_MASK));
                assert(!pt->pt_dir[map_pde]);
                pt->pt_dir[map_pde] = addr |
                                      ARM_VM_SECTION | ARM_VM_SECTION_DOMAIN |
                                      ARM_VM_SECTION_DEVICE |
                                      ARM_VM_SECTION_SUPER;
                addr += ARCH_BIG_PAGE_SIZE;
            }
            continue;
        }
#endif

        if((r=pt_writemap(NULL, pt,
                          kern_mappings[i].vir_addr,
                          kern_mappings[i].phys_addr,
                          kern_mappings[i].len,
                          kern_mappings[i].flags, 0)) != OK) {
            return r;
        }

    }

    return OK;
}
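
For context, a hedged sketch of the order in which a fresh address space is typically prepared, mirroring the calls that appear at the end of pt_init() in a later listing (pt_new(), pt_mapkernel(), pt_bind()); vmp and the function name are placeholders and error handling is abbreviated.

static void example_new_address_space(struct vmproc *vmp)
{
    pt_t *pt = &vmp->vm_pt;

    if(pt_new(pt) != OK)            /* create a fresh page table */
        panic("example: pt_new failed");
    if(pt_mapkernel(pt) != OK)      /* every address space must map the kernel */
        panic("example: pt_mapkernel failed");
    pt_bind(pt, vmp);               /* hand the new page table to the kernel */
}
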
Example #13
/*===========================================================================*
 *                              pt_init                                      *
 *===========================================================================*/
PUBLIC void pt_init(phys_bytes usedlimit)
{
/* By default, the kernel gives us a data segment with pre-allocated
 * memory that then can't grow. We want to be able to allocate memory
 * dynamically, however. So here we copy the part of the page table
 * that's ours, so we get a private page table. Then we increase the
 * hardware segment size so we can allocate memory above our stack.
 */
        pt_t *newpt;
        int s, r;
        vir_bytes v;
        phys_bytes lo, hi; 
        vir_bytes extra_clicks;
        u32_t moveup = 0;
	int global_bit_ok = 0;
	int free_pde;
	int p;
	struct vm_ep_data ep_data;
	vir_bytes sparepages_mem;
	phys_bytes sparepages_ph;
	vir_bytes ptr;

        /* Shorthand. */
        newpt = &vmprocess->vm_pt;

        /* Get ourselves spare pages. */
        ptr = (vir_bytes) static_sparepages;
        ptr += I386_PAGE_SIZE - (ptr % I386_PAGE_SIZE);
        if(!(sparepages_mem = ptr))
		panic("pt_init: aalloc for spare failed");
        if((r=sys_umap(SELF, VM_D, (vir_bytes) sparepages_mem,
                I386_PAGE_SIZE*SPAREPAGES, &sparepages_ph)) != OK)
                panic("pt_init: sys_umap failed: %d", r);

        missing_spares = 0;
        assert(STATIC_SPAREPAGES < SPAREPAGES);
        for(s = 0; s < SPAREPAGES; s++) {
        	if(s >= STATIC_SPAREPAGES) {
        		sparepages[s].page = NULL;
        		missing_spares++;
        		continue;
        	}
        	sparepages[s].page = (void *) (sparepages_mem + s*I386_PAGE_SIZE);
        	sparepages[s].phys = sparepages_ph + s*I386_PAGE_SIZE;
        }

	/* global bit and 4MB pages available? */
	global_bit_ok = _cpufeature(_CPUF_I386_PGE);
	bigpage_ok = _cpufeature(_CPUF_I386_PSE);

	/* Set bit for PTE's and PDE's if available. */
	if(global_bit_ok)
		global_bit = I386_VM_GLOBAL;

	/* The kernel and boot time processes need an identity mapping.
	 * We use full PDE's for this without separate page tables.
	 * Figure out which pde we can start using for other purposes.
	 */
	id_map_high_pde = usedlimit / I386_BIG_PAGE_SIZE;

	/* We have to make mappings up till here. */
	free_pde = id_map_high_pde+1;

        /* Initial (current) range of our virtual address space. */
        lo = CLICK2ABS(vmprocess->vm_arch.vm_seg[T].mem_phys);
        hi = CLICK2ABS(vmprocess->vm_arch.vm_seg[S].mem_phys +
                vmprocess->vm_arch.vm_seg[S].mem_len);
                  
        assert(!(lo % I386_PAGE_SIZE)); 
        assert(!(hi % I386_PAGE_SIZE));
 
        if(lo < VM_PROCSTART) {
                moveup = VM_PROCSTART - lo;
                assert(!(VM_PROCSTART % I386_PAGE_SIZE));
                assert(!(lo % I386_PAGE_SIZE));
                assert(!(moveup % I386_PAGE_SIZE));
        }
        
        /* Make new page table for ourselves, partly copied
         * from the current one.
         */     
        if(pt_new(newpt) != OK)
                panic("pt_init: pt_new failed"); 

        /* Set up mappings for VM process. */
        for(v = lo; v < hi; v += I386_PAGE_SIZE)  {
                phys_bytes addr;
                u32_t flags; 
        
                /* We have to write the new position in the PT,
                 * so we can move our segments.
                 */ 
                if(pt_writemap(vmprocess, newpt, v+moveup, v, I386_PAGE_SIZE,
                        I386_VM_PRESENT|I386_VM_WRITE|I386_VM_USER, 0) != OK)
                        panic("pt_init: pt_writemap failed");
        }
       
        /* Move segments up too. */
        vmprocess->vm_arch.vm_seg[T].mem_phys += ABS2CLICK(moveup);
        vmprocess->vm_arch.vm_seg[D].mem_phys += ABS2CLICK(moveup);
        vmprocess->vm_arch.vm_seg[S].mem_phys += ABS2CLICK(moveup);
       
	/* Allocate us a page table in which to remember page directory
	 * pointers.
	 */
	if(!(page_directories = vm_allocpage(&page_directories_phys,
		VMP_PAGETABLE)))
                panic("no virt addr for vm mappings");

	memset(page_directories, 0, I386_PAGE_SIZE);
       
        /* Increase our hardware data segment to create virtual address
         * space above our stack. We want to increase it to VM_DATATOP,
         * like regular processes have.
         */
        extra_clicks = ABS2CLICK(VM_DATATOP - hi);
        vmprocess->vm_arch.vm_seg[S].mem_len += extra_clicks;
       
        /* We pretend to the kernel we have a huge stack segment to
         * increase our data segment.
         */
        vmprocess->vm_arch.vm_data_top =
                (vmprocess->vm_arch.vm_seg[S].mem_vir +
                vmprocess->vm_arch.vm_seg[S].mem_len) << CLICK_SHIFT;
       
        /* Where our free virtual address space starts.
         * This is only a hint to the VM system.
         */
        newpt->pt_virtop = 0;

        /* Let other functions know VM now has a private page table. */
        vmprocess->vm_flags |= VMF_HASPT;

	/* Now reserve another pde for kernel's own mappings. */
	{
		int kernmap_pde;
		phys_bytes addr, len;
		int flags, index = 0;
		u32_t offset = 0;

		kernmap_pde = free_pde++;
		offset = kernmap_pde * I386_BIG_PAGE_SIZE;

		while(sys_vmctl_get_mapping(index, &addr, &len,
			&flags) == OK)  {
			vir_bytes vir;
			if(index >= MAX_KERNMAPPINGS)
                		panic("VM: too many kernel mappings: %d", index);
			kern_mappings[index].phys_addr = addr;
			kern_mappings[index].len = len;
			kern_mappings[index].flags = flags;
			kern_mappings[index].lin_addr = offset;
			kern_mappings[index].flags =
				I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE |
				global_bit;
			if(flags & VMMF_UNCACHED)
				kern_mappings[index].flags |= PTF_NOCACHE;
			if(addr % I386_PAGE_SIZE)
                		panic("VM: addr unaligned: %d", addr);
			if(len % I386_PAGE_SIZE)
                		panic("VM: len unaligned: %d", len);
			vir = arch_map2vir(&vmproc[VMP_SYSTEM], offset);
			if(sys_vmctl_reply_mapping(index, vir) != OK)
                		panic("VM: reply failed");
			offset += len;
			index++;
			kernmappings++;
		}
	}

	/* Find a PDE below processes available for mapping in the
	 * page directories (readonly).
	 */
	pagedir_pde = free_pde++;
	pagedir_pde_val = (page_directories_phys & I386_VM_ADDR_MASK) |
			I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;

	/* Tell kernel about free pde's. */
	while(free_pde*I386_BIG_PAGE_SIZE < VM_PROCSTART) {
		if((r=sys_vmctl(SELF, VMCTL_I386_FREEPDE, free_pde++)) != OK) {
			panic("VMCTL_I386_FREEPDE failed: %d", r);
		}
	}

	/* first pde in use by process. */
	proc_pde = free_pde;

        /* Give our process the new, copied, private page table. */
	pt_mapkernel(newpt);	/* didn't know about vm_dir pages earlier */
        pt_bind(newpt, vmprocess);
       
	/* new segment limit for the kernel after paging is enabled */
	ep_data.data_seg_limit = free_pde*I386_BIG_PAGE_SIZE;
	/* the memory map which must be installed after paging is enabled */
	ep_data.mem_map = vmprocess->vm_arch.vm_seg;

	/* Now actually enable paging. */
	if(sys_vmctl_enable_paging(&ep_data) != OK)
        	panic("pt_init: enable paging failed");

        /* Back to reality - this is where the stack actually is. */
        vmprocess->vm_arch.vm_seg[S].mem_len -= extra_clicks;

        /* Pretend VM stack top is the same as any regular process, not to
         * have discrepancies with new VM instances later on.
         */
        vmprocess->vm_stacktop = VM_STACKTOP;

        /* All OK. */
        return;
}
Example #14
/*===========================================================================*
 *				vm_allocpage		     		     *
 *===========================================================================*/
PUBLIC void *vm_allocpage(phys_bytes *phys, int reason)
{
/* Allocate a page for use by VM itself. */
	phys_bytes newpage;
	vir_bytes loc;
	pt_t *pt;
	int r;
	static int level = 0;
	void *ret;

	pt = &vmprocess->vm_pt;
	assert(reason >= 0 && reason < VMP_CATEGORIES);

	level++;

	assert(level >= 1);
	assert(level <= 2);

	if(level > 1 || !(vmprocess->vm_flags & VMF_HASPT) || !meminit_done) {
		int r;
		void *s;
		s=vm_getsparepage(phys);
		level--;
		if(!s) {
			util_stacktrace();
			printf("VM: warning: out of spare pages\n");
		}
		return s;
	}

	/* VM does have a pagetable, so get a page and map it in there.
	 * Where in our virtual address space can we put it?
	 */
	loc = findhole(pt,  arch_vir2map(vmprocess, vmprocess->vm_stacktop),
		vmprocess->vm_arch.vm_data_top);
	if(loc == NO_MEM) {
		level--;
		printf("VM: vm_allocpage: findhole failed\n");
		return NULL;
	}

	/* Allocate page of memory for use by VM. As VM
	 * is trusted, we don't have to pre-clear it.
	 */
	if((newpage = alloc_mem(CLICKSPERPAGE, 0)) == NO_MEM) {
		level--;
		printf("VM: vm_allocpage: alloc_mem failed\n");
		return NULL;
	}

	*phys = CLICK2ABS(newpage);

	/* Map this page into our address space. */
	if((r=pt_writemap(vmprocess, pt, loc, *phys, I386_PAGE_SIZE,
		I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE, 0)) != OK) {
		free_mem(newpage, CLICKSPERPAGE);
		printf("vm_allocpage writemap failed\n");
		level--;
		return NULL;
	}

	if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
		panic("VMCTL_FLUSHTLB failed: %d", r);
	}

	level--;

	/* Return user-space-ready pointer to it. */
	ret = (void *) arch_map2vir(vmprocess, loc);

	return ret;
}
Example #15
/*===========================================================================*
 *				vm_allocpages		     		     *
 *===========================================================================*/
void *vm_allocpages(phys_bytes *phys, int reason, int pages)
{
    /* Allocate pages for use by VM itself. */
    phys_bytes newpage;
    vir_bytes loc;
    pt_t *pt;
    int r;
    static int level = 0;
    void *ret;
    u32_t mem_flags = 0;

    pt = &vmprocess->vm_pt;
    assert(reason >= 0 && reason < VMP_CATEGORIES);

    assert(pages > 0);

    level++;

    assert(level >= 1);
    assert(level <= 2);

    if((level > 1) || !pt_init_done) {
        void *s;

        if(pages == 1) s=vm_getsparepage(phys);
        else if(pages == 4) s=vm_getsparepagedir(phys);
        else panic("%d pages", pages);

        level--;
        if(!s) {
            util_stacktrace();
            printf("VM: warning: out of spare pages\n");
        }
        if(!is_staticaddr(s)) vm_self_pages++;
        return s;
    }

#if defined(__arm__)
    if (reason == VMP_PAGEDIR) {
        mem_flags |= PAF_ALIGN16K;
    }
#endif

    /* VM does have a pagetable, so get a page and map it in there.
     * Where in our virtual address space can we put it?
     */
    loc = findhole(pages);
    if(loc == NO_MEM) {
        level--;
        printf("VM: vm_allocpage: findhole failed\n");
        return NULL;
    }

    /* Allocate page of memory for use by VM. As VM
     * is trusted, we don't have to pre-clear it.
     */
    if((newpage = alloc_mem(pages, mem_flags)) == NO_MEM) {
        level--;
        printf("VM: vm_allocpage: alloc_mem failed\n");
        return NULL;
    }

    *phys = CLICK2ABS(newpage);

    /* Map this page into our address space. */
    if((r=pt_writemap(vmprocess, pt, loc, *phys, VM_PAGE_SIZE*pages,
                      ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW
#if defined(__arm__)
                      | ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE
#endif
                      , 0)) != OK) {
        free_mem(newpage, pages);
        printf("vm_allocpage writemap failed\n");
        level--;
        return NULL;
    }

    if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
        panic("VMCTL_FLUSHTLB failed: %d", r);
    }

    level--;

    /* Return user-space-ready pointer to it. */
    ret = (void *) loc;

    vm_self_pages++;
    return ret;
}
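
A minimal sketch pairing vm_allocpages() with the vm_freepages() variant shown earlier: allocate a single page for VM's own use, clear it, and release it again. The function name example_alloc_and_free is hypothetical.

static void example_alloc_and_free(void)
{
    phys_bytes ph;
    void *p;

    if(!(p = vm_allocpages(&ph, VMP_PAGETABLE, 1)))
        panic("example: out of memory");
    memset(p, 0, VM_PAGE_SIZE);

    /* Unmap the page and free the physical memory (WMF_FREE path above). */
    vm_freepages((vir_bytes) p, 1);
}
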
Example #16
/*===========================================================================*
 *				findhole		     		     *
 *===========================================================================*/
static u32_t findhole(int pages)
{
    /* Find a space in the virtual address space of VM. */
    u32_t curv;
    int pde = 0, try_restart;
    static u32_t lastv = 0;
    pt_t *pt = &vmprocess->vm_pt;
    vir_bytes vmin, vmax;
#if defined(__arm__)
    u32_t holev;
#endif

    vmin = (vir_bytes) (&_end); /* marks end of VM BSS */
    vmin += 1024*1024*1024;	/* reserve 1GB virtual address space for VM heap */
    vmin &= ARCH_VM_ADDR_MASK;
    vmax = VM_STACKTOP;

    /* Input sanity check. */
    assert(vmin + VM_PAGE_SIZE >= vmin);
    assert(vmax >= vmin + VM_PAGE_SIZE);
    assert((vmin % VM_PAGE_SIZE) == 0);
    assert((vmax % VM_PAGE_SIZE) == 0);
#if defined(__arm__)
    assert(pages > 0);
#endif

#if SANITYCHECKS
    curv = ((u32_t) random()) % ((vmax - vmin)/VM_PAGE_SIZE);
    curv *= VM_PAGE_SIZE;
    curv += vmin;
#else
    curv = lastv;
    if(curv < vmin || curv >= vmax)
        curv = vmin;
#endif
    try_restart = 1;

    /* Start looking for a free page starting at vmin. */
    while(curv < vmax) {
        int pte;
#if defined(__arm__)
        int i, nohole;
#endif

        assert(curv >= vmin);
        assert(curv < vmax);

#if defined(__i386__)
        pde = I386_VM_PDE(curv);
        pte = I386_VM_PTE(curv);
#elif defined(__arm__)
        holev = curv; /* the candidate hole */
        nohole = 0;
        for (i = 0; i < pages && !nohole; ++i) {
            if(curv >= vmax) {
                break;
            }
#endif

#if defined(__i386__)
        if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) ||
                !(pt->pt_pt[pde][pte] & ARCH_VM_PAGE_PRESENT)) {
#elif defined(__arm__)
        pde = ARM_VM_PDE(curv);
        pte = ARM_VM_PTE(curv);

        /* if page present, no hole */
        if((pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) &&
                (pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT))
            nohole = 1;

        /* if not contiguous, no hole */
        if (curv != holev + i * VM_PAGE_SIZE)
            nohole = 1;

        curv+=VM_PAGE_SIZE;
    }

    /* there's a large enough hole */
    if (!nohole && i == pages) {
#endif
            lastv = curv;
#if defined(__i386__)
            return curv;
#elif defined(__arm__)
            return holev;
#endif
        }

#if defined(__i386__)
        curv+=VM_PAGE_SIZE;

#elif defined(__arm__)
        /* Reset curv */
#endif
        if(curv >= vmax && try_restart) {
            curv = vmin;
            try_restart = 0;
        }
    }

    printf("VM: out of virtual address space in vm\n");

    return NO_MEM;
}

/*===========================================================================*
 *				vm_freepages		     		     *
 *===========================================================================*/
void vm_freepages(vir_bytes vir, int pages)
{
    assert(!(vir % VM_PAGE_SIZE));

    if(is_staticaddr(vir)) {
        printf("VM: not freeing static page\n");
        return;
    }

    if(pt_writemap(vmprocess, &vmprocess->vm_pt, vir,
                   MAP_NONE, pages*VM_PAGE_SIZE, 0,
                   WMF_OVERWRITE | WMF_FREE) != OK)
        panic("vm_freepages: pt_writemap failed");

    vm_self_pages--;

#if SANITYCHECKS
    /* If SANITYCHECKS are on, flush the TLB so that accessing freed pages
     * always traps, even if a stale translation is still cached.
     */
    if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
        panic("VMCTL_FLUSHTLB failed");
    }
#endif
}