Example #1
static int
reservedqueue_addslot(struct reserved_pages *rq)
{
	phys_bytes cl, cl_addr;
	void *vir;
	struct reserved_pageslot *rps;

	sanitycheck_rq(rq);

	if((cl = alloc_mem(rq->npages, rq->allocflags)) == NO_MEM)
		return ENOMEM;

	cl_addr = CLICK2ABS(cl);

	vir = NULL;

	if(rq->mappedin) {
		if(!(vir = vm_mappages(cl_addr, rq->npages))) {
			free_mem(cl, rq->npages);
			printf("reservedqueue_addslot: vm_mappages failed\n");
			return ENOMEM;
		}
	}

	rps = &rq->slots[rq->n_available];

	reservedqueue_fillslot(rq, rps, cl_addr, vir);

	return OK;
}
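Note: in this codebase, vm_mappages() appears to take an absolute
physical address and a page count, returning the virtual address of the
new mapping or NULL on failure. A minimal usage sketch under that
assumption (map_fresh_page is hypothetical):

static void *map_fresh_page(void)
{
	phys_bytes cl;

	/* Allocate one page, then map it into VM's own address space */
	if((cl = alloc_mem(1, 0)) == NO_MEM)
		return NULL;

	void *vir = vm_mappages(CLICK2ABS(cl), 1);
	if(!vir)
		free_mem(cl, 1); /* undo the allocation on mapping failure */

	return vir;
}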
Example #2
int vm_init(void)
{
	slock_init(&global_mem_lock);
#ifdef __ALLOW_VM_SHARE__
	vm_share_init(); /* Setup shared memory */
#endif

	/** 
	 * The boot loader has handled all of the messy work for us.
	 * All we need to do is pick up the free map head and kernel
	 * page directory.
	 */
	
	/* The bootstrap directly mapped the null guard page; unmap it */
	vm_unmappage(0x0, k_pgdir);

	vmflags_t dir_flags = VM_DIR_READ | VM_DIR_WRIT;
	vmflags_t tbl_flags = VM_TBL_READ | VM_TBL_WRIT;

	/* Map pages in for our kernel stack */
	vm_mappages(KVM_KSTACK_S, KVM_KSTACK_E - KVM_KSTACK_S, k_pgdir, 
		dir_flags, tbl_flags);

	/* Add bootstrap code to the memory pool */
	int boot2_s = PGROUNDDOWN(KVM_BOOT2_S) + PGSIZE;
	int boot2_e = PGROUNDUP(KVM_BOOT2_E);

	int x;
	for(x = boot2_s; x < boot2_e; x += PGSIZE)
		pfree(x);

	/* Load the kernel page directory (this also flushes the TLB) */
	vm_enable_paging(k_pgdir);

	return 0;
}
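Note: this codebase uses a five-argument vm_mappages(start, size,
pgdir, dir_flags, tbl_flags) that maps a virtual range into the given
page directory and, judging by Example #5, returns nonzero on failure.
A sketch under that assumption (SCRATCH_VA is a hypothetical
page-aligned kernel virtual address):

	if(vm_mappages(SCRATCH_VA, 2 * PGSIZE, k_pgdir,
			VM_DIR_READ | VM_DIR_WRIT,
			VM_TBL_READ | VM_TBL_WRIT))
		panic("couldn't map scratch pages");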
Example #3
/*===========================================================================*
 *				vm_allocpages		     		     *
 *===========================================================================*/
void *vm_allocpages(phys_bytes *phys, int reason, int pages)
{
    /* Allocate a page for use by VM itself. */
    phys_bytes newpage;
    static int level = 0;
    void *ret;
    u32_t mem_flags = 0;

    assert(reason >= 0 && reason < VMP_CATEGORIES);

    assert(pages > 0);

    level++;

    assert(level >= 1);
    assert(level <= 2);

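    /* Re-entered (level > 1) or the page tables are not ready yet:
     * hand out a pre-mapped spare page instead of recursing into the
     * allocator and mapper below.
     */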
    if((level > 1) || !pt_init_done) {
        void *s;

        if(pages == 1) s=vm_getsparepage(phys);
        else if(pages == 4) s=vm_getsparepagedir(phys);
        else panic("%d pages", pages);

        level--;
        if(!s) {
            util_stacktrace();
            printf("VM: warning: out of spare pages\n");
        }
        if(!is_staticaddr(s)) vm_self_pages++;
        return s;
    }

#if defined(__arm__)
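    /* ARM first-level page directories must be 16 KiB aligned */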
    if (reason == VMP_PAGEDIR) {
        mem_flags |= PAF_ALIGN16K;
    }
#endif

    /* Allocate page of memory for use by VM. As VM
     * is trusted, we don't have to pre-clear it.
     */
    if((newpage = alloc_mem(pages, mem_flags)) == NO_MEM) {
        level--;
        printf("VM: vm_allocpage: alloc_mem failed\n");
        return NULL;
    }

    *phys = CLICK2ABS(newpage);

    if(!(ret = vm_mappages(*phys, pages))) {
        level--;
        printf("VM: vm_allocpage: vm_mappages failed\n");
        return NULL;
    }

    level--;
    vm_self_pages++;

    return ret;
}
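A hypothetical call site, assuming the categories used above:

    phys_bytes pd_phys;
    void *pd = vm_allocpages(&pd_phys, VMP_PAGEDIR, 4); /* 4 pages: pagedir */
    if(!pd)
        printf("VM: could not allocate a page directory\n");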
Example #4
/** MUST HAVE PTABLE LOCK! */
void* mmap(void* hint, size_t sz, int protection, 
	int flags, int fd, off_t offset)
{
	/* Validate the hint: it must be non-NULL and page aligned */
	vmpage_t pagestart = 0;
	int hint_okay = 1;
	if(!hint || (uintptr_t)hint != PGROUNDDOWN((uintptr_t)hint))
		hint_okay = 0;

	/* If a hint was provided but is not acceptable, fail the call */
	if(hint && !hint_okay)
		return NULL;

	/* Acquire the memory lock */
	slock_acquire(&rproc->mem_lock);

	if(hint_okay && (flags & MAP_FIXED))
	{
		vmpage_t addr = PGROUNDDOWN((vmpage_t)hint);
		/* Is the address appropriate? */
		if(addr >= PGROUNDUP(rproc->heap_end) + PGSIZE && 
				addr < rproc->mmap_start && 
				addr + sz < rproc->mmap_start)
			pagestart = addr;
		else {
			slock_release(&rproc->mem_lock);
			return NULL;
		}
	} else {
		pagestart = (uintptr_t)mmap_find_space(sz);
	}

	if(!pagestart) 
	{
		slock_release(&rproc->mem_lock);
		return NULL;
	}

	if(pagestart < rproc->mmap_end)
		rproc->mmap_end = pagestart;

	vmflags_t dir_flags = VM_DIR_USRP | VM_DIR_READ | VM_DIR_WRIT;
	vmflags_t tbl_flags = 0;
	if(protection & PROT_WRITE)
		tbl_flags |= VM_TBL_WRIT;
	if(protection & PROT_EXEC)
		tbl_flags |= VM_TBL_EXEC;
	if(protection & PROT_READ)
		tbl_flags |= VM_TBL_READ;
	
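	/* Reject PROT_NONE (no access) requests */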
	if(!protection)
	{
		slock_release(&rproc->mem_lock);
		return NULL;
	}

	/* Enable default flags */
	tbl_flags |= VM_TBL_USRP | VM_TBL_READ;

	/* Map the pages; on failure release the lock and fail */
	if(vm_mappages(pagestart, sz, rproc->pgdir, dir_flags, tbl_flags))
	{
		slock_release(&rproc->mem_lock);
		return NULL;
	}

#ifdef  __ALLOW_VM_SHARE__
	/* Is this mapping shared? */
	if(flags & MAP_SHARED)
	{
		if(vm_pgsshare(pagestart, sz, rproc->pgdir))
		{
			slock_release(&rproc->mem_lock);
			return NULL;
		}
	}
#endif

	/* Release locks */
	slock_release(&rproc->mem_lock);
	return (void*)pagestart;
}
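A hypothetical anonymous-mapping call, assuming fd and offset are
ignored when no file-backed flags are passed:

	/* Reserve one page of user memory, readable and writable */
	void *buf = mmap(NULL, PGSIZE, PROT_READ | PROT_WRITE, 0, -1, 0);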
Example #5
uintptr_t elf_load_binary_inode(inode ino, pgdir_t* pgdir, uintptr_t* seg_start, 
		uintptr_t* seg_end, int user)
{
	if(!ino) return 0;

	if(elf_check_binary_inode(ino))
		return 0;

	/* Load the entire ELF header and verify we got all of it. */
	struct elf32_header elf;
	if(fs_read(ino, &elf, sizeof(struct elf32_header), 0)
			!= sizeof(struct elf32_header))
		return 0;

	size_t elf_end = 0;
        uintptr_t elf_entry = elf.e_entry;

	uintptr_t code_start = (uintptr_t)-1;
	uintptr_t code_end = 0;
	int x;
        for(x = 0; x < elf.e_phnum; x++)
        {
                int header_loc = elf.e_phoff + (x * elf.e_phentsize);
                struct elf32_program_header curr_header;
                fs_read(ino, &curr_header,
                        sizeof(struct elf32_program_header),
                        header_loc);
                /* Skip null program headers */
                if(curr_header.type == ELF_PH_TYPE_NULL) 
			continue;

                /* 
                 * GNU Stack is a recommendation by the compiler
                 * to allow executable stacks. This section doesn't
                 * need to be loaded into memory because it's just
                 * a flag.
                 */
                if(curr_header.type == ELF_PH_TYPE_GNU_STACK)
                        continue;

                if(curr_header.type == ELF_PH_TYPE_LOAD)
                {
                        /* Load this header into memory. */
                        uintptr_t hd_addr = (uintptr_t)curr_header.virt_addr;
                        off_t offset = curr_header.offset;
                        size_t file_sz = curr_header.file_sz;
                        size_t mem_sz = curr_header.mem_sz;
			/* Paging: allocate user pages */
			
			vmflags_t dir_flags = VM_DIR_READ | VM_DIR_WRIT;
			vmflags_t tbl_flags = VM_TBL_READ | VM_TBL_WRIT;
			
			if(user) 
			{
				dir_flags |= VM_DIR_USRP;
				tbl_flags |= VM_TBL_USRP;
			} else {
				dir_flags |= VM_DIR_KRNP;
				tbl_flags |= VM_TBL_KRNP;
			}

			/* Should this section be executable? */
			if(curr_header.flags & ELF_PH_FLAG_X)
                                tbl_flags |= VM_TBL_EXEC;

			/* Map the pages into memory */
			if(vm_mappages(hd_addr, mem_sz, pgdir, 
					dir_flags, tbl_flags))
				return 0;

			if(hd_addr + mem_sz > elf_end)
				elf_end = hd_addr + mem_sz;

                        /* Zero the whole segment first so any BSS area
                         * (mem_sz > file_sz) ends up zero-filled */
                        memset((void*)hd_addr, 0, mem_sz);

			/* Is this a new start? */
			if((uintptr_t)hd_addr < code_start)
				code_start = (uintptr_t)hd_addr;

			/* Is this a new end? */
			if((uintptr_t)(hd_addr + mem_sz) > code_end)
				code_end = (uintptr_t)(hd_addr + mem_sz);

                        /* Load the section */
                        if(fs_read(ino, (void*)hd_addr, file_sz, offset) 
					!= file_sz)
				return 0;

			/* Should we make these pages writable? */
			if(!(curr_header.flags & ELF_PH_FLAG_W))
			{
				if(vm_pgsreadonly((uintptr_t)hd_addr, 
						(uintptr_t)hd_addr 
						+ mem_sz, pgdir))
					return 0;

			}
                }
        }

	/* Report the loaded segment boundaries to the caller */
	if(seg_start)
		*seg_start = code_start;
	if(seg_end)
		*seg_end = code_end;

	return elf_entry;
}
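A hypothetical loader call, assuming ino and pgdir are already in hand:

	uintptr_t seg_s, seg_e;
	uintptr_t entry = elf_load_binary_inode(ino, pgdir, &seg_s, &seg_e, 1);
	if(!entry)
		return -1; /* not a loadable ELF, or an I/O error */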