Example #1
void *vm_mappages(phys_bytes p, int pages)
{
    vir_bytes loc;
    int r;
    pt_t *pt = &vmprocess->vm_pt;

    /* Where in our virtual address space can we put it? */
    loc = findhole(pages);
    if(loc == NO_MEM) {
        printf("vm_mappages: findhole failed\n");
        return NULL;
    }

    /* Map this page into our address space. */
    if((r=pt_writemap(vmprocess, pt, loc, p, VM_PAGE_SIZE*pages,
                      ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW
#if defined(__arm__)
                      | ARM_VM_PTE_WT
#endif
                      , 0)) != OK) {
        printf("vm_mappages writemap failed\n");
        return NULL;
    }

    if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
        panic("VMCTL_FLUSHTLB failed: %d", r);
    }

    assert(loc);

    return (void *) loc;
}
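
A minimal usage sketch, not part of the snippet above (the helper name is hypothetical): a VM-internal caller would typically obtain physical memory first, as the other examples here do with alloc_mem()/CLICK2ABS() where one click corresponds to one page, and then hand the byte address to vm_mappages(), checking for NULL on failure.

static void *map_two_pages_sketch(void)	/* hypothetical helper */
{
    phys_bytes clicks, phys;
    void *v;

    if((clicks = alloc_mem(2, 0)) == NO_MEM)   /* grab two pages/clicks */
        return NULL;
    phys = CLICK2ABS(clicks);                  /* click number -> byte address */

    if(!(v = vm_mappages(phys, 2))) {          /* map into VM's own space */
        free_mem(clicks, 2);                   /* undo allocation on failure */
        return NULL;
    }
    return v;    /* page-aligned pointer usable by VM itself */
}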
Example #2
/*===========================================================================*
 *				vm_pagelock		     		     *
 *===========================================================================*/
void vm_pagelock(void *vir, int lockflag)
{
    /* Mark a page allocated by vm_allocpage() unwritable, i.e. only for VM. */
    vir_bytes m = (vir_bytes) vir;
    int r;
    u32_t flags = ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER;
    pt_t *pt;

    pt = &vmprocess->vm_pt;

    assert(!(m % VM_PAGE_SIZE));

    if(!lockflag)
        flags |= ARCH_VM_PTE_RW;
#if defined(__arm__)
    else
        flags |= ARCH_VM_PTE_RO;
    flags |= ARM_VM_PTE_WT;
#endif

    /* Update flags. */
    if((r=pt_writemap(vmprocess, pt, m, 0, VM_PAGE_SIZE,
                      flags, WMF_OVERWRITE | WMF_WRITEFLAGSONLY)) != OK) {
        panic("vm_lockpage: pt_writemap failed");
    }

    if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
        panic("VMCTL_FLUSHTLB failed: %d", r);
    }

    return;
}
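
A hedged sketch of the typical pairing (the helper below is hypothetical): a page allocated with vm_allocpage(), shown in a later example, is filled in while still writable and then write-protected with vm_pagelock(); passing a zero lockflag later makes it writable again.

static void *make_readonly_page_sketch(void)    /* hypothetical helper */
{
    phys_bytes phys;
    void *p;

    if(!(p = vm_allocpage(&phys, VMP_PAGETABLE)))   /* reason code from the other examples */
        return NULL;

    memset(p, 0, VM_PAGE_SIZE);    /* initialize while the page is still writable */
    vm_pagelock(p, 1);             /* nonzero lockflag: mark read-only for VM */
    /* ... to modify it again later: vm_pagelock(p, 0); */
    return p;
}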
Example #3
/*===========================================================================*
 *				pt_bind			     		     *
 *===========================================================================*/
PUBLIC int pt_bind(pt_t *pt, struct vmproc *who)
{
    int slot, ispt;
    u32_t phys;

    /* Basic sanity checks. */
    vm_assert(who);
    vm_assert(who->vm_flags & VMF_INUSE);
    vm_assert(pt);

    slot = who->vm_slot;
    vm_assert(slot >= 0);
    vm_assert(slot < ELEMENTS(vmproc));
    vm_assert(slot < I386_VM_PT_ENTRIES);

    phys = pt->pt_dir_phys & I386_VM_ADDR_MASK;
    vm_assert(pt->pt_dir_phys == phys);

    /* Update "page directory pagetable." */
    page_directories[slot] = phys | I386_VM_PRESENT|I386_VM_WRITE;

#if 0
    printf("VM: slot %d has pde val 0x%lx\n", slot, page_directories[slot]);
#endif
    /* Tell kernel about new page table root. */
    return sys_vmctl(who->vm_endpoint, VMCTL_I386_SETCR3,
                     pt ? pt->pt_dir_phys : 0);
}
Example #4
/*===========================================================================*
 *				vm_freepages		     		     *
 *===========================================================================*/
void vm_freepages(vir_bytes vir, int pages)
{
    assert(!(vir % VM_PAGE_SIZE));

    if(is_staticaddr(vir)) {
        printf("VM: not freeing static page\n");
        return;
    }

    if(pt_writemap(vmprocess, &vmprocess->vm_pt, vir,
                   MAP_NONE, pages*VM_PAGE_SIZE, 0,
                   WMF_OVERWRITE | WMF_FREE) != OK)
        panic("vm_freepages: pt_writemap failed");

    vm_self_pages--;

#if SANITYCHECKS
    /* If SANITYCHECKS are on, flush the TLB so that accesses to freed
     * pages always trap and are never satisfied by a stale TLB entry.
     */
    if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
        panic("VMCTL_FLUSHTLB failed");
    }
#endif
}
Example #5
int _brk(void *addr)
{
	vir_bytes target = roundup((vir_bytes)addr, VM_PAGE_SIZE), v;
	extern char _end;
	extern char *_brksize;
	static vir_bytes prevbrk = (vir_bytes) &_end;
	struct vmproc *vmprocess = &vmproc[VM_PROC_NR];

	for(v = roundup(prevbrk, VM_PAGE_SIZE); v < target;
		v += VM_PAGE_SIZE) {
		phys_bytes mem, newpage = alloc_mem(1, 0);
		if(newpage == NO_MEM) return -1;
		mem = CLICK2ABS(newpage);
		if(pt_writemap(vmprocess, &vmprocess->vm_pt,
			v, mem, VM_PAGE_SIZE,
			  ARCH_VM_PTE_PRESENT
			| ARCH_VM_PTE_USER
			| ARCH_VM_PTE_RW
#if defined(__arm__)
			| ARM_VM_PTE_WB
#endif
			, 0) != OK) {
			free_mem(newpage, 1);
			return -1;
		}
		prevbrk = v + VM_PAGE_SIZE;
	}

        _brksize = (char *) addr;

        if(sys_vmctl(SELF, VMCTL_FLUSHTLB, 0) != OK)
        	panic("flushtlb failed");

	return 0;
}
Example #6
/*===========================================================================*
 *				vm_pagelock		     		     *
 *===========================================================================*/
PUBLIC void vm_pagelock(void *vir, int lockflag)
{
/* Mark a page allocated by vm_allocpage() unwritable, i.e. only for VM. */
	vir_bytes m;
	int r;
	u32_t flags = I386_VM_PRESENT | I386_VM_USER;
	pt_t *pt;

	pt = &vmprocess->vm_pt;
	m = arch_vir2map(vmprocess, (vir_bytes) vir);

	assert(!(m % I386_PAGE_SIZE));

	if(!lockflag)
		flags |= I386_VM_WRITE;

	/* Update flags. */
	if((r=pt_writemap(vmprocess, pt, m, 0, I386_PAGE_SIZE,
		flags, WMF_OVERWRITE | WMF_WRITEFLAGSONLY)) != OK) {
		panic("vm_lockpage: pt_writemap failed");
	}

	if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
		panic("VMCTL_FLUSHTLB failed: %d", r);
	}

	return;
}
Example #7
void pt_clearmapcache(void)
{
    /* Make sure the kernel will invalidate the TLB when it uses the current
     * page table (i.e. VM's own) to create new mappings, before the new cr3
     * is loaded.
     */
    if(sys_vmctl(SELF, VMCTL_CLEARMAPCACHE, 0) != OK)
        panic("VMCTL_CLEARMAPCACHE failed");
}
Example #8
void exec_bootproc(struct vmproc *vmp, struct boot_image *ip)
{
	struct vm_exec_info vmexeci;
	struct exec_info *execi = &vmexeci.execi;
	char hdr[VM_PAGE_SIZE];

	memset(&vmexeci, 0, sizeof(vmexeci));

	if(pt_new(&vmp->vm_pt) != OK)
		panic("VM: no new pagetable");

	if(pt_bind(&vmp->vm_pt, vmp) != OK)
		panic("VM: pt_bind failed");

	if(sys_physcopy(NONE, ip->start_addr, SELF,
		(vir_bytes) hdr, sizeof(hdr)) != OK)
		panic("can't look at boot proc header");

        execi->stack_high = kernel_boot_info.user_sp;
        execi->stack_size = DEFAULT_STACK_LIMIT;
        execi->proc_e = vmp->vm_endpoint;
        execi->hdr = hdr;
        execi->hdr_len = sizeof(hdr);
        strlcpy(execi->progname, ip->proc_name, sizeof(execi->progname));
        execi->frame_len = 0;
	execi->opaque = &vmexeci;
	execi->filesize = ip->len;

	vmexeci.ip = ip;
	vmexeci.vmp = vmp;

        /* callback functions and data */
        execi->copymem = libexec_copy_physcopy;
        execi->clearproc = NULL;
        execi->clearmem = libexec_clear_sys_memset;
	execi->allocmem_prealloc_junk = libexec_alloc_vm_prealloc;
	execi->allocmem_prealloc_cleared = libexec_alloc_vm_prealloc;
        execi->allocmem_ondemand = libexec_alloc_vm_ondemand;

	if(libexec_load_elf(execi) != OK)
		panic("vm: boot process load of process %s (ep=%d) failed\n", 
			execi->progname,vmp->vm_endpoint);

        if(sys_exec(vmp->vm_endpoint, (char *) execi->stack_high - 12,
		(char *) ip->proc_name, execi->pc) != OK)
		panic("vm: boot process exec of process %s (ep=%d) failed\n", 
			execi->progname,vmp->vm_endpoint);

	/* make it runnable */
	if(sys_vmctl(vmp->vm_endpoint, VMCTL_BOOTINHIBIT_CLEAR, 0) != OK)
		panic("VMCTL_BOOTINHIBIT_CLEAR failed");
}
Example #9
/*===========================================================================*
 *				vm_freepages		     		     *
 *===========================================================================*/
PRIVATE void vm_freepages(vir_bytes vir, vir_bytes phys, int pages, int reason)
{
	assert(reason >= 0 && reason < VMP_CATEGORIES);
	if(vir >= vmprocess->vm_stacktop) {
		assert(!(vir % I386_PAGE_SIZE)); 
		assert(!(phys % I386_PAGE_SIZE)); 
		free_mem(ABS2CLICK(phys), pages);
		if(pt_writemap(vmprocess, &vmprocess->vm_pt, arch_vir2map(vmprocess, vir),
			MAP_NONE, pages*I386_PAGE_SIZE, 0, WMF_OVERWRITE) != OK)
			panic("vm_freepages: pt_writemap failed");
	} else {
		printf("VM: vm_freepages not freeing VM heap pages (%d)\n",
			pages);
	}

#if SANITYCHECKS
	/* If SANITYCHECKS are on, flush the TLB so that accesses to freed
	 * pages always trap and are never satisfied by a stale TLB entry.
	 */
	if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
		panic("VMCTL_FLUSHTLB failed");
	}
#endif
}
Example #10
/*===========================================================================*
 *				sef_cb_init_fresh			     *
 *===========================================================================*/
PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
{
/* Initialize the vm server. */
	int s, i;
	int click, clicksforgotten = 0;
	struct memory mem_chunks[NR_MEMS];
	struct boot_image image[NR_BOOT_PROCS];
	struct boot_image *ip;
	struct rprocpub rprocpub[NR_BOOT_PROCS];
	phys_bytes limit = 0;

#if SANITYCHECKS
	incheck = nocheck = 0;
	FIXME("VM SANITYCHECKS are on");
#endif

	vm_paged = 1;
	env_parse("vm_paged", "d", 0, &vm_paged, 0, 1);
#if SANITYCHECKS
	env_parse("vm_sanitychecklevel", "d", 0, &vm_sanitychecklevel, 0, SCL_MAX);
#endif

	/* Get chunks of available memory. */
	get_mem_chunks(mem_chunks);

	/* Initialize VM's process table. Request a copy of the system
	 * image table that is defined at the kernel level to see which
	 * slots to fill in.
	 */
	if (OK != (s=sys_getimage(image)))
		vm_panic("couldn't get image table: %d\n", s);

	/* Set table to 0. This invalidates all slots (clear VMF_INUSE). */
	memset(vmproc, 0, sizeof(vmproc));

	for(i = 0; i < ELEMENTS(vmproc); i++) {
		vmproc[i].vm_slot = i;
	}

	/* Walk through boot-time system processes that are alive
	 * now and make valid slot entries for them.
	 */
	for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
		phys_bytes proclimit;
		struct vmproc *vmp;

		if(ip->proc_nr >= _NR_PROCS) { vm_panic("proc", ip->proc_nr); }
		if(ip->proc_nr < 0 && ip->proc_nr != SYSTEM) continue;

#define GETVMP(v, nr)						\
		if(nr >= 0) {					\
			vmp = &vmproc[ip->proc_nr];		\
		} else if(nr == SYSTEM) {			\
			vmp = &vmproc[VMP_SYSTEM];		\
		} else {					\
			vm_panic("init: crazy proc_nr", nr);	\
		}

		/* Initialize normal process table slot or special SYSTEM
		 * table slot. Kernel memory is already reserved.
		 */
		GETVMP(vmp, ip->proc_nr);

		/* reset fields as if exited */
		clear_proc(vmp);

		/* Get memory map for this process from the kernel. */
		if ((s=get_mem_map(ip->proc_nr, vmp->vm_arch.vm_seg)) != OK)
			vm_panic("couldn't get process mem_map",s);

		/* Remove this memory from the free list. */
		reserve_proc_mem(mem_chunks, vmp->vm_arch.vm_seg);

		/* Set memory limit. */
		proclimit = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_phys +
			vmp->vm_arch.vm_seg[S].mem_len) - 1;

		if(proclimit > limit)
			limit = proclimit;

		vmp->vm_flags = VMF_INUSE;
		vmp->vm_endpoint = ip->endpoint;
		vmp->vm_stacktop =
			CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
				vmp->vm_arch.vm_seg[S].mem_len);

		if (vmp->vm_arch.vm_seg[T].mem_len != 0)
			vmp->vm_flags |= VMF_SEPARATE;
	}

	/* Architecture-dependent initialization. */
	pt_init(limit);

	/* Initialize tables to all physical memory. */
	mem_init(mem_chunks);
	meminit_done = 1;

	/* Give these processes their own page table. */
	for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
		int s;
		struct vmproc *vmp;
		vir_bytes old_stacktop, old_stack;

		if(ip->proc_nr < 0) continue;

		GETVMP(vmp, ip->proc_nr);

		if(!(ip->flags & PROC_FULLVM))
			continue;

		old_stack = 
			vmp->vm_arch.vm_seg[S].mem_vir +
			vmp->vm_arch.vm_seg[S].mem_len - 
			vmp->vm_arch.vm_seg[D].mem_len;

        	if(pt_new(&vmp->vm_pt) != OK)
			vm_panic("VM: no new pagetable", NO_NUM);
#define BASICSTACK VM_PAGE_SIZE
		old_stacktop = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
				vmp->vm_arch.vm_seg[S].mem_len);
		if(sys_vmctl(vmp->vm_endpoint, VMCTL_INCSP,
			VM_STACKTOP - old_stacktop) != OK) {
			vm_panic("VM: vmctl for new stack failed", NO_NUM);
		}

		FREE_MEM(vmp->vm_arch.vm_seg[D].mem_phys +
			vmp->vm_arch.vm_seg[D].mem_len,
			old_stack);

		if(proc_new(vmp,
			VM_PROCSTART,
			CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_len),
			CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_len),
			BASICSTACK,
			CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
				vmp->vm_arch.vm_seg[S].mem_len -
				vmp->vm_arch.vm_seg[D].mem_len) - BASICSTACK,
			CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
			CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys),
				VM_STACKTOP) != OK) {
			vm_panic("failed proc_new for boot process", NO_NUM);
		}
	}

	/* Set up table of calls. */
#define CALLMAP(code, func) { int i;			      \
	if((i=CALLNUMBER(code)) < 0) { vm_panic(#code " invalid", (code)); } \
	if(i >= NR_VM_CALLS) { vm_panic(#code " invalid", (code)); } \
	vm_calls[i].vmc_func = (func); 				      \
	vm_calls[i].vmc_name = #code; 				      \
}

	/* Set call table to 0. This invalidates all calls (clear
	 * vmc_func).
	 */
	memset(vm_calls, 0, sizeof(vm_calls));

	/* Basic VM calls. */
	CALLMAP(VM_MMAP, do_mmap);
	CALLMAP(VM_MUNMAP, do_munmap);
	CALLMAP(VM_MUNMAP_TEXT, do_munmap);
	CALLMAP(VM_MAP_PHYS, do_map_phys);
	CALLMAP(VM_UNMAP_PHYS, do_unmap_phys);

	/* Calls from PM. */
	CALLMAP(VM_EXIT, do_exit);
	CALLMAP(VM_FORK, do_fork);
	CALLMAP(VM_BRK, do_brk);
	CALLMAP(VM_EXEC_NEWMEM, do_exec_newmem);
	CALLMAP(VM_PUSH_SIG, do_push_sig);
	CALLMAP(VM_WILLEXIT, do_willexit);
	CALLMAP(VM_ADDDMA, do_adddma);
	CALLMAP(VM_DELDMA, do_deldma);
	CALLMAP(VM_GETDMA, do_getdma);
	CALLMAP(VM_NOTIFY_SIG, do_notify_sig);

	/* Calls from RS */
	CALLMAP(VM_RS_SET_PRIV, do_rs_set_priv);

	/* Generic calls. */
	CALLMAP(VM_REMAP, do_remap);
	CALLMAP(VM_GETPHYS, do_get_phys);
	CALLMAP(VM_SHM_UNMAP, do_shared_unmap);
	CALLMAP(VM_GETREF, do_get_refcount);
	CALLMAP(VM_INFO, do_info);
	CALLMAP(VM_QUERY_EXIT, do_query_exit);

	/* Sanity checks */
	if(find_kernel_top() >= VM_PROCSTART)
		vm_panic("kernel loaded too high", NO_NUM);

	/* Initialize the structures for queryexit */
	init_query_exit();

	/* Unmap our own low pages. */
	unmap_ok = 1;
	_minix_unmapzero();

	/* Map all the services in the boot image. */
	if((s = sys_safecopyfrom(RS_PROC_NR, info->rproctab_gid, 0,
		(vir_bytes) rprocpub, sizeof(rprocpub), S)) != OK) {
		panic("VM", "sys_safecopyfrom failed", s);
	}
	for(i=0;i < NR_BOOT_PROCS;i++) {
		if(rprocpub[i].in_use) {
			if((s = map_service(&rprocpub[i])) != OK) {
				vm_panic("unable to map service", s);
			}
		}
	}

	return(OK);
}
Example #11
/*===========================================================================*
 *				vm_allocpages		     		     *
 *===========================================================================*/
void *vm_allocpages(phys_bytes *phys, int reason, int pages)
{
    /* Allocate pages for use by VM itself. */
    phys_bytes newpage;
    vir_bytes loc;
    pt_t *pt;
    int r;
    static int level = 0;
    void *ret;
    u32_t mem_flags = 0;

    pt = &vmprocess->vm_pt;
    assert(reason >= 0 && reason < VMP_CATEGORIES);

    assert(pages > 0);

    level++;

    assert(level >= 1);
    assert(level <= 2);

    if((level > 1) || !pt_init_done) {
        void *s;

        if(pages == 1) s=vm_getsparepage(phys);
        else if(pages == 4) s=vm_getsparepagedir(phys);
        else panic("%d pages", pages);

        level--;
        if(!s) {
            util_stacktrace();
            printf("VM: warning: out of spare pages\n");
        }
        if(!is_staticaddr(s)) vm_self_pages++;
        return s;
    }

#if defined(__arm__)
    if (reason == VMP_PAGEDIR) {
        mem_flags |= PAF_ALIGN16K;
    }
#endif

    /* VM does have a pagetable, so get a page and map it in there.
     * Where in our virtual address space can we put it?
     */
    loc = findhole(pages);
    if(loc == NO_MEM) {
        level--;
        printf("VM: vm_allocpage: findhole failed\n");
        return NULL;
    }

    /* Allocate page of memory for use by VM. As VM
     * is trusted, we don't have to pre-clear it.
     */
    if((newpage = alloc_mem(pages, mem_flags)) == NO_MEM) {
        level--;
        printf("VM: vm_allocpage: alloc_mem failed\n");
        return NULL;
    }

    *phys = CLICK2ABS(newpage);

    /* Map this page into our address space. */
    if((r=pt_writemap(vmprocess, pt, loc, *phys, VM_PAGE_SIZE*pages,
                      ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW
#if defined(__arm__)
                      | ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE
#endif
                      , 0)) != OK) {
        free_mem(newpage, pages);
        printf("vm_allocpage writemap failed\n");
        level--;
        return NULL;
    }

    if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
        panic("VMCTL_FLUSHTLB failed: %d", r);
    }

    level--;

    /* Return user-space-ready pointer to it. */
    ret = (void *) loc;

    vm_self_pages++;
    return ret;
}
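
A sketch of the allocate/free pairing, assuming the same VM server context (the helper name is hypothetical): vm_allocpages() returns a mapped, page-aligned pointer and bumps vm_self_pages, and vm_freepages() from the other examples unmaps it again and releases the frames via WMF_FREE. The page count 4 matches ARCH_PAGEDIR_SIZE/VM_PAGE_SIZE on ARM; on i386 a page directory is a single page.

static void pagedir_roundtrip_sketch(void)    /* hypothetical helper */
{
    phys_bytes pd_phys;
    void *pd;

    if(!(pd = vm_allocpages(&pd_phys, VMP_PAGEDIR, 4)))
        panic("out of memory for page directory");

    /* ... use pd (virtual address) and pd_phys (physical address) ... */

    /* Only non-static pages may be freed; spare-page fallbacks are static. */
    vm_freepages((vir_bytes) pd, 4);
}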
Example #12
/*===========================================================================*
 *				pt_writemap		     		     *
 *===========================================================================*/
PUBLIC int pt_writemap(struct vmproc * vmp,
			pt_t *pt,
			vir_bytes v,
			phys_bytes physaddr,
			size_t bytes,
			u32_t flags,
			u32_t writemapflags)
{
/* Write mapping into page table. Allocate a new page table if necessary. */
/* Page directory and table entries for this virtual address. */
	int p, r, pages;
	int verify = 0;
	int ret = OK;

	/* FIXME
	 * don't do this every time; stop the process only on the first change
	 * and resume execution after the last change. Do this in a wrapper of
	 * this function.
	 */
	if (vmp && vmp->vm_endpoint != NONE && vmp->vm_endpoint != VM_PROC_NR &&
			!(vmp->vm_flags & VMF_EXITING))
		sys_vmctl(vmp->vm_endpoint, VMCTL_VMINHIBIT_SET, 0);

	if(writemapflags & WMF_VERIFY)
		verify = 1;

	assert(!(bytes % I386_PAGE_SIZE));
	assert(!(flags & ~(PTF_ALLFLAGS)));

	pages = bytes / I386_PAGE_SIZE;

	/* MAP_NONE means to clear the mapping. It doesn't matter
	 * what's actually written into the PTE if I386_VM_PRESENT
	 * isn't on, so we can just write MAP_NONE into it.
	 */
	assert(physaddr == MAP_NONE || (flags & I386_VM_PRESENT));
	assert(physaddr != MAP_NONE || !flags);

	/* First make sure all the necessary page tables are allocated,
	 * before we start writing in any of them, because it's a pain
	 * to undo our work properly.
	 */
	ret = pt_ptalloc_in_range(pt, v, v + I386_PAGE_SIZE*pages, flags, verify);
	if(ret != OK) {
		goto resume_exit;
	}

	/* Now write in them. */
	for(p = 0; p < pages; p++) {
		u32_t entry;
		int pde = I386_VM_PDE(v);
		int pte = I386_VM_PTE(v);

		assert(!(v % I386_PAGE_SIZE));
		assert(pte >= 0 && pte < I386_VM_PT_ENTRIES);
		assert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);

		/* Page table has to be there. */
		assert(pt->pt_dir[pde] & I386_VM_PRESENT);

		/* Make sure page directory entry for this page table
		 * is marked present and page table entry is available.
		 */
		assert((pt->pt_dir[pde] & I386_VM_PRESENT));
		assert(pt->pt_pt[pde]);

#if SANITYCHECKS
		/* We don't expect to overwrite a page. */
		if(!(writemapflags & (WMF_OVERWRITE|WMF_VERIFY)))
			assert(!(pt->pt_pt[pde][pte] & I386_VM_PRESENT));
#endif
		if(writemapflags & (WMF_WRITEFLAGSONLY|WMF_FREE)) {
			physaddr = pt->pt_pt[pde][pte] & I386_VM_ADDR_MASK;
		}

		if(writemapflags & WMF_FREE) {
			free_mem(ABS2CLICK(physaddr), 1);
		}

		/* Entry we will write. */
		entry = (physaddr & I386_VM_ADDR_MASK) | flags;

		if(verify) {
			u32_t maskedentry;
			maskedentry = pt->pt_pt[pde][pte];
			maskedentry &= ~(I386_VM_ACC|I386_VM_DIRTY);
			/* Verify pagetable entry. */
			if(entry & I386_VM_WRITE) {
				/* If we expect a writable page, allow a readonly page. */
				maskedentry |= I386_VM_WRITE;
			}
			if(maskedentry != entry) {
				printf("pt_writemap: mismatch: ");
				if((entry & I386_VM_ADDR_MASK) !=
					(maskedentry & I386_VM_ADDR_MASK)) {
					printf("pt_writemap: physaddr mismatch (0x%lx, 0x%lx); ", entry, maskedentry);
				} else printf("phys ok; ");
				printf(" flags: found %s; ",
					ptestr(pt->pt_pt[pde][pte]));
				printf(" masked %s; ",
					ptestr(maskedentry));
				printf(" expected %s\n", ptestr(entry));
				ret = EFAULT;
				goto resume_exit;
			}
		} else {
			/* Write pagetable entry. */
#if SANITYCHECKS
			assert(vm_addrok(pt->pt_pt[pde], 1));
#endif
			pt->pt_pt[pde][pte] = entry;
		}

		physaddr += I386_PAGE_SIZE;
		v += I386_PAGE_SIZE;
	}

resume_exit:

	if (vmp && vmp->vm_endpoint != NONE && vmp->vm_endpoint != VM_PROC_NR &&
			!(vmp->vm_flags & VMF_EXITING))
		sys_vmctl(vmp->vm_endpoint, VMCTL_VMINHIBIT_CLEAR, 0);

	return ret;
}
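
A short sketch of the MAP_NONE convention described in the comments above, under the same assumptions as this older i386 code (the helper is hypothetical, and v must already be a page-aligned address in the page table's linear space, as produced by arch_vir2map() in the other examples): clearing a mapping means writing MAP_NONE with zero flags, so the PRESENT bit is dropped and the rest of the PTE no longer matters.

static int unmap_one_page_sketch(struct vmproc *vmp, vir_bytes v)
{
	/* WMF_OVERWRITE allows replacing an existing, present entry. */
	return pt_writemap(vmp, &vmp->vm_pt, v, MAP_NONE,
		I386_PAGE_SIZE, 0, WMF_OVERWRITE);
}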
Example #13
/*===========================================================================*
 *				vm_allocpage		     		     *
 *===========================================================================*/
PUBLIC void *vm_allocpage(phys_bytes *phys, int reason)
{
/* Allocate a page for use by VM itself. */
	phys_bytes newpage;
	vir_bytes loc;
	pt_t *pt;
	int r;
	static int level = 0;
	void *ret;

	pt = &vmprocess->vm_pt;
	assert(reason >= 0 && reason < VMP_CATEGORIES);

	level++;

	assert(level >= 1);
	assert(level <= 2);

	if(level > 1 || !(vmprocess->vm_flags & VMF_HASPT) || !meminit_done) {
		int r;
		void *s;
		s=vm_getsparepage(phys);
		level--;
		if(!s) {
			util_stacktrace();
			printf("VM: warning: out of spare pages\n");
		}
		return s;
	}

	/* VM does have a pagetable, so get a page and map it in there.
	 * Where in our virtual address space can we put it?
	 */
	loc = findhole(pt,  arch_vir2map(vmprocess, vmprocess->vm_stacktop),
		vmprocess->vm_arch.vm_data_top);
	if(loc == NO_MEM) {
		level--;
		printf("VM: vm_allocpage: findhole failed\n");
		return NULL;
	}

	/* Allocate page of memory for use by VM. As VM
	 * is trusted, we don't have to pre-clear it.
	 */
	if((newpage = alloc_mem(CLICKSPERPAGE, 0)) == NO_MEM) {
		level--;
		printf("VM: vm_allocpage: alloc_mem failed\n");
		return NULL;
	}

	*phys = CLICK2ABS(newpage);

	/* Map this page into our address space. */
	if((r=pt_writemap(vmprocess, pt, loc, *phys, I386_PAGE_SIZE,
		I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE, 0)) != OK) {
		free_mem(newpage, CLICKSPERPAGE);
		printf("vm_allocpage writemap failed\n");
		level--;
		return NULL;
	}

	if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
		panic("VMCTL_FLUSHTLB failed: %d", r);
	}

	level--;

	/* Return user-space-ready pointer to it. */
	ret = (void *) arch_map2vir(vmprocess, loc);

	return ret;
}
Example #14
/*===========================================================================*
 *				vm_init					     *
 *===========================================================================*/
static void vm_init(void)
{
    int s, i;
    int click, clicksforgotten = 0;
    struct memory mem_chunks[NR_MEMS];
    struct boot_image image[NR_BOOT_PROCS];
    struct boot_image *ip;
    phys_bytes limit = 0;

    /* The initrd is placed right after the boot image */
    if ((s = sys_getbootparam(&bootparam)) != 0) {
        panic("VM","Couldn't get boot parameters!",s);
    }

    /* get what setup found out */
    memcpy(mem_chunks, bootparam.nucleos_kludge.mem, sizeof(bootparam.nucleos_kludge.mem));

    /* Get chunks of available memory. */
    get_mem_chunks(mem_chunks);

    /* Initialize VM's process table. Request a copy of the system
     * image table that is defined at the kernel level to see which
     * slots to fill in.
     */
    if ((s=sys_getimage(image)) != 0)
        vm_panic("couldn't get image table: %d\n", s);

    /* Set table to 0. This invalidates all slots (clear VMF_INUSE). */
    memset(vmproc, 0, sizeof(vmproc));

    for(i = 0; i < ELEMENTS(vmproc); i++) {
        vmproc[i].vm_slot = i;
    }

    /* Walk through boot-time system processes that are alive
     * now and make valid slot entries for them.
     */
    for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
        phys_bytes proclimit;
        struct vmproc *vmp;

        if(ip->proc_nr >= NR_PROCS) {
            vm_panic("proc", ip->proc_nr);
        }
        if(ip->proc_nr < 0 && ip->proc_nr != SYSTEM) continue;

#define GETVMP(v, nr)						\
		if(nr >= 0) {					\
			vmp = &vmproc[ip->proc_nr];		\
		} else if(nr == SYSTEM) {			\
			vmp = &vmproc[VMP_SYSTEM];		\
		} else {					\
			vm_panic("init: crazy proc_nr", nr);	\
		}

        /* Initialize normal process table slot or special SYSTEM
         * table slot. Kernel memory is already reserved.
         */
        GETVMP(vmp, ip->proc_nr);

        /* reset fields as if exited */
        clear_proc(vmp);

        /* Get memory map for this process from the kernel. */
        if ((s=get_mem_map(ip->proc_nr, vmp->vm_arch.vm_seg)) != 0)
            vm_panic("couldn't get process mem_map",s);

        /* Remove this memory from the free list. */
        reserve_proc_mem(mem_chunks, vmp->vm_arch.vm_seg);

        /* Set memory limit. */
        proclimit = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_phys +
                              vmp->vm_arch.vm_seg[S].mem_len) - 1;

        if(proclimit > limit)
            limit = proclimit;

        vmp->vm_flags = VMF_INUSE;
        vmp->vm_endpoint = ip->endpoint;
        vmp->vm_stacktop =
            CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
                      vmp->vm_arch.vm_seg[S].mem_len);
    }

#ifndef CONFIG_BUILTIN_INITRD
    /* Remove initrd memory from the free list. We must do this right after
     * reserving memory for the boot image, otherwise the initrd may be
     * overwritten by another process (in arch_init_vm).
     */
    if ((s = reserve_initrd_mem(mem_chunks, bootparam.hdr.ramdisk_image,
                                bootparam.hdr.ramdisk_size)) < 0) {
        panic("VM", "Couldn't reserve memory for initial ramdisk!", s);
    }
#endif
    /* Architecture-dependent initialization. */
    pt_init(limit);

    /* Initialize tables to all physical memory. */
    mem_init(mem_chunks);
    meminit_done = 1;

    /* Give these processes their own page table. */
    for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
        int s;
        struct vmproc *vmp;
        vir_bytes old_stacktop, old_stack;

        if(ip->proc_nr < 0) continue;

        GETVMP(vmp, ip->proc_nr);

        if(!(ip->flags & PROC_FULLVM))
            continue;

        old_stack =
            vmp->vm_arch.vm_seg[S].mem_vir +
            vmp->vm_arch.vm_seg[S].mem_len -
            vmp->vm_arch.vm_seg[D].mem_len;

        if(pt_new(&vmp->vm_pt) != 0)
            vm_panic("vm_init: no new pagetable", NO_NUM);
#define BASICSTACK VM_PAGE_SIZE
        old_stacktop = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
                                 vmp->vm_arch.vm_seg[S].mem_len);
        if(sys_vmctl(vmp->vm_endpoint, VMCTL_INCSP,
                     VM_STACKTOP - old_stacktop) != 0) {
            vm_panic("VM: vmctl for new stack failed", NO_NUM);
        }

        FREE_MEM(vmp->vm_arch.vm_seg[D].mem_phys +
                 vmp->vm_arch.vm_seg[D].mem_len,
                 old_stack);

        if(proc_new(vmp,
                    VM_PROCSTART,
                    CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_len),
                    CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_len),
                    BASICSTACK,
                    CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
                              vmp->vm_arch.vm_seg[S].mem_len -
                              vmp->vm_arch.vm_seg[D].mem_len) - BASICSTACK,
                    CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
                    CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys),
                    VM_STACKTOP) != 0) {
            vm_panic("failed proc_new for boot process", NO_NUM);
        }
    }

    /* Set up table of calls. */
#define CALLMAP(code, func, thecaller) { int i;			      \
	if((i=CALLNUMBER(code)) < 0) { vm_panic(#code " invalid", (code)); } \
	if(i >= VM_NCALLS) { vm_panic(#code " invalid", (code)); } \
	vm_calls[i].vmc_func = (func); 				      \
	vm_calls[i].vmc_name = #code; 				      \
	if(((thecaller) < MINEPM || (thecaller) > MAXEPM) 		\
		&& (thecaller) != ANYEPM				\
		&& (thecaller) != NEEDACL ) {				\
		vm_panic(#thecaller " invalid", (code));  		\
	}								\
	vm_calls[i].vmc_callers |= EPM(thecaller);		      \
}

    /* Set call table to 0. This invalidates all calls (clear
     * vmc_func).
     */
    memset(vm_calls, 0, sizeof(vm_calls));

    /* Requests from PM (restricted to be from PM only). */
    CALLMAP(VM_EXIT, do_exit, PM_PROC_NR);
    CALLMAP(VM_FORK, do_fork, PM_PROC_NR);
    CALLMAP(VM_BRK, do_brk, PM_PROC_NR);
    CALLMAP(VM_EXEC_NEWMEM, do_exec_newmem, PM_PROC_NR);
    CALLMAP(VM_PUSH_SIG, do_push_sig, PM_PROC_NR);
    CALLMAP(VM_WILLEXIT, do_willexit, PM_PROC_NR);
    CALLMAP(VM_ADDDMA, do_adddma, PM_PROC_NR);
    CALLMAP(VM_DELDMA, do_deldma, PM_PROC_NR);
    CALLMAP(VM_GETDMA, do_getdma, PM_PROC_NR);
    CALLMAP(VM_ALLOCMEM, do_allocmem, PM_PROC_NR);
    CALLMAP(VM_NOTIFY_SIG, do_notify_sig, PM_PROC_NR);

    /* Requests from RS */
    CALLMAP(VM_RS_SET_PRIV, do_rs_set_priv, RS_PROC_NR);

    /* Requests from userland (source unrestricted). */
    CALLMAP(VM_MMAP, do_mmap, ANYEPM);
    CALLMAP(VM_MUNMAP, do_munmap, ANYEPM);
    CALLMAP(VM_MUNMAP_TEXT, do_munmap, ANYEPM);
    CALLMAP(VM_MAP_PHYS, do_map_phys, ANYEPM); /* Does its own checking. */
    CALLMAP(VM_UNMAP_PHYS, do_unmap_phys, ANYEPM);

    CALLMAP(NNR_VM_MMAP, scall_mmap, ANYEPM);
    CALLMAP(NNR_VM_MUNMAP, scall_munmap, ANYEPM);
    CALLMAP(NNR_VM_MUNMAP_TEXT, scall_munmap, ANYEPM);

    /* Requests from userland (anyone can call but need an ACL bit). */
    CALLMAP(VM_REMAP, do_remap, NEEDACL);
    CALLMAP(VM_GETPHYS, do_get_phys, NEEDACL);
    CALLMAP(VM_SHM_UNMAP, do_shared_unmap, NEEDACL);
    CALLMAP(VM_GETREF, do_get_refcount, NEEDACL);
    CALLMAP(VM_CTL, do_ctl, NEEDACL);
    CALLMAP(VM_QUERY_EXIT, do_query_exit, NEEDACL);

    /* Sanity checks */
    if(find_kernel_top() >= VM_PROCSTART)
        vm_panic("kernel loaded too high", NO_NUM);

    /* Initialize the structures for queryexit */
    init_query_exit();

    /* Unmap our own low pages. */
    unmap_ok = 1;
    unmap_page_zero();
}
Example #15
/*===========================================================================*
 *                              pt_init                                      *
 *===========================================================================*/
PUBLIC void pt_init(phys_bytes usedlimit)
{
/* By default, the kernel gives us a data segment with pre-allocated
 * memory that then can't grow. We want to be able to allocate memory
 * dynamically, however. So here we copy the part of the page table
 * that's ours, so we get a private page table. Then we increase the
 * hardware segment size so we can allocate memory above our stack.
 */
        pt_t *newpt;
        int s, r;
        vir_bytes v;
        phys_bytes lo, hi; 
        vir_bytes extra_clicks;
        u32_t moveup = 0;
	int global_bit_ok = 0;
	int free_pde;
	int p;
	struct vm_ep_data ep_data;
	vir_bytes sparepages_mem;
	phys_bytes sparepages_ph;
	vir_bytes ptr;

        /* Shorthand. */
        newpt = &vmprocess->vm_pt;

        /* Get ourselves spare pages. */
        ptr = (vir_bytes) static_sparepages;
        ptr += I386_PAGE_SIZE - (ptr % I386_PAGE_SIZE);
        if(!(sparepages_mem = ptr))
		panic("pt_init: aalloc for spare failed");
        if((r=sys_umap(SELF, VM_D, (vir_bytes) sparepages_mem,
                I386_PAGE_SIZE*SPAREPAGES, &sparepages_ph)) != OK)
                panic("pt_init: sys_umap failed: %d", r);

        missing_spares = 0;
        assert(STATIC_SPAREPAGES < SPAREPAGES);
        for(s = 0; s < SPAREPAGES; s++) {
        	if(s >= STATIC_SPAREPAGES) {
        		sparepages[s].page = NULL;
        		missing_spares++;
        		continue;
        	}
        	sparepages[s].page = (void *) (sparepages_mem + s*I386_PAGE_SIZE);
        	sparepages[s].phys = sparepages_ph + s*I386_PAGE_SIZE;
        }

	/* global bit and 4MB pages available? */
	global_bit_ok = _cpufeature(_CPUF_I386_PGE);
	bigpage_ok = _cpufeature(_CPUF_I386_PSE);

	/* Set bit for PTE's and PDE's if available. */
	if(global_bit_ok)
		global_bit = I386_VM_GLOBAL;

	/* The kernel and boot time processes need an identity mapping.
	 * We use full PDE's for this without separate page tables.
	 * Figure out which pde we can start using for other purposes.
	 */
	id_map_high_pde = usedlimit / I386_BIG_PAGE_SIZE;

	/* We have to make mappings up till here. */
	free_pde = id_map_high_pde+1;

        /* Initial (current) range of our virtual address space. */
        lo = CLICK2ABS(vmprocess->vm_arch.vm_seg[T].mem_phys);
        hi = CLICK2ABS(vmprocess->vm_arch.vm_seg[S].mem_phys +
                vmprocess->vm_arch.vm_seg[S].mem_len);
                  
        assert(!(lo % I386_PAGE_SIZE)); 
        assert(!(hi % I386_PAGE_SIZE));
 
        if(lo < VM_PROCSTART) {
                moveup = VM_PROCSTART - lo;
                assert(!(VM_PROCSTART % I386_PAGE_SIZE));
                assert(!(lo % I386_PAGE_SIZE));
                assert(!(moveup % I386_PAGE_SIZE));
        }
        
        /* Make new page table for ourselves, partly copied
         * from the current one.
         */     
        if(pt_new(newpt) != OK)
                panic("pt_init: pt_new failed"); 

        /* Set up mappings for VM process. */
        for(v = lo; v < hi; v += I386_PAGE_SIZE)  {
                phys_bytes addr;
                u32_t flags; 
        
                /* We have to write the new position in the PT,
                 * so we can move our segments.
                 */ 
                if(pt_writemap(vmprocess, newpt, v+moveup, v, I386_PAGE_SIZE,
                        I386_VM_PRESENT|I386_VM_WRITE|I386_VM_USER, 0) != OK)
                        panic("pt_init: pt_writemap failed");
        }
       
        /* Move segments up too. */
        vmprocess->vm_arch.vm_seg[T].mem_phys += ABS2CLICK(moveup);
        vmprocess->vm_arch.vm_seg[D].mem_phys += ABS2CLICK(moveup);
        vmprocess->vm_arch.vm_seg[S].mem_phys += ABS2CLICK(moveup);
       
	/* Allocate us a page table in which to remember page directory
	 * pointers.
	 */
	if(!(page_directories = vm_allocpage(&page_directories_phys,
		VMP_PAGETABLE)))
                panic("no virt addr for vm mappings");

	memset(page_directories, 0, I386_PAGE_SIZE);
       
        /* Increase our hardware data segment to create virtual address
         * space above our stack. We want to increase it to VM_DATATOP,
         * like regular processes have.
         */
        extra_clicks = ABS2CLICK(VM_DATATOP - hi);
        vmprocess->vm_arch.vm_seg[S].mem_len += extra_clicks;
       
        /* We pretend to the kernel we have a huge stack segment to
         * increase our data segment.
         */
        vmprocess->vm_arch.vm_data_top =
                (vmprocess->vm_arch.vm_seg[S].mem_vir +
                vmprocess->vm_arch.vm_seg[S].mem_len) << CLICK_SHIFT;
       
        /* Where our free virtual address space starts.
         * This is only a hint to the VM system.
         */
        newpt->pt_virtop = 0;

        /* Let other functions know VM now has a private page table. */
        vmprocess->vm_flags |= VMF_HASPT;

	/* Now reserve another pde for kernel's own mappings. */
	{
		int kernmap_pde;
		phys_bytes addr, len;
		int flags, index = 0;
		u32_t offset = 0;

		kernmap_pde = free_pde++;
		offset = kernmap_pde * I386_BIG_PAGE_SIZE;

		while(sys_vmctl_get_mapping(index, &addr, &len,
			&flags) == OK)  {
			vir_bytes vir;
			if(index >= MAX_KERNMAPPINGS)
                		panic("VM: too many kernel mappings: %d", index);
			kern_mappings[index].phys_addr = addr;
			kern_mappings[index].len = len;
			kern_mappings[index].flags = flags;
			kern_mappings[index].lin_addr = offset;
			kern_mappings[index].flags =
				I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE |
				global_bit;
			if(flags & VMMF_UNCACHED)
				kern_mappings[index].flags |= PTF_NOCACHE;
			if(addr % I386_PAGE_SIZE)
                		panic("VM: addr unaligned: %d", addr);
			if(len % I386_PAGE_SIZE)
                		panic("VM: len unaligned: %d", len);
			vir = arch_map2vir(&vmproc[VMP_SYSTEM], offset);
			if(sys_vmctl_reply_mapping(index, vir) != OK)
                		panic("VM: reply failed");
			offset += len;
			index++;
			kernmappings++;
		}
	}

	/* Find a PDE below processes available for mapping in the
	 * page directories (readonly).
	 */
	pagedir_pde = free_pde++;
	pagedir_pde_val = (page_directories_phys & I386_VM_ADDR_MASK) |
			I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;

	/* Tell kernel about free pde's. */
	while(free_pde*I386_BIG_PAGE_SIZE < VM_PROCSTART) {
		if((r=sys_vmctl(SELF, VMCTL_I386_FREEPDE, free_pde++)) != OK) {
			panic("VMCTL_I386_FREEPDE failed: %d", r);
		}
	}

	/* first pde in use by process. */
	proc_pde = free_pde;

        /* Give our process the new, copied, private page table. */
	pt_mapkernel(newpt);	/* didn't know about vm_dir pages earlier */
        pt_bind(newpt, vmprocess);
       
	/* new segment limit for the kernel after paging is enabled */
	ep_data.data_seg_limit = free_pde*I386_BIG_PAGE_SIZE;
	/* the memory map which must be installed after paging is enabled */
	ep_data.mem_map = vmprocess->vm_arch.vm_seg;

	/* Now actually enable paging. */
	if(sys_vmctl_enable_paging(&ep_data) != OK)
        	panic("pt_init: enable paging failed");

        /* Back to reality - this is where the stack actually is. */
        vmprocess->vm_arch.vm_seg[S].mem_len -= extra_clicks;

        /* Pretend VM stack top is the same as any regular process, not to
         * have discrepancies with new VM instances later on.
         */
        vmprocess->vm_stacktop = VM_STACKTOP;

        /* All OK. */
        return;
}
Example #16
/*===========================================================================*
 *				pt_writemap		     		     *
 *===========================================================================*/
int pt_writemap(struct vmproc * vmp,
                pt_t *pt,
                vir_bytes v,
                phys_bytes physaddr,
                size_t bytes,
                u32_t flags,
                u32_t writemapflags)
{
    /* Write mapping into page table. Allocate a new page table if necessary. */
    /* Page directory and table entries for this virtual address. */
    int p, pages;
    int verify = 0;
    int ret = OK;

#ifdef CONFIG_SMP
    int vminhibit_clear = 0;
    /* FIXME
     * don't do this every time; stop the process only on the first change
     * and resume execution after the last change. Do this in a wrapper of
     * this function.
     */
    if (vmp && vmp->vm_endpoint != NONE && vmp->vm_endpoint != VM_PROC_NR &&
            !(vmp->vm_flags & VMF_EXITING)) {
        sys_vmctl(vmp->vm_endpoint, VMCTL_VMINHIBIT_SET, 0);
        vminhibit_clear = 1;
    }
#endif

    if(writemapflags & WMF_VERIFY)
        verify = 1;

    assert(!(bytes % VM_PAGE_SIZE));
    assert(!(flags & ~(PTF_ALLFLAGS)));

    pages = bytes / VM_PAGE_SIZE;

    /* MAP_NONE means to clear the mapping. It doesn't matter
     * what's actually written into the PTE if PRESENT
     * isn't on, so we can just write MAP_NONE into it.
     */
    assert(physaddr == MAP_NONE || (flags & ARCH_VM_PTE_PRESENT));
    assert(physaddr != MAP_NONE || !flags);

    /* First make sure all the necessary page tables are allocated,
     * before we start writing in any of them, because it's a pain
     * to undo our work properly.
     */
    ret = pt_ptalloc_in_range(pt, v, v + VM_PAGE_SIZE*pages, flags, verify);
    if(ret != OK) {
        printf("VM: writemap: pt_ptalloc_in_range failed\n");
        goto resume_exit;
    }

    /* Now write in them. */
    for(p = 0; p < pages; p++) {
        u32_t entry;
        int pde = ARCH_VM_PDE(v);
        int pte = ARCH_VM_PTE(v);

        assert(!(v % VM_PAGE_SIZE));
        assert(pte >= 0 && pte < ARCH_VM_PT_ENTRIES);
        assert(pde >= 0 && pde < ARCH_VM_DIR_ENTRIES);

        /* Page table has to be there. */
        assert(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT);

        /* We do not expect it to be a bigpage. */
        assert(!(pt->pt_dir[pde] & ARCH_VM_BIGPAGE));

        /* Make sure page directory entry for this page table
         * is marked present and page table entry is available.
         */
        assert(pt->pt_pt[pde]);

#if SANITYCHECKS
        /* We don't expect to overwrite a page. */
        if(!(writemapflags & (WMF_OVERWRITE|WMF_VERIFY)))
            assert(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT));
#endif
        if(writemapflags & (WMF_WRITEFLAGSONLY|WMF_FREE)) {
#if defined(__i386__)
            physaddr = pt->pt_pt[pde][pte] & ARCH_VM_ADDR_MASK;
#elif defined(__arm__)
            physaddr = pt->pt_pt[pde][pte] & ARM_VM_PTE_MASK;
#endif
        }

        if(writemapflags & WMF_FREE) {
            free_mem(ABS2CLICK(physaddr), 1);
        }

        /* Entry we will write. */
#if defined(__i386__)
        entry = (physaddr & ARCH_VM_ADDR_MASK) | flags;
#elif defined(__arm__)
        entry = (physaddr & ARM_VM_PTE_MASK) | flags;
#endif

        if(verify) {
            u32_t maskedentry;
            maskedentry = pt->pt_pt[pde][pte];
#if defined(__i386__)
            maskedentry &= ~(I386_VM_ACC|I386_VM_DIRTY);
#endif
            /* Verify pagetable entry. */
#if defined(__i386__)
            if(entry & ARCH_VM_PTE_RW) {
                /* If we expect a writable page, allow a readonly page. */
                maskedentry |= ARCH_VM_PTE_RW;
            }
#elif defined(__arm__)
            if(!(entry & ARCH_VM_PTE_RO)) {
                /* If we expect a writable page, allow a readonly page. */
                maskedentry &= ~ARCH_VM_PTE_RO;
            }
            maskedentry &= ~(ARM_VM_PTE_WB|ARM_VM_PTE_WT);
#endif
            if(maskedentry != entry) {
                printf("pt_writemap: mismatch: ");
#if defined(__i386__)
                if((entry & ARCH_VM_ADDR_MASK) !=
                        (maskedentry & ARCH_VM_ADDR_MASK)) {
#elif defined(__arm__)
                if((entry & ARM_VM_PTE_MASK) !=
                        (maskedentry & ARM_VM_PTE_MASK)) {
#endif
                    printf("pt_writemap: physaddr mismatch (0x%lx, 0x%lx); ",
                           (long)entry, (long)maskedentry);
                } else printf("phys ok; ");
                printf(" flags: found %s; ",
                       ptestr(pt->pt_pt[pde][pte]));
                printf(" masked %s; ",
                       ptestr(maskedentry));
                printf(" expected %s\n", ptestr(entry));
                printf("found 0x%x, wanted 0x%x\n",
                       pt->pt_pt[pde][pte], entry);
                ret = EFAULT;
                goto resume_exit;
            }
        } else {
            /* Write pagetable entry. */
            pt->pt_pt[pde][pte] = entry;
        }

        physaddr += VM_PAGE_SIZE;
        v += VM_PAGE_SIZE;
    }

resume_exit:

#ifdef CONFIG_SMP
    if (vminhibit_clear) {
        assert(vmp && vmp->vm_endpoint != NONE && vmp->vm_endpoint != VM_PROC_NR &&
               !(vmp->vm_flags & VMF_EXITING));
        sys_vmctl(vmp->vm_endpoint, VMCTL_VMINHIBIT_CLEAR, 0);
    }
#endif

    return ret;
}

/*===========================================================================*
 *				pt_checkrange		     		     *
 *===========================================================================*/
int pt_checkrange(pt_t *pt, vir_bytes v,  size_t bytes,
                  int write)
{
    int p, pages;

    assert(!(bytes % VM_PAGE_SIZE));

    pages = bytes / VM_PAGE_SIZE;

    for(p = 0; p < pages; p++) {
        int pde = ARCH_VM_PDE(v);
        int pte = ARCH_VM_PTE(v);

        assert(!(v % VM_PAGE_SIZE));
        assert(pte >= 0 && pte < ARCH_VM_PT_ENTRIES);
        assert(pde >= 0 && pde < ARCH_VM_DIR_ENTRIES);

        /* Page table has to be there. */
        if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT))
            return EFAULT;

        /* Make sure page directory entry for this page table
         * is marked present and page table entry is available.
         */
        assert((pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) && pt->pt_pt[pde]);

        if(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) {
            return EFAULT;
        }

#if defined(__i386__)
        if(write && !(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RW)) {
#elif defined(__arm__)
        if(write && (pt->pt_pt[pde][pte] & ARCH_VM_PTE_RO)) {
#endif
            return EFAULT;
        }

        v += VM_PAGE_SIZE;
    }

    return OK;
}
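
A hedged sketch of how pt_checkrange() might guard an arbitrary byte range (the helper, buffer, and length are hypothetical): since the function asserts page-aligned arguments, the range is first widened to whole pages, and OK/EFAULT is returned exactly as above.

static int can_write_range_sketch(struct vmproc *vmp, vir_bytes buf, size_t len)
{
    /* Widen [buf, buf+len) to whole pages, as pt_checkrange() expects. */
    vir_bytes start = buf - (buf % VM_PAGE_SIZE);
    size_t bytes = roundup(buf + len, VM_PAGE_SIZE) - start;

    return pt_checkrange(&vmp->vm_pt, start, bytes, 1 /* write access */);
}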

/*===========================================================================*
 *				pt_new			     		     *
 *===========================================================================*/
int pt_new(pt_t *pt)
{
    /* Allocate a pagetable root. Allocate a page-aligned page directory
     * and set them to 0 (indicating no page tables are allocated). Lookup
     * its physical address as we'll need that in the future. Verify it's
     * page-aligned.
     */
    int i, r;

    /* Don't ever re-allocate/re-move a certain process slot's
     * page directory once it's been created. This is a fraction
     * faster, but also avoids having to invalidate the page
     * mappings from in-kernel page tables pointing to
     * the page directories (the page_directories data).
     */
    if(!pt->pt_dir &&
            !(pt->pt_dir = vm_allocpages((phys_bytes *)&pt->pt_dir_phys,
                                         VMP_PAGEDIR, ARCH_PAGEDIR_SIZE/VM_PAGE_SIZE))) {
        return ENOMEM;
    }

    assert(!((u32_t)pt->pt_dir_phys % ARCH_PAGEDIR_SIZE));

    for(i = 0; i < ARCH_VM_DIR_ENTRIES; i++) {
        pt->pt_dir[i] = 0; /* invalid entry (PRESENT bit = 0) */
        pt->pt_pt[i] = NULL;
    }

    /* Where to start looking for free virtual address space? */
    pt->pt_virtop = 0;

    /* Map in kernel. */
    if((r=pt_mapkernel(pt)) != OK)
        return r;

    return OK;
}
Example #17
/*===========================================================================*
 *				findhole		     		     *
 *===========================================================================*/
static u32_t findhole(int pages)
{
    /* Find a space in the virtual address space of VM. */
    u32_t curv;
    int pde = 0, try_restart;
    static u32_t lastv = 0;
    pt_t *pt = &vmprocess->vm_pt;
    vir_bytes vmin, vmax;
#if defined(__arm__)
    u32_t holev;
#endif

    vmin = (vir_bytes) (&_end); /* marks end of VM BSS */
    vmin += 1024*1024*1024;	/* reserve 1GB virtual address space for VM heap */
    vmin &= ARCH_VM_ADDR_MASK;
    vmax = VM_STACKTOP;

    /* Input sanity check. */
    assert(vmin + VM_PAGE_SIZE >= vmin);
    assert(vmax >= vmin + VM_PAGE_SIZE);
    assert((vmin % VM_PAGE_SIZE) == 0);
    assert((vmax % VM_PAGE_SIZE) == 0);
#if defined(__arm__)
    assert(pages > 0);
#endif

#if SANITYCHECKS
    curv = ((u32_t) random()) % ((vmax - vmin)/VM_PAGE_SIZE);
    curv *= VM_PAGE_SIZE;
    curv += vmin;
#else
    curv = lastv;
    if(curv < vmin || curv >= vmax)
        curv = vmin;
#endif
    try_restart = 1;

    /* Start looking for a free page starting at vmin. */
    while(curv < vmax) {
        int pte;
#if defined(__arm__)
        int i, nohole;
#endif

        assert(curv >= vmin);
        assert(curv < vmax);

#if defined(__i386__)
        pde = I386_VM_PDE(curv);
        pte = I386_VM_PTE(curv);
#elif defined(__arm__)
        holev = curv; /* the candidate hole */
        nohole = 0;
        for (i = 0; i < pages && !nohole; ++i) {
            if(curv >= vmax) {
                break;
            }
#endif

#if defined(__i386__)
        if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) ||
                !(pt->pt_pt[pde][pte] & ARCH_VM_PAGE_PRESENT)) {
#elif defined(__arm__)
        pde = ARM_VM_PDE(curv);
        pte = ARM_VM_PTE(curv);

        /* if page present, no hole */
        if((pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) &&
                (pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT))
            nohole = 1;

        /* if not contiguous, no hole */
        if (curv != holev + i * VM_PAGE_SIZE)
            nohole = 1;

        curv+=VM_PAGE_SIZE;
    }

    /* there's a large enough hole */
    if (!nohole && i == pages) {
#endif
            lastv = curv;
#if defined(__i386__)
            return curv;
#elif defined(__arm__)
            return holev;
#endif
        }

#if defined(__i386__)
        curv+=VM_PAGE_SIZE;

#elif defined(__arm__)
        /* Reset curv */
#endif
        if(curv >= vmax && try_restart) {
            curv = vmin;
            try_restart = 0;
        }
    }

    printf("VM: out of virtual address space in vm\n");

    return NO_MEM;
}

/*===========================================================================*
 *				vm_freepages		     		     *
 *===========================================================================*/
void vm_freepages(vir_bytes vir, int pages)
{
    assert(!(vir % VM_PAGE_SIZE));

    if(is_staticaddr(vir)) {
        printf("VM: not freeing static page\n");
        return;
    }

    if(pt_writemap(vmprocess, &vmprocess->vm_pt, vir,
                   MAP_NONE, pages*VM_PAGE_SIZE, 0,
                   WMF_OVERWRITE | WMF_FREE) != OK)
        panic("vm_freepages: pt_writemap failed");

    vm_self_pages--;

#if SANITYCHECKS
    /* If SANITYCHECKS are on, flush the TLB so that accesses to freed
     * pages always trap and are never satisfied by a stale TLB entry.
     */
    if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
        panic("VMCTL_FLUSHTLB failed");
    }
#endif
}