Example #1
/*===========================================================================*
 *				vm_pagelock		     		     *
 *===========================================================================*/
PUBLIC void vm_pagelock(void *vir, int lockflag)
{
    /* Make a page allocated by vm_allocpage() read-only (lockflag set)
     * or writable again (lockflag clear); such pages are for VM only.
     */
    vir_bytes m;
    int r;
    u32_t flags = I386_VM_PRESENT | I386_VM_USER;
    pt_t *pt;

    pt = &vmp->vm_pt;
    m = arch_vir2map(vmp, (vir_bytes) vir);

    vm_assert(!(m % I386_PAGE_SIZE));

    if(!lockflag)
        flags |= I386_VM_WRITE;

    /* Update flags. */
    if((r=pt_writemap(pt, m, 0, I386_PAGE_SIZE,
                      flags, WMF_OVERWRITE | WMF_WRITEFLAGSONLY)) != OK) {
        vm_panic("vm_lockpage: pt_writemap failed\n", NO_NUM);
    }

    if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
        vm_panic("VMCTL_FLUSHTLB failed", r);
    }

    return;
}
Example #2
File: main.c Project: dev-zzo/mette
int main(int argc, char *argv[])
{
	int fd;
	vm_module_t *module;
	vm_context_t *context;

	argv++; argc--;

	if (argc == 0) {
		rtl_print_fd(2, "main: no file name provided.\n");
		return 1;
	}

	rtl_print_fd(2, "main: reading '%s'\n", argv[0]);
	
	fd = vm_open(argv[0], O_RDONLY, 0);
	if (fd < 0) {
		vm_panic("vm_load_exec: failed to open the image.");
	}
	
	module = vm_load_fd(fd);
	vm_close(fd);
	if (!module) {
		vm_panic("vm_load_exec: failed to load the image.");
	}

	context = vm_context_create(module);
	
	for (;;) {
		vm_step(context);
	}
	
	/* Not reached: the loop above never terminates. */
	rtl_print_fd(1, "\n\nDone.\n");
	return 0;
}
Example #3
/*===========================================================================*
 *				pt_new			     		     *
 *===========================================================================*/
PUBLIC int pt_new(pt_t *pt)
{
    /* Allocate a pagetable root. On i386, allocate a page-aligned page
     * directory and clear its entries to zero (indicating that no page
     * tables are allocated yet). Look up its physical address, as we'll
     * need it later, and verify that it's page-aligned.
     */
    int i;

    /* Don't ever re-allocate/re-move a certain process slot's
     * page directory once it's been created. This is a fraction
     * faster, but also avoids having to invalidate the page
     * mappings from in-kernel page tables pointing to
     * the page directories (the page_directories data).
     */
    if(!pt->pt_dir &&
            !(pt->pt_dir = vm_allocpage(&pt->pt_dir_phys, VMP_PAGEDIR))) {
        return ENOMEM;
    }

    for(i = 0; i < I386_VM_DIR_ENTRIES; i++) {
        pt->pt_dir[i] = 0; /* invalid entry (I386_VM_PRESENT bit = 0) */
        pt->pt_pt[i] = NULL;
    }

    /* Where to start looking for free virtual address space? */
    pt->pt_virtop = 0;

    /* Map in kernel. */
    if(pt_mapkernel(pt) != OK)
        vm_panic("pt_new: pt_mapkernel failed", NO_NUM);

    return OK;
}
Example #4
/*===========================================================================*
 *				pt_sanitycheck		     		     *
 *===========================================================================*/
PUBLIC void pt_sanitycheck(pt_t *pt, char *file, int line)
{
    /* Basic pt sanity check. */
    int i;
    int slot;

    MYASSERT(pt);
    MYASSERT(pt->pt_dir);
    MYASSERT(pt->pt_dir_phys);

    for(slot = 0; slot < ELEMENTS(vmproc); slot++) {
        if(pt == &vmproc[slot].vm_pt)
            break;
    }

    if(slot >= ELEMENTS(vmproc)) {
        vm_panic("pt_sanitycheck: passed pt not in any proc", NO_NUM);
    }

    MYASSERT(usedpages_add(pt->pt_dir_phys, I386_PAGE_SIZE) == OK);

    for(i = proc_pde; i < I386_VM_DIR_ENTRIES; i++) {
        if(pt->pt_pt[i]) {
            if(!(pt->pt_dir[i] & I386_VM_PRESENT)) {
                printf("slot %d: pt->pt_pt[%d] = 0x%lx, but pt_dir entry 0x%lx\n",
                       slot, i, pt->pt_pt[i], pt->pt_dir[i]);
            }
            MYASSERT(pt->pt_dir[i] & I386_VM_PRESENT);
            MYASSERT(usedpages_add(I386_VM_PFA(pt->pt_dir[i]),
                                   I386_PAGE_SIZE) == OK);
        } else {
            MYASSERT(!(pt->pt_dir[i] & I386_VM_PRESENT));
        }
    }
}
Example #5
/*===========================================================================*
 *				do_push_sig				     *
 *===========================================================================*/
PUBLIC int do_push_sig(message *msg)
{
	int r, n;
	endpoint_t ep;
	vir_bytes sp;
	struct vmproc *vmp;

	ep = msg->VMPS_ENDPOINT;

	if((r=vm_isokendpt(ep, &n)) != OK) {
		printf("VM: bogus endpoint %d from %d\n", ep, msg->m_source);
		return r;
	}
	vmp = &vmproc[n];

	if ((r=get_stack_ptr(ep, &sp)) != OK)
		vm_panic("couldn't get new stack pointer (for sig)", r);

	/* Save old SP for caller */
	msg->VMPS_OLD_SP = (char *) sp;

	/* Make room for the sigcontext and sigframe struct. */
	sp -= sizeof(struct sigcontext)
		+ 3 * sizeof(char *) + 2 * sizeof(int);

	if ((r=adjust(vmp, vmp->vm_arch.vm_seg[D].mem_len, sp)) != OK) {
		printf("VM: do_push_sig: adjust() failed: %d\n", r);
		return r;
	}

	return OK;
}
Example #6
/*===========================================================================*
 *				do_vfs_reply			     	*
 *===========================================================================*/
PUBLIC int do_vfs_reply(message *m)
{
/* A reply to an earlier request has been received from VFS. Handle it:
 * first verify and look up which process, identified by endpoint, this
 * is about; then call the callback function that was registered when
 * the request was made. Return the result to VFS.
 */
	endpoint_t ep;
	struct vmproc *vmp;
	int procno;
	callback_t cb;
	ep = m->VMV_ENDPOINT;
	if(vm_isokendpt(ep, &procno) != OK) {
		printf("VM:do_vfs_reply: reply %d about invalid endpoint %d\n",
			m->m_type, ep);
		vm_panic("do_vfs_reply: invalid endpoint from vfs", NO_NUM);
	}
	vmp = &vmproc[procno];
	if(!vmp->vm_callback) {
		printf("VM:do_vfs_reply: reply %d: endpoint %d not waiting\n",
			m->m_type, ep);
		vm_panic("do_vfs_reply: invalid endpoint from vfs", NO_NUM);
	}
	if(vmp->vm_callback_type != m->m_type) {
		printf("VM:do_vfs_reply: reply %d unexpected for endpoint %d\n"
		  " (expecting %d)\n", m->m_type, ep, vmp->vm_callback_type);
		vm_panic("do_vfs_reply: invalid reply from vfs", NO_NUM);
	}
	if(vmp->vm_flags & VMF_EXITING) {
		/* This is not fatal or impossible, but the callback
		 * function has to realize it shouldn't do any PM or
		 * VFS calls for this process.
		 */
		printf("VM:do_vfs_reply: reply %d for EXITING endpoint %d\n",
		  m->m_type, ep);
	}

	/* All desired callback state has been used, so save and reset
	 * the callback. This allows the callback to register another
	 * one.
	 */
	cb = vmp->vm_callback;
	vmp->vm_callback = NULL;
	cb(vmp, m);
	return SUSPEND;
}
Example #7
int scall_munmap(kipc_msg_t *m)
{
	int r, n;
	struct vmproc *vmp;
	vir_bytes addr, len;
	struct vir_region *vr;

	if((r = vm_isokendpt(m->m_source, &n)) != 0) {
		vm_panic("do_mmap: message from strange source", m->m_source);
	}

	vmp = &vmproc[n];

	if (!(vmp->vm_flags & VMF_HASPT))
		return -ENXIO;

	if (m->m_type == NNR_VM_MUNMAP) {
		addr = (vir_bytes) arch_vir2map(vmp, (vir_bytes) m->VMUM_ADDR);
	} else if(m->m_type == NNR_VM_MUNMAP_TEXT) {
		addr = (vir_bytes) arch_vir2map_text(vmp, (vir_bytes) m->VMUM_ADDR);
	} else {
		vm_panic("do_munmap: strange type", NO_NUM);
	}

	if (!(vr = map_lookup(vmp, addr))) {
		printk("VM: unmap: virtual address 0x%lx not found in %d\n",
			m->VMUM_ADDR, vmp->vm_endpoint);
		return -EFAULT;
	}

	len = m->VMUM_LEN;
	if (len % VM_PAGE_SIZE)
		len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);

	if (addr != vr->vaddr || len > vr->length || len < VM_PAGE_SIZE) {
		return -EFAULT;
	}

	if (map_unmap_region(vmp, vr, len) != 0)
		vm_panic("do_munmap: map_unmap_region failed", NO_NUM);

	return 0;
}
Example #8
/*===========================================================================*
 *				aalloc			     		     *
 *===========================================================================*/
PRIVATE void *aalloc(size_t bytes)
{
    /* Page-aligned malloc(). Only used when vm_allocpage() can't be used
     * yet. The returned pointer cannot be passed to free().
     */
    u32_t b;

    b = (u32_t) malloc(I386_PAGE_SIZE + bytes);
    if(!b) vm_panic("aalloc: out of memory", bytes);
    b += I386_PAGE_SIZE - (b % I386_PAGE_SIZE);

    return (void *) b;
}
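A note on the arithmetic: aalloc() over-allocates by one page and rounds the pointer up, so the adjustment advances b by 1..I386_PAGE_SIZE bytes and always leaves at least 'bytes' usable. A minimal standalone sketch of the same idiom, assuming a 4096-byte page and plain malloc():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u	/* assumed page size; the code above uses I386_PAGE_SIZE */

/* Over-allocate one page, then round the pointer up to the next page
 * boundary. The adjustment always advances b by 1..PAGE_SIZE bytes, so
 * at least 'bytes' usable bytes remain. The rounded pointer cannot be
 * handed back to free().
 */
static void *aalloc_sketch(size_t bytes)
{
	uintptr_t b = (uintptr_t) malloc(PAGE_SIZE + bytes);
	if (!b) return NULL;
	b += PAGE_SIZE - (b % PAGE_SIZE);
	return (void *) b;
}

int main(void)
{
	void *p = aalloc_sketch(123);
	printf("aligned: %d\n", p && ((uintptr_t) p % PAGE_SIZE) == 0);
	return 0;
}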
Example #9
/*===========================================================================*
 *				register_callback			     *
 *===========================================================================*/
PRIVATE void register_callback(struct vmproc *for_who, callback_t callback,
	int callback_type)
{
	if(for_who->vm_callback) {
		vm_panic("register_callback: callback already registered",
			for_who->vm_callback_type);
	}
	for_who->vm_callback = callback;
	for_who->vm_callback_type = callback_type;

	return;
}
Example #10
/*===========================================================================*
 *				do_mmap			     		     *
 *===========================================================================*/
int do_mmap(kipc_msg_t *m)
{
	int r, n;
	struct vmproc *vmp;
	int mfflags = 0;
	struct vir_region *vr = NULL;

	if((r=vm_isokendpt(m->m_source, &n)) != 0) {
		vm_panic("do_mmap: message from strange source", m->m_source);
	}

	vmp = &vmproc[n];

	if(!(vmp->vm_flags & VMF_HASPT))
		return -ENXIO;

	if(m->VMM_FD == -1 || (m->VMM_FLAGS & MAP_ANONYMOUS)) {
		int s;
		vir_bytes v;
		u32_t vrflags = VR_ANON | VR_WRITABLE;
		size_t len = (vir_bytes) m->VMM_LEN;

		if(m->VMM_FD != -1) {
			return -EINVAL;
		}

		if(m->VMM_FLAGS & MAP_CONTIG) mfflags |= MF_CONTIG;
		if(m->VMM_FLAGS & MAP_PREALLOC) mfflags |= MF_PREALLOC;
		if(m->VMM_FLAGS & MAP_LOWER16M) vrflags |= VR_LOWER16MB;
		if(m->VMM_FLAGS & MAP_LOWER1M)  vrflags |= VR_LOWER1MB;
		if(m->VMM_FLAGS & MAP_ALIGN64K) vrflags |= VR_PHYS64K;
		if(m->VMM_FLAGS & MAP_SHARED) vrflags |= VR_SHARED;

		if(len % VM_PAGE_SIZE)
			len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);

		if(!(vr = map_page_region(vmp,
			arch_vir2map(vmp,
				m->VMM_ADDR ? m->VMM_ADDR : vmp->vm_stacktop),
			VM_DATATOP, len, MAP_NONE, vrflags, mfflags))) {
			return -ENOMEM;
		}
	} else {
		return -ENOSYS;
	}

	/* Return mapping, as seen from process. */
	vm_assert(vr);
	m->VMM_RETADDR = arch_map2vir(vmp, vr->vaddr);

	return 0;
}
Example #11
/*===========================================================================*
 *				pt_mapkernel		     		     *
 *===========================================================================*/
PUBLIC int pt_mapkernel(pt_t *pt)
{
    int r, i;

    /* Any i386 page table needs to map in the kernel address space. */
    vm_assert(vmproc[VMP_SYSTEM].vm_flags & VMF_INUSE);

    if(bigpage_ok) {
        int pde;
        for(pde = 0; pde <= id_map_high_pde; pde++) {
            phys_bytes addr;
            addr = pde * I386_BIG_PAGE_SIZE;
            vm_assert((addr & I386_VM_ADDR_MASK) == addr);
            pt->pt_dir[pde] = addr | I386_VM_PRESENT |
                              I386_VM_BIGPAGE | I386_VM_USER |
                              I386_VM_WRITE | global_bit;
        }
    } else {
        vm_panic("VM: pt_mapkernel: no bigpage", NO_NUM);
    }

    if(pagedir_pde >= 0) {
        /* Kernel also wants to know about all page directories. */
        pt->pt_dir[pagedir_pde] = pagedir_pde_val;
    }

    for(i = 0; i < kernmappings; i++) {
        if(pt_writemap(pt,
                       kern_mappings[i].lin_addr,
                       kern_mappings[i].phys_addr,
                       kern_mappings[i].len,
                       kern_mappings[i].flags, 0) != OK) {
            vm_panic("pt_mapkernel: pt_writemap failed", NO_NUM);
        }
    }

    return OK;
}
Example #12
vm_closure* vm_new_closure(vm_context *ctx, vm_descriptor *desc)
{
    vm_closure* closure;
    size_t count = vm_arraybuffer_count(desc->upcopy, uint16_t);

    /* Closures that capture upvalues are not supported; bail out. */
    if (count > 0)
    {
        vm_panic(ctx);
    }

    closure = gc_new(ctx, sizeof(vm_closure) + sizeof(vm_upval*)*count, vm_t_closure, interface_stub);
    closure->desc  = desc;
    closure->count = count;
    return closure;
}
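gc_new() is asked for sizeof(vm_closure) + sizeof(vm_upval*)*count bytes: the closure header plus a trailing array of upvalue pointers in one block. A sketch of that variable-length layout using a C99 flexible array member and plain malloc(); the types here are stand-ins, not the project's definitions:

#include <stdio.h>
#include <stdlib.h>

typedef struct upval upval;	/* opaque stand-in for vm_upval */

typedef struct {
	size_t count;
	upval *slots[];		/* C99 flexible array member */
} closure;

/* Allocate the header plus a trailing array of 'count' upvalue slots as
 * one block, the same layout gc_new() is asked for above.
 */
static closure *closure_new(size_t count)
{
	closure *c = malloc(sizeof(closure) + count * sizeof(upval *));
	if (c) c->count = count;
	return c;
}

int main(void)
{
	closure *c = closure_new(3);
	printf("%zu slots\n", c ? c->count : (size_t) 0);
	free(c);
	return 0;
}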
Example #13
/*===========================================================================*
 *				vm_freepages		     		     *
 *===========================================================================*/
PRIVATE void vm_freepages(vir_bytes vir, vir_bytes phys, int pages, int reason)
{
    vm_assert(reason >= 0 && reason < VMP_CATEGORIES);
    if(vir >= vmp->vm_stacktop) {
        vm_assert(!(vir % I386_PAGE_SIZE));
        vm_assert(!(phys % I386_PAGE_SIZE));
        FREE_MEM(ABS2CLICK(phys), pages);
        if(pt_writemap(&vmp->vm_pt, arch_vir2map(vmp, vir),
                       MAP_NONE, pages*I386_PAGE_SIZE, 0, WMF_OVERWRITE) != OK)
            vm_panic("vm_freepages: pt_writemap failed",
                     NO_NUM);
    } else {
        printf("VM: vm_freepages not freeing VM heap pages (%d)\n",
               pages);
    }
}
Example #14
/*===========================================================================*
 *				vfs_close				     *
 *===========================================================================*/
PUBLIC int vfs_close(struct vmproc *for_who, callback_t callback, int fd)
{
	static message m;
	int r;

	register_callback(for_who, callback, VM_VFS_REPLY_CLOSE);

	m.m_type = VM_VFS_CLOSE;
	m.VMVC_ENDPOINT = for_who->vm_endpoint;
	m.VMVC_FD = fd;

	if((r=asynsend(VFS_PROC_NR, &m)) != OK) {
		vm_panic("vfs_close: asynsend failed", r);
	}

	return r;
}
Example #15
vm_context_t *vm_context_create(vm_module_t *module)
{
	vm_context_t *ctx;

	ctx = (vm_context_t *)vm_alloc(sizeof(*ctx));
	if (!ctx) {
		vm_panic("vm_context_create: failed to allocate context.");
	}

	ctx->dstack = vm_stack_create(65536);
	ctx->cstack = vm_stack_create(8192);	
	ctx->pc = module->entry;
	ctx->locals = NULL;
	ctx->module = module;

	return ctx;
}
Example #16
/*===========================================================================*
 *				vfs_open				     *
 *===========================================================================*/
PUBLIC int vfs_open(struct vmproc *for_who, callback_t callback,
	cp_grant_id_t filename_gid, int filename_len, int flags, int mode)
{
	static message m;
	int r;

	register_callback(for_who, callback, VM_VFS_REPLY_OPEN);

	m.m_type = VM_VFS_OPEN;
	m.VMVO_NAME_GRANT = filename_gid;
	m.VMVO_NAME_LENGTH = filename_len;
	m.VMVO_FLAGS = flags;
	m.VMVO_MODE = mode;
	m.VMVO_ENDPOINT = for_who->vm_endpoint;

	if((r=asynsend(VFS_PROC_NR, &m)) != OK) {
		vm_panic("vfs_open: asynsend failed", r);
	}

	return r;
}
Example #17
/*===========================================================================*
 *				do_shared_unmap		     		     *
 *===========================================================================*/
int do_shared_unmap(kipc_msg_t *m)
{
	int r, n;
	struct vmproc *vmp;
	endpoint_t target;
	struct vir_region *vr;
	vir_bytes addr;

	target = m->VMUN_ENDPT;

	if ((r = vm_isokendpt(target, &n)) != 0)
		return -EINVAL;

	vmp = &vmproc[n];

	addr = arch_vir2map(vmp, m->VMUN_ADDR);

	if(!(vr = map_lookup(vmp, addr))) {
		printk("VM: addr 0x%lx not found.\n", m->VMUN_ADDR);
		return -EFAULT;
	}

	if(vr->vaddr != addr) {
		printk("VM: wrong address for shared_unmap.\n");
		return -EFAULT;
	}

	if(!(vr->flags & VR_SHARED)) {
		printk("VM: address does not point to shared region.\n");
		return -EFAULT;
	}

	if(map_unmap_region(vmp, vr, vr->length) != 0)
		vm_panic("do_shared_unmap: map_unmap_region failed", NO_NUM);

	return 0;
}
Example #18
/*===========================================================================*
 *				vm_acl_ok				     *
 *===========================================================================*/
static int vm_acl_ok(endpoint_t caller, int call)
{
    int n, r;

    /* Some calls are always allowed by some, or all, processes. */
    if(EPMOK(vm_calls[call].vmc_callers, caller)) {
        return 0;
    }

    if ((r = vm_isokendpt(caller, &n)) != 0)
        vm_panic("VM: from strange source.", caller);

    /* Other calls need an ACL bit. */
    if (!(vm_calls[call].vmc_callers & EPM(NEEDACL))) {
        return -EPERM;
    }
    if (!GET_BIT(vmproc[n].vm_call_priv_mask, call)) {
        printk("VM: no ACL for %s for %d\n",
               vm_calls[call].vmc_name, caller);
        return -EPERM;
    }

    return 0;
}
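GET_BIT(vmproc[n].vm_call_priv_mask, call) tests a single bit in a per-process privilege bitmap. A self-contained sketch of such a bitmap; the macros are modeled on the usage above, not copied from MINIX's headers:

#include <stdio.h>

typedef unsigned int bitchunk_t;

#define BITCHUNK_BITS	  (sizeof(bitchunk_t) * 8)
#define BITMAP_CHUNKS(n)  (((n) + BITCHUNK_BITS - 1) / BITCHUNK_BITS)
#define MAP_CHUNK(map, b) (map)[(b) / BITCHUNK_BITS]
#define GET_BIT(map, b)	  (MAP_CHUNK(map, b) & (1u << ((b) % BITCHUNK_BITS)))
#define SET_BIT(map, b)	  (MAP_CHUNK(map, b) |= (1u << ((b) % BITCHUNK_BITS)))

int main(void)
{
	bitchunk_t priv_mask[BITMAP_CHUNKS(64)] = { 0 };

	SET_BIT(priv_mask, 37);	/* grant the ACL bit for call 37 */
	printf("call 37: %d, call 38: %d\n",
		!!GET_BIT(priv_mask, 37), !!GET_BIT(priv_mask, 38));
	return 0;
}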
Example #19
/*===========================================================================*
 *				sef_cb_init_fresh			     *
 *===========================================================================*/
PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
{
/* Initialize the vm server. */
	int s, i;
	int click, clicksforgotten = 0;
	struct memory mem_chunks[NR_MEMS];
	struct boot_image image[NR_BOOT_PROCS];
	struct boot_image *ip;
	struct rprocpub rprocpub[NR_BOOT_PROCS];
	phys_bytes limit = 0;

#if SANITYCHECKS
	incheck = nocheck = 0;
	FIXME("VM SANITYCHECKS are on");
#endif

	vm_paged = 1;
	env_parse("vm_paged", "d", 0, &vm_paged, 0, 1);
#if SANITYCHECKS
	env_parse("vm_sanitychecklevel", "d", 0, &vm_sanitychecklevel, 0, SCL_MAX);
#endif

	/* Get chunks of available memory. */
	get_mem_chunks(mem_chunks);

	/* Initialize VM's process table. Request a copy of the system
	 * image table that is defined at the kernel level to see which
	 * slots to fill in.
	 */
	if (OK != (s=sys_getimage(image)))
		vm_panic("couldn't get image table: %d\n", s);

	/* Set table to 0. This invalidates all slots (clear VMF_INUSE). */
	memset(vmproc, 0, sizeof(vmproc));

	for(i = 0; i < ELEMENTS(vmproc); i++) {
		vmproc[i].vm_slot = i;
	}

	/* Walk through boot-time system processes that are alive
	 * now and make valid slot entries for them.
	 */
	for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
		phys_bytes proclimit;
		struct vmproc *vmp;

		if(ip->proc_nr >= _NR_PROCS) { vm_panic("proc", ip->proc_nr); }
		if(ip->proc_nr < 0 && ip->proc_nr != SYSTEM) continue;

#define GETVMP(v, nr)						\
		if(nr >= 0) {					\
			vmp = &vmproc[ip->proc_nr];		\
		} else if(nr == SYSTEM) {			\
			vmp = &vmproc[VMP_SYSTEM];		\
		} else {					\
			vm_panic("init: crazy proc_nr", nr);	\
		}

		/* Initialize normal process table slot or special SYSTEM
		 * table slot. Kernel memory is already reserved.
		 */
		GETVMP(vmp, ip->proc_nr);

		/* reset fields as if exited */
		clear_proc(vmp);

		/* Get memory map for this process from the kernel. */
		if ((s=get_mem_map(ip->proc_nr, vmp->vm_arch.vm_seg)) != OK)
			vm_panic("couldn't get process mem_map",s);

		/* Remove this memory from the free list. */
		reserve_proc_mem(mem_chunks, vmp->vm_arch.vm_seg);

		/* Set memory limit. */
		proclimit = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_phys +
			vmp->vm_arch.vm_seg[S].mem_len) - 1;

		if(proclimit > limit)
			limit = proclimit;

		vmp->vm_flags = VMF_INUSE;
		vmp->vm_endpoint = ip->endpoint;
		vmp->vm_stacktop =
			CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
				vmp->vm_arch.vm_seg[S].mem_len);

		if (vmp->vm_arch.vm_seg[T].mem_len != 0)
			vmp->vm_flags |= VMF_SEPARATE;
	}

	/* Architecture-dependent initialization. */
	pt_init(limit);

	/* Initialize tables to all physical memory. */
	mem_init(mem_chunks);
	meminit_done = 1;

	/* Give these processes their own page table. */
	for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
		int s;
		struct vmproc *vmp;
		vir_bytes old_stacktop, old_stack;

		if(ip->proc_nr < 0) continue;

		GETVMP(vmp, ip->proc_nr);

		if(!(ip->flags & PROC_FULLVM))
			continue;

		old_stack =
			vmp->vm_arch.vm_seg[S].mem_vir +
			vmp->vm_arch.vm_seg[S].mem_len -
			vmp->vm_arch.vm_seg[D].mem_len;

		if(pt_new(&vmp->vm_pt) != OK)
			vm_panic("VM: no new pagetable", NO_NUM);
#define BASICSTACK VM_PAGE_SIZE
		old_stacktop = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
				vmp->vm_arch.vm_seg[S].mem_len);
		if(sys_vmctl(vmp->vm_endpoint, VMCTL_INCSP,
			VM_STACKTOP - old_stacktop) != OK) {
			vm_panic("VM: vmctl for new stack failed", NO_NUM);
		}

		FREE_MEM(vmp->vm_arch.vm_seg[D].mem_phys +
			vmp->vm_arch.vm_seg[D].mem_len,
			old_stack);

		if(proc_new(vmp,
			VM_PROCSTART,
			CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_len),
			CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_len),
			BASICSTACK,
			CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
				vmp->vm_arch.vm_seg[S].mem_len -
				vmp->vm_arch.vm_seg[D].mem_len) - BASICSTACK,
			CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
			CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys),
				VM_STACKTOP) != OK) {
			vm_panic("failed proc_new for boot process", NO_NUM);
		}
	}

	/* Set up table of calls. */
#define CALLMAP(code, func) { int i;			      \
	if((i=CALLNUMBER(code)) < 0) { vm_panic(#code " invalid", (code)); } \
	if(i >= NR_VM_CALLS) { vm_panic(#code " invalid", (code)); } \
	vm_calls[i].vmc_func = (func); 				      \
	vm_calls[i].vmc_name = #code; 				      \
}

	/* Set call table to 0. This invalidates all calls (clear
	 * vmc_func).
	 */
	memset(vm_calls, 0, sizeof(vm_calls));

	/* Basic VM calls. */
	CALLMAP(VM_MMAP, do_mmap);
	CALLMAP(VM_MUNMAP, do_munmap);
	CALLMAP(VM_MUNMAP_TEXT, do_munmap);
	CALLMAP(VM_MAP_PHYS, do_map_phys);
	CALLMAP(VM_UNMAP_PHYS, do_unmap_phys);

	/* Calls from PM. */
	CALLMAP(VM_EXIT, do_exit);
	CALLMAP(VM_FORK, do_fork);
	CALLMAP(VM_BRK, do_brk);
	CALLMAP(VM_EXEC_NEWMEM, do_exec_newmem);
	CALLMAP(VM_PUSH_SIG, do_push_sig);
	CALLMAP(VM_WILLEXIT, do_willexit);
	CALLMAP(VM_ADDDMA, do_adddma);
	CALLMAP(VM_DELDMA, do_deldma);
	CALLMAP(VM_GETDMA, do_getdma);
	CALLMAP(VM_NOTIFY_SIG, do_notify_sig);

	/* Calls from RS */
	CALLMAP(VM_RS_SET_PRIV, do_rs_set_priv);

	/* Generic calls. */
	CALLMAP(VM_REMAP, do_remap);
	CALLMAP(VM_GETPHYS, do_get_phys);
	CALLMAP(VM_SHM_UNMAP, do_shared_unmap);
	CALLMAP(VM_GETREF, do_get_refcount);
	CALLMAP(VM_INFO, do_info);
	CALLMAP(VM_QUERY_EXIT, do_query_exit);

	/* Sanity checks */
	if(find_kernel_top() >= VM_PROCSTART)
		vm_panic("kernel loaded too high", NO_NUM);

	/* Initialize the structures for queryexit */
	init_query_exit();

	/* Unmap our own low pages. */
	unmap_ok = 1;
	_minix_unmapzero();

	/* Map all the services in the boot image. */
	if((s = sys_safecopyfrom(RS_PROC_NR, info->rproctab_gid, 0,
		(vir_bytes) rprocpub, sizeof(rprocpub), S)) != OK) {
		panic("VM", "sys_safecopyfrom failed", s);
	}
	for(i = 0; i < NR_BOOT_PROCS; i++) {
		if(rprocpub[i].in_use) {
			if((s = map_service(&rprocpub[i])) != OK) {
				vm_panic("unable to map service", s);
			}
		}
	}

	return(OK);
}
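The CALLMAP macro above converts a call code to a table index, range-checks it, and records both the handler and the stringified call name (#code) for diagnostics. A reduced, compilable sketch of that table-driven dispatch, with made-up call numbers and stub handlers:

#include <stdio.h>
#include <string.h>

#define VM_RQ_BASE	0x1000		/* illustrative base value */
#define VM_MMAP		(VM_RQ_BASE + 1)
#define VM_MUNMAP	(VM_RQ_BASE + 2)
#define NR_VM_CALLS	64
#define CALLNUMBER(c)	((c) - VM_RQ_BASE)

struct {
	int (*vmc_func)(void *msg);
	const char *vmc_name;
} vm_calls[NR_VM_CALLS];

static int do_mmap(void *msg)   { (void) msg; return 0; }
static int do_munmap(void *msg) { (void) msg; return 0; }

/* Map a call code to its handler and keep the stringified name for
 * diagnostics, as the VM server's CALLMAP does.
 */
#define CALLMAP(code, func) do {				\
	int i = CALLNUMBER(code);				\
	if (i < 0 || i >= NR_VM_CALLS) { return 1; }		\
	vm_calls[i].vmc_func = (func);				\
	vm_calls[i].vmc_name = #code;				\
} while (0)

int main(void)
{
	memset(vm_calls, 0, sizeof(vm_calls));
	CALLMAP(VM_MMAP, do_mmap);
	CALLMAP(VM_MUNMAP, do_munmap);
	printf("%s -> %d\n", vm_calls[CALLNUMBER(VM_MMAP)].vmc_name,
		vm_calls[CALLNUMBER(VM_MMAP)].vmc_func(NULL));
	return 0;
}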
Example #20
/*===========================================================================*
 *				vm_init					     *
 *===========================================================================*/
static void vm_init(void)
{
    int s, i;
    int click, clicksforgotten = 0;
    struct memory mem_chunks[NR_MEMS];
    struct boot_image image[NR_BOOT_PROCS];
    struct boot_image *ip;
    phys_bytes limit = 0;

    /* The initrd is put right after boot image */
    if ((s = sys_getbootparam(&bootparam)) != 0) {
        panic("VM","Couldn't get boot parameters!",s);
    }

    /* get what setup found out */
    memcpy(mem_chunks, bootparam.nucleos_kludge.mem, sizeof(bootparam.nucleos_kludge.mem));

    /* Get chunks of available memory. */
    get_mem_chunks(mem_chunks);

    /* Initialize VM's process table. Request a copy of the system
     * image table that is defined at the kernel level to see which
     * slots to fill in.
     */
    if ((s=sys_getimage(image)) != 0)
        vm_panic("couldn't get image table: %d\n", s);

    /* Set table to 0. This invalidates all slots (clear VMF_INUSE). */
    memset(vmproc, 0, sizeof(vmproc));

    for(i = 0; i < ELEMENTS(vmproc); i++) {
        vmproc[i].vm_slot = i;
    }

    /* Walk through boot-time system processes that are alive
     * now and make valid slot entries for them.
     */
    for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
        phys_bytes proclimit;
        struct vmproc *vmp;

        if(ip->proc_nr >= NR_PROCS) {
            vm_panic("proc", ip->proc_nr);
        }
        if(ip->proc_nr < 0 && ip->proc_nr != SYSTEM) continue;

#define GETVMP(v, nr)						\
		if(nr >= 0) {					\
			vmp = &vmproc[ip->proc_nr];		\
		} else if(nr == SYSTEM) {			\
			vmp = &vmproc[VMP_SYSTEM];		\
		} else {					\
			vm_panic("init: crazy proc_nr", nr);	\
		}

        /* Initialize normal process table slot or special SYSTEM
         * table slot. Kernel memory is already reserved.
         */
        GETVMP(vmp, ip->proc_nr);

        /* reset fields as if exited */
        clear_proc(vmp);

        /* Get memory map for this process from the kernel. */
        if ((s=get_mem_map(ip->proc_nr, vmp->vm_arch.vm_seg)) != 0)
            vm_panic("couldn't get process mem_map",s);

        /* Remove this memory from the free list. */
        reserve_proc_mem(mem_chunks, vmp->vm_arch.vm_seg);

        /* Set memory limit. */
        proclimit = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_phys +
                              vmp->vm_arch.vm_seg[S].mem_len) - 1;

        if(proclimit > limit)
            limit = proclimit;

        vmp->vm_flags = VMF_INUSE;
        vmp->vm_endpoint = ip->endpoint;
        vmp->vm_stacktop =
            CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
                      vmp->vm_arch.vm_seg[S].mem_len);
    }

#ifndef CONFIG_BUILTIN_INITRD
    /* Remove initrd memory from the free list. We must do it right after we
       have reserved memory for boot image otherwise it may happen that initrd
       will be overwritten by other process (in arch_init_vm).
     */
    if ((s = reserve_initrd_mem(mem_chunks, bootparam.hdr.ramdisk_image,
                                bootparam.hdr.ramdisk_size)) < 0) {
        panic("VM", "Couldn't reserve memory for initial ramdisk!", s);
    }
#endif
    /* Architecture-dependent initialization. */
    pt_init(limit);

    /* Initialize tables to all physical memory. */
    mem_init(mem_chunks);
    meminit_done = 1;

    /* Give these processes their own page table. */
    for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
        int s;
        struct vmproc *vmp;
        vir_bytes old_stacktop, old_stack;

        if(ip->proc_nr < 0) continue;

        GETVMP(vmp, ip->proc_nr);

        if(!(ip->flags & PROC_FULLVM))
            continue;

        old_stack =
            vmp->vm_arch.vm_seg[S].mem_vir +
            vmp->vm_arch.vm_seg[S].mem_len -
            vmp->vm_arch.vm_seg[D].mem_len;

        if(pt_new(&vmp->vm_pt) != 0)
            vm_panic("vm_init: no new pagetable", NO_NUM);
#define BASICSTACK VM_PAGE_SIZE
        old_stacktop = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
                                 vmp->vm_arch.vm_seg[S].mem_len);
        if(sys_vmctl(vmp->vm_endpoint, VMCTL_INCSP,
                     VM_STACKTOP - old_stacktop) != 0) {
            vm_panic("VM: vmctl for new stack failed", NO_NUM);
        }

        FREE_MEM(vmp->vm_arch.vm_seg[D].mem_phys +
                 vmp->vm_arch.vm_seg[D].mem_len,
                 old_stack);

        if(proc_new(vmp,
                    VM_PROCSTART,
                    CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_len),
                    CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_len),
                    BASICSTACK,
                    CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
                              vmp->vm_arch.vm_seg[S].mem_len -
                              vmp->vm_arch.vm_seg[D].mem_len) - BASICSTACK,
                    CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
                    CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys),
                    VM_STACKTOP) != 0) {
            vm_panic("failed proc_new for boot process", NO_NUM);
        }
    }

    /* Set up table of calls. */
#define CALLMAP(code, func, thecaller) { int i;			      \
	if((i=CALLNUMBER(code)) < 0) { vm_panic(#code " invalid", (code)); } \
	if(i >= VM_NCALLS) { vm_panic(#code " invalid", (code)); } \
	vm_calls[i].vmc_func = (func); 				      \
	vm_calls[i].vmc_name = #code; 				      \
	if(((thecaller) < MINEPM || (thecaller) > MAXEPM) 		\
		&& (thecaller) != ANYEPM				\
		&& (thecaller) != NEEDACL ) {				\
		vm_panic(#thecaller " invalid", (code));  		\
	}								\
	vm_calls[i].vmc_callers |= EPM(thecaller);		      \
}

    /* Set call table to 0. This invalidates all calls (clear
     * vmc_func).
     */
    memset(vm_calls, 0, sizeof(vm_calls));

    /* Requests from PM (restricted to be from PM only). */
    CALLMAP(VM_EXIT, do_exit, PM_PROC_NR);
    CALLMAP(VM_FORK, do_fork, PM_PROC_NR);
    CALLMAP(VM_BRK, do_brk, PM_PROC_NR);
    CALLMAP(VM_EXEC_NEWMEM, do_exec_newmem, PM_PROC_NR);
    CALLMAP(VM_PUSH_SIG, do_push_sig, PM_PROC_NR);
    CALLMAP(VM_WILLEXIT, do_willexit, PM_PROC_NR);
    CALLMAP(VM_ADDDMA, do_adddma, PM_PROC_NR);
    CALLMAP(VM_DELDMA, do_deldma, PM_PROC_NR);
    CALLMAP(VM_GETDMA, do_getdma, PM_PROC_NR);
    CALLMAP(VM_ALLOCMEM, do_allocmem, PM_PROC_NR);
    CALLMAP(VM_NOTIFY_SIG, do_notify_sig, PM_PROC_NR);

    /* Requests from RS */
    CALLMAP(VM_RS_SET_PRIV, do_rs_set_priv, RS_PROC_NR);

    /* Requests from userland (source unrestricted). */
    CALLMAP(VM_MMAP, do_mmap, ANYEPM);
    CALLMAP(VM_MUNMAP, do_munmap, ANYEPM);
    CALLMAP(VM_MUNMAP_TEXT, do_munmap, ANYEPM);
    CALLMAP(VM_MAP_PHYS, do_map_phys, ANYEPM); /* Does its own checking. */
    CALLMAP(VM_UNMAP_PHYS, do_unmap_phys, ANYEPM);

    CALLMAP(NNR_VM_MMAP, scall_mmap, ANYEPM);
    CALLMAP(NNR_VM_MUNMAP, scall_munmap, ANYEPM);
    CALLMAP(NNR_VM_MUNMAP_TEXT, scall_munmap, ANYEPM);

    /* Requests from userland (anyone can call but need an ACL bit). */
    CALLMAP(VM_REMAP, do_remap, NEEDACL);
    CALLMAP(VM_GETPHYS, do_get_phys, NEEDACL);
    CALLMAP(VM_SHM_UNMAP, do_shared_unmap, NEEDACL);
    CALLMAP(VM_GETREF, do_get_refcount, NEEDACL);
    CALLMAP(VM_CTL, do_ctl, NEEDACL);
    CALLMAP(VM_QUERY_EXIT, do_query_exit, NEEDACL);

    /* Sanity checks */
    if(find_kernel_top() >= VM_PROCSTART)
        vm_panic("kernel loaded too high", NO_NUM);

    /* Initialize the structures for queryexit */
    init_query_exit();

    /* Unmap our own low pages. */
    unmap_ok = 1;
    unmap_page_zero();
}
Example #21
/*===========================================================================*
 *                              pt_init                                      *
 *===========================================================================*/
PUBLIC void pt_init(phys_bytes usedlimit)
{
    /* By default, the kernel gives us a data segment with pre-allocated
     * memory that then can't grow. We want to be able to allocate memory
     * dynamically, however. So here we copy the part of the page table
     * that's ours, so we get a private page table. Then we increase the
     * hardware segment size so we can allocate memory above our stack.
     */
    pt_t *newpt;
    int s, r;
    vir_bytes v, kpagedir;
    phys_bytes lo, hi;
    vir_bytes extra_clicks;
    u32_t moveup = 0;
    int global_bit_ok = 0;
    int free_pde;
    int p;
    vir_bytes kernlimit;
    vir_bytes sparepages_mem;
    phys_bytes sparepages_ph;

    /* Shorthand. */
    newpt = &vmp->vm_pt;

    /* Get ourselves spare pages. */
    if(!(sparepages_mem = (vir_bytes) aalloc(I386_PAGE_SIZE*SPAREPAGES)))
        vm_panic("pt_init: aalloc for spare failed", NO_NUM);
    if((r=sys_umap(SELF, VM_D, (vir_bytes) sparepages_mem,
                   I386_PAGE_SIZE*SPAREPAGES, &sparepages_ph)) != OK)
        vm_panic("pt_init: sys_umap failed", r);

    for(s = 0; s < SPAREPAGES; s++) {
        sparepages[s].page = (void *) (sparepages_mem + s*I386_PAGE_SIZE);
        sparepages[s].phys = sparepages_ph + s*I386_PAGE_SIZE;
    }

    missing_spares = 0;

    /* global bit and 4MB pages available? */
    global_bit_ok = _cpufeature(_CPUF_I386_PGE);
    bigpage_ok = _cpufeature(_CPUF_I386_PSE);

    /* Set bit for PTE's and PDE's if available. */
    if(global_bit_ok)
        global_bit = I386_VM_GLOBAL;

    /* The kernel and boot time processes need an identity mapping.
     * We use full PDE's for this without separate page tables.
     * Figure out which pde we can start using for other purposes.
     */
    id_map_high_pde = usedlimit / I386_BIG_PAGE_SIZE;

    /* We have to make mappings up till here. */
    free_pde = id_map_high_pde+1;

    /* Initial (current) range of our virtual address space. */
    lo = CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys);
    hi = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_phys +
                   vmp->vm_arch.vm_seg[S].mem_len);

    vm_assert(!(lo % I386_PAGE_SIZE));
    vm_assert(!(hi % I386_PAGE_SIZE));

    if(lo < VM_PROCSTART) {
        moveup = VM_PROCSTART - lo;
        vm_assert(!(VM_PROCSTART % I386_PAGE_SIZE));
        vm_assert(!(lo % I386_PAGE_SIZE));
        vm_assert(!(moveup % I386_PAGE_SIZE));
    }

    /* Make new page table for ourselves, partly copied
     * from the current one.
     */
    if(pt_new(newpt) != OK)
        vm_panic("pt_init: pt_new failed", NO_NUM);

    /* Set up mappings for VM process. */
    for(v = lo; v < hi; v += I386_PAGE_SIZE)  {
        phys_bytes addr;
        u32_t flags;

        /* We have to write the new position in the PT,
         * so we can move our segments.
         */
        if(pt_writemap(newpt, v+moveup, v, I386_PAGE_SIZE,
                       I386_VM_PRESENT|I386_VM_WRITE|I386_VM_USER, 0) != OK)
            vm_panic("pt_init: pt_writemap failed", NO_NUM);
    }

    /* Move segments up too. */
    vmp->vm_arch.vm_seg[T].mem_phys += ABS2CLICK(moveup);
    vmp->vm_arch.vm_seg[D].mem_phys += ABS2CLICK(moveup);
    vmp->vm_arch.vm_seg[S].mem_phys += ABS2CLICK(moveup);

    /* Allocate us a page table in which to remember page directory
     * pointers.
     */
    if(!(page_directories = vm_allocpage(&page_directories_phys,
                                         VMP_PAGETABLE)))
        vm_panic("no virt addr for vm mappings", NO_NUM);

    memset(page_directories, 0, I386_PAGE_SIZE);

    /* Increase our hardware data segment to create virtual address
     * space above our stack. We want to increase it to VM_DATATOP,
     * like regular processes have.
     */
    extra_clicks = ABS2CLICK(VM_DATATOP - hi);
    vmp->vm_arch.vm_seg[S].mem_len += extra_clicks;

    /* We pretend to the kernel we have a huge stack segment to
     * increase our data segment.
     */
    vmp->vm_arch.vm_data_top =
        (vmp->vm_arch.vm_seg[S].mem_vir +
         vmp->vm_arch.vm_seg[S].mem_len) << CLICK_SHIFT;

    /* Where our free virtual address space starts.
     * This is only a hint to the VM system.
     */
    newpt->pt_virtop = 0;

    /* Let other functions know VM now has a private page table. */
    vmp->vm_flags |= VMF_HASPT;

    /* Now reserve another pde for kernel's own mappings. */
    {
        int kernmap_pde;
        phys_bytes addr, len;
        int flags, index = 0;
        u32_t offset = 0;

        kernmap_pde = free_pde++;
        offset = kernmap_pde * I386_BIG_PAGE_SIZE;

        while(sys_vmctl_get_mapping(index, &addr, &len,
                                    &flags) == OK)  {
            vir_bytes vir;
            if(index >= MAX_KERNMAPPINGS)
                vm_panic("VM: too many kernel mappings", index);
            kern_mappings[index].phys_addr = addr;
            kern_mappings[index].len = len;
            kern_mappings[index].flags = flags;
            kern_mappings[index].lin_addr = offset;
            kern_mappings[index].flags =
                I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE |
                global_bit;
            if(flags & VMMF_UNCACHED)
                kern_mappings[index].flags |=
                    I386_VM_PWT | I386_VM_PCD;
            if(addr % I386_PAGE_SIZE)
                vm_panic("VM: addr unaligned", addr);
            if(len % I386_PAGE_SIZE)
                vm_panic("VM: len unaligned", len);
            vir = arch_map2vir(&vmproc[VMP_SYSTEM], offset);
            if(sys_vmctl_reply_mapping(index, vir) != OK)
                vm_panic("VM: reply failed", NO_NUM);
            offset += len;
            index++;
            kernmappings++;
        }
    }

    /* Find a PDE below processes available for mapping in the
     * page directories (readonly).
     */
    pagedir_pde = free_pde++;
    pagedir_pde_val = (page_directories_phys & I386_VM_ADDR_MASK) |
                      I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;

    /* Tell kernel about free pde's. */
    while(free_pde*I386_BIG_PAGE_SIZE < VM_PROCSTART) {
        if((r=sys_vmctl(SELF, VMCTL_I386_FREEPDE, free_pde++)) != OK) {
            vm_panic("VMCTL_I386_FREEPDE failed", r);
        }
    }

    /* first pde in use by process. */
    proc_pde = free_pde;

    kernlimit = free_pde*I386_BIG_PAGE_SIZE;

    /* Increase kernel segment to address this memory. */
    if((r=sys_vmctl(SELF, VMCTL_I386_KERNELLIMIT, kernlimit)) != OK) {
        vm_panic("VMCTL_I386_KERNELLIMIT failed", r);
    }

    kpagedir = arch_map2vir(&vmproc[VMP_SYSTEM],
                            pagedir_pde*I386_BIG_PAGE_SIZE);

    /* Tell kernel how to get at the page directories. */
    if((r=sys_vmctl(SELF, VMCTL_I386_PAGEDIRS, kpagedir)) != OK) {
        vm_panic("VMCTL_I386_KERNELLIMIT failed", r);
    }

    /* Give our process the new, copied, private page table. */
    pt_mapkernel(newpt);	/* didn't know about vm_dir pages earlier */
    pt_bind(newpt, vmp);

    /* Now actually enable paging. */
    if(sys_vmctl_enable_paging(vmp->vm_arch.vm_seg) != OK)
        vm_panic("pt_init: enable paging failed", NO_NUM);

    /* Back to reality - this is where the stack actually is. */
    vmp->vm_arch.vm_seg[S].mem_len -= extra_clicks;

    /* All OK. */
    return;
}
Example #22
/*===========================================================================*
 *				main					     *
 *===========================================================================*/
int main(void)
{
    kipc_msg_t msg;
    int result, who_e;

#if SANITYCHECKS
    incheck = nocheck = 0;
    FIXME("VM SANITYCHECKS are on");
#endif

    vm_paged = 1;
    env_parse("vm_paged", "d", 0, &vm_paged, 0, 1);
#if SANITYCHECKS
    env_parse("vm_sanitychecklevel", "d", 0, &vm_sanitychecklevel, 0, SCL_MAX);
#endif

    vm_init();

    /* This is VM's main loop. */
    while (TRUE) {
        int r, c;

        SANITYCHECK(SCL_TOP);
        if(missing_spares > 0) {
            pt_cycle();	/* pagetable code wants to be called */
        }
        SANITYCHECK(SCL_DETAIL);

        if ((r=kipc_module_call(KIPC_RECEIVE, 0, ENDPT_ANY, &msg)) != 0)
            vm_panic("receive() error", r);

        SANITYCHECK(SCL_DETAIL);

        if(msg.m_type & NOTIFY_MESSAGE) {
            switch(msg.m_source) {
            case SYSTEM:
                /* Kernel wants to have memory ranges
                 * verified, and/or pagefaults handled.
                 */
                do_memory();
                break;
            case HARDWARE:
                do_pagefaults();
                break;
            case PM_PROC_NR:
                /* PM sends a notify() on shutdown, which
                 * is OK and we ignore.
                 */
                break;
            default:
                /* No-one else should send us notifies. */
                printk("VM: ignoring notify() from %d\n",
                       msg.m_source);
                break;
            }
            continue;
        }
        who_e = msg.m_source;
        c = CALLNUMBER(msg.m_type);
        result = -ENOSYS; /* Out of range or restricted calls return this. */
        if(c < 0 || !vm_calls[c].vmc_func) {
            printk("VM: out of range or missing callnr %d from %d\n",
                   msg.m_type, who_e);
        } else if (vm_acl_ok(who_e, c) != 0) {
            printk("VM: unauthorized %s by %d\n",
                   vm_calls[c].vmc_name, who_e);
        } else {
            SANITYCHECK(SCL_FUNCTIONS);
            result = vm_calls[c].vmc_func(&msg);
            SANITYCHECK(SCL_FUNCTIONS);
        }

        /* Send reply message, unless the return code is SUSPEND,
         * which is a pseudo-result suppressing the reply message.
         */
        if(result != SUSPEND) {
            SANITYCHECK(SCL_DETAIL);
            msg.m_type = result;
            if((r=kipc_module_call(KIPC_SEND, 0, who_e, &msg)) != 0) {
                printk("VM: couldn't send %d to %d (err %d)\n",
                       msg.m_type, who_e, r);
                vm_panic("send() error", NO_NUM);
            }
            SANITYCHECK(SCL_DETAIL);
        }
        SANITYCHECK(SCL_DETAIL);
    }
    return 0;
}
Example #23
/*===========================================================================*
 *				pt_writemap		     		     *
 *===========================================================================*/
PUBLIC int pt_writemap(pt_t *pt, vir_bytes v, phys_bytes physaddr,
                       size_t bytes, u32_t flags, u32_t writemapflags)
{
    /* Write mapping into page table. Allocate a new page table if necessary. */
    /* Page directory and table entries for this virtual address. */
    int p, pages, pdecheck;
    int finalpde;
    int verify = 0;

    if(writemapflags & WMF_VERIFY)
        verify = 1;

    vm_assert(!(bytes % I386_PAGE_SIZE));
    vm_assert(!(flags & ~(PTF_ALLFLAGS)));

    pages = bytes / I386_PAGE_SIZE;

    /* MAP_NONE means to clear the mapping. It doesn't matter
     * what's actually written into the PTE if I386_VM_PRESENT
     * isn't on, so we can just write MAP_NONE into it.
     */
#if SANITYCHECKS
    if(physaddr != MAP_NONE && !(flags & I386_VM_PRESENT)) {
        vm_panic("pt_writemap: writing dir with !P\n", NO_NUM);
    }
    if(physaddr == MAP_NONE && flags) {
        vm_panic("pt_writemap: writing 0 with flags\n", NO_NUM);
    }
#endif

    finalpde = I386_VM_PDE(v + I386_PAGE_SIZE * pages);

    /* First make sure all the necessary page tables are allocated,
     * before we start writing in any of them, because it's a pain
     * to undo our work properly. Walk the range in page-directory-entry
     * sized leaps.
     */
    for(pdecheck = I386_VM_PDE(v); pdecheck <= finalpde; pdecheck++) {
        vm_assert(pdecheck >= 0 && pdecheck < I386_VM_DIR_ENTRIES);
        if(pt->pt_dir[pdecheck] & I386_VM_BIGPAGE) {
            printf("pt_writemap: trying to write 0x%lx into 0x%lx\n",
                   physaddr, v);
            vm_panic("pt_writemap: BIGPAGE found", NO_NUM);
        }
        if(!(pt->pt_dir[pdecheck] & I386_VM_PRESENT)) {
            int r;
            if(verify) {
                printf("pt_writemap verify: no pde %d\n", pdecheck);
                return EFAULT;
            }
            vm_assert(!pt->pt_dir[pdecheck]);
            if((r=pt_ptalloc(pt, pdecheck, flags)) != OK) {
                /* Couldn't do (complete) mapping.
                 * Don't bother freeing any previously
                 * allocated page tables, they're
                 * still writable, don't point to nonsense,
                 * and pt_ptalloc leaves the directory
                 * and other data in a consistent state.
                 */
                printf("pt_writemap: pt_ptalloc failed\n", pdecheck);
                return r;
            }
        }
        vm_assert(pt->pt_dir[pdecheck] & I386_VM_PRESENT);
    }

    /* Now write in them. */
    for(p = 0; p < pages; p++) {
        u32_t entry;
        int pde = I386_VM_PDE(v);
        int pte = I386_VM_PTE(v);

        vm_assert(!(v % I386_PAGE_SIZE));
        vm_assert(pte >= 0 && pte < I386_VM_PT_ENTRIES);
        vm_assert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);

        /* Page table has to be there. */
        vm_assert(pt->pt_dir[pde] & I386_VM_PRESENT);

        /* Make sure page directory entry for this page table
         * is marked present and page table entry is available.
         */
        vm_assert((pt->pt_dir[pde] & I386_VM_PRESENT) && pt->pt_pt[pde]);

#if SANITYCHECKS
        /* We don't expect to overwrite a page. */
        if(!(writemapflags & (WMF_OVERWRITE|WMF_VERIFY)))
            vm_assert(!(pt->pt_pt[pde][pte] & I386_VM_PRESENT));
#endif
        if(writemapflags & (WMF_WRITEFLAGSONLY|WMF_FREE)) {
            physaddr = pt->pt_pt[pde][pte] & I386_VM_ADDR_MASK;
        }

        if(writemapflags & WMF_FREE) {
            FREE_MEM(ABS2CLICK(physaddr), 1);
        }

        /* Entry we will write. */
        entry = (physaddr & I386_VM_ADDR_MASK) | flags;

        if(verify) {
            u32_t maskedentry;
            maskedentry = pt->pt_pt[pde][pte];
            maskedentry &= ~(I386_VM_ACC|I386_VM_DIRTY);
            /* Verify pagetable entry. */
            if(maskedentry != entry) {
                printf("pt_writemap: 0x%lx found, masked 0x%lx, 0x%lx expected\n",
                       pt->pt_pt[pde][pte], maskedentry, entry);
                return EFAULT;
            }
        } else {
            /* Write pagetable entry. */
            pt->pt_pt[pde][pte] = entry;
        }

        physaddr += I386_PAGE_SIZE;
        v += I386_PAGE_SIZE;
    }

    return OK;
}
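I386_VM_PDE() and I386_VM_PTE() select the two index fields of a 32-bit virtual address under two-level i386 paging: bits 31..22 index the page directory, bits 21..12 index the page table, and bits 11..0 are the byte offset. A sketch of that decomposition; the macro bodies spell out the architectural bit layout rather than quoting the MINIX headers:

#include <stdint.h>
#include <stdio.h>

/* Two-level i386 paging: bits 31..22 index the page directory, bits
 * 21..12 index the page table, bits 11..0 are the offset in the page.
 */
#define PDE(v)      (((uint32_t) (v) >> 22) & 0x3FF)
#define PTE(v)      (((uint32_t) (v) >> 12) & 0x3FF)
#define PAGE_OFF(v) ((uint32_t) (v) & 0xFFF)

int main(void)
{
	uint32_t v = 0x0804A123;

	printf("pde %u, pte %u, offset 0x%03x\n",
		(unsigned) PDE(v), (unsigned) PTE(v), (unsigned) PAGE_OFF(v));
	return 0;
}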
Example #24
/*===========================================================================*
 *				vm_allocpage		     		     *
 *===========================================================================*/
PUBLIC void *vm_allocpage(phys_bytes *phys, int reason)
{
    /* Allocate a page for use by VM itself. */
    phys_bytes newpage;
    vir_bytes loc;
    pt_t *pt;
    int r;
    static int level = 0;
    void *ret;

    pt = &vmp->vm_pt;
    vm_assert(reason >= 0 && reason < VMP_CATEGORIES);

    level++;

    vm_assert(level >= 1);
    vm_assert(level <= 2);

    if(level > 1 || !(vmp->vm_flags & VMF_HASPT) || !meminit_done) {
        void *s;
        s=vm_getsparepage(phys);
        level--;
        if(!s) {
            util_stacktrace();
            printf("VM: warning: out of spare pages\n");
        }
        return s;
    }

    /* VM does have a pagetable, so get a page and map it in there.
     * Where in our virtual address space can we put it?
     */
    loc = findhole(pt,  arch_vir2map(vmp, vmp->vm_stacktop),
                   vmp->vm_arch.vm_data_top);
    if(loc == NO_MEM) {
        level--;
        printf("VM: vm_allocpage: findhole failed\n");
        return NULL;
    }

    /* Allocate page of memory for use by VM. As VM
     * is trusted, we don't have to pre-clear it.
     */
    if((newpage = ALLOC_MEM(CLICKSPERPAGE, 0)) == NO_MEM) {
        level--;
        printf("VM: vm_allocpage: ALLOC_MEM failed\n");
        return NULL;
    }

    *phys = CLICK2ABS(newpage);

    /* Map this page into our address space. */
    if((r=pt_writemap(pt, loc, *phys, I386_PAGE_SIZE,
                      I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE, 0)) != OK) {
        FREE_MEM(newpage, CLICKSPERPAGE);
        printf("vm_allocpage writemap failed\n");
        level--;
        return NULL;
    }

    if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
        vm_panic("VMCTL_FLUSHTLB failed", r);
    }

    level--;

    /* Return user-space-ready pointer to it. */
    ret = (void *) arch_map2vir(vmp, loc);

    return ret;
}
Example #25
/*===========================================================================*
 *				main					     *
 *===========================================================================*/
PUBLIC int main(void)
{
  message msg;
  int result, who_e;

  /* SEF local startup. */
  sef_local_startup();

  /* This is VM's main loop. */
  while (TRUE) {
	int r, c;

	SANITYCHECK(SCL_TOP);
	if(missing_spares > 0) {
		pt_cycle();	/* pagetable code wants to be called */
	}
	SANITYCHECK(SCL_DETAIL);

  	if ((r=sef_receive(ANY, &msg)) != OK)
		vm_panic("sef_receive() error", r);

	SANITYCHECK(SCL_DETAIL);

	if(msg.m_type & NOTIFY_MESSAGE) {
		switch(msg.m_source) {
			case SYSTEM:
				/* Kernel wants to have memory ranges
				 * verified, and/or pagefaults handled.
				 */
				do_memory();
				break;
			case HARDWARE:
				do_pagefaults();
				break;
			case PM_PROC_NR:
				/* PM sends a notify() on shutdown, which
				 * is OK and we ignore.
				 */
				break;
			default:
				/* No-one else should send us notifies. */
				printf("VM: ignoring notify() from %d\n",
					msg.m_source);
				break;
		}
		continue;
	}
	who_e = msg.m_source;
	c = CALLNUMBER(msg.m_type);
	result = ENOSYS; /* Out of range or restricted calls return this. */
	if(c < 0 || !vm_calls[c].vmc_func) {
		printf("VM: out of range or missing callnr %d from %d\n",
			msg.m_type, who_e);
	} else if (vm_acl_ok(who_e, c) != OK) {
		printf("VM: unauthorized %s by %d\n",
			vm_calls[c].vmc_name, who_e);
	} else {
		SANITYCHECK(SCL_FUNCTIONS);
		result = vm_calls[c].vmc_func(&msg);
		SANITYCHECK(SCL_FUNCTIONS);
	}

	/* Send reply message, unless the return code is SUSPEND,
	 * which is a pseudo-result suppressing the reply message.
	 */
	if(result != SUSPEND) {
		SANITYCHECK(SCL_DETAIL);
		msg.m_type = result;
		if((r=send(who_e, &msg)) != OK) {
			printf("VM: couldn't send %d to %d (err %d)\n",
				msg.m_type, who_e, r);
			vm_panic("send() error", NO_NUM);
		}
		SANITYCHECK(SCL_DETAIL);
	}
	SANITYCHECK(SCL_DETAIL);
  }
  return(OK);
}
Example #26
/*===========================================================================*
 *				void *slaballoc				     *
 *===========================================================================*/
void *slaballoc(int bytes)
{
	int i;
	int count = 0;
	struct slabheader *s;
	struct slabdata *firstused;

	SLABSANITYCHECK(SCL_FUNCTIONS);

	/* Retrieve entry in slabs[]. */
	GETSLAB(bytes, s);
	vm_assert(s);

	/* To make the common case more common, make space in the 'used'
	 * queue first.
	 */
	if(!LH(s, LIST_USED)) {
		/* Make sure there is something on the freelist. */
		SLABSANITYCHECK(SCL_DETAIL);
		if(!LH(s, LIST_FREE)) {
			struct slabdata *nd = newslabdata(LIST_FREE);
			SLABSANITYCHECK(SCL_DETAIL);
			if(!nd) return NULL;
			ADDHEAD(nd, s, LIST_FREE);
			SLABSANITYCHECK(SCL_DETAIL);
		}

		SLABSANITYCHECK(SCL_DETAIL);
		MOVEHEAD(s, LIST_FREE, LIST_USED);
		SLABSANITYCHECK(SCL_DETAIL);
	}
	SLABSANITYCHECK(SCL_DETAIL);

	vm_assert(s);
	firstused = LH(s, LIST_USED);
	vm_assert(firstused);
	vm_assert(firstused->sdh.magic1 == MAGIC1);
	vm_assert(firstused->sdh.magic2 == MAGIC2);
	vm_assert(firstused->sdh.nused < ITEMSPERPAGE(bytes));

	for(i = firstused->sdh.freeguess;
		count < ITEMSPERPAGE(bytes); count++, i++) {
		SLABSANITYCHECK(SCL_DETAIL);
		i = i % ITEMSPERPAGE(bytes);

		if(!GETBIT(firstused, i)) {
			struct slabdata *f;
			char *ret;
			SETBIT(firstused, i);
			SLABSANITYCHECK(SCL_DETAIL);
			if(firstused->sdh.nused == ITEMSPERPAGE(bytes)) {
				SLABSANITYCHECK(SCL_DETAIL);
				MOVEHEAD(s, LIST_USED, LIST_FULL);
				SLABSANITYCHECK(SCL_DETAIL);
			}
			SLABSANITYCHECK(SCL_DETAIL);
			ret = ((char *) firstused->data) + i*bytes;

#if SANITYCHECKS
			nojunkwarning++;
			slabunlock(ret, bytes);
			nojunkwarning--;
			vm_assert(!nojunkwarning);
			*(u32_t *) ret = NOJUNK;
			slablock(ret, bytes);
#endif
			SLABSANITYCHECK(SCL_FUNCTIONS);
			SLABDATAUSE(firstused, firstused->sdh.freeguess = i+1;);

#if SANITYCHECKS
			if(bytes >= SLABSIZES+MINSIZE) {
				printk("slaballoc: odd, bytes %d?\n", bytes);
			}
			if(!slabsane_f(__FILE__, __LINE__, ret, bytes))
				vm_panic("slaballoc: slabsane failed", NO_NUM);
#endif

			return ret;
		}

		SLABSANITYCHECK(SCL_DETAIL);
	}

	/* Not reached: sdh.nused < ITEMSPERPAGE(bytes) was asserted above,
	 * so the scan must have found a free bit.
	 */
	vm_panic("slaballoc: no free item found in used slab", NO_NUM);
	return NULL;
}
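The scan above starts at sdh.freeguess and wraps modulo ITEMSPERPAGE(bytes), so consecutive allocations usually hit a free bit on the first probe. A reduced sketch of that circular first-fit scan over a plain byte array (hypothetical helper, fixed item count):

#include <stdio.h>

#define ITEMS 16

/* Circular first-fit over a bitmap of used slots: start at *guess and
 * wrap around once, the way slaballoc() scans from sdh.freeguess.
 */
static int find_free(const unsigned char used[ITEMS], int *guess)
{
	int count, i = *guess;

	for (count = 0; count < ITEMS; count++, i++) {
		i %= ITEMS;
		if (!used[i]) {
			*guess = i + 1;	/* next search resumes past the hit */
			return i;
		}
	}
	return -1;	/* every slot is taken */
}

int main(void)
{
	unsigned char used[ITEMS] = { 1, 1, 0 };
	int guess = 0;

	printf("free slot: %d\n", find_free(used, &guess));
	return 0;
}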
Example #27
/* Emulate one VM insn. */
void vm_step(vm_context_t *ctx)
{
#include "vm_opcodes.switch.tab"
	vm_operand_t r0, r1;
	vm_operand_t buf[3];
	uint8_t *pc = ctx->pc;
	unsigned ops_in, ops_out = 0;
	uint8_t opcode;
	
	opcode = *pc;
	DBGPRINT("vm_step: %08x -> %02x ", pc, opcode);
	pc += 1;
	ops_in = opcode >> 6; /* Top two bits encode the operand count. */
	opcode &= 0x3F;
	DBGPRINT("(%s / %d)\n", vm_insn_to_name[opcode], ops_in);
	vm_stack_pop3(ctx->dstack, buf, ops_in);

	goto *(&&op_invalid + offtab[opcode]);
	
op_invalid:
	vm_panic("vm_step: unknown opcode.");

op_ADD:
	r0 = ARG2 + ARG1;
	goto push_1;
op_SUB:
	r0 = ARG2 - ARG1;
	goto push_1;
op_MULU: {
	TARGET_MULU(ARG1, ARG2, r0, r1);
	goto push_2;
}
op_MULS: {
	TARGET_MULS(ARG1, ARG2, r0, r1);
	goto push_2;
}
op_DIVU: {
	TARGET_DIVU(ARG2, ARG1);
	goto push_2;
}
op_DIVS: {
	TARGET_DIVS(ARG2, ARG1);
	goto push_2;
}
op_AND:
	r0 = ARG1 & ARG2;
	goto push_1;
op_OR:
	r0 = ARG1 | ARG2;
	goto push_1;
op_XOR:
	r0 = ARG1 ^ ARG2;
	goto push_1;
op_NOT:
	r0 = ~ARG1;
	goto push_1;
op_LSL:
	r0 = ARG2 << ARG1;
	goto push_1;
op_LSR:
	r0 = ((vm_uoperand_t)ARG2) >> ARG1;
	goto push_1;
op_ASR:
	r0 = ((vm_soperand_t)ARG2) >> ARG1;
	goto push_1;
op_CMP_LT:
	r0 = ((vm_soperand_t)ARG1) < ((vm_soperand_t)ARG2);
	goto push_1;
op_CMP_GT:
	r0 = ((vm_soperand_t)ARG1) > ((vm_soperand_t)ARG2);
	goto push_1;
op_CMP_B:
	r0 = ((vm_uoperand_t)ARG1) < ((vm_uoperand_t)ARG2);
	goto push_1;
op_CMP_A:
	r0 = ((vm_uoperand_t)ARG1) > ((vm_uoperand_t)ARG2);
	goto push_1;
op_CMP_EQ:
	r0 = ARG2 == ARG1;
	goto push_1;
op_LDC_0:
	r0 = 0;
	goto push_1;
op_LDC_1: 
	r0 = 1;
	goto push_1;
op_LDC_2: 
	r0 = 2;
	goto push_1;
op_LDC_UB:
	r0 = (vm_uoperand_t)*(uint8_t *)(pc);
	pc += 1;
	goto push_1;
op_LDC_SB:
	r0 = (vm_soperand_t)*(int8_t *)(pc);
	pc += 1;
	goto push_1;
op_LDC_W:
	r0 = vm_fetch32_ua(pc);
	pc += 4;
	goto push_1;
op_LEA:
	r0 = vm_fetch32_ua(pc);
	DBGPRINT("vm_step: PCREL offset %08x -> ", r0);
	pc += 4;
	r0 += (vm_uoperand_t)pc;
	DBGPRINT("%08x\n", r0);
	goto push_1;
op_LDM_UB:
	r0 = (vm_uoperand_t)*(uint8_t *)(ARG1);
	goto push_1;
op_LDM_SB:
	r0 = (vm_soperand_t)*(int8_t *)(ARG1);
	goto push_1;
op_LDM_UH:
	r0 = (vm_uoperand_t)*(uint16_t *)(ARG1);
	goto push_1;
op_LDM_SH:
	r0 = (vm_soperand_t)*(int16_t *)(ARG1);
	goto push_1;
op_LDM_W:
	r0 = *(vm_operand_t *)(ARG1);
	goto push_1;
op_STM_B:
	*(uint8_t *)(ARG1) = (uint8_t)ARG2;
	DBGPRINT("vm_step: %08x -> %08x\n", ARG2, ARG1);
	goto push_none;
op_STM_H:
	*(uint16_t *)(ARG1) = (uint16_t)ARG2;
	DBGPRINT("vm_step: %08x -> %08x\n", ARG2, ARG1);
	goto push_none;
op_STM_W:
	*(vm_operand_t *)(ARG1) = ARG2;
	DBGPRINT("vm_step: %08x -> %08x\n", ARG2, ARG1);
	goto push_none;
op_LOCALS: {
	uint8_t count = *(uint8_t *)(pc);
	pc += 1;
	ctx->locals = (vm_locals_t *)vm_alloc(sizeof(vm_locals_t) + (count - 1) * sizeof(vm_operand_t));
	ctx->locals->count = count;
	goto push_none;
}
op_LDLOC: {
	uint8_t index = *(uint8_t *)(pc);
	pc += 1;
	if (!ctx->locals) {
		vm_panic("vm_step: accessing locals where none have been allocated.");
	}
	if (index >= ctx->locals->count) {
		vm_panic("vm_step: local index out of bounds.");
	}
	r0 = ctx->locals->data[index];
	goto push_1;
}
op_STLOC: {
	uint8_t index = *(uint8_t *)(pc);
	pc += 1;
	if (!ctx->locals) {
		vm_panic("vm_step: accessing locals where none have been allocated.");
	}
	if (index >= ctx->locals->count) {
		vm_panic("vm_step: local index out of bounds.");
	}
	ctx->locals->data[index] = ARG1;
	goto push_none;
}
op_DUP:
	r1 = r0 = ARG1;
	goto push_2;
op_SWAP: {
	r1 = ARG1;
	r0 = ARG2;
	goto push_2;
}
op_POP:
	goto push_none;
op_BR: {
	int8_t offset = *(int8_t *)(pc);
	pc += 1;
	pc = pc + offset;
	goto push_none;
}
op_BR_T: {
	int8_t offset = *(int8_t *)(pc);
	pc += 1;
	if (ARG1) {
		pc = pc + offset;
	}
	goto push_none;
}
op_BR_F: {
	int8_t offset = *(int8_t *)(pc);
	pc += 1;
	if (!ARG1) {
		pc = pc + offset;
	}
	goto push_none;
}
op_CALL: {
	int32_t offset = (int32_t)vm_fetch32_ua(pc);
	pc += 4;
	vm_save_frame(ctx, pc);
	pc += offset;
	DBGPRINT("\n");
	goto push_none;
}
op_RET: {
	vm_callframe_t *frame;

	vm_free(ctx->locals);
	frame = (vm_callframe_t *)vm_stack_pop(ctx->cstack);
	ctx->locals = frame->locals;
	pc = frame->return_pc;

	DBGPRINT("vm_step: stack balance on return: %d\n\n", frame->dstack_top - vm_stack_top(ctx->dstack));
	vm_free(frame);
	goto push_none;
}
op_ICALL: {
	vm_save_frame(ctx, pc);
	pc = (uint8_t *)ARG1;
	goto push_none;
}
op_IJMP: {
	pc = (uint8_t *)ARG1;
	goto push_none;
}
op_NCALL: {
	int index = vm_fetch16_ua(pc); /* thunk index */
	pc += 2;
	if (!ctx->module->ncalls_table) {
		vm_panic("vm_step: no ncalls defined, but a ncall insn encountered.");
	}
	vm_thunk_t thunk = (vm_thunk_t)ctx->module->ncalls_table[index];
	DBGPRINT("vm_step: NCALL: calling %d @%08X\n", index, thunk);
	thunk(ctx);
	goto push_none;
}

push_2:
	vm_stack_push(ctx->dstack, r1);
push_1:
	vm_stack_push(ctx->dstack, r0);
push_none:
	
	ctx->pc = pc;
	return;
}
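As the decode at the top of vm_step() shows, each instruction byte packs the operand count in its two high bits and the opcode in the low six. A standalone sketch of just that decode step, with illustrative names:

#include <stdint.h>
#include <stdio.h>

/* Decode one instruction byte the way vm_step() does: bits 7..6 give
 * the number of operands to pop, bits 5..0 the opcode proper.
 */
static void decode(uint8_t insn, unsigned *ops_in, uint8_t *opcode)
{
	*ops_in = insn >> 6;
	*opcode = insn & 0x3F;
}

int main(void)
{
	unsigned ops;
	uint8_t op;

	decode(0x81, &ops, &op);	/* 10 000001b: two operands, opcode 1 */
	printf("ops_in=%u opcode=%u\n", ops, (unsigned) op);
	return 0;
}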