Code example #1
File: exit.c  Project: kl07/minix-course
int do_procctl(message *msg)
{
	endpoint_t proc;
	struct vmproc *vmp;

	if(vm_isokendpt(msg->VMPCTL_WHO, &proc) != OK) {
		printf("VM: bogus endpoint VM_PROCCTL %d\n",
			msg->VMPCTL_WHO);
		return EINVAL;
	}
	vmp = &vmproc[proc];

	switch(msg->VMPCTL_PARAM) {
		case VMPPARAM_CLEAR:
			if(msg->m_source != RS_PROC_NR
				&& msg->m_source != VFS_PROC_NR)
				return EPERM;
			free_proc(vmp);
			if(pt_new(&vmp->vm_pt) != OK)
				panic("VMPPARAM_CLEAR: pt_new failed");
			pt_bind(&vmp->vm_pt, vmp);
			return OK;
		default:
			return EINVAL;
	}
}
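The interesting part of do_procctl is the permission gate: VMPPARAM_CLEAR wipes and rebuilds a process's page table, so only RS or VFS may request it, and the check relies on msg->m_source, which the kernel stamps on delivery so the sender cannot forge it. Below is a minimal standalone sketch of the same gate; the endpoint numbers, message layout, and handler names are invented for illustration.

#include <stdio.h>

/* Hypothetical endpoint numbers and message layout, for illustration
 * only; the real MINIX types differ.
 */
enum { RS_EP = 1, VFS_EP = 2, USER_EP = 40 };
enum { PARAM_CLEAR = 0 };

struct msg { int source; int param; };	/* stand-in for a MINIX message */

static int handle_ctl(struct msg *m)
{
	switch (m->param) {
	case PARAM_CLEAR:
		/* Destructive operation: only system services may ask.
		 * m->source is trustworthy because, in the real system,
		 * the kernel stamps it on delivery.
		 */
		if (m->source != RS_EP && m->source != VFS_EP)
			return -1;		/* EPERM */
		printf("clearing process state for caller %d\n", m->source);
		return 0;			/* OK */
	default:
		return -2;			/* EINVAL */
	}
}

int main(void)
{
	struct msg ok = { RS_EP, PARAM_CLEAR };
	struct msg bad = { USER_EP, PARAM_CLEAR };
	printf("rs: %d, user: %d\n", handle_ctl(&ok), handle_ctl(&bad));
	return 0;
}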
Code example #2
File: main.c  Project: junzhe/minix_pandaboard_es
void exec_bootproc(struct vmproc *vmp, struct boot_image *ip)
{
	struct vm_exec_info vmexeci;
	struct exec_info *execi = &vmexeci.execi;
	char hdr[VM_PAGE_SIZE];

	memset(&vmexeci, 0, sizeof(vmexeci));

	if(pt_new(&vmp->vm_pt) != OK)
		panic("VM: no new pagetable");

	if(pt_bind(&vmp->vm_pt, vmp) != OK)
		panic("VM: pt_bind failed");

	if(sys_physcopy(NONE, ip->start_addr, SELF,
		(vir_bytes) hdr, sizeof(hdr)) != OK)
		panic("can't look at boot proc header");

        execi->stack_high = kernel_boot_info.user_sp;
        execi->stack_size = DEFAULT_STACK_LIMIT;
        execi->proc_e = vmp->vm_endpoint;
        execi->hdr = hdr;
        execi->hdr_len = sizeof(hdr);
        strlcpy(execi->progname, ip->proc_name, sizeof(execi->progname));
        execi->frame_len = 0;
	execi->opaque = &vmexeci;
	execi->filesize = ip->len;

	vmexeci.ip = ip;
	vmexeci.vmp = vmp;

        /* callback functions and data */
        execi->copymem = libexec_copy_physcopy;
        execi->clearproc = NULL;
        execi->clearmem = libexec_clear_sys_memset;
	execi->allocmem_prealloc_junk = libexec_alloc_vm_prealloc;
	execi->allocmem_prealloc_cleared = libexec_alloc_vm_prealloc;
        execi->allocmem_ondemand = libexec_alloc_vm_ondemand;

	if(libexec_load_elf(execi) != OK)
		panic("vm: boot process load of process %s (ep=%d) failed\n", 
			execi->progname,vmp->vm_endpoint);

        if(sys_exec(vmp->vm_endpoint, (char *) execi->stack_high - 12,
		(char *) ip->proc_name, execi->pc) != OK)
		panic("vm: boot process exec of process %s (ep=%d) failed\n", 
			execi->progname,vmp->vm_endpoint);

	/* make it runnable */
	if(sys_vmctl(vmp->vm_endpoint, VMCTL_BOOTINHIBIT_CLEAR, 0) != OK)
		panic("VMCTL_BOOTINHIBIT_CLEAR failed");
}
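exec_bootproc works by filling struct exec_info with callbacks (copymem, clearmem, the allocmem_* hooks) so that the generic loader, libexec_load_elf, never needs to know the memory is reached by physical copies. The following self-contained sketch shows the callback-driven loader pattern; struct loader_info, load_image, and the callbacks are made-up names, not the real libexec API.

#include <stddef.h>
#include <stdio.h>

/* Stand-in for libexec's callback-driven loading: the loader knows
 * only the callback signatures, and the caller decides how memory is
 * actually copied and cleared. All names here are invented.
 */
struct loader_info {
	const char *progname;
	int (*copymem)(struct loader_info *li, const void *src, size_t len);
	int (*clearmem)(struct loader_info *li, size_t len);
	void *opaque;		/* caller-private data, like execi->opaque */
};

static int load_image(struct loader_info *li)
{
	/* A real loader would parse ELF headers here; this one just
	 * exercises the callbacks the way libexec_load_elf does.
	 */
	if (li->copymem(li, "text+data", 9) != 0)
		return -1;
	return li->clearmem(li, 4096);		/* zero the bss */
}

static int my_copy(struct loader_info *li, const void *src, size_t len)
{
	(void) src;
	printf("%s: copy %zu bytes\n", li->progname, len);
	return 0;
}

static int my_clear(struct loader_info *li, size_t len)
{
	printf("%s: clear %zu bytes\n", li->progname, len);
	return 0;
}

int main(void)
{
	struct loader_info li = { "bootproc", my_copy, my_clear, NULL };
	return load_image(&li) == 0 ? 0 : 1;
}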
Code example #3
File: fork.c  Project: 0xenvision/minix
/*===========================================================================*
 *				do_fork					     *
 *===========================================================================*/
int do_fork(message *msg)
{
  int r, proc, childproc;
  struct vmproc *vmp, *vmc;
  pt_t origpt;
  vir_bytes msgaddr;

  SANITYCHECK(SCL_FUNCTIONS);

  if(vm_isokendpt(msg->VMF_ENDPOINT, &proc) != OK) {
	printf("VM: bogus endpoint VM_FORK %d\n", msg->VMF_ENDPOINT);
  SANITYCHECK(SCL_FUNCTIONS);
	return EINVAL;
  }

  childproc = msg->VMF_SLOTNO;
  if(childproc < 0 || childproc >= NR_PROCS) {
	printf("VM: bogus slotno VM_FORK %d\n", msg->VMF_SLOTNO);
  SANITYCHECK(SCL_FUNCTIONS);
	return EINVAL;
  }

  vmp = &vmproc[proc];		/* parent */
  vmc = &vmproc[childproc];	/* child */
  assert(vmc->vm_slot == childproc);

  /* The child is basically a copy of the parent. */
  origpt = vmc->vm_pt;
  *vmc = *vmp;
  vmc->vm_slot = childproc;
  region_init(&vmc->vm_regions_avl);
  vmc->vm_endpoint = NONE;	/* In case someone tries to use it. */
  vmc->vm_pt = origpt;

#if VMSTATS
  vmc->vm_bytecopies = 0;
#endif

  if(pt_new(&vmc->vm_pt) != OK) {
	printf("VM: fork: pt_new failed\n");
	return ENOMEM;
  }

  SANITYCHECK(SCL_DETAIL);

  if(map_proc_copy(vmc, vmp) != OK) {
	printf("VM: fork: map_proc_copy failed\n");
	pt_free(&vmc->vm_pt);
	return(ENOMEM);
  }

  /* Only inherit these flags. */
  vmc->vm_flags &= VMF_INUSE;

  /* inherit the priv call bitmaps */
  memcpy(&vmc->vm_call_mask, &vmp->vm_call_mask, sizeof(vmc->vm_call_mask));

  /* Tell kernel about the (now successful) FORK. */
  if((r=sys_fork(vmp->vm_endpoint, childproc,
	&vmc->vm_endpoint, PFF_VMINHIBIT, &msgaddr)) != OK) {
        panic("do_fork can't sys_fork: %d", r);
  }

  if((r=pt_bind(&vmc->vm_pt, vmc)) != OK)
	panic("fork can't pt_bind: %d", r);

  {
	vir_bytes vir;
	/* Fault in the message area on both sides so it is writable.
	 * Older versions treated this as an unchecked optimisation;
	 * here a failure is fatal.
	 */
	vir = msgaddr;
	if (handle_memory(vmc, vir, sizeof(message), 1) != OK)
	    panic("do_fork: handle_memory for child failed\n");
	vir = msgaddr;
	if (handle_memory(vmp, vir, sizeof(message), 1) != OK)
	    panic("do_fork: handle_memory for parent failed\n");
  }

  /* Inform caller of new child endpoint. */
  msg->VMF_CHILD_ENDPOINT = vmc->vm_endpoint;

  SANITYCHECK(SCL_FUNCTIONS);
  return OK;
}
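The heart of this do_fork is the slot-copy idiom: save the child's identity fields (origpt, vm_slot), copy the entire parent struct with one assignment, then restore the saved fields and mask the inherited flags. A small standalone sketch of that idiom, with an invented proc_slot struct standing in for struct vmproc:

#include <stdio.h>

/* Illustrative stand-in for struct vmproc: some fields are per-slot
 * identity (the slot number, the page table handle) and must survive
 * the wholesale copy; the rest is inherited. All names are invented.
 */
struct proc_slot {
	int slot;		/* identity: never inherited */
	int pt_handle;		/* identity: child gets its own */
	int flags;		/* inherited, then masked */
	long quota;		/* inherited verbatim */
};

#define F_INUSE  0x1
#define F_HASDMA 0x2		/* must not be inherited */

static void fork_slot(struct proc_slot *child, const struct proc_slot *parent)
{
	/* Save the child's identity fields... */
	int slot = child->slot;
	int pt = child->pt_handle;

	/* ...copy everything from the parent in one struct assignment... */
	*child = *parent;

	/* ...then restore what must stay per-slot. */
	child->slot = slot;
	child->pt_handle = pt;

	/* Only inherit whitelisted flags, as do_fork does with VMF_INUSE. */
	child->flags &= F_INUSE;
}

int main(void)
{
	struct proc_slot parent = { 3, 103, F_INUSE | F_HASDMA, 42 };
	struct proc_slot child  = { 7, 107, 0, 0 };

	fork_slot(&child, &parent);
	printf("slot=%d pt=%d flags=%#x quota=%ld\n",
		child.slot, child.pt_handle, child.flags, child.quota);
	return 0;
}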
Code example #4
/*===========================================================================*
 *                              pt_init                                      *
 *===========================================================================*/
void pt_init(void)
{
    pt_t *newpt;
    int s, r, p;
    vir_bytes sparepages_mem;
#if defined(__arm__)
    vir_bytes sparepagedirs_mem;
#endif
    static u32_t currentpagedir[ARCH_VM_DIR_ENTRIES];
    int m = kernel_boot_info.kern_mod;
#if defined(__i386__)
    int global_bit_ok = 0;
    u32_t mypdbr; /* Page Directory Base Register (cr3) value */
#elif defined(__arm__)
    u32_t myttbr;
#endif

    /* Find what the physical location of the kernel is. */
    assert(m >= 0);
    assert(m < kernel_boot_info.mods_with_kernel);
    assert(kernel_boot_info.mods_with_kernel < MULTIBOOT_MAX_MODS);
    kern_mb_mod = &kernel_boot_info.module_list[m];
    kern_size = kern_mb_mod->mod_end - kern_mb_mod->mod_start;
    assert(!(kern_mb_mod->mod_start % ARCH_BIG_PAGE_SIZE));
    assert(!(kernel_boot_info.vir_kern_start % ARCH_BIG_PAGE_SIZE));
    kern_start_pde = kernel_boot_info.vir_kern_start / ARCH_BIG_PAGE_SIZE;

    /* Get ourselves spare pages. */
    sparepages_mem = (vir_bytes) static_sparepages;
    assert(!(sparepages_mem % VM_PAGE_SIZE));

#if defined(__arm__)
    /* Get ourselves spare pagedirs. */
    sparepagedirs_mem = (vir_bytes) static_sparepagedirs;
    assert(!(sparepagedirs_mem % ARCH_PAGEDIR_SIZE));
#endif

    /* Spare pages are used to allocate memory before VM has its own page
     * table into which things (i.e. arbitrary physical memory) can be
     * mapped. We get them by pre-allocating space in our bss (allocated
     * and mapped in by the kernel) in static_sparepages. We also need the
     * physical addresses, though; we look them up now so they are ready
     * for use.
     */
#if defined(__arm__)
    missing_sparedirs = 0;
    assert(STATIC_SPAREPAGEDIRS <= SPAREPAGEDIRS);
    for(s = 0; s < SPAREPAGEDIRS; s++) {
        vir_bytes v = (sparepagedirs_mem + s*ARCH_PAGEDIR_SIZE);
        phys_bytes ph;
        if((r=sys_umap(SELF, VM_D, (vir_bytes) v,
                       ARCH_PAGEDIR_SIZE, &ph)) != OK)
            panic("pt_init: sys_umap failed: %d", r);
        if(s >= STATIC_SPAREPAGEDIRS) {
            sparepagedirs[s].pagedir = NULL;
            missing_sparedirs++;
            continue;
        }
        sparepagedirs[s].pagedir = (void *) v;
        sparepagedirs[s].phys = ph;
    }
#endif

    if(!(spare_pagequeue = reservedqueue_new(SPAREPAGES, 1, 1, 0)))
        panic("reservedqueue_new for single pages failed");

    assert(STATIC_SPAREPAGES < SPAREPAGES);
    for(s = 0; s < STATIC_SPAREPAGES; s++) {
        void *v = (void *) (sparepages_mem + s*VM_PAGE_SIZE);
        phys_bytes ph;
        if((r=sys_umap(SELF, VM_D, (vir_bytes) v,
                       VM_PAGE_SIZE*SPAREPAGES, &ph)) != OK)
            panic("pt_init: sys_umap failed: %d", r);
        reservedqueue_add(spare_pagequeue, v, ph);
    }

#if defined(__i386__)
    /* global bit and 4MB pages available? */
    global_bit_ok = _cpufeature(_CPUF_I386_PGE);
    bigpage_ok = _cpufeature(_CPUF_I386_PSE);

    /* Set bit for PTE's and PDE's if available. */
    if(global_bit_ok)
        global_bit = I386_VM_GLOBAL;
#endif

    /* Now reserve another pde for kernel's own mappings. */
    {
        int kernmap_pde;
        phys_bytes addr, len;
        int flags, index = 0;
        u32_t offset = 0;

        kernmap_pde = freepde();
        offset = kernmap_pde * ARCH_BIG_PAGE_SIZE;

        while(sys_vmctl_get_mapping(index, &addr, &len,
                                    &flags) == OK)  {
            int usedpde;
            vir_bytes vir;
            if(index >= MAX_KERNMAPPINGS)
                panic("VM: too many kernel mappings: %d", index);
            kern_mappings[index].phys_addr = addr;
            kern_mappings[index].len = len;
            kern_mappings[index].flags = flags;
            kern_mappings[index].vir_addr = offset;
            kern_mappings[index].flags =
                ARCH_VM_PTE_PRESENT;
            if(flags & VMMF_UNCACHED)
#if defined(__i386__)
                kern_mappings[index].flags |= PTF_NOCACHE;
#elif defined(__arm__)
                kern_mappings[index].flags |= ARM_VM_PTE_DEVICE;
#endif
            if(flags & VMMF_USER)
                kern_mappings[index].flags |= ARCH_VM_PTE_USER;
#if defined(__arm__)
            else
                kern_mappings[index].flags |= ARM_VM_PTE_SUPER;
#endif
            if(flags & VMMF_WRITE)
                kern_mappings[index].flags |= ARCH_VM_PTE_RW;
#if defined(__i386__)
            if(flags & VMMF_GLO)
                kern_mappings[index].flags |= I386_VM_GLOBAL;
#elif defined(__arm__)
            else
                kern_mappings[index].flags |= ARCH_VM_PTE_RO;
#endif
            if(addr % VM_PAGE_SIZE)
                panic("VM: addr unaligned: %d", addr);
            if(len % VM_PAGE_SIZE)
                panic("VM: len unaligned: %d", len);
            vir = offset;
            if(sys_vmctl_reply_mapping(index, vir) != OK)
                panic("VM: reply failed");
            offset += len;
            index++;
            kernmappings++;

            usedpde = ARCH_VM_PDE(offset);
            while(usedpde > kernmap_pde) {
                int newpde = freepde();
                assert(newpde == kernmap_pde+1);
                kernmap_pde = newpde;
            }
        }
    }

    /* Reserve PDEs available for mapping in the page directories. */
    {
        int pd;
        for(pd = 0; pd < MAX_PAGEDIR_PDES; pd++) {
            struct pdm *pdm = &pagedir_mappings[pd];
            pdm->pdeno = freepde();
            phys_bytes ph;

            /* Allocate us a page table in which to
             * remember page directory pointers.
             */
            if(!(pdm->page_directories =
                        vm_allocpage(&ph, VMP_PAGETABLE))) {
                panic("no virt addr for vm mappings");
            }
            memset(pdm->page_directories, 0, VM_PAGE_SIZE);
            pdm->phys = ph;

#if defined(__i386__)
            pdm->val = (ph & ARCH_VM_ADDR_MASK) |
                       ARCH_VM_PDE_PRESENT | ARCH_VM_PTE_RW;
#elif defined(__arm__)
            pdm->val = (ph & ARCH_VM_PDE_MASK)
                       | ARCH_VM_PDE_PRESENT
                       | ARM_VM_PDE_DOMAIN; //LSC FIXME
#endif
        }
    }

    /* All right. Now we have to make our own page directory and page
     * tables, which the kernel has already set up, accessible to us. It's
     * easier to understand if we just copy all the required pages (i.e.
     * page directory and page tables) and set up the pointers as if VM
     * had done it itself.
     *
     * This allocation will happen without using any page table, and just
     * uses spare pages.
     */
    newpt = &vmprocess->vm_pt;
    if(pt_new(newpt) != OK)
        panic("vm pt_new failed");

    /* Get our current pagedir so we can see it. */
#if defined(__i386__)
    if(sys_vmctl_get_pdbr(SELF, &mypdbr) != OK)
#elif defined(__arm__)
    if(sys_vmctl_get_pdbr(SELF, &myttbr) != OK)
#endif
        panic("VM: sys_vmctl_get_pdbr failed");
#if defined(__i386__)
    if(sys_vircopy(NONE, mypdbr, SELF,
                   (vir_bytes) currentpagedir, VM_PAGE_SIZE) != OK)
#elif defined(__arm__)
    if(sys_vircopy(NONE, myttbr, SELF,
                   (vir_bytes) currentpagedir, ARCH_PAGEDIR_SIZE) != OK)
#endif
        panic("VM: sys_vircopy failed");

    /* We have mapped in kernel ourselves; now copy mappings for VM
     * that kernel made, including allocations for BSS. Skip identity
     * mapping bits; just map in VM.
     */
    for(p = 0; p < ARCH_VM_DIR_ENTRIES; p++) {
        u32_t entry = currentpagedir[p];
        phys_bytes ptaddr_kern, ptaddr_us;

        /* BIGPAGEs are kernel mapping (do ourselves) or boot
         * identity mapping (don't want).
         */
        if(!(entry & ARCH_VM_PDE_PRESENT)) continue;
        if((entry & ARCH_VM_BIGPAGE)) continue;

        if(pt_ptalloc(newpt, p, 0) != OK)
            panic("pt_ptalloc failed");
        assert(newpt->pt_dir[p] & ARCH_VM_PDE_PRESENT);

#if defined(__i386__)
        ptaddr_kern = entry & ARCH_VM_ADDR_MASK;
        ptaddr_us = newpt->pt_dir[p] & ARCH_VM_ADDR_MASK;
#elif defined(__arm__)
        ptaddr_kern = entry & ARCH_VM_PDE_MASK;
        ptaddr_us = newpt->pt_dir[p] & ARCH_VM_PDE_MASK;
#endif

        /* Copy kernel-initialized pagetable contents into our
         * normally accessible pagetable.
         */
        if(sys_abscopy(ptaddr_kern, ptaddr_us, VM_PAGE_SIZE) != OK)
            panic("pt_init: abscopy failed");
    }

    /* Inform the kernel that VM has a newly built page table. */
    assert(vmproc[VM_PROC_NR].vm_endpoint == VM_PROC_NR);
    pt_bind(newpt, &vmproc[VM_PROC_NR]);

    pt_init_done = 1;

    /* All OK. */
    return;
}
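pt_init faces a bootstrap problem: it must allocate pages before any page table or allocator exists, so it hands out pages pre-reserved in its own bss (static_sparepages), looking up their physical addresses with sys_umap and managing them through reservedqueue_new/reservedqueue_add. The sketch below imitates that bootstrap pool in plain user-space C; the pool layout, spare_alloc, and the faked physical addresses are illustrative assumptions.

#include <stdint.h>
#include <stdio.h>

/* Bootstrap-pool sketch: a fixed set of pages carved out of static
 * storage (the analogue of static_sparepages in VM's bss), handed out
 * before any real allocator exists. The "physical" address is faked
 * here; the real code looks it up with sys_umap.
 */
#define PAGE_SIZE 4096
#define SPARES    4

static unsigned char pool[SPARES][PAGE_SIZE];

struct spare { void *virt; uintptr_t phys; int used; };
static struct spare spares[SPARES];

static void spares_init(void)
{
	int i;
	for (i = 0; i < SPARES; i++) {
		spares[i].virt = pool[i];
		spares[i].phys = (uintptr_t) pool[i];	/* fake sys_umap() */
		spares[i].used = 0;
	}
}

static void *spare_alloc(uintptr_t *phys)
{
	int i;
	for (i = 0; i < SPARES; i++) {
		if (!spares[i].used) {
			spares[i].used = 1;
			*phys = spares[i].phys;
			return spares[i].virt;
		}
	}
	return NULL;	/* pool exhausted before the real allocator is up */
}

int main(void)
{
	uintptr_t ph;
	void *p;

	spares_init();
	while ((p = spare_alloc(&ph)) != NULL)
		printf("spare page at %p (phys %#lx)\n",
			p, (unsigned long) ph);
	return 0;
}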
Code example #5
File: fork.c  Project: Spenser309/CS551
/*===========================================================================*
 *				do_fork					     *
 *===========================================================================*/
PUBLIC int do_fork(message *msg)
{
  int r, proc, s, childproc, fullvm;
  struct vmproc *vmp, *vmc;
  pt_t origpt;
  vir_bytes msgaddr;

  SANITYCHECK(SCL_FUNCTIONS);

  if(vm_isokendpt(msg->VMF_ENDPOINT, &proc) != OK) {
	printf("VM: bogus endpoint VM_FORK %d\n", msg->VMF_ENDPOINT);
  SANITYCHECK(SCL_FUNCTIONS);
	return EINVAL;
  }

  childproc = msg->VMF_SLOTNO;
  if(childproc < 0 || childproc >= NR_PROCS) {
	printf("VM: bogus slotno VM_FORK %d\n", msg->VMF_SLOTNO);
  SANITYCHECK(SCL_FUNCTIONS);
	return EINVAL;
  }

  vmp = &vmproc[proc];		/* parent */
  vmc = &vmproc[childproc];	/* child */
  assert(vmc->vm_slot == childproc);

  if(vmp->vm_flags & VMF_HAS_DMA) {
	printf("VM: %d has DMA memory and may not fork\n", msg->VMF_ENDPOINT);
	return EINVAL;
  }

  fullvm = vmp->vm_flags & VMF_HASPT;

  /* The child is basically a copy of the parent. */
  origpt = vmc->vm_pt;
  *vmc = *vmp;
  vmc->vm_slot = childproc;
  vmc->vm_regions = NULL;
  yielded_init(&vmc->vm_yielded_blocks);
  vmc->vm_endpoint = NONE;	/* In case someone tries to use it. */
  vmc->vm_pt = origpt;
  vmc->vm_flags &= ~VMF_HASPT;

#if VMSTATS
  vmc->vm_bytecopies = 0;
#endif

  if(pt_new(&vmc->vm_pt) != OK) {
	printf("VM: fork: pt_new failed\n");
	return ENOMEM;
  }

  vmc->vm_flags |= VMF_HASPT;

  if(fullvm) {
	SANITYCHECK(SCL_DETAIL);

	if(map_proc_copy(vmc, vmp) != OK) {
		printf("VM: fork: map_proc_copy failed\n");
		pt_free(&vmc->vm_pt);
		return(ENOMEM);
	}

	if(vmp->vm_heap) {
		vmc->vm_heap = map_region_lookup_tag(vmc, VRT_HEAP);
		assert(vmc->vm_heap);
	}

	SANITYCHECK(SCL_DETAIL);
  } else {
	vir_bytes sp;
	struct vir_region *heap, *stack;
	vir_bytes text_bytes, data_bytes, stack_bytes, parent_gap_bytes,
		child_gap_bytes;

	/* Get SP of new process (using parent). */
	if(get_stack_ptr(vmp->vm_endpoint, &sp) != OK) {
		printf("VM: fork: get_stack_ptr failed for %d\n",
			vmp->vm_endpoint);
		return ENOMEM;
	}

	/* Update size of stack segment using current SP. */
	if(adjust(vmp, vmp->vm_arch.vm_seg[D].mem_len, sp) != OK) {
		printf("VM: fork: adjust failed for %d\n",
			vmp->vm_endpoint);
		return ENOMEM;
	}

	/* Copy newly adjust()ed stack segment size to child. */
	vmc->vm_arch.vm_seg[S] = vmp->vm_arch.vm_seg[S];

        text_bytes = CLICK2ABS(vmc->vm_arch.vm_seg[T].mem_len);
        data_bytes = CLICK2ABS(vmc->vm_arch.vm_seg[D].mem_len);
	stack_bytes = CLICK2ABS(vmc->vm_arch.vm_seg[S].mem_len);

	/* How much space lies between the parent's break and the lower
	 * end (which is the logical top) of its stack.
	 */
	parent_gap_bytes = CLICK2ABS(vmc->vm_arch.vm_seg[S].mem_vir -
		vmc->vm_arch.vm_seg[D].mem_len);

	/* how much space can the child stack grow downwards, below
	 * the current SP? The rest of the gap is available for the
	 * heap to grow upwards.
	 */
	child_gap_bytes = VM_PAGE_SIZE;

        if((r=proc_new(vmc, VM_PROCSTART,
		text_bytes, data_bytes, stack_bytes, child_gap_bytes, 0, 0,
		CLICK2ABS(vmc->vm_arch.vm_seg[S].mem_vir +
                        vmc->vm_arch.vm_seg[S].mem_len), 1)) != OK) {
			printf("VM: fork: proc_new failed\n");
			return r;
	}

	if(!(heap = map_region_lookup_tag(vmc, VRT_HEAP)))
		panic("couldn't lookup heap");
	assert(heap->phys);
	if(!(stack = map_region_lookup_tag(vmc, VRT_STACK)))
		panic("couldn't lookup stack");
	assert(stack->phys);

	/* Now copy the memory regions. */

	if(vmc->vm_arch.vm_seg[T].mem_len > 0) {
		struct vir_region *text;
		if(!(text = map_region_lookup_tag(vmc, VRT_TEXT)))
			panic("couldn't lookup text");
		assert(text->phys);
		if(copy_abs2region(CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
			text, 0, text_bytes) != OK)
				panic("couldn't copy text");
	}

	if(copy_abs2region(CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys),
		heap, 0, data_bytes) != OK)
			panic("couldn't copy heap");

	if(copy_abs2region(CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys +
			vmc->vm_arch.vm_seg[D].mem_len) + parent_gap_bytes,
			stack, child_gap_bytes, stack_bytes) != OK)
			panic("couldn't copy stack");
  }

  /* Only inherit these flags. */
  vmc->vm_flags &= (VMF_INUSE|VMF_SEPARATE|VMF_HASPT);

  /* inherit the priv call bitmaps */
  memcpy(&vmc->vm_call_mask, &vmp->vm_call_mask, sizeof(vmc->vm_call_mask));

  /* Tell kernel about the (now successful) FORK. */
  if((r=sys_fork(vmp->vm_endpoint, childproc,
	&vmc->vm_endpoint, vmc->vm_arch.vm_seg,
	PFF_VMINHIBIT, &msgaddr)) != OK) {
        panic("do_fork can't sys_fork: %d", r);
  }

  if(fullvm) {
	vir_bytes vir;
	/* making these messages writable is an optimisation
	 * and its return value needn't be checked.
	 */
	vir = arch_vir2map(vmc, msgaddr);
	handle_memory(vmc, vir, sizeof(message), 1);
	vir = arch_vir2map(vmp, msgaddr);
	handle_memory(vmp, vir, sizeof(message), 1);
  }

  if((r=pt_bind(&vmc->vm_pt, vmc)) != OK)
	panic("fork can't pt_bind: %d", r);

  /* Inform caller of new child endpoint. */
  msg->VMF_CHILD_ENDPOINT = vmc->vm_endpoint;

  SANITYCHECK(SCL_FUNCTIONS);
  return OK;
}
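This older, segment-based do_fork does all of its size arithmetic in clicks, converting to bytes with CLICK2ABS for the text, data, and stack sizes and for the gap between the parent's break and its stack. A toy version of that computation, assuming a 4 KiB click (CLICK_SHIFT of 12, as on i386) and invented segment numbers:

#include <stdio.h>

/* Click-arithmetic sketch. MINIX measures segments in "clicks";
 * CLICK_SHIFT is architecture-dependent and assumed to be 12 here
 * (4 KiB clicks, as on i386). The segment numbers are invented.
 */
#define CLICK_SHIFT 12
#define CLICK2ABS(c) ((unsigned long)(c) << CLICK_SHIFT)

int main(void)
{
	/* Imaginary segment map: lengths and virtual bases in clicks. */
	unsigned long text_len = 16, data_len = 8;
	unsigned long stack_vir = 1000, stack_len = 4;

	unsigned long text_bytes  = CLICK2ABS(text_len);
	unsigned long data_bytes  = CLICK2ABS(data_len);
	unsigned long stack_bytes = CLICK2ABS(stack_len);

	/* Gap between the end of data (the break) and the low end of the
	 * stack, computed the way do_fork derives parent_gap_bytes above.
	 */
	unsigned long gap_bytes = CLICK2ABS(stack_vir - data_len);

	printf("text=%lu data=%lu stack=%lu gap=%lu bytes\n",
		text_bytes, data_bytes, stack_bytes, gap_bytes);
	return 0;
}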
Code example #6
File: main.c  Project: michalwojciech/cs170s10proj2
/*===========================================================================*
 *				sef_cb_init_fresh			     *
 *===========================================================================*/
PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
{
/* Initialize the vm server. */
	int s, i;
	int click, clicksforgotten = 0;
	struct memory mem_chunks[NR_MEMS];
	struct boot_image image[NR_BOOT_PROCS];
	struct boot_image *ip;
	struct rprocpub rprocpub[NR_BOOT_PROCS];
	phys_bytes limit = 0;

#if SANITYCHECKS
	incheck = nocheck = 0;
	FIXME("VM SANITYCHECKS are on");
#endif

	vm_paged = 1;
	env_parse("vm_paged", "d", 0, &vm_paged, 0, 1);
#if SANITYCHECKS
	env_parse("vm_sanitychecklevel", "d", 0, &vm_sanitychecklevel, 0, SCL_MAX);
#endif

	/* Get chunks of available memory. */
	get_mem_chunks(mem_chunks);

	/* Initialize VM's process table. Request a copy of the system
	 * image table that is defined at the kernel level to see which
	 * slots to fill in.
	 */
	if (OK != (s=sys_getimage(image)))
		vm_panic("couldn't get image table: %d\n", s);

	/* Set table to 0. This invalidates all slots (clear VMF_INUSE). */
	memset(vmproc, 0, sizeof(vmproc));

	for(i = 0; i < ELEMENTS(vmproc); i++) {
		vmproc[i].vm_slot = i;
	}

	/* Walk through boot-time system processes that are alive
	 * now and make valid slot entries for them.
	 */
	for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
		phys_bytes proclimit;
		struct vmproc *vmp;

		if(ip->proc_nr >= _NR_PROCS) { vm_panic("proc", ip->proc_nr); }
		if(ip->proc_nr < 0 && ip->proc_nr != SYSTEM) continue;

#define GETVMP(v, nr)						\
		if(nr >= 0) {					\
			vmp = &vmproc[ip->proc_nr];		\
		} else if(nr == SYSTEM) {			\
			vmp = &vmproc[VMP_SYSTEM];		\
		} else {					\
			vm_panic("init: crazy proc_nr", nr);	\
		}

		/* Initialize normal process table slot or special SYSTEM
		 * table slot. Kernel memory is already reserved.
		 */
		GETVMP(vmp, ip->proc_nr);

		/* reset fields as if exited */
		clear_proc(vmp);

		/* Get memory map for this process from the kernel. */
		if ((s=get_mem_map(ip->proc_nr, vmp->vm_arch.vm_seg)) != OK)
			vm_panic("couldn't get process mem_map",s);

		/* Remove this memory from the free list. */
		reserve_proc_mem(mem_chunks, vmp->vm_arch.vm_seg);

		/* Set memory limit. */
		proclimit = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_phys +
			vmp->vm_arch.vm_seg[S].mem_len) - 1;

		if(proclimit > limit)
			limit = proclimit;

		vmp->vm_flags = VMF_INUSE;
		vmp->vm_endpoint = ip->endpoint;
		vmp->vm_stacktop =
			CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
				vmp->vm_arch.vm_seg[S].mem_len);

		if (vmp->vm_arch.vm_seg[T].mem_len != 0)
			vmp->vm_flags |= VMF_SEPARATE;
	}

	/* Architecture-dependent initialization. */
	pt_init(limit);

	/* Initialize tables to all physical memory. */
	mem_init(mem_chunks);
	meminit_done = 1;

	/* Give these processes their own page table. */
	for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
		int s;
		struct vmproc *vmp;
		vir_bytes old_stacktop, old_stack;

		if(ip->proc_nr < 0) continue;

		GETVMP(vmp, ip->proc_nr);

               if(!(ip->flags & PROC_FULLVM))
                       continue;

		old_stack = 
			vmp->vm_arch.vm_seg[S].mem_vir +
			vmp->vm_arch.vm_seg[S].mem_len - 
			vmp->vm_arch.vm_seg[D].mem_len;

        	if(pt_new(&vmp->vm_pt) != OK)
			vm_panic("VM: no new pagetable", NO_NUM);
#define BASICSTACK VM_PAGE_SIZE
		old_stacktop = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
				vmp->vm_arch.vm_seg[S].mem_len);
		if(sys_vmctl(vmp->vm_endpoint, VMCTL_INCSP,
			VM_STACKTOP - old_stacktop) != OK) {
			vm_panic("VM: vmctl for new stack failed", NO_NUM);
		}

		FREE_MEM(vmp->vm_arch.vm_seg[D].mem_phys +
			vmp->vm_arch.vm_seg[D].mem_len,
			old_stack);

		if(proc_new(vmp,
			VM_PROCSTART,
			CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_len),
			CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_len),
			BASICSTACK,
			CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
				vmp->vm_arch.vm_seg[S].mem_len -
				vmp->vm_arch.vm_seg[D].mem_len) - BASICSTACK,
			CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
			CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys),
				VM_STACKTOP) != OK) {
			vm_panic("failed proc_new for boot process", NO_NUM);
		}
	}

	/* Set up table of calls. */
#define CALLMAP(code, func) { int i;			      \
	if((i=CALLNUMBER(code)) < 0) { vm_panic(#code " invalid", (code)); } \
	if(i >= NR_VM_CALLS) { vm_panic(#code " invalid", (code)); } \
	vm_calls[i].vmc_func = (func); 				      \
	vm_calls[i].vmc_name = #code; 				      \
}

	/* Set call table to 0. This invalidates all calls (clear
	 * vmc_func).
	 */
	memset(vm_calls, 0, sizeof(vm_calls));

	/* Basic VM calls. */
	CALLMAP(VM_MMAP, do_mmap);
	CALLMAP(VM_MUNMAP, do_munmap);
	CALLMAP(VM_MUNMAP_TEXT, do_munmap);
	CALLMAP(VM_MAP_PHYS, do_map_phys);
	CALLMAP(VM_UNMAP_PHYS, do_unmap_phys);

	/* Calls from PM. */
	CALLMAP(VM_EXIT, do_exit);
	CALLMAP(VM_FORK, do_fork);
	CALLMAP(VM_BRK, do_brk);
	CALLMAP(VM_EXEC_NEWMEM, do_exec_newmem);
	CALLMAP(VM_PUSH_SIG, do_push_sig);
	CALLMAP(VM_WILLEXIT, do_willexit);
	CALLMAP(VM_ADDDMA, do_adddma);
	CALLMAP(VM_DELDMA, do_deldma);
	CALLMAP(VM_GETDMA, do_getdma);
	CALLMAP(VM_NOTIFY_SIG, do_notify_sig);

	/* Calls from RS */
	CALLMAP(VM_RS_SET_PRIV, do_rs_set_priv);

	/* Generic calls. */
	CALLMAP(VM_REMAP, do_remap);
	CALLMAP(VM_GETPHYS, do_get_phys);
	CALLMAP(VM_SHM_UNMAP, do_shared_unmap);
	CALLMAP(VM_GETREF, do_get_refcount);
	CALLMAP(VM_INFO, do_info);
	CALLMAP(VM_QUERY_EXIT, do_query_exit);

	/* Sanity checks */
	if(find_kernel_top() >= VM_PROCSTART)
		vm_panic("kernel loaded too high", NO_NUM);

	/* Initialize the structures for queryexit */
	init_query_exit();

	/* Unmap our own low pages. */
	unmap_ok = 1;
	_minix_unmapzero();

	/* Map all the services in the boot image. */
	if((s = sys_safecopyfrom(RS_PROC_NR, info->rproctab_gid, 0,
		(vir_bytes) rprocpub, sizeof(rprocpub), S)) != OK) {
		panic("VM", "sys_safecopyfrom failed", s);
	}
	for(i=0;i < NR_BOOT_PROCS;i++) {
		if(rprocpub[i].in_use) {
			if((s = map_service(&rprocpub[i])) != OK) {
				vm_panic("unable to map service", s);
			}
		}
	}

	return(OK);
}
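The CALLMAP macro above builds a dispatch table: each call code is reduced to an index with CALLNUMBER, range-checked, and mapped to a handler plus its stringified name, while unset slots stay NULL so unknown calls fail cleanly. A compilable miniature of the same pattern follows; the call codes, table size, and handlers are invented for this sketch.

#include <stdio.h>
#include <string.h>

/* Dispatch-table miniature mirroring the CALLMAP pattern: call codes
 * are offsets from a base, and each slot stores a handler plus its
 * stringified name for diagnostics. Codes, size, and handlers are
 * invented for this sketch.
 */
#define CALL_BASE	0x1000
#define NR_CALLS	4
#define CALLNUMBER(c)	((c) - CALL_BASE)

#define C_PING	(CALL_BASE + 0)
#define C_FORK	(CALL_BASE + 1)

struct call { int (*func)(void); const char *name; };
static struct call calls[NR_CALLS];

#define CALLMAP(code, f) do {					\
	int i_ = CALLNUMBER(code);				\
	if (i_ < 0 || i_ >= NR_CALLS) {				\
		fprintf(stderr, #code " invalid\n");		\
		return 1;					\
	}							\
	calls[i_].func = (f);					\
	calls[i_].name = #code;					\
} while (0)

static int do_ping(void) { printf("pong\n"); return 0; }
static int do_fork_stub(void) { printf("forked\n"); return 0; }

static int dispatch(int code)
{
	int i = CALLNUMBER(code);

	if (i < 0 || i >= NR_CALLS || calls[i].func == NULL)
		return -1;	/* ENOSYS: slot was never filled in */
	return calls[i].func();
}

int main(void)
{
	memset(calls, 0, sizeof(calls));	/* invalidate all slots */
	CALLMAP(C_PING, do_ping);
	CALLMAP(C_FORK, do_fork_stub);

	printf("ping: %d, unknown: %d\n",
		dispatch(C_PING), dispatch(CALL_BASE + 3));
	return 0;
}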
Code example #7
File: pagetable.c  Project: mwilbur/minix
/*===========================================================================*
 *                              pt_init                                      *
 *===========================================================================*/
PUBLIC void pt_init(phys_bytes usedlimit)
{
/* By default, the kernel gives us a data segment with pre-allocated
 * memory that then can't grow. We want to be able to allocate memory
 * dynamically, however. So here we copy the part of the page table
 * that's ours, so we get a private page table. Then we increase the
 * hardware segment size so we can allocate memory above our stack.
 */
        pt_t *newpt;
        int s, r;
        vir_bytes v;
        phys_bytes lo, hi; 
        vir_bytes extra_clicks;
        u32_t moveup = 0;
	int global_bit_ok = 0;
	int free_pde;
	int p;
	struct vm_ep_data ep_data;
	vir_bytes sparepages_mem;
	phys_bytes sparepages_ph;
	vir_bytes ptr;

        /* Shorthand. */
        newpt = &vmprocess->vm_pt;

        /* Get ourselves spare pages. */
        ptr = (vir_bytes) static_sparepages;
        ptr += I386_PAGE_SIZE - (ptr % I386_PAGE_SIZE);
        if(!(sparepages_mem = ptr))
		panic("pt_init: aalloc for spare failed");
        if((r=sys_umap(SELF, VM_D, (vir_bytes) sparepages_mem,
                I386_PAGE_SIZE*SPAREPAGES, &sparepages_ph)) != OK)
                panic("pt_init: sys_umap failed: %d", r);

        missing_spares = 0;
        assert(STATIC_SPAREPAGES < SPAREPAGES);
        for(s = 0; s < SPAREPAGES; s++) {
        	if(s >= STATIC_SPAREPAGES) {
        		sparepages[s].page = NULL;
        		missing_spares++;
        		continue;
        	}
        	sparepages[s].page = (void *) (sparepages_mem + s*I386_PAGE_SIZE);
        	sparepages[s].phys = sparepages_ph + s*I386_PAGE_SIZE;
        }

	/* global bit and 4MB pages available? */
	global_bit_ok = _cpufeature(_CPUF_I386_PGE);
	bigpage_ok = _cpufeature(_CPUF_I386_PSE);

	/* Set bit for PTE's and PDE's if available. */
	if(global_bit_ok)
		global_bit = I386_VM_GLOBAL;

	/* The kernel and boot time processes need an identity mapping.
	 * We use full PDE's for this without separate page tables.
	 * Figure out which pde we can start using for other purposes.
	 */
	id_map_high_pde = usedlimit / I386_BIG_PAGE_SIZE;

	/* We have to make mappings up till here. */
	free_pde = id_map_high_pde+1;

        /* Initial (current) range of our virtual address space. */
        lo = CLICK2ABS(vmprocess->vm_arch.vm_seg[T].mem_phys);
        hi = CLICK2ABS(vmprocess->vm_arch.vm_seg[S].mem_phys +
                vmprocess->vm_arch.vm_seg[S].mem_len);
                  
        assert(!(lo % I386_PAGE_SIZE)); 
        assert(!(hi % I386_PAGE_SIZE));
 
        if(lo < VM_PROCSTART) {
                moveup = VM_PROCSTART - lo;
                assert(!(VM_PROCSTART % I386_PAGE_SIZE));
                assert(!(lo % I386_PAGE_SIZE));
                assert(!(moveup % I386_PAGE_SIZE));
        }
        
        /* Make new page table for ourselves, partly copied
         * from the current one.
         */     
        if(pt_new(newpt) != OK)
                panic("pt_init: pt_new failed"); 

        /* Set up mappings for VM process. */
        for(v = lo; v < hi; v += I386_PAGE_SIZE)  {
                phys_bytes addr;
                u32_t flags; 
        
                /* We have to write the new position in the PT,
                 * so we can move our segments.
                 */ 
                if(pt_writemap(vmprocess, newpt, v+moveup, v, I386_PAGE_SIZE,
                        I386_VM_PRESENT|I386_VM_WRITE|I386_VM_USER, 0) != OK)
                        panic("pt_init: pt_writemap failed");
        }
       
        /* Move segments up too. */
        vmprocess->vm_arch.vm_seg[T].mem_phys += ABS2CLICK(moveup);
        vmprocess->vm_arch.vm_seg[D].mem_phys += ABS2CLICK(moveup);
        vmprocess->vm_arch.vm_seg[S].mem_phys += ABS2CLICK(moveup);
       
	/* Allocate us a page table in which to remember page directory
	 * pointers.
	 */
	if(!(page_directories = vm_allocpage(&page_directories_phys,
		VMP_PAGETABLE)))
                panic("no virt addr for vm mappings");

	memset(page_directories, 0, I386_PAGE_SIZE);
       
        /* Increase our hardware data segment to create virtual address
         * space above our stack. We want to increase it to VM_DATATOP,
         * like regular processes have.
         */
        extra_clicks = ABS2CLICK(VM_DATATOP - hi);
        vmprocess->vm_arch.vm_seg[S].mem_len += extra_clicks;
       
        /* We pretend to the kernel we have a huge stack segment to
         * increase our data segment.
         */
        vmprocess->vm_arch.vm_data_top =
                (vmprocess->vm_arch.vm_seg[S].mem_vir +
                vmprocess->vm_arch.vm_seg[S].mem_len) << CLICK_SHIFT;
       
        /* Where our free virtual address space starts.
         * This is only a hint to the VM system.
         */
        newpt->pt_virtop = 0;

        /* Let other functions know VM now has a private page table. */
        vmprocess->vm_flags |= VMF_HASPT;

	/* Now reserve another pde for kernel's own mappings. */
	{
		int kernmap_pde;
		phys_bytes addr, len;
		int flags, index = 0;
		u32_t offset = 0;

		kernmap_pde = free_pde++;
		offset = kernmap_pde * I386_BIG_PAGE_SIZE;

		while(sys_vmctl_get_mapping(index, &addr, &len,
			&flags) == OK)  {
			vir_bytes vir;
			if(index >= MAX_KERNMAPPINGS)
                		panic("VM: too many kernel mappings: %d", index);
			kern_mappings[index].phys_addr = addr;
			kern_mappings[index].len = len;
			kern_mappings[index].flags = flags;
			kern_mappings[index].lin_addr = offset;
			kern_mappings[index].flags =
				I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE |
				global_bit;
			if(flags & VMMF_UNCACHED)
				kern_mappings[index].flags |= PTF_NOCACHE;
			if(addr % I386_PAGE_SIZE)
                		panic("VM: addr unaligned: %d", addr);
			if(len % I386_PAGE_SIZE)
                		panic("VM: len unaligned: %d", len);
			vir = arch_map2vir(&vmproc[VMP_SYSTEM], offset);
			if(sys_vmctl_reply_mapping(index, vir) != OK)
                		panic("VM: reply failed");
			offset += len;
			index++;
			kernmappings++;
		}
	}

	/* Find a PDE below the process range that is available for
	 * mapping in the page directories (read-only).
	 */
	pagedir_pde = free_pde++;
	pagedir_pde_val = (page_directories_phys & I386_VM_ADDR_MASK) |
			I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;

	/* Tell kernel about free pde's. */
	while(free_pde*I386_BIG_PAGE_SIZE < VM_PROCSTART) {
		if((r=sys_vmctl(SELF, VMCTL_I386_FREEPDE, free_pde++)) != OK) {
			panic("VMCTL_I386_FREEPDE failed: %d", r);
		}
	}

	/* first pde in use by process. */
	proc_pde = free_pde;

        /* Give our process the new, copied, private page table. */
	pt_mapkernel(newpt);	/* didn't know about vm_dir pages earlier */
        pt_bind(newpt, vmprocess);
       
	/* new segment limit for the kernel after paging is enabled */
	ep_data.data_seg_limit = free_pde*I386_BIG_PAGE_SIZE;
	/* the memory map which must be installed after paging is enabled */
	ep_data.mem_map = vmprocess->vm_arch.vm_seg;

	/* Now actually enable paging. */
	if(sys_vmctl_enable_paging(&ep_data) != OK)
        	panic("pt_init: enable paging failed");

        /* Back to reality - this is where the stack actually is. */
        vmprocess->vm_arch.vm_seg[S].mem_len -= extra_clicks;

        /* Pretend VM stack top is the same as any regular process, not to
         * have discrepancies with new VM instances later on.
         */
        vmprocess->vm_stacktop = VM_STACKTOP;

        /* All OK. */
        return;
}
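The central move in this pt_init is relocation: VM's own segments initially sit below VM_PROCSTART, so every page in [lo, hi) is mapped at v+moveup with pt_writemap before the segment bases are bumped by ABS2CLICK(moveup). The standalone loop below shows just that address arithmetic; the addresses and page size are made up, and a printf stands in for pt_writemap.

#include <assert.h>
#include <stdio.h>

/* Relocation-loop sketch: walk an address range one page at a time
 * and emit (old, new) pairs shifted by a fixed offset, the way
 * pt_init maps [lo, hi) to [lo+moveup, hi+moveup). The addresses and
 * the page size are made up; printf stands in for pt_writemap.
 */
#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long lo = 0x10000UL, hi = 0x14000UL;
	unsigned long procstart = 0x80000UL, moveup = 0;
	unsigned long v;

	assert(lo % PAGE_SIZE == 0 && hi % PAGE_SIZE == 0);

	if (lo < procstart)
		moveup = procstart - lo;  /* page-aligned by construction */

	for (v = lo; v < hi; v += PAGE_SIZE)
		printf("map %#lx -> %#lx\n", v, v + moveup);
	return 0;
}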
Code example #8
/*===========================================================================*
 *				vm_init					     *
 *===========================================================================*/
static void vm_init(void)
{
    int s, i;
    int click, clicksforgotten = 0;
    struct memory mem_chunks[NR_MEMS];
    struct boot_image image[NR_BOOT_PROCS];
    struct boot_image *ip;
    phys_bytes limit = 0;

    /* The initrd is placed right after the boot image. */
    if ((s = sys_getbootparam(&bootparam)) != 0) {
        panic("VM","Couldn't get boot parameters!",s);
    }

    /* get what setup found out */
    memcpy(mem_chunks, bootparam.nucleos_kludge.mem, sizeof(bootparam.nucleos_kludge.mem));

    /* Get chunks of available memory. */
    get_mem_chunks(mem_chunks);

    /* Initialize VM's process table. Request a copy of the system
     * image table that is defined at the kernel level to see which
     * slots to fill in.
     */
    if ((s=sys_getimage(image)) != 0)
        vm_panic("couldn't get image table: %d\n", s);

    /* Set table to 0. This invalidates all slots (clear VMF_INUSE). */
    memset(vmproc, 0, sizeof(vmproc));

    for(i = 0; i < ELEMENTS(vmproc); i++) {
        vmproc[i].vm_slot = i;
    }

    /* Walk through boot-time system processes that are alive
     * now and make valid slot entries for them.
     */
    for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
        phys_bytes proclimit;
        struct vmproc *vmp;

        if(ip->proc_nr >= NR_PROCS) {
            vm_panic("proc", ip->proc_nr);
        }
        if(ip->proc_nr < 0 && ip->proc_nr != SYSTEM) continue;

#define GETVMP(v, nr)						\
		if(nr >= 0) {					\
			vmp = &vmproc[ip->proc_nr];		\
		} else if(nr == SYSTEM) {			\
			vmp = &vmproc[VMP_SYSTEM];		\
		} else {					\
			vm_panic("init: crazy proc_nr", nr);	\
		}

        /* Initialize normal process table slot or special SYSTEM
         * table slot. Kernel memory is already reserved.
         */
        GETVMP(vmp, ip->proc_nr);

        /* reset fields as if exited */
        clear_proc(vmp);

        /* Get memory map for this process from the kernel. */
        if ((s=get_mem_map(ip->proc_nr, vmp->vm_arch.vm_seg)) != 0)
            vm_panic("couldn't get process mem_map",s);

        /* Remove this memory from the free list. */
        reserve_proc_mem(mem_chunks, vmp->vm_arch.vm_seg);

        /* Set memory limit. */
        proclimit = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_phys +
                              vmp->vm_arch.vm_seg[S].mem_len) - 1;

        if(proclimit > limit)
            limit = proclimit;

        vmp->vm_flags = VMF_INUSE;
        vmp->vm_endpoint = ip->endpoint;
        vmp->vm_stacktop =
            CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
                      vmp->vm_arch.vm_seg[S].mem_len);
    }

#ifndef CONFIG_BUILTIN_INITRD
    /* Remove initrd memory from the free list. This must happen right
     * after memory for the boot image has been reserved; otherwise the
     * initrd may be overwritten by another process (in arch_init_vm).
     */
    if ((s = reserve_initrd_mem(mem_chunks, bootparam.hdr.ramdisk_image,
                                bootparam.hdr.ramdisk_size)) < 0) {
        panic("VM", "Couldn't reserve memory for initial ramdisk!", s);
    }
#endif
    /* Architecture-dependent initialization. */
    pt_init(limit);

    /* Initialize tables to all physical memory. */
    mem_init(mem_chunks);
    meminit_done = 1;

    /* Give these processes their own page table. */
    for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
        int s;
        struct vmproc *vmp;
        vir_bytes old_stacktop, old_stack;

        if(ip->proc_nr < 0) continue;

        GETVMP(vmp, ip->proc_nr);

        if(!(ip->flags & PROC_FULLVM))
            continue;

        old_stack =
            vmp->vm_arch.vm_seg[S].mem_vir +
            vmp->vm_arch.vm_seg[S].mem_len -
            vmp->vm_arch.vm_seg[D].mem_len;

        if(pt_new(&vmp->vm_pt) != 0)
            vm_panic("vm_init: no new pagetable", NO_NUM);
#define BASICSTACK VM_PAGE_SIZE
        old_stacktop = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
                                 vmp->vm_arch.vm_seg[S].mem_len);
        if(sys_vmctl(vmp->vm_endpoint, VMCTL_INCSP,
                     VM_STACKTOP - old_stacktop) != 0) {
            vm_panic("VM: vmctl for new stack failed", NO_NUM);
        }

        FREE_MEM(vmp->vm_arch.vm_seg[D].mem_phys +
                 vmp->vm_arch.vm_seg[D].mem_len,
                 old_stack);

        if(proc_new(vmp,
                    VM_PROCSTART,
                    CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_len),
                    CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_len),
                    BASICSTACK,
                    CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
                              vmp->vm_arch.vm_seg[S].mem_len -
                              vmp->vm_arch.vm_seg[D].mem_len) - BASICSTACK,
                    CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
                    CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys),
                    VM_STACKTOP) != 0) {
            vm_panic("failed proc_new for boot process", NO_NUM);
        }
    }

    /* Set up table of calls. */
#define CALLMAP(code, func, thecaller) { int i;			      \
	if((i=CALLNUMBER(code)) < 0) { vm_panic(#code " invalid", (code)); } \
	if(i >= VM_NCALLS) { vm_panic(#code " invalid", (code)); } \
	vm_calls[i].vmc_func = (func); 				      \
	vm_calls[i].vmc_name = #code; 				      \
	if(((thecaller) < MINEPM || (thecaller) > MAXEPM) 		\
		&& (thecaller) != ANYEPM				\
		&& (thecaller) != NEEDACL ) {				\
		vm_panic(#thecaller " invalid", (code));  		\
	}								\
	vm_calls[i].vmc_callers |= EPM(thecaller);		      \
}

    /* Set call table to 0. This invalidates all calls (clear
     * vmc_func).
     */
    memset(vm_calls, 0, sizeof(vm_calls));

    /* Requests from PM (restricted to be from PM only). */
    CALLMAP(VM_EXIT, do_exit, PM_PROC_NR);
    CALLMAP(VM_FORK, do_fork, PM_PROC_NR);
    CALLMAP(VM_BRK, do_brk, PM_PROC_NR);
    CALLMAP(VM_EXEC_NEWMEM, do_exec_newmem, PM_PROC_NR);
    CALLMAP(VM_PUSH_SIG, do_push_sig, PM_PROC_NR);
    CALLMAP(VM_WILLEXIT, do_willexit, PM_PROC_NR);
    CALLMAP(VM_ADDDMA, do_adddma, PM_PROC_NR);
    CALLMAP(VM_DELDMA, do_deldma, PM_PROC_NR);
    CALLMAP(VM_GETDMA, do_getdma, PM_PROC_NR);
    CALLMAP(VM_ALLOCMEM, do_allocmem, PM_PROC_NR);
    CALLMAP(VM_NOTIFY_SIG, do_notify_sig, PM_PROC_NR);

    /* Requests from RS */
    CALLMAP(VM_RS_SET_PRIV, do_rs_set_priv, RS_PROC_NR);

    /* Requests from userland (source unrestricted). */
    CALLMAP(VM_MMAP, do_mmap, ANYEPM);
    CALLMAP(VM_MUNMAP, do_munmap, ANYEPM);
    CALLMAP(VM_MUNMAP_TEXT, do_munmap, ANYEPM);
    CALLMAP(VM_MAP_PHYS, do_map_phys, ANYEPM); /* Does its own checking. */
    CALLMAP(VM_UNMAP_PHYS, do_unmap_phys, ANYEPM);

    CALLMAP(NNR_VM_MMAP, scall_mmap, ANYEPM);
    CALLMAP(NNR_VM_MUNMAP, scall_munmap, ANYEPM);
    CALLMAP(NNR_VM_MUNMAP_TEXT, scall_munmap, ANYEPM);

    /* Requests from userland (anyone can call but need an ACL bit). */
    CALLMAP(VM_REMAP, do_remap, NEEDACL);
    CALLMAP(VM_GETPHYS, do_get_phys, NEEDACL);
    CALLMAP(VM_SHM_UNMAP, do_shared_unmap, NEEDACL);
    CALLMAP(VM_GETREF, do_get_refcount, NEEDACL);
    CALLMAP(VM_CTL, do_ctl, NEEDACL);
    CALLMAP(VM_QUERY_EXIT, do_query_exit, NEEDACL);

    /* Sanity checks */
    if(find_kernel_top() >= VM_PROCSTART)
        vm_panic("kernel loaded too high", NO_NUM);

    /* Initialize the structures for queryexit */
    init_query_exit();

    /* Unmap our own low pages. */
    unmap_ok = 1;
    unmap_page_zero();
}
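This vm_init variant extends the call table with an access-control list: every slot accumulates a bitmask of permitted callers via EPM(thecaller), which is checked at dispatch time, with ANYEPM and NEEDACL as escape hatches. A minimal sketch of such a caller bitmask, using invented endpoint numbers and a simplified EPM encoding (the real MINIX macro differs):

#include <stdio.h>

/* Caller-ACL sketch: each call slot carries a bitmask of endpoints
 * allowed to invoke it, like vm_calls[i].vmc_callers above. The
 * endpoint numbers and this EPM() encoding are invented; the real
 * MINIX macro differs.
 */
#define EPM(ep) (1u << (ep))

enum { PM_EP = 0, RS_EP = 1, USER_EP = 5 };

struct call { int (*func)(void); unsigned callers; };

static int do_exit_stub(void) { printf("exit handled\n"); return 0; }

static int dispatch(const struct call *c, int caller)
{
	if (!(c->callers & EPM(caller)))
		return -1;	/* EPERM: caller not in the ACL */
	return c->func();
}

int main(void)
{
	/* Only PM may invoke this call, as with VM_EXIT above. */
	struct call exit_call = { do_exit_stub, EPM(PM_EP) };

	printf("PM: %d, user: %d\n",
		dispatch(&exit_call, PM_EP),
		dispatch(&exit_call, USER_EP));
	return 0;
}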