Example #1
/*===========================================================================*
 *				arch_map2info				     *
 *===========================================================================*/
PUBLIC vir_bytes arch_map2info(struct vmproc *vmp, vir_bytes addr, int *seg,
	int *prot)
{
	vir_bytes textstart = CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys);
	vir_bytes textend = textstart +
		CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_len);
	vir_bytes datastart = CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys);

	/* The protection to be returned here is that of the segment. */
	if(addr < textstart) {
		*seg = D;
		*prot = PROT_READ | PROT_WRITE | PROT_EXEC;
		return addr;
	} else if(addr < datastart) {
		*seg = T;
		*prot = PROT_READ | PROT_EXEC;
		return addr - textstart;
	} else {
		*seg = D;
		if (textstart == textend)	/* common I&D? */
			*prot = PROT_READ | PROT_WRITE | PROT_EXEC;
		else
			*prot = PROT_READ | PROT_WRITE;
		return addr - datastart;
	}
}
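
For reference, here is a minimal, self-contained sketch (not taken from the VM server) of the same click arithmetic and segment test. CLICK_SHIFT, the two-entry segment layout and the sample address are all made-up values; it only illustrates how CLICK2ABS() and the textstart/datastart comparisons partition a linear address into a segment plus offset.

#include <stdio.h>

#define CLICK_SHIFT	12			/* assumption: 4 KiB clicks */
#define CLICK2ABS(c)	((unsigned long)(c) << CLICK_SHIFT)

enum { T, D };					/* text, data */

struct seg { unsigned long mem_phys, mem_len; };	/* in clicks */

int main(void)
{
	/* hypothetical layout: text at click 0x100 (2 clicks), data at 0x102 */
	struct seg segs[2] = { { 0x100, 2 }, { 0x102, 8 } };
	unsigned long addr = CLICK2ABS(0x101) + 0x10;	/* lands inside text */

	unsigned long textstart = CLICK2ABS(segs[T].mem_phys);
	unsigned long datastart = CLICK2ABS(segs[D].mem_phys);

	if(addr < textstart)
		printf("0x%lx: below text, treated as data\n", addr);
	else if(addr < datastart)
		printf("0x%lx: text segment, offset 0x%lx\n", addr, addr - textstart);
	else
		printf("0x%lx: data segment, offset 0x%lx\n", addr, addr - datastart);
	return 0;
}
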
Example #2
/*===========================================================================*
 *				mem_init				     *
 *===========================================================================*/
void mem_init(struct memory *chunks)
{
/* Initialize hole lists.  There are two lists: 'hole_head' points to a linked
 * list of all the holes (unused memory) in the system; 'free_slots' points to
 * a linked list of table entries that are not in use.  Initially, the former
 * list has one entry for each chunk of physical memory, and the second
 * list links together the remaining table slots.  As memory becomes more
 * fragmented in the course of time (i.e., the initial big holes break up into
 * smaller holes), new table slots are needed to represent them.  These slots
 * are taken from the list headed by 'free_slots'.
 */
  int i, first = 1;	/* force mem_low/mem_high init on the first chunk */

  total_pages = 0;

  memset(free_pages_bitmap, 0, sizeof(free_pages_bitmap));

  /* Use the chunks of physical memory to allocate holes. */
  for (i=NR_MEMS-1; i>=0; i--) {
  	if (chunks[i].size > 0) {
		phys_bytes from = CLICK2ABS(chunks[i].base),
			to = CLICK2ABS(chunks[i].base+chunks[i].size)-1;
		if(first || from < mem_low) mem_low = from;
		if(first || to > mem_high) mem_high = to;
		free_mem(chunks[i].base, chunks[i].size);
		total_pages += chunks[i].size;
		first = 0;
	}
  }
}
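
The comment above describes two linked lists. The stand-alone model below (tiny fixed-size table, no coalescing, all names hypothetical) shows just that bookkeeping: every table slot starts out on free_slots, and add_hole() moves a slot onto hole_head for each chunk of memory.

#include <stdio.h>
#include <stddef.h>

#define NR_HOLES 8				/* assumption: tiny table */

struct hole {
	unsigned long h_base, h_len;		/* in clicks */
	struct hole *h_next;
};

static struct hole hole[NR_HOLES];
static struct hole *hole_head;			/* list of holes */
static struct hole *free_slots;			/* list of unused table slots */

static void holes_init(void)
{
	int i;
	/* Initially every table slot is on the free_slots list. */
	for(i = 0; i < NR_HOLES - 1; i++) hole[i].h_next = &hole[i+1];
	hole[NR_HOLES-1].h_next = NULL;
	hole_head = NULL;
	free_slots = &hole[0];
}

static void add_hole(unsigned long base, unsigned long len)
{
	struct hole *hp = free_slots;		/* take an unused slot... */
	if(!hp) return;				/* table full */
	free_slots = hp->h_next;
	hp->h_base = base;
	hp->h_len = len;
	hp->h_next = hole_head;			/* ...and chain it onto hole_head */
	hole_head = hp;
}

int main(void)
{
	struct hole *hp;
	holes_init();
	add_hole(0x100, 32);			/* one entry per memory chunk */
	add_hole(0x800, 64);
	for(hp = hole_head; hp; hp = hp->h_next)
		printf("hole: base 0x%lx, %lu clicks\n", hp->h_base, hp->h_len);
	return 0;
}
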
Example #3
/*===========================================================================*
 *				arch_map2vir				     *
 *===========================================================================*/
PUBLIC vir_bytes arch_map2vir(struct vmproc *vmp, vir_bytes addr)
{
	vir_bytes textstart = CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys);
	vir_bytes datastart = CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys);

	/* Could be a text address. */
	vm_assert(datastart <= addr || textstart <= addr);

	return addr - datastart;
}
Example #4
/*===========================================================================*
 *				arch_addrok				     *
 *===========================================================================*/
PUBLIC vir_bytes arch_addrok(struct vmproc *vmp, vir_bytes addr)
{
	vir_bytes textstart = CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys);
	vir_bytes textend = CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys +
		vmp->vm_arch.vm_seg[T].mem_len);
	vir_bytes datastart = CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys);

	if(addr >= textstart && addr < textend)
		return 1;

	if(addr >= datastart && addr < VM_DATATOP)
		return 1;

	return 0;
}
Example #5
int _brk(void *addr)
{
	vir_bytes target = roundup((vir_bytes)addr, VM_PAGE_SIZE), v;
	extern char _end;
	extern char *_brksize;
	static vir_bytes prevbrk = (vir_bytes) &_end;
	struct vmproc *vmprocess = &vmproc[VM_PROC_NR];

	for(v = roundup(prevbrk, VM_PAGE_SIZE); v < target;
		v += VM_PAGE_SIZE) {
		phys_bytes mem, newpage = alloc_mem(1, 0);
		if(newpage == NO_MEM) return -1;
		mem = CLICK2ABS(newpage);
		if(pt_writemap(vmprocess, &vmprocess->vm_pt,
			v, mem, VM_PAGE_SIZE,
			  ARCH_VM_PTE_PRESENT
			| ARCH_VM_PTE_USER
			| ARCH_VM_PTE_RW
#if defined(__arm__)
			| ARM_VM_PTE_WB
#endif
			, 0) != OK) {
			free_mem(newpage, 1);
			return -1;
		}
		prevbrk = v + VM_PAGE_SIZE;
	}

	_brksize = (char *) addr;

	if(sys_vmctl(SELF, VMCTL_FLUSHTLB, 0) != OK)
		panic("flushtlb failed");

	return 0;
}
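
The loop above grows the break one page at a time. The sketch below is a stripped-down, runnable model of that structure; alloc_page(), map_page() and the page size are stand-ins (the real code uses alloc_mem() and pt_writemap()), so treat it as an illustration of the roundup-and-fill pattern only.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE	4096UL
#define ROUNDUP(v, n)	(((v) + (n) - 1) / (n) * (n))

static unsigned long alloc_page(void) { return (unsigned long)malloc(PAGE_SIZE); }
static int map_page(unsigned long vaddr, unsigned long paddr)
{
	printf("map 0x%lx -> 0x%lx\n", vaddr, paddr);
	return 0;
}

static unsigned long brk_now = 0x10000;		/* hypothetical current break */

static int grow_brk(unsigned long target)
{
	unsigned long v;
	for(v = ROUNDUP(brk_now, PAGE_SIZE); v < ROUNDUP(target, PAGE_SIZE);
		v += PAGE_SIZE) {
		unsigned long page = alloc_page();
		if(!page) return -1;		/* out of memory */
		if(map_page(v, page) != 0) return -1;
		brk_now = v + PAGE_SIZE;
	}
	return 0;
}

int main(void)
{
	return grow_brk(0x12345);		/* grows the break by three pages */
}
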
Example #6
static int
reservedqueue_addslot(struct reserved_pages *rq)
{
	phys_bytes cl, cl_addr;
	void *vir;
	struct reserved_pageslot *rps;

	sanitycheck_rq(rq);

	if((cl = alloc_mem(rq->npages, rq->allocflags)) == NO_MEM)
		return ENOMEM;

	cl_addr = CLICK2ABS(cl);

	vir = NULL;

	if(rq->mappedin) {
		if(!(vir = vm_mappages(cl_addr, rq->npages))) {
			free_mem(cl, rq->npages);
			printf("reservedqueue_addslot: vm_mappages failed\n");
			return ENOMEM;
		}
	}

	rps = &rq->slots[rq->n_available];

	reservedqueue_fillslot(rq, rps, cl_addr, vir);

	return OK;
}
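
The error handling above follows a fixed order: allocate first, and if the follow-up mapping step fails, release the allocation before reporting ENOMEM. The stand-alone sketch below (hypothetical helpers, malloc standing in for alloc_mem) shows only that cleanup-on-failure ordering.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static void *alloc_pages(int n) { return malloc(n * 4096); }
static void free_pages(void *p) { free(p); }
static void *map_pages(void *p, int n) { (void)n; return p; }	/* pretend success */

static int add_slot(void **out, int npages, int mapped_in)
{
	void *mem, *vir = NULL;

	if(!(mem = alloc_pages(npages)))
		return ENOMEM;

	if(mapped_in) {
		if(!(vir = map_pages(mem, npages))) {
			free_pages(mem);	/* undo the allocation */
			return ENOMEM;
		}
	}
	*out = vir ? vir : mem;
	return 0;
}

int main(void)
{
	void *slot;
	if(add_slot(&slot, 2, 1) == 0)
		printf("slot at %p\n", slot);
	return 0;
}
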
Example #7
/*===========================================================================*
 *				arch_vir2map_text			     *
 *===========================================================================*/
vir_bytes arch_vir2map_text(struct vmproc *vmp, vir_bytes addr)
{
    vir_bytes textsegbase = CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys -
                                      vmp->vm_arch.vm_seg[T].mem_vir);

    return addr + textsegbase;
}
Example #8
/*===========================================================================*
 *				arch_vir2map				     *
 *===========================================================================*/
vir_bytes arch_vir2map(struct vmproc *vmp, vir_bytes addr)
{
    vir_bytes datasegbase = CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys -
                                      vmp->vm_arch.vm_seg[D].mem_vir);

    return addr + datasegbase;
}
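
Taken together with arch_map2vir() in Example #3, the conversion is just an offset by the data segment base. The toy program below (CLICK_SHIFT and the segment values are assumptions, and mem_vir is taken to be 0 so that datastart and datasegbase coincide) checks that the two conversions are inverses.

#include <assert.h>
#include <stdio.h>

#define CLICK_SHIFT	12			/* assumption: 4 KiB clicks */
#define CLICK2ABS(c)	((unsigned long)(c) << CLICK_SHIFT)

int main(void)
{
	unsigned long mem_phys = 0x200, mem_vir = 0;	/* hypothetical segment */
	unsigned long datasegbase = CLICK2ABS(mem_phys - mem_vir);
	unsigned long vir = 0x1234;

	unsigned long map = vir + datasegbase;		/* arch_vir2map() */
	assert(map - datasegbase == vir);		/* arch_map2vir() */
	printf("vir 0x%lx <-> map 0x%lx (base 0x%lx)\n", vir, map, datasegbase);
	return 0;
}
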
Example #9
/*===========================================================================*
 *				arch_map2str				     *
 *===========================================================================*/
PUBLIC char *arch_map2str(struct vmproc *vmp, vir_bytes addr)
{
	static char bufstr[100];
	vir_bytes textstart = CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys);
	vir_bytes textend = textstart + CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_len);
	vir_bytes datastart = CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys);

	if(addr < textstart) {
		sprintf(bufstr, "<lin:0x%lx>", addr);
	} else if(addr < datastart) {
		sprintf(bufstr, "0x%lx (codeseg)", addr - textstart);
	} else {
		sprintf(bufstr, "0x%lx (dataseg)", addr - datastart);
	}

	return bufstr;
}
Example #10
static int anon_contig_new(struct vir_region *region)
{
        u32_t allocflags;
	phys_bytes new_pages, new_page_cl, cur_ph;
	phys_bytes p, pages;

        allocflags = vrallocflags(region->flags);

	pages = region->length/VM_PAGE_SIZE;

	assert(physregions(region) == 0);

	for(p = 0; p < pages; p++) {
		struct phys_block *pb = pb_new(MAP_NONE);
		struct phys_region *pr = NULL;
		if(pb)
			pr = pb_reference(pb, p * VM_PAGE_SIZE, region, &mem_type_anon_contig);
		if(!pr) {
			if(pb) pb_free(pb);
			map_free(region);
			return ENOMEM;
		}
	}

	assert(physregions(region) == pages);

	if((new_page_cl = alloc_mem(pages, allocflags)) == NO_MEM) {
		map_free(region);
		return ENOMEM;
	}

	cur_ph = new_pages = CLICK2ABS(new_page_cl);

	for(p = 0; p < pages; p++) {
		struct phys_region *pr = physblock_get(region, p * VM_PAGE_SIZE);
		assert(pr);
		assert(pr->ph);
		assert(pr->ph->phys == MAP_NONE);
		assert(pr->offset == p * VM_PAGE_SIZE);
		pr->ph->phys = cur_ph + pr->offset;
	}

	return OK;
}
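
The second loop above only has to hand out consecutive physical addresses from the contiguous run returned by alloc_mem(). The stand-alone sketch below (toy struct and page size) shows that offset arithmetic in isolation.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define NPAGES		4

struct page_rec { unsigned long offset, phys; };	/* stand-in for phys_region */

int main(void)
{
	struct page_rec recs[NPAGES];
	unsigned long base = 0x200000;		/* hypothetical contiguous run */
	unsigned long p;

	for(p = 0; p < NPAGES; p++) recs[p].offset = p * PAGE_SIZE;

	/* Hand out consecutive physical addresses: base + per-page offset. */
	for(p = 0; p < NPAGES; p++) {
		recs[p].phys = base + recs[p].offset;
		printf("page %lu: phys 0x%lx\n", p, recs[p].phys);
	}
	return 0;
}
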
Example #11
/*===========================================================================*
 *				vm_allocpages		     		     *
 *===========================================================================*/
void *vm_allocpages(phys_bytes *phys, int reason, int pages)
{
    /* Allocate a page for use by VM itself. */
    phys_bytes newpage;
    vir_bytes loc;
    pt_t *pt;
    int r;
    static int level = 0;
    void *ret;
    u32_t mem_flags = 0;

    pt = &vmprocess->vm_pt;
    assert(reason >= 0 && reason < VMP_CATEGORIES);

    assert(pages > 0);

    level++;

    assert(level >= 1);
    assert(level <= 2);

    if((level > 1) || !pt_init_done) {
        void *s;

        if(pages == 1) s=vm_getsparepage(phys);
        else if(pages == 4) s=vm_getsparepagedir(phys);
        else panic("%d pages", pages);

        level--;
        if(!s) {
            util_stacktrace();
            printf("VM: warning: out of spare pages\n");
        }
        if(!is_staticaddr(s)) vm_self_pages++;
        return s;
    }

#if defined(__arm__)
    if (reason == VMP_PAGEDIR) {
        mem_flags |= PAF_ALIGN16K;
    }
#endif

    /* VM does have a pagetable, so get a page and map it in there.
     * Where in our virtual address space can we put it?
     */
    loc = findhole(pages);
    if(loc == NO_MEM) {
        level--;
        printf("VM: vm_allocpage: findhole failed\n");
        return NULL;
    }

    /* Allocate page of memory for use by VM. As VM
     * is trusted, we don't have to pre-clear it.
     */
    if((newpage = alloc_mem(pages, mem_flags)) == NO_MEM) {
        level--;
        printf("VM: vm_allocpage: alloc_mem failed\n");
        return NULL;
    }

    *phys = CLICK2ABS(newpage);

    /* Map this page into our address space. */
    if((r=pt_writemap(vmprocess, pt, loc, *phys, VM_PAGE_SIZE*pages,
                      ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW
#if defined(__arm__)
                      | ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE
#endif
                      , 0)) != OK) {
        free_mem(newpage, pages);
        printf("vm_allocpage writemap failed\n");
        level--;
        return NULL;
    }

    if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
        panic("VMCTL_FLUSHTLB failed: %d", r);
    }

    level--;

    /* Return user-space-ready pointer to it. */
    ret = (void *) loc;

    vm_self_pages++;
    return ret;
}
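
The static 'level' counter and the spare-page path deserve a closer look: vm_allocpages() can be re-entered while it is still mapping the page it just allocated (a page table may itself need a page), and in that case it must not recurse into the normal allocator. The sketch below is a self-contained model of that guard, with malloc() standing in for alloc_mem() and map_page() standing in for pt_writemap(); all names are hypothetical.

#include <stdio.h>
#include <stdlib.h>

#define NSPARES 4

static void *spares[NSPARES];
static int nspares;

static void *get_spare(void) { return nspares > 0 ? spares[--nspares] : NULL; }

static void *alloc_page(void);

/* Mapping a freshly allocated page may itself need a page (for a page
 * table); that nested need is what the level guard protects against. */
static void *map_page(void *page)
{
	void *ptpage = alloc_page();		/* nested allocation */
	(void)ptpage;				/* a real mapper would use it */
	return page;
}

static void *alloc_page(void)
{
	static int level = 0;
	void *p;

	level++;
	if(level > 1) {				/* re-entered: hand out a spare */
		p = get_spare();
		level--;
		return p;
	}
	p = malloc(4096);			/* normal allocation path */
	p = map_page(p);			/* may re-enter alloc_page() */
	level--;
	return p;
}

int main(void)
{
	static char spare_mem[NSPARES][4096];
	int i;

	for(i = 0; i < NSPARES; i++) spares[nspares++] = spare_mem[i];
	printf("allocated %p\n", alloc_page());
	return 0;
}
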
Example #12
/*===========================================================================*
 *				vm_allocpages		     		     *
 *===========================================================================*/
void *vm_allocpages(phys_bytes *phys, int reason, int pages)
{
    /* Allocate a page for use by VM itself. */
    phys_bytes newpage;
    static int level = 0;
    void *ret;
    u32_t mem_flags = 0;

    assert(reason >= 0 && reason < VMP_CATEGORIES);

    assert(pages > 0);

    level++;

    assert(level >= 1);
    assert(level <= 2);

    if((level > 1) || !pt_init_done) {
        void *s;

        if(pages == 1) s=vm_getsparepage(phys);
        else if(pages == 4) s=vm_getsparepagedir(phys);
        else panic("%d pages", pages);

        level--;
        if(!s) {
            util_stacktrace();
            printf("VM: warning: out of spare pages\n");
        }
        if(!is_staticaddr(s)) vm_self_pages++;
        return s;
    }

#if defined(__arm__)
    if (reason == VMP_PAGEDIR) {
        mem_flags |= PAF_ALIGN16K;
    }
#endif

    /* Allocate page of memory for use by VM. As VM
     * is trusted, we don't have to pre-clear it.
     */
    if((newpage = alloc_mem(pages, mem_flags)) == NO_MEM) {
        level--;
        printf("VM: vm_allocpage: alloc_mem failed\n");
        return NULL;
    }

    *phys = CLICK2ABS(newpage);

    if(!(ret = vm_mappages(*phys, pages))) {
        level--;
        printf("VM: vm_allocpage: vm_mappages failed\n");
        return NULL;
    }

    level--;
    vm_self_pages++;

    return ret;
}
Example #13
/*===========================================================================*
 *				vm_init					     *
 *===========================================================================*/
static void vm_init(void)
{
    int s, i;
    int click, clicksforgotten = 0;
    struct memory mem_chunks[NR_MEMS];
    struct boot_image image[NR_BOOT_PROCS];
    struct boot_image *ip;
    phys_bytes limit = 0;

    /* The initrd is put right after boot image */
    if ((s = sys_getbootparam(&bootparam)) != 0) {
        panic("VM","Couldn't get boot parameters!",s);
    }

    /* get what setup found out */
    memcpy(mem_chunks, bootparam.nucleos_kludge.mem, sizeof(bootparam.nucleos_kludge.mem));

    /* Get chunks of available memory. */
    get_mem_chunks(mem_chunks);

    /* Initialize VM's process table. Request a copy of the system
     * image table that is defined at the kernel level to see which
     * slots to fill in.
     */
    if ((s=sys_getimage(image)) != 0)
        vm_panic("couldn't get image table: %d\n", s);

    /* Set table to 0. This invalidates all slots (clear VMF_INUSE). */
    memset(vmproc, 0, sizeof(vmproc));

    for(i = 0; i < ELEMENTS(vmproc); i++) {
        vmproc[i].vm_slot = i;
    }

    /* Walk through boot-time system processes that are alive
     * now and make valid slot entries for them.
     */
    for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
        phys_bytes proclimit;
        struct vmproc *vmp;

        if(ip->proc_nr >= NR_PROCS) {
            vm_panic("proc", ip->proc_nr);
        }
        if(ip->proc_nr < 0 && ip->proc_nr != SYSTEM) continue;

#define GETVMP(v, nr)						\
		if(nr >= 0) {					\
			vmp = &vmproc[ip->proc_nr];		\
		} else if(nr == SYSTEM) {			\
			vmp = &vmproc[VMP_SYSTEM];		\
		} else {					\
			vm_panic("init: crazy proc_nr", nr);	\
		}

        /* Initialize normal process table slot or special SYSTEM
         * table slot. Kernel memory is already reserved.
         */
        GETVMP(vmp, ip->proc_nr);

        /* reset fields as if exited */
        clear_proc(vmp);

        /* Get memory map for this process from the kernel. */
        if ((s=get_mem_map(ip->proc_nr, vmp->vm_arch.vm_seg)) != 0)
            vm_panic("couldn't get process mem_map",s);

        /* Remove this memory from the free list. */
        reserve_proc_mem(mem_chunks, vmp->vm_arch.vm_seg);

        /* Set memory limit. */
        proclimit = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_phys +
                              vmp->vm_arch.vm_seg[S].mem_len) - 1;

        if(proclimit > limit)
            limit = proclimit;

        vmp->vm_flags = VMF_INUSE;
        vmp->vm_endpoint = ip->endpoint;
        vmp->vm_stacktop =
            CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
                      vmp->vm_arch.vm_seg[S].mem_len);
    }

#ifndef CONFIG_BUILTIN_INITRD
    /* Remove initrd memory from the free list. We must do it right after we
       have reserved memory for boot image otherwise it may happen that initrd
       will be overwritten by other process (in arch_init_vm).
     */
    if ((s = reserve_initrd_mem(mem_chunks, bootparam.hdr.ramdisk_image,
                                bootparam.hdr.ramdisk_size)) < 0) {
        panic("VM", "Couldn't reserve memory for initial ramdisk!", s);
    }
#endif
    /* Architecture-dependent initialization. */
    pt_init(limit);

    /* Initialize tables to all physical memory. */
    mem_init(mem_chunks);
    meminit_done = 1;

    /* Give these processes their own page table. */
    for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
        int s;
        struct vmproc *vmp;
        vir_bytes old_stacktop, old_stack;

        if(ip->proc_nr < 0) continue;

        GETVMP(vmp, ip->proc_nr);

        if(!(ip->flags & PROC_FULLVM))
            continue;

        old_stack =
            vmp->vm_arch.vm_seg[S].mem_vir +
            vmp->vm_arch.vm_seg[S].mem_len -
            vmp->vm_arch.vm_seg[D].mem_len;

        if(pt_new(&vmp->vm_pt) != 0)
            vm_panic("vm_init: no new pagetable", NO_NUM);
#define BASICSTACK VM_PAGE_SIZE
        old_stacktop = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
                                 vmp->vm_arch.vm_seg[S].mem_len);
        if(sys_vmctl(vmp->vm_endpoint, VMCTL_INCSP,
                     VM_STACKTOP - old_stacktop) != 0) {
            vm_panic("VM: vmctl for new stack failed", NO_NUM);
        }

        FREE_MEM(vmp->vm_arch.vm_seg[D].mem_phys +
                 vmp->vm_arch.vm_seg[D].mem_len,
                 old_stack);

        if(proc_new(vmp,
                    VM_PROCSTART,
                    CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_len),
                    CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_len),
                    BASICSTACK,
                    CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
                              vmp->vm_arch.vm_seg[S].mem_len -
                              vmp->vm_arch.vm_seg[D].mem_len) - BASICSTACK,
                    CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
                    CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys),
                    VM_STACKTOP) != 0) {
            vm_panic("failed proc_new for boot process", NO_NUM);
        }
    }

    /* Set up table of calls. */
#define CALLMAP(code, func, thecaller) { int i;			      \
	if((i=CALLNUMBER(code)) < 0) { vm_panic(#code " invalid", (code)); } \
	if(i >= VM_NCALLS) { vm_panic(#code " invalid", (code)); } \
	vm_calls[i].vmc_func = (func); 				      \
	vm_calls[i].vmc_name = #code; 				      \
	if(((thecaller) < MINEPM || (thecaller) > MAXEPM) 		\
		&& (thecaller) != ANYEPM				\
		&& (thecaller) != NEEDACL ) {				\
		vm_panic(#thecaller " invalid", (code));  		\
	}								\
	vm_calls[i].vmc_callers |= EPM(thecaller);		      \
}

    /* Set call table to 0. This invalidates all calls (clear
     * vmc_func).
     */
    memset(vm_calls, 0, sizeof(vm_calls));

    /* Requests from PM (restricted to be from PM only). */
    CALLMAP(VM_EXIT, do_exit, PM_PROC_NR);
    CALLMAP(VM_FORK, do_fork, PM_PROC_NR);
    CALLMAP(VM_BRK, do_brk, PM_PROC_NR);
    CALLMAP(VM_EXEC_NEWMEM, do_exec_newmem, PM_PROC_NR);
    CALLMAP(VM_PUSH_SIG, do_push_sig, PM_PROC_NR);
    CALLMAP(VM_WILLEXIT, do_willexit, PM_PROC_NR);
    CALLMAP(VM_ADDDMA, do_adddma, PM_PROC_NR);
    CALLMAP(VM_DELDMA, do_deldma, PM_PROC_NR);
    CALLMAP(VM_GETDMA, do_getdma, PM_PROC_NR);
    CALLMAP(VM_ALLOCMEM, do_allocmem, PM_PROC_NR);
    CALLMAP(VM_NOTIFY_SIG, do_notify_sig, PM_PROC_NR);

    /* Requests from RS */
    CALLMAP(VM_RS_SET_PRIV, do_rs_set_priv, RS_PROC_NR);

    /* Requests from userland (source unrestricted). */
    CALLMAP(VM_MMAP, do_mmap, ANYEPM);
    CALLMAP(VM_MUNMAP, do_munmap, ANYEPM);
    CALLMAP(VM_MUNMAP_TEXT, do_munmap, ANYEPM);
    CALLMAP(VM_MAP_PHYS, do_map_phys, ANYEPM); /* Does its own checking. */
    CALLMAP(VM_UNMAP_PHYS, do_unmap_phys, ANYEPM);

    CALLMAP(NNR_VM_MMAP, scall_mmap, ANYEPM);
    CALLMAP(NNR_VM_MUNMAP, scall_munmap, ANYEPM);
    CALLMAP(NNR_VM_MUNMAP_TEXT, scall_munmap, ANYEPM);

    /* Requests from userland (anyone can call but need an ACL bit). */
    CALLMAP(VM_REMAP, do_remap, NEEDACL);
    CALLMAP(VM_GETPHYS, do_get_phys, NEEDACL);
    CALLMAP(VM_SHM_UNMAP, do_shared_unmap, NEEDACL);
    CALLMAP(VM_GETREF, do_get_refcount, NEEDACL);
    CALLMAP(VM_CTL, do_ctl, NEEDACL);
    CALLMAP(VM_QUERY_EXIT, do_query_exit, NEEDACL);

    /* Sanity checks */
    if(find_kernel_top() >= VM_PROCSTART)
        vm_panic("kernel loaded too high", NO_NUM);

    /* Initialize the structures for queryexit */
    init_query_exit();

    /* Unmap our own low pages. */
    unmap_ok = 1;
    unmap_page_zero();
}
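
The CALLMAP macro above fills a dispatch table indexed by call number, recording the handler, its name and the endpoints allowed to issue the call. The reduced model below (made-up call codes, endpoints and helpers, no range checks) shows the same table-plus-bitmap pattern and how a request would be checked and dispatched.

#include <stdio.h>
#include <string.h>

#define NCALLS		8
#define CALLNUMBER(c)	(c)			/* assumption: 0-based call codes */
#define EPM(e)		(1u << (e))

struct call { int (*func)(void); const char *name; unsigned callers; };
static struct call calls[NCALLS];

#define CALLMAP(code, fn, caller) do {					\
	calls[CALLNUMBER(code)].func = (fn);				\
	calls[CALLNUMBER(code)].name = #code;				\
	calls[CALLNUMBER(code)].callers |= EPM(caller);			\
} while(0)

enum { VM_EXIT, VM_FORK };			/* hypothetical call codes */
enum { PM_EP = 1, USER_EP = 2 };		/* hypothetical endpoints */

static int do_exit(void) { return 0; }
static int do_fork(void) { return 0; }

int main(void)
{
	int code = VM_FORK, caller = PM_EP;

	memset(calls, 0, sizeof(calls));	/* invalidate all slots first */
	CALLMAP(VM_EXIT, do_exit, PM_EP);
	CALLMAP(VM_FORK, do_fork, PM_EP);

	/* Dispatch: check the caller bit, then call the handler. */
	if(calls[code].func && (calls[code].callers & EPM(caller)))
		printf("%s allowed, returns %d\n", calls[code].name,
			calls[code].func());
	return 0;
}
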
Example #14
/*===========================================================================*
 *				do_fork					     *
 *===========================================================================*/
PUBLIC int do_fork(message *msg)
{
  int r, proc, s, childproc, fullvm;
  struct vmproc *vmp, *vmc;
  pt_t origpt;
  vir_bytes msgaddr;

  SANITYCHECK(SCL_FUNCTIONS);

  if(vm_isokendpt(msg->VMF_ENDPOINT, &proc) != OK) {
	printf("VM: bogus endpoint VM_FORK %d\n", msg->VMF_ENDPOINT);
  SANITYCHECK(SCL_FUNCTIONS);
	return EINVAL;
  }

  childproc = msg->VMF_SLOTNO;
  if(childproc < 0 || childproc >= NR_PROCS) {
	printf("VM: bogus slotno VM_FORK %d\n", msg->VMF_SLOTNO);
  SANITYCHECK(SCL_FUNCTIONS);
	return EINVAL;
  }

  vmp = &vmproc[proc];		/* parent */
  vmc = &vmproc[childproc];	/* child */
  assert(vmc->vm_slot == childproc);

  if(vmp->vm_flags & VMF_HAS_DMA) {
	printf("VM: %d has DMA memory and may not fork\n", msg->VMF_ENDPOINT);
	return EINVAL;
  }

  fullvm = vmp->vm_flags & VMF_HASPT;

  /* The child is basically a copy of the parent. */
  origpt = vmc->vm_pt;
  *vmc = *vmp;
  vmc->vm_slot = childproc;
  vmc->vm_regions = NULL;
  yielded_init(&vmc->vm_yielded_blocks);
  vmc->vm_endpoint = NONE;	/* In case someone tries to use it. */
  vmc->vm_pt = origpt;
  vmc->vm_flags &= ~VMF_HASPT;

#if VMSTATS
  vmc->vm_bytecopies = 0;
#endif

  if(pt_new(&vmc->vm_pt) != OK) {
	printf("VM: fork: pt_new failed\n");
	return ENOMEM;
  }

  vmc->vm_flags |= VMF_HASPT;

  if(fullvm) {
	SANITYCHECK(SCL_DETAIL);

	if(map_proc_copy(vmc, vmp) != OK) {
		printf("VM: fork: map_proc_copy failed\n");
		pt_free(&vmc->vm_pt);
		return(ENOMEM);
	}

	if(vmp->vm_heap) {
		vmc->vm_heap = map_region_lookup_tag(vmc, VRT_HEAP);
		assert(vmc->vm_heap);
	}

	SANITYCHECK(SCL_DETAIL);
  } else {
	vir_bytes sp;
	struct vir_region *heap, *stack;
	vir_bytes text_bytes, data_bytes, stack_bytes, parent_gap_bytes,
		child_gap_bytes;

	/* Get SP of new process (using parent). */
	if(get_stack_ptr(vmp->vm_endpoint, &sp) != OK) {
		printf("VM: fork: get_stack_ptr failed for %d\n",
			vmp->vm_endpoint);
		return ENOMEM;
	}

	/* Update size of stack segment using current SP. */
	if(adjust(vmp, vmp->vm_arch.vm_seg[D].mem_len, sp) != OK) {
		printf("VM: fork: adjust failed for %d\n",
			vmp->vm_endpoint);
		return ENOMEM;
	}

	/* Copy newly adjust()ed stack segment size to child. */
	vmc->vm_arch.vm_seg[S] = vmp->vm_arch.vm_seg[S];

        text_bytes = CLICK2ABS(vmc->vm_arch.vm_seg[T].mem_len);
        data_bytes = CLICK2ABS(vmc->vm_arch.vm_seg[D].mem_len);
	stack_bytes = CLICK2ABS(vmc->vm_arch.vm_seg[S].mem_len);

	/* How much space lies between the parent's break and the lower end
	 * (i.e. the logical top) of its stack.
	 */
	parent_gap_bytes = CLICK2ABS(vmc->vm_arch.vm_seg[S].mem_vir -
		vmc->vm_arch.vm_seg[D].mem_len);

	/* how much space can the child stack grow downwards, below
	 * the current SP? The rest of the gap is available for the
	 * heap to grow upwards.
	 */
	child_gap_bytes = VM_PAGE_SIZE;

        if((r=proc_new(vmc, VM_PROCSTART,
		text_bytes, data_bytes, stack_bytes, child_gap_bytes, 0, 0,
		CLICK2ABS(vmc->vm_arch.vm_seg[S].mem_vir +
                        vmc->vm_arch.vm_seg[S].mem_len), 1)) != OK) {
			printf("VM: fork: proc_new failed\n");
			return r;
	}

	if(!(heap = map_region_lookup_tag(vmc, VRT_HEAP)))
		panic("couldn't lookup heap");
	assert(heap->phys);
	if(!(stack = map_region_lookup_tag(vmc, VRT_STACK)))
		panic("couldn't lookup stack");
	assert(stack->phys);

	/* Now copy the memory regions. */

	if(vmc->vm_arch.vm_seg[T].mem_len > 0) {
		struct vir_region *text;
		if(!(text = map_region_lookup_tag(vmc, VRT_TEXT)))
			panic("couldn't lookup text");
		assert(text->phys);
		if(copy_abs2region(CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
			text, 0, text_bytes) != OK)
				panic("couldn't copy text");
	}

	if(copy_abs2region(CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys),
		heap, 0, data_bytes) != OK)
			panic("couldn't copy heap");

	if(copy_abs2region(CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys +
			vmc->vm_arch.vm_seg[D].mem_len) + parent_gap_bytes,
			stack, child_gap_bytes, stack_bytes) != OK)
			panic("couldn't copy stack");
  }

  /* Only inherit these flags. */
  vmc->vm_flags &= (VMF_INUSE|VMF_SEPARATE|VMF_HASPT);

  /* inherit the priv call bitmaps */
  memcpy(&vmc->vm_call_mask, &vmp->vm_call_mask, sizeof(vmc->vm_call_mask));

  /* Tell kernel about the (now successful) FORK. */
  if((r=sys_fork(vmp->vm_endpoint, childproc,
	&vmc->vm_endpoint, vmc->vm_arch.vm_seg,
	PFF_VMINHIBIT, &msgaddr)) != OK) {
        panic("do_fork can't sys_fork: %d", r);
  }

  if(fullvm) {
	vir_bytes vir;
	/* making these messages writable is an optimisation
	 * and its return value needn't be checked.
	 */
	vir = arch_vir2map(vmc, msgaddr);
	handle_memory(vmc, vir, sizeof(message), 1);
	vir = arch_vir2map(vmp, msgaddr);
	handle_memory(vmp, vir, sizeof(message), 1);
  }

  if((r=pt_bind(&vmc->vm_pt, vmc)) != OK)
	panic("fork can't pt_bind: %d", r);

  /* Inform caller of new child endpoint. */
  msg->VMF_CHILD_ENDPOINT = vmc->vm_endpoint;

  SANITYCHECK(SCL_FUNCTIONS);
  return OK;
}
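
One pattern worth isolating from the top of do_fork() is the copy-then-restore of the child slot: the child struct starts as a byte-for-byte copy of the parent, after which the fields that must not be inherited (the saved page table handle, the slot number, the endpoint) are put back. The toy program below (made-up struct and values) shows that pattern on its own.

#include <stdio.h>

struct proc {
	int slot;
	int endpoint;
	void *pagetable;
	unsigned flags;
};

int main(void)
{
	struct proc parent = { 1, 100, (void *)0x1000, 0x7 };
	struct proc child = { 2, -1, (void *)0x2000, 0 };
	void *child_pt = child.pagetable;	/* save before overwriting */
	int child_slot = child.slot;

	child = parent;				/* child is a copy of the parent */
	child.slot = child_slot;		/* ...except for its own identity */
	child.pagetable = child_pt;		/* ...and its own page table */
	child.endpoint = -1;			/* filled in later by the kernel */

	printf("child slot %d, pt %p\n", child.slot, child.pagetable);
	return 0;
}
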
Example #15
/*===========================================================================*
 *				arch_vir2map_text			     *
 *===========================================================================*/
PUBLIC vir_bytes arch_vir2map_text(struct vmproc *vmp, vir_bytes addr)
{
	vir_bytes bottom = CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys);

	return addr + bottom;
}
Example #16
/*===========================================================================*
 *				vm_allocpage		     		     *
 *===========================================================================*/
PUBLIC void *vm_allocpage(phys_bytes *phys, int reason)
{
/* Allocate a page for use by VM itself. */
	phys_bytes newpage;
	vir_bytes loc;
	pt_t *pt;
	int r;
	static int level = 0;
	void *ret;

	pt = &vmprocess->vm_pt;
	assert(reason >= 0 && reason < VMP_CATEGORIES);

	level++;

	assert(level >= 1);
	assert(level <= 2);

	if(level > 1 || !(vmprocess->vm_flags & VMF_HASPT) || !meminit_done) {
		void *s;
		s=vm_getsparepage(phys);
		level--;
		if(!s) {
			util_stacktrace();
			printf("VM: warning: out of spare pages\n");
		}
		return s;
	}

	/* VM does have a pagetable, so get a page and map it in there.
	 * Where in our virtual address space can we put it?
	 */
	loc = findhole(pt,  arch_vir2map(vmprocess, vmprocess->vm_stacktop),
		vmprocess->vm_arch.vm_data_top);
	if(loc == NO_MEM) {
		level--;
		printf("VM: vm_allocpage: findhole failed\n");
		return NULL;
	}

	/* Allocate page of memory for use by VM. As VM
	 * is trusted, we don't have to pre-clear it.
	 */
	if((newpage = alloc_mem(CLICKSPERPAGE, 0)) == NO_MEM) {
		level--;
		printf("VM: vm_allocpage: alloc_mem failed\n");
		return NULL;
	}

	*phys = CLICK2ABS(newpage);

	/* Map this page into our address space. */
	if((r=pt_writemap(vmprocess, pt, loc, *phys, I386_PAGE_SIZE,
		I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE, 0)) != OK) {
		free_mem(newpage, CLICKSPERPAGE);
		printf("vm_allocpage writemap failed\n");
		level--;
		return NULL;
	}

	if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
		panic("VMCTL_FLUSHTLB failed: %d", r);
	}

	level--;

	/* Return user-space-ready pointer to it. */
	ret = (void *) arch_map2vir(vmprocess, loc);

	return ret;
}
Example #17
/*===========================================================================*
 *                              pt_init                                      *
 *===========================================================================*/
PUBLIC void pt_init(phys_bytes usedlimit)
{
/* By default, the kernel gives us a data segment with pre-allocated
 * memory that then can't grow. We want to be able to allocate memory
 * dynamically, however. So here we copy the part of the page table
 * that's ours, so we get a private page table. Then we increase the
 * hardware segment size so we can allocate memory above our stack.
 */
        pt_t *newpt;
        int s, r;
        vir_bytes v;
        phys_bytes lo, hi; 
        vir_bytes extra_clicks;
        u32_t moveup = 0;
	int global_bit_ok = 0;
	int free_pde;
	int p;
	struct vm_ep_data ep_data;
	vir_bytes sparepages_mem;
	phys_bytes sparepages_ph;
	vir_bytes ptr;

        /* Shorthand. */
        newpt = &vmprocess->vm_pt;

        /* Get ourselves spare pages. */
        ptr = (vir_bytes) static_sparepages;
        ptr += I386_PAGE_SIZE - (ptr % I386_PAGE_SIZE);
        if(!(sparepages_mem = ptr))
		panic("pt_init: aalloc for spare failed");
        if((r=sys_umap(SELF, VM_D, (vir_bytes) sparepages_mem,
                I386_PAGE_SIZE*SPAREPAGES, &sparepages_ph)) != OK)
                panic("pt_init: sys_umap failed: %d", r);

        missing_spares = 0;
        assert(STATIC_SPAREPAGES < SPAREPAGES);
        for(s = 0; s < SPAREPAGES; s++) {
        	if(s >= STATIC_SPAREPAGES) {
        		sparepages[s].page = NULL;
        		missing_spares++;
        		continue;
        	}
        	sparepages[s].page = (void *) (sparepages_mem + s*I386_PAGE_SIZE);
        	sparepages[s].phys = sparepages_ph + s*I386_PAGE_SIZE;
        }

	/* global bit and 4MB pages available? */
	global_bit_ok = _cpufeature(_CPUF_I386_PGE);
	bigpage_ok = _cpufeature(_CPUF_I386_PSE);

	/* Set bit for PTE's and PDE's if available. */
	if(global_bit_ok)
		global_bit = I386_VM_GLOBAL;

	/* The kernel and boot time processes need an identity mapping.
	 * We use full PDE's for this without separate page tables.
	 * Figure out which pde we can start using for other purposes.
	 */
	id_map_high_pde = usedlimit / I386_BIG_PAGE_SIZE;

	/* We have to make mappings up till here. */
	free_pde = id_map_high_pde+1;

        /* Initial (current) range of our virtual address space. */
        lo = CLICK2ABS(vmprocess->vm_arch.vm_seg[T].mem_phys);
        hi = CLICK2ABS(vmprocess->vm_arch.vm_seg[S].mem_phys +
                vmprocess->vm_arch.vm_seg[S].mem_len);
                  
        assert(!(lo % I386_PAGE_SIZE)); 
        assert(!(hi % I386_PAGE_SIZE));
 
        if(lo < VM_PROCSTART) {
                moveup = VM_PROCSTART - lo;
                assert(!(VM_PROCSTART % I386_PAGE_SIZE));
                assert(!(lo % I386_PAGE_SIZE));
                assert(!(moveup % I386_PAGE_SIZE));
        }
        
        /* Make new page table for ourselves, partly copied
         * from the current one.
         */     
        if(pt_new(newpt) != OK)
                panic("pt_init: pt_new failed"); 

        /* Set up mappings for VM process. */
        for(v = lo; v < hi; v += I386_PAGE_SIZE)  {
                phys_bytes addr;
                u32_t flags; 
        
                /* We have to write the new position in the PT,
                 * so we can move our segments.
                 */ 
                if(pt_writemap(vmprocess, newpt, v+moveup, v, I386_PAGE_SIZE,
                        I386_VM_PRESENT|I386_VM_WRITE|I386_VM_USER, 0) != OK)
                        panic("pt_init: pt_writemap failed");
        }
       
        /* Move segments up too. */
        vmprocess->vm_arch.vm_seg[T].mem_phys += ABS2CLICK(moveup);
        vmprocess->vm_arch.vm_seg[D].mem_phys += ABS2CLICK(moveup);
        vmprocess->vm_arch.vm_seg[S].mem_phys += ABS2CLICK(moveup);
       
	/* Allocate us a page table in which to remember page directory
	 * pointers.
	 */
	if(!(page_directories = vm_allocpage(&page_directories_phys,
		VMP_PAGETABLE)))
                panic("no virt addr for vm mappings");

	memset(page_directories, 0, I386_PAGE_SIZE);
       
        /* Increase our hardware data segment to create virtual address
         * space above our stack. We want to increase it to VM_DATATOP,
         * like regular processes have.
         */
        extra_clicks = ABS2CLICK(VM_DATATOP - hi);
        vmprocess->vm_arch.vm_seg[S].mem_len += extra_clicks;
       
        /* We pretend to the kernel we have a huge stack segment to
         * increase our data segment.
         */
        vmprocess->vm_arch.vm_data_top =
                (vmprocess->vm_arch.vm_seg[S].mem_vir +
                vmprocess->vm_arch.vm_seg[S].mem_len) << CLICK_SHIFT;
       
        /* Where our free virtual address space starts.
         * This is only a hint to the VM system.
         */
        newpt->pt_virtop = 0;

        /* Let other functions know VM now has a private page table. */
        vmprocess->vm_flags |= VMF_HASPT;

	/* Now reserve another pde for kernel's own mappings. */
	{
		int kernmap_pde;
		phys_bytes addr, len;
		int flags, index = 0;
		u32_t offset = 0;

		kernmap_pde = free_pde++;
		offset = kernmap_pde * I386_BIG_PAGE_SIZE;

		while(sys_vmctl_get_mapping(index, &addr, &len,
			&flags) == OK)  {
			vir_bytes vir;
			if(index >= MAX_KERNMAPPINGS)
                		panic("VM: too many kernel mappings: %d", index);
			kern_mappings[index].phys_addr = addr;
			kern_mappings[index].len = len;
			kern_mappings[index].flags = flags;
			kern_mappings[index].lin_addr = offset;
			kern_mappings[index].flags =
				I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE |
				global_bit;
			if(flags & VMMF_UNCACHED)
				kern_mappings[index].flags |= PTF_NOCACHE;
			if(addr % I386_PAGE_SIZE)
                		panic("VM: addr unaligned: %d", addr);
			if(len % I386_PAGE_SIZE)
                		panic("VM: len unaligned: %d", len);
			vir = arch_map2vir(&vmproc[VMP_SYSTEM], offset);
			if(sys_vmctl_reply_mapping(index, vir) != OK)
                		panic("VM: reply failed");
			offset += len;
			index++;
			kernmappings++;
		}
	}

	/* Find a PDE below processes available for mapping in the
	 * page directories (readonly).
	 */
	pagedir_pde = free_pde++;
	pagedir_pde_val = (page_directories_phys & I386_VM_ADDR_MASK) |
			I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;

	/* Tell kernel about free pde's. */
	while(free_pde*I386_BIG_PAGE_SIZE < VM_PROCSTART) {
		if((r=sys_vmctl(SELF, VMCTL_I386_FREEPDE, free_pde++)) != OK) {
			panic("VMCTL_I386_FREEPDE failed: %d", r);
		}
	}

	/* first pde in use by process. */
	proc_pde = free_pde;

        /* Give our process the new, copied, private page table. */
	pt_mapkernel(newpt);	/* didn't know about vm_dir pages earlier */
        pt_bind(newpt, vmprocess);
       
	/* new segment limit for the kernel after paging is enabled */
	ep_data.data_seg_limit = free_pde*I386_BIG_PAGE_SIZE;
	/* the memory map which must be installed after paging is enabled */
	ep_data.mem_map = vmprocess->vm_arch.vm_seg;

	/* Now actually enable paging. */
	if(sys_vmctl_enable_paging(&ep_data) != OK)
        	panic("pt_init: enable paging failed");

        /* Back to reality - this is where the stack actually is. */
        vmprocess->vm_arch.vm_seg[S].mem_len -= extra_clicks;

        /* Pretend VM stack top is the same as any regular process, not to
         * have discrepancies with new VM instances later on.
         */
        vmprocess->vm_stacktop = VM_STACKTOP;

        /* All OK. */
        return;
}
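
One detail worth isolating is the identity-map bookkeeping near the top of pt_init(): usedlimit determines how many 4 MiB page-directory entries must stay identity mapped for the kernel and boot-time processes, and the entry after that is the first one VM may hand out for its own mappings. The sketch below redoes that arithmetic with a hypothetical limit.

#include <stdio.h>

#define I386_BIG_PAGE_SIZE	(4UL * 1024 * 1024)	/* 4 MiB per PDE */

int main(void)
{
	unsigned long usedlimit = 37UL * 1024 * 1024;	/* e.g. 37 MiB in use */
	unsigned long id_map_high_pde = usedlimit / I386_BIG_PAGE_SIZE;
	unsigned long free_pde = id_map_high_pde + 1;

	printf("identity-map PDEs 0..%lu, first free PDE is %lu\n",
		id_map_high_pde, free_pde);
	return 0;
}
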
Example #18
/*===========================================================================*
 *				sef_cb_init_fresh			     *
 *===========================================================================*/
PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
{
/* Initialize the vm server. */
	int s, i;
	int click, clicksforgotten = 0;
	struct memory mem_chunks[NR_MEMS];
	struct boot_image image[NR_BOOT_PROCS];
	struct boot_image *ip;
	struct rprocpub rprocpub[NR_BOOT_PROCS];
	phys_bytes limit = 0;

#if SANITYCHECKS
	incheck = nocheck = 0;
	FIXME("VM SANITYCHECKS are on");
#endif

	vm_paged = 1;
	env_parse("vm_paged", "d", 0, &vm_paged, 0, 1);
#if SANITYCHECKS
	env_parse("vm_sanitychecklevel", "d", 0, &vm_sanitychecklevel, 0, SCL_MAX);
#endif

	/* Get chunks of available memory. */
	get_mem_chunks(mem_chunks);

	/* Initialize VM's process table. Request a copy of the system
	 * image table that is defined at the kernel level to see which
	 * slots to fill in.
	 */
	if (OK != (s=sys_getimage(image)))
		vm_panic("couldn't get image table: %d\n", s);

	/* Set table to 0. This invalidates all slots (clear VMF_INUSE). */
	memset(vmproc, 0, sizeof(vmproc));

	for(i = 0; i < ELEMENTS(vmproc); i++) {
		vmproc[i].vm_slot = i;
	}

	/* Walk through boot-time system processes that are alive
	 * now and make valid slot entries for them.
	 */
	for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
		phys_bytes proclimit;
		struct vmproc *vmp;

		if(ip->proc_nr >= _NR_PROCS) { vm_panic("proc", ip->proc_nr); }
		if(ip->proc_nr < 0 && ip->proc_nr != SYSTEM) continue;

#define GETVMP(v, nr)						\
		if(nr >= 0) {					\
			vmp = &vmproc[ip->proc_nr];		\
		} else if(nr == SYSTEM) {			\
			vmp = &vmproc[VMP_SYSTEM];		\
		} else {					\
			vm_panic("init: crazy proc_nr", nr);	\
		}

		/* Initialize normal process table slot or special SYSTEM
		 * table slot. Kernel memory is already reserved.
		 */
		GETVMP(vmp, ip->proc_nr);

		/* reset fields as if exited */
		clear_proc(vmp);

		/* Get memory map for this process from the kernel. */
		if ((s=get_mem_map(ip->proc_nr, vmp->vm_arch.vm_seg)) != OK)
			vm_panic("couldn't get process mem_map",s);

		/* Remove this memory from the free list. */
		reserve_proc_mem(mem_chunks, vmp->vm_arch.vm_seg);

		/* Set memory limit. */
		proclimit = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_phys +
			vmp->vm_arch.vm_seg[S].mem_len) - 1;

		if(proclimit > limit)
			limit = proclimit;

		vmp->vm_flags = VMF_INUSE;
		vmp->vm_endpoint = ip->endpoint;
		vmp->vm_stacktop =
			CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
				vmp->vm_arch.vm_seg[S].mem_len);

		if (vmp->vm_arch.vm_seg[T].mem_len != 0)
			vmp->vm_flags |= VMF_SEPARATE;
	}

	/* Architecture-dependent initialization. */
	pt_init(limit);

	/* Initialize tables to all physical memory. */
	mem_init(mem_chunks);
	meminit_done = 1;

	/* Give these processes their own page table. */
	for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
		int s;
		struct vmproc *vmp;
		vir_bytes old_stacktop, old_stack;

		if(ip->proc_nr < 0) continue;

		GETVMP(vmp, ip->proc_nr);

		if(!(ip->flags & PROC_FULLVM))
			continue;

		old_stack = 
			vmp->vm_arch.vm_seg[S].mem_vir +
			vmp->vm_arch.vm_seg[S].mem_len - 
			vmp->vm_arch.vm_seg[D].mem_len;

        	if(pt_new(&vmp->vm_pt) != OK)
			vm_panic("VM: no new pagetable", NO_NUM);
#define BASICSTACK VM_PAGE_SIZE
		old_stacktop = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
				vmp->vm_arch.vm_seg[S].mem_len);
		if(sys_vmctl(vmp->vm_endpoint, VMCTL_INCSP,
			VM_STACKTOP - old_stacktop) != OK) {
			vm_panic("VM: vmctl for new stack failed", NO_NUM);
		}

		FREE_MEM(vmp->vm_arch.vm_seg[D].mem_phys +
			vmp->vm_arch.vm_seg[D].mem_len,
			old_stack);

		if(proc_new(vmp,
			VM_PROCSTART,
			CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_len),
			CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_len),
			BASICSTACK,
			CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
				vmp->vm_arch.vm_seg[S].mem_len -
				vmp->vm_arch.vm_seg[D].mem_len) - BASICSTACK,
			CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
			CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys),
				VM_STACKTOP) != OK) {
			vm_panic("failed proc_new for boot process", NO_NUM);
		}
	}

	/* Set up table of calls. */
#define CALLMAP(code, func) { int i;			      \
	if((i=CALLNUMBER(code)) < 0) { vm_panic(#code " invalid", (code)); } \
	if(i >= NR_VM_CALLS) { vm_panic(#code " invalid", (code)); } \
	vm_calls[i].vmc_func = (func); 				      \
	vm_calls[i].vmc_name = #code; 				      \
}

	/* Set call table to 0. This invalidates all calls (clear
	 * vmc_func).
	 */
	memset(vm_calls, 0, sizeof(vm_calls));

	/* Basic VM calls. */
	CALLMAP(VM_MMAP, do_mmap);
	CALLMAP(VM_MUNMAP, do_munmap);
	CALLMAP(VM_MUNMAP_TEXT, do_munmap);
	CALLMAP(VM_MAP_PHYS, do_map_phys);
	CALLMAP(VM_UNMAP_PHYS, do_unmap_phys);

	/* Calls from PM. */
	CALLMAP(VM_EXIT, do_exit);
	CALLMAP(VM_FORK, do_fork);
	CALLMAP(VM_BRK, do_brk);
	CALLMAP(VM_EXEC_NEWMEM, do_exec_newmem);
	CALLMAP(VM_PUSH_SIG, do_push_sig);
	CALLMAP(VM_WILLEXIT, do_willexit);
	CALLMAP(VM_ADDDMA, do_adddma);
	CALLMAP(VM_DELDMA, do_deldma);
	CALLMAP(VM_GETDMA, do_getdma);
	CALLMAP(VM_NOTIFY_SIG, do_notify_sig);

	/* Calls from RS */
	CALLMAP(VM_RS_SET_PRIV, do_rs_set_priv);

	/* Generic calls. */
	CALLMAP(VM_REMAP, do_remap);
	CALLMAP(VM_GETPHYS, do_get_phys);
	CALLMAP(VM_SHM_UNMAP, do_shared_unmap);
	CALLMAP(VM_GETREF, do_get_refcount);
	CALLMAP(VM_INFO, do_info);
	CALLMAP(VM_QUERY_EXIT, do_query_exit);

	/* Sanity checks */
	if(find_kernel_top() >= VM_PROCSTART)
		vm_panic("kernel loaded too high", NO_NUM);

	/* Initialize the structures for queryexit */
	init_query_exit();

	/* Unmap our own low pages. */
	unmap_ok = 1;
	_minix_unmapzero();

	/* Map all the services in the boot image. */
	if((s = sys_safecopyfrom(RS_PROC_NR, info->rproctab_gid, 0,
		(vir_bytes) rprocpub, sizeof(rprocpub), S)) != OK) {
		panic("VM", "sys_safecopyfrom failed", s);
	}
	for(i=0;i < NR_BOOT_PROCS;i++) {
		if(rprocpub[i].in_use) {
			if((s = map_service(&rprocpub[i])) != OK) {
				vm_panic("unable to map service", s);
			}
		}
	}

	return(OK);
}