Example #1: do_mmap
/*===========================================================================*
 *				do_mmap			     		     *
 *===========================================================================*/
PUBLIC int do_mmap(message *m)
{
	int r, n;
	struct vmproc *vmp;
	int mfflags = 0;
	struct vir_region *vr = NULL;

	if((r=vm_isokendpt(m->m_source, &n)) != OK) {
		panic("do_mmap: message from strange source: %d", m->m_source);
	}

	vmp = &vmproc[n];

	if(!(vmp->vm_flags & VMF_HASPT))
		return ENXIO;

	if(m->VMM_FD == -1 || (m->VMM_FLAGS & MAP_ANON)) {
		u32_t vrflags = VR_ANON | VR_WRITABLE;
		size_t len = (vir_bytes) m->VMM_LEN;

		if(m->VMM_FD != -1) {
			return EINVAL;
		}

		/* Contiguous phys memory has to be preallocated. */
		if((m->VMM_FLAGS & (MAP_CONTIG|MAP_PREALLOC)) == MAP_CONTIG) {
			return EINVAL;
		}

		if(m->VMM_FLAGS & MAP_PREALLOC) mfflags |= MF_PREALLOC;
		if(m->VMM_FLAGS & MAP_LOWER16M) vrflags |= VR_LOWER16MB;
		if(m->VMM_FLAGS & MAP_LOWER1M)  vrflags |= VR_LOWER1MB;
		if(m->VMM_FLAGS & MAP_ALIGN64K) vrflags |= VR_PHYS64K;
		if(m->VMM_FLAGS & MAP_SHARED) vrflags |= VR_SHARED;
		if(m->VMM_FLAGS & MAP_CONTIG) vrflags |= VR_CONTIG;

		if(len % VM_PAGE_SIZE)
			len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);

		if(!(vr = map_page_region(vmp,
			arch_vir2map(vmp,
				m->VMM_ADDR ? m->VMM_ADDR : vmp->vm_stacktop),
			VM_DATATOP, len, MAP_NONE, vrflags, mfflags))) {
			return ENOMEM;
		}
	} else {
		return ENOSYS;
	}

	/* Return mapping, as seen from process. */
	assert(vr);
	m->VMM_RETADDR = arch_map2vir(vmp, vr->vaddr);

	return OK;
}
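
Both mmap paths in this collection round the requested length up to whole pages with the same two-line idiom. Below is a minimal standalone sketch of that rounding; the 4096-byte VM_PAGE_SIZE is an illustrative assumption, the real value is architecture-defined.

#include <assert.h>
#include <stddef.h>

#define VM_PAGE_SIZE 4096	/* illustrative; the real value is arch-defined */

/* Round len up to the next multiple of VM_PAGE_SIZE, as do_mmap does. */
static size_t round_up_page(size_t len)
{
	if (len % VM_PAGE_SIZE)
		len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);
	return len;
}

int main(void)
{
	assert(round_up_page(1) == 4096);
	assert(round_up_page(4096) == 4096);	/* already aligned: unchanged */
	assert(round_up_page(4097) == 8192);
	return 0;
}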
Example #2: do_remap
/*===========================================================================*
 *				do_remap		     		     *
 *===========================================================================*/
PUBLIC int do_remap(message *m)
{
	int dn, sn;
	vir_bytes da, sa, startv;
	size_t size;
	struct vir_region *region;
	struct vmproc *dvmp, *svmp;
	int r;

	da = (vir_bytes) m->VMRE_DA;
	sa = (vir_bytes) m->VMRE_SA;
	size = m->VMRE_SIZE;

	if ((r = vm_isokendpt((endpoint_t) m->VMRE_D, &dn)) != OK)
		return EINVAL;
	if ((r = vm_isokendpt((endpoint_t) m->VMRE_S, &sn)) != OK)
		return EINVAL;

	dvmp = &vmproc[dn];
	svmp = &vmproc[sn];

	/* da is deliberately not translated by arch_vir2map():
	 * map_remap() needs the raw value to know whether the
	 * caller wants to bind to that exact address or have one
	 * chosen by the system.
	 */
	sa = arch_vir2map(svmp, sa);

	if (!(region = map_lookup(svmp, sa)))
		return EINVAL;

	if(region->vaddr != sa) {
		printf("VM: do_remap: not start of region.\n");
		return EFAULT;
	}

	if(!(region->flags & VR_SHARED)) {
		printf("VM: do_remap: not shared.\n");
		return EFAULT;
	}

	if (size % VM_PAGE_SIZE)  
		size += VM_PAGE_SIZE - size % VM_PAGE_SIZE;

	if(size != region->length) {
		printf("VM: do_remap: not size of region.\n");
		return EFAULT;
	}

	if ((r = map_remap(dvmp, da, size, region, &startv)) != OK)
		return r;

	m->VMRE_RETA = (char *) arch_map2vir(dvmp, startv);
	return OK;
}
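
The three EFAULT checks above require the source to be exactly one whole shared region: sa must equal the region start, the region must carry VR_SHARED, and the page-rounded size must equal the region length. Here is a hedged sketch of that ladder as a standalone predicate; the struct and the VR_SHARED value are illustrative stand-ins for the VM-internal ones.

#include <assert.h>

#define VR_SHARED 0x01	/* illustrative bit; the real value is VM-internal */

/* Illustrative region descriptor mirroring the fields do_remap checks. */
struct region_view {
	unsigned long vaddr;	/* mapped start address of the region */
	unsigned long length;	/* region length in bytes */
	int flags;		/* VR_SHARED among others */
};

/* Return 0 iff (sa, size) names exactly one whole shared region. */
static int remap_source_ok(const struct region_view *region,
	unsigned long sa, unsigned long size)
{
	if (region->vaddr != sa)		/* must point at the region start */
		return -1;
	if (!(region->flags & VR_SHARED))	/* only shared regions can be remapped */
		return -1;
	if (size != region->length)		/* must cover the region exactly */
		return -1;
	return 0;
}

int main(void)
{
	struct region_view r = { 0x1000, 0x2000, VR_SHARED };

	assert(remap_source_ok(&r, 0x1000, 0x2000) == 0);
	assert(remap_source_ok(&r, 0x1800, 0x2000) != 0);	/* mid-region */
	assert(remap_source_ok(&r, 0x1000, 0x1000) != 0);	/* partial size */
	return 0;
}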
Example #3: do_map_phys
/*===========================================================================*
 *				do_map_phys		     		     *
 *===========================================================================*/
PUBLIC int do_map_phys(message *m)
{
	int r, n;
	struct vmproc *vmp;
	endpoint_t target;
	struct vir_region *vr;
	vir_bytes len;
	phys_bytes startaddr;
	size_t offset;

	target = m->VMMP_EP;
	len = m->VMMP_LEN;

	if (len == 0) return EINVAL;	/* len is unsigned, so only 0 is invalid */

	if(target == SELF)
		target = m->m_source;

	if((r=vm_isokendpt(target, &n)) != OK)
		return EINVAL;

	startaddr = (phys_bytes) m->VMMP_PHADDR;

	/* First check permission, then round range down/up. Caller can't
	 * help it if we can't map in lower than page granularity.
	 */
	if(map_perm_check(m->m_source, target, startaddr, len) != OK) {
		printf("VM: unauthorized mapping of 0x%lx by %d\n",
			startaddr, m->m_source);
		return EPERM;
	}

	vmp = &vmproc[n];

	if(!(vmp->vm_flags & VMF_HASPT))
		return ENXIO;

	offset = startaddr % VM_PAGE_SIZE;
	len += offset;
	startaddr -= offset;

	if(len % VM_PAGE_SIZE)
		len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);

	if(!(vr = map_page_region(vmp, arch_vir2map(vmp, vmp->vm_stacktop),
		VM_DATATOP, len, startaddr,
		VR_DIRECT | VR_NOPF | VR_WRITABLE, 0))) {
		return ENOMEM;
	}

	m->VMMP_VADDR_REPLY = (void *) (arch_map2vir(vmp, vr->vaddr) + offset);

	return OK;
}
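
The offset arithmetic above is the standard way to honor a sub-page physical address at page granularity: remember the offset into the first page, widen the range to page boundaries, map the widened range, and return the mapped base plus the offset. A standalone sketch with concrete numbers follows; the helper name and the 4096-byte page size are illustrative.

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* illustrative; stands in for VM_PAGE_SIZE */

/* Widen (start, len) to page boundaries the way do_map_phys does,
 * returning the sub-page offset the caller must add back.
 */
static unsigned long widen_to_pages(unsigned long *start, unsigned long *len)
{
	unsigned long offset = *start % PAGE_SIZE;

	*len += offset;		/* grow the range by what we cut off the front */
	*start -= offset;	/* pull the start back to a page boundary */
	if (*len % PAGE_SIZE)	/* round the end up to a page boundary */
		*len += PAGE_SIZE - (*len % PAGE_SIZE);
	return offset;
}

int main(void)
{
	unsigned long start = 0x12345, len = 0x100;
	unsigned long offset = widen_to_pages(&start, &len);

	/* 0x12345 % 0x1000 = 0x345, so the mapping starts at 0x12000,
	 * covers 0x1000 bytes (0x345 + 0x100 rounded up), and the caller
	 * gets back mapped_base + 0x345.
	 */
	assert(offset == 0x345);
	assert(start == 0x12000);
	assert(len == 0x1000);
	printf("start=0x%lx len=0x%lx offset=0x%lx\n", start, len, offset);
	return 0;
}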
Example #4: scall_mmap
int scall_mmap(kipc_msg_t *m)
{
	int err, n;
	struct vmproc *vmp;
	int mfflags = 0;
	struct vir_region *vr = NULL;

	if((err = vm_isokendpt(m->m_source, &n)) != 0) {
		vm_panic("do_mmap: message from strange source", m->m_source);
	}

	vmp = &vmproc[n];

	if(!(vmp->vm_flags & VMF_HASPT))
		return -ENXIO;

	if(m->VMM_FD == -1 || (m->VMM_FLAGS & MAP_ANONYMOUS)) {
		u32_t vrflags = VR_ANON | VR_WRITABLE;
		size_t len = (vir_bytes) m->VMM_LEN;

		if(m->VMM_FD != -1) {
			return -EINVAL;
		}

		if(m->VMM_FLAGS & MAP_CONTIG) mfflags |= MF_CONTIG;
		if(m->VMM_FLAGS & MAP_PREALLOC) mfflags |= MF_PREALLOC;
		if(m->VMM_FLAGS & MAP_LOWER16M) vrflags |= VR_LOWER16MB;
		if(m->VMM_FLAGS & MAP_LOWER1M)  vrflags |= VR_LOWER1MB;
		if(m->VMM_FLAGS & MAP_ALIGN64K) vrflags |= VR_PHYS64K;
		if(m->VMM_FLAGS & MAP_SHARED) vrflags |= VR_SHARED;

		if(len % VM_PAGE_SIZE)
			len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);

		if(!(vr = map_page_region(vmp,
			arch_vir2map(vmp, m->VMM_ADDR ? m->VMM_ADDR : vmp->vm_stacktop),
			VM_DATATOP, len, MAP_NONE, vrflags, mfflags))) {
			return -ENOMEM;
		}
	} else {
		return -ENOSYS;
	}

	/* Return mapping, as seen from process. */
	vm_assert(vr);
	m->VMM_RETADDR = arch_map2vir(vmp, vr->vaddr);

	return m->VMM_RETADDR;
}
Example #5: pt_bind
/*===========================================================================*
 *				pt_bind			     		     *
 *===========================================================================*/
PUBLIC int pt_bind(pt_t *pt, struct vmproc *who)
{
	int slot;
	u32_t phys;
	void *pdes;

	/* Basic sanity checks. */
	assert(who);
	assert(who->vm_flags & VMF_INUSE);
	assert(pt);

	assert(pagedir_pde >= 0);

	slot = who->vm_slot;
	assert(slot >= 0);
	assert(slot < ELEMENTS(vmproc));
	assert(slot < I386_VM_PT_ENTRIES);

	phys = pt->pt_dir_phys & I386_VM_ADDR_MASK;
	assert(pt->pt_dir_phys == phys);

	/* Update "page directory pagetable." */
	page_directories[slot] = phys | I386_VM_PRESENT|I386_VM_WRITE;

	/* This is where the PDEs will be visible to the kernel
	 * in its address space.
	 */
	pdes = (void *) arch_map2vir(&vmproc[VMP_SYSTEM],
		pagedir_pde*I386_BIG_PAGE_SIZE + 
			slot * I386_PAGE_SIZE);

#if 0
	printf("VM: slot %d endpoint %d has pde val 0x%lx at kernel address 0x%lx\n",
		slot, who->vm_endpoint, page_directories[slot], pdes);
#endif
	/* Tell kernel about new page table root. */
	return sys_vmctl_set_addrspace(who->vm_endpoint,
		pt ? pt->pt_dir_phys : 0,
		pt ? pdes : 0);
}
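
pt_bind publishes every process's page directory at a computable kernel-visible address: one big page (the pagedir_pde slot) holds all the directories, one 4 KiB page per process slot. Here is a sketch of that address computation, with illustrative constants standing in for the i386 header values and a hypothetical pagedir_pde.

#include <assert.h>

#define I386_PAGE_SIZE		4096UL			/* illustrative 4 KiB page */
#define I386_BIG_PAGE_SIZE	(4096UL * 1024UL)	/* illustrative 4 MiB big page */

/* Linear address where slot's page directory becomes visible, as
 * pt_bind computes it: the pagedir_pde'th big page, plus one small
 * page per slot.
 */
static unsigned long pde_window_addr(unsigned long pagedir_pde,
	unsigned long slot)
{
	return pagedir_pde * I386_BIG_PAGE_SIZE + slot * I386_PAGE_SIZE;
}

int main(void)
{
	/* With a hypothetical pagedir_pde of 12, slot 0 lands at 48 MiB
	 * and each following slot is exactly one page higher.
	 */
	assert(pde_window_addr(12, 0) == 12UL * I386_BIG_PAGE_SIZE);
	assert(pde_window_addr(12, 3) - pde_window_addr(12, 2) == I386_PAGE_SIZE);
	return 0;
}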
Example #6: do_remap (kipc variant)
/*===========================================================================*
 *				do_remap		     		     *
 *===========================================================================*/
int do_remap(kipc_msg_t *m)
{
	int d, dn, s, sn;
	vir_bytes da, sa, startv;
	size_t size;
	struct vir_region *region;
	struct vmproc *dvmp, *svmp;
	int r;

	d = m->VMRE_D;
	s = m->VMRE_S;
	da = (vir_bytes) m->VMRE_DA;
	sa = (vir_bytes) m->VMRE_SA;
	size = m->VMRE_SIZE;

	if ((r = vm_isokendpt(d, &dn)) != 0)
		return -EINVAL;
	if ((r = vm_isokendpt(s, &sn)) != 0)
		return -EINVAL;

	dvmp = &vmproc[dn];
	svmp = &vmproc[sn];

	/* da is deliberately not translated by arch_vir2map():
	 * map_remap() needs the raw value to know whether the
	 * caller wants to bind to that exact address or have one
	 * chosen by the system.
	 */
	sa = arch_vir2map(svmp, sa);

	if (!(region = map_lookup(svmp, sa)))
		return -EINVAL;

	if ((r = map_remap(dvmp, da, size, region, &startv)) != 0)
		return r;

	m->VMRE_RETA = (char *) arch_map2vir(dvmp, startv);
	return 0;
}
Example #7: pt_init
/*===========================================================================*
 *                              pt_init                                      *
 *===========================================================================*/
PUBLIC void pt_init(phys_bytes usedlimit)
{
/* By default, the kernel gives us a data segment with pre-allocated
 * memory that then can't grow. We want to be able to allocate memory
 * dynamically, however. So here we copy the part of the page table
 * that's ours, so we get a private page table. Then we increase the
 * hardware segment size so we can allocate memory above our stack.
 */
        pt_t *newpt;
        int s, r;
        vir_bytes v;
        phys_bytes lo, hi; 
        vir_bytes extra_clicks;
        u32_t moveup = 0;
	int global_bit_ok = 0;
	int free_pde;
	struct vm_ep_data ep_data;
	vir_bytes sparepages_mem;
	phys_bytes sparepages_ph;
	vir_bytes ptr;

        /* Shorthand. */
        newpt = &vmprocess->vm_pt;

        /* Get ourselves spare pages. */
        ptr = (vir_bytes) static_sparepages;
        ptr += I386_PAGE_SIZE - (ptr % I386_PAGE_SIZE);
        if(!(sparepages_mem = ptr))
		panic("pt_init: no aligned memory for spare pages");
        if((r=sys_umap(SELF, VM_D, (vir_bytes) sparepages_mem,
                I386_PAGE_SIZE*SPAREPAGES, &sparepages_ph)) != OK)
                panic("pt_init: sys_umap failed: %d", r);

        missing_spares = 0;
        assert(STATIC_SPAREPAGES < SPAREPAGES);
        for(s = 0; s < SPAREPAGES; s++) {
        	if(s >= STATIC_SPAREPAGES) {
        		sparepages[s].page = NULL;
        		missing_spares++;
        		continue;
        	}
        	sparepages[s].page = (void *) (sparepages_mem + s*I386_PAGE_SIZE);
        	sparepages[s].phys = sparepages_ph + s*I386_PAGE_SIZE;
        }

	/* global bit and 4MB pages available? */
	global_bit_ok = _cpufeature(_CPUF_I386_PGE);
	bigpage_ok = _cpufeature(_CPUF_I386_PSE);

	/* Set the global bit for PTEs and PDEs if available. */
	if(global_bit_ok)
		global_bit = I386_VM_GLOBAL;

	/* The kernel and boot time processes need an identity mapping.
	 * We use full PDE's for this without separate page tables.
	 * Figure out which pde we can start using for other purposes.
	 */
	id_map_high_pde = usedlimit / I386_BIG_PAGE_SIZE;

	/* We have to make mappings up till here. */
	free_pde = id_map_high_pde+1;

        /* Initial (current) range of our virtual address space. */
        lo = CLICK2ABS(vmprocess->vm_arch.vm_seg[T].mem_phys);
        hi = CLICK2ABS(vmprocess->vm_arch.vm_seg[S].mem_phys +
                vmprocess->vm_arch.vm_seg[S].mem_len);
                  
        assert(!(lo % I386_PAGE_SIZE)); 
        assert(!(hi % I386_PAGE_SIZE));
 
        if(lo < VM_PROCSTART) {
                moveup = VM_PROCSTART - lo;
                assert(!(VM_PROCSTART % I386_PAGE_SIZE));
                assert(!(lo % I386_PAGE_SIZE));
                assert(!(moveup % I386_PAGE_SIZE));
        }
        
        /* Make new page table for ourselves, partly copied
         * from the current one.
         */     
        if(pt_new(newpt) != OK)
                panic("pt_init: pt_new failed"); 

        /* Set up mappings for VM process. */
        for(v = lo; v < hi; v += I386_PAGE_SIZE)  {
                /* We have to write the new position in the PT,
                 * so we can move our segments.
                 */ 
                if(pt_writemap(vmprocess, newpt, v+moveup, v, I386_PAGE_SIZE,
                        I386_VM_PRESENT|I386_VM_WRITE|I386_VM_USER, 0) != OK)
                        panic("pt_init: pt_writemap failed");
        }
       
        /* Move segments up too. */
        vmprocess->vm_arch.vm_seg[T].mem_phys += ABS2CLICK(moveup);
        vmprocess->vm_arch.vm_seg[D].mem_phys += ABS2CLICK(moveup);
        vmprocess->vm_arch.vm_seg[S].mem_phys += ABS2CLICK(moveup);
       
	/* Allocate us a page table in which to remember page directory
	 * pointers.
	 */
	if(!(page_directories = vm_allocpage(&page_directories_phys,
		VMP_PAGETABLE)))
                panic("no virt addr for vm mappings");

	memset(page_directories, 0, I386_PAGE_SIZE);
       
        /* Increase our hardware data segment to create virtual address
         * space above our stack. We want to increase it to VM_DATATOP,
         * like regular processes have.
         */
        extra_clicks = ABS2CLICK(VM_DATATOP - hi);
        vmprocess->vm_arch.vm_seg[S].mem_len += extra_clicks;
       
        /* We pretend to the kernel we have a huge stack segment to
         * increase our data segment.
         */
        vmprocess->vm_arch.vm_data_top =
                (vmprocess->vm_arch.vm_seg[S].mem_vir +
                vmprocess->vm_arch.vm_seg[S].mem_len) << CLICK_SHIFT;
       
        /* Where our free virtual address space starts.
         * This is only a hint to the VM system.
         */
        newpt->pt_virtop = 0;

        /* Let other functions know VM now has a private page table. */
        vmprocess->vm_flags |= VMF_HASPT;

	/* Now reserve another pde for kernel's own mappings. */
	{
		int kernmap_pde;
		phys_bytes addr, len;
		int flags, index = 0;
		u32_t offset = 0;

		kernmap_pde = free_pde++;
		offset = kernmap_pde * I386_BIG_PAGE_SIZE;

		while(sys_vmctl_get_mapping(index, &addr, &len,
			&flags) == OK)  {
			vir_bytes vir;
			if(index >= MAX_KERNMAPPINGS)
                		panic("VM: too many kernel mappings: %d", index);
			kern_mappings[index].phys_addr = addr;
			kern_mappings[index].len = len;
			kern_mappings[index].flags = flags;
			kern_mappings[index].lin_addr = offset;
			kern_mappings[index].flags =
				I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE |
				global_bit;
			if(flags & VMMF_UNCACHED)
				kern_mappings[index].flags |= PTF_NOCACHE;
			if(addr % I386_PAGE_SIZE)
                		panic("VM: addr unaligned: %d", addr);
			if(len % I386_PAGE_SIZE)
                		panic("VM: len unaligned: %d", len);
			vir = arch_map2vir(&vmproc[VMP_SYSTEM], offset);
			if(sys_vmctl_reply_mapping(index, vir) != OK)
                		panic("VM: reply failed");
			offset += len;
			index++;
			kernmappings++;
		}
	}

	/* Find a PDE below the process address range that we can use
	 * for mapping in the page directories (read-only).
	 */
	pagedir_pde = free_pde++;
	pagedir_pde_val = (page_directories_phys & I386_VM_ADDR_MASK) |
			I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;

	/* Tell kernel about free PDEs. */
	while(free_pde*I386_BIG_PAGE_SIZE < VM_PROCSTART) {
		if((r=sys_vmctl(SELF, VMCTL_I386_FREEPDE, free_pde++)) != OK) {
			panic("VMCTL_I386_FREEPDE failed: %d", r);
		}
	}

	/* First PDE in use by the process. */
	proc_pde = free_pde;

        /* Give our process the new, copied, private page table. */
	pt_mapkernel(newpt);	/* didn't know about vm_dir pages earlier */
        pt_bind(newpt, vmprocess);
       
	/* new segment limit for the kernel after paging is enabled */
	ep_data.data_seg_limit = free_pde*I386_BIG_PAGE_SIZE;
	/* the memory map which must be installed after paging is enabled */
	ep_data.mem_map = vmprocess->vm_arch.vm_seg;

	/* Now actually enable paging. */
	if(sys_vmctl_enable_paging(&ep_data) != OK)
        	panic("pt_init: enable paging failed");

        /* Back to reality - this is where the stack actually is. */
        vmprocess->vm_arch.vm_seg[S].mem_len -= extra_clicks;

        /* Pretend VM stack top is the same as any regular process, not to
         * have discrepancies with new VM instances later on.
         */
        vmprocess->vm_stacktop = VM_STACKTOP;

        /* All OK. */
        return;
}
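
One step worth isolating from pt_init is how the identity-mapped region is carved out in whole big-page PDE slots: the PDE containing usedlimit is the last one reserved, and free_pde starts just above it. A worked sketch with a hypothetical usedlimit and an illustrative 4 MiB big-page size:

#include <assert.h>

#define I386_BIG_PAGE_SIZE (4096UL * 1024UL)	/* illustrative 4 MiB PDE span */

int main(void)
{
	/* Hypothetical usedlimit: 38 MiB of boot-time memory in use. */
	unsigned long usedlimit = 38UL * 1024UL * 1024UL;

	/* Same arithmetic as pt_init: the identity map must cover every
	 * PDE up to and including the one containing usedlimit.
	 */
	unsigned long id_map_high_pde = usedlimit / I386_BIG_PAGE_SIZE;
	unsigned long free_pde = id_map_high_pde + 1;

	assert(id_map_high_pde == 9);	/* 38 MiB falls in PDE 9 (36..40 MiB) */
	assert(free_pde == 10);		/* PDE 10 is the first reusable slot */
	return 0;
}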
Example #8: vm_allocpage
/*===========================================================================*
 *				vm_allocpage		     		     *
 *===========================================================================*/
PUBLIC void *vm_allocpage(phys_bytes *phys, int reason)
{
/* Allocate a page for use by VM itself. */
	phys_bytes newpage;
	vir_bytes loc;
	pt_t *pt;
	int r;
	static int level = 0;
	void *ret;

	pt = &vmprocess->vm_pt;
	assert(reason >= 0 && reason < VMP_CATEGORIES);

	level++;

	assert(level >= 1);
	assert(level <= 2);

	if(level > 1 || !(vmprocess->vm_flags & VMF_HASPT) || !meminit_done) {
		void *s;
		s=vm_getsparepage(phys);
		level--;
		if(!s) {
			util_stacktrace();
			printf("VM: warning: out of spare pages\n");
		}
		return s;
	}

	/* VM does have a pagetable, so get a page and map it in there.
	 * Where in our virtual address space can we put it?
	 */
	loc = findhole(pt,  arch_vir2map(vmprocess, vmprocess->vm_stacktop),
		vmprocess->vm_arch.vm_data_top);
	if(loc == NO_MEM) {
		level--;
		printf("VM: vm_allocpage: findhole failed\n");
		return NULL;
	}

	/* Allocate page of memory for use by VM. As VM
	 * is trusted, we don't have to pre-clear it.
	 */
	if((newpage = alloc_mem(CLICKSPERPAGE, 0)) == NO_MEM) {
		level--;
		printf("VM: vm_allocpage: alloc_mem failed\n");
		return NULL;
	}

	*phys = CLICK2ABS(newpage);

	/* Map this page into our address space. */
	if((r=pt_writemap(vmprocess, pt, loc, *phys, I386_PAGE_SIZE,
		I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE, 0)) != OK) {
		free_mem(newpage, CLICKSPERPAGE);
		printf("vm_allocpage writemap failed\n");
		level--;
		return NULL;
	}

	if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
		panic("VMCTL_FLUSHTLB failed: %d", r);
	}

	level--;

	/* Return user-space-ready pointer to it. */
	ret = (void *) arch_map2vir(vmprocess, loc);

	return ret;
}
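
The static level counter is vm_allocpage's re-entrancy guard: a nested call (for example, when pt_writemap itself needs a fresh page-table page) is served from the pre-reserved spare pool instead of recursing into the normal path again. Below is a minimal sketch of that pattern; the allocator names are stand-ins, with malloc playing the role of both pools.

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the two allocation paths in vm_allocpage. */
static void *take_spare_page(void) { return malloc(4096); }	/* pre-reserved page */
static void *slow_path_alloc(void);	/* may call allocate_page() again */

/* Depth-guarded allocator: nested calls fall back to the spare pool,
 * mirroring vm_allocpage's static `level` counter.
 */
static void *allocate_page(void)
{
	static int level = 0;
	void *p;

	level++;
	if (level > 1) {		/* re-entered: must not recurse again */
		p = take_spare_page();
		level--;
		return p;
	}
	p = slow_path_alloc();		/* may nest exactly one level deep */
	level--;
	return p;
}

static void *slow_path_alloc(void)
{
	/* Pretend mapping the new page needed another page for a page
	 * table; that inner request takes the spare-page branch above.
	 */
	void *pt_page = allocate_page();
	free(pt_page);
	return malloc(4096);
}

int main(void)
{
	void *p = allocate_page();
	printf("allocated %p\n", p);
	free(p);
	return 0;
}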