Example #1
/*===========================================================================*
 *				pt_new			     		     *
 *===========================================================================*/
PUBLIC int pt_new(pt_t *pt)
{
/* Allocate a pagetable root. On i386, allocate a page-aligned page directory
 * and set all its entries to 0 (indicating no page tables are allocated yet).
 * Look up its physical address, as we'll need it later. Verify that it's
 * page-aligned.
 */
	int i;

	/* Never re-allocate or move a process slot's page directory
	 * once it has been created. This is marginally faster, but it
	 * also avoids having to invalidate the page mappings from
	 * in-kernel page tables pointing to the page directories
	 * (the page_directories data).
	 */
	if(!pt->pt_dir &&
	   !(pt->pt_dir = vm_allocpage(&pt->pt_dir_phys, VMP_PAGEDIR))) {
		return ENOMEM;
	}

	for(i = 0; i < I386_VM_DIR_ENTRIES; i++) {
		pt->pt_dir[i] = 0; /* invalid entry (I386_VM_PRESENT bit = 0) */
		pt->pt_pt[i] = NULL;
	}

	/* Where to start looking for free virtual address space? */
	pt->pt_virtop = 0;

	/* Map in kernel. */
	if(pt_mapkernel(pt) != OK)
		panic("pt_new: pt_mapkernel failed");

	return OK;
}
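
A minimal usage sketch (not from the MINIX source): how a caller inside the VM server might set up the address space for a fresh process slot. It assumes the surrounding VM headers, in particular struct vmproc with its vm_pt field as used elsewhere in these examples; setup_process_pt() itself is a hypothetical helper.

/* Hedged sketch: create a page table for a new process slot. */
static int setup_process_pt(struct vmproc *vmp)
{
	/* pt_new() allocates and clears the page directory (only on
	 * the first call for this slot) and maps in the kernel; in
	 * this version it panics on pt_mapkernel() failure, so ENOMEM
	 * is the only error it can return.
	 */
	if(pt_new(&vmp->vm_pt) != OK)
		return ENOMEM;
	return OK;
}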
Example #2
/*===========================================================================*
 *				pt_writemap		     		     *
 *===========================================================================*/
int pt_writemap(struct vmproc * vmp,
                pt_t *pt,
                vir_bytes v,
                phys_bytes physaddr,
                size_t bytes,
                u32_t flags,
                u32_t writemapflags)
{
    /* Write a mapping into the page table. Allocate a new page table
     * if necessary.
     */
    int p, pages;
    int verify = 0;
    int ret = OK;

#ifdef CONFIG_SMP
    int vminhibit_clear = 0;
    /* FIXME
     * Don't do this on every call: stop the process only on the first
     * change and resume execution after the last change. Do this in a
     * wrapper of this function.
     */
    if (vmp && vmp->vm_endpoint != NONE && vmp->vm_endpoint != VM_PROC_NR &&
            !(vmp->vm_flags & VMF_EXITING)) {
        sys_vmctl(vmp->vm_endpoint, VMCTL_VMINHIBIT_SET, 0);
        vminhibit_clear = 1;
    }
#endif

    if(writemapflags & WMF_VERIFY)
        verify = 1;

    assert(!(bytes % VM_PAGE_SIZE));
    assert(!(flags & ~(PTF_ALLFLAGS)));

    pages = bytes / VM_PAGE_SIZE;

    /* MAP_NONE means to clear the mapping. It doesn't matter
     * what's actually written into the PTE if PRESENT
     * isn't on, so we can just write MAP_NONE into it.
     */
    assert(physaddr == MAP_NONE || (flags & ARCH_VM_PTE_PRESENT));
    assert(physaddr != MAP_NONE || !flags);

    /* First make sure all the necessary page tables are allocated,
     * before we start writing in any of them, because it's a pain
     * to undo our work properly.
     */
    ret = pt_ptalloc_in_range(pt, v, v + VM_PAGE_SIZE*pages, flags, verify);
    if(ret != OK) {
        printf("VM: writemap: pt_ptalloc_in_range failed\n");
        goto resume_exit;
    }

    /* Now write in them. */
    for(p = 0; p < pages; p++) {
        u32_t entry;
        int pde = ARCH_VM_PDE(v);
        int pte = ARCH_VM_PTE(v);

        assert(!(v % VM_PAGE_SIZE));
        assert(pte >= 0 && pte < ARCH_VM_PT_ENTRIES);
        assert(pde >= 0 && pde < ARCH_VM_DIR_ENTRIES);

        /* Page table has to be there. */
        assert(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT);

        /* We do not expect it to be a bigpage. */
        assert(!(pt->pt_dir[pde] & ARCH_VM_BIGPAGE));

        /* Make sure page directory entry for this page table
         * is marked present and page table entry is available.
         */
        assert(pt->pt_pt[pde]);

#if SANITYCHECKS
        /* We don't expect to overwrite a page. */
        if(!(writemapflags & (WMF_OVERWRITE|WMF_VERIFY)))
            assert(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT));
#endif
        if(writemapflags & (WMF_WRITEFLAGSONLY|WMF_FREE)) {
#if defined(__i386__)
            physaddr = pt->pt_pt[pde][pte] & ARCH_VM_ADDR_MASK;
#elif defined(__arm__)
            physaddr = pt->pt_pt[pde][pte] & ARM_VM_PTE_MASK;
#endif
        }

        if(writemapflags & WMF_FREE) {
            free_mem(ABS2CLICK(physaddr), 1);
        }

        /* Entry we will write. */
#if defined(__i386__)
        entry = (physaddr & ARCH_VM_ADDR_MASK) | flags;
#elif defined(__arm__)
        entry = (physaddr & ARM_VM_PTE_MASK) | flags;
#endif

        if(verify) {
            u32_t maskedentry;
            maskedentry = pt->pt_pt[pde][pte];
#if defined(__i386__)
            maskedentry &= ~(I386_VM_ACC|I386_VM_DIRTY);
#endif
            /* Verify pagetable entry. */
#if defined(__i386__)
            if(entry & ARCH_VM_PTE_RW) {
                /* If we expect a writable page, allow a readonly page. */
                maskedentry |= ARCH_VM_PTE_RW;
            }
#elif defined(__arm__)
            if(!(entry & ARCH_VM_PTE_RO)) {
                /* If we expect a writable page, allow a readonly page. */
                maskedentry &= ~ARCH_VM_PTE_RO;
            }
            maskedentry &= ~(ARM_VM_PTE_WB|ARM_VM_PTE_WT);
#endif
            if(maskedentry != entry) {
                printf("pt_writemap: mismatch: ");
#if defined(__i386__)
                if((entry & ARCH_VM_ADDR_MASK) !=
                        (maskedentry & ARCH_VM_ADDR_MASK)) {
#elif defined(__arm__)
                if((entry & ARM_VM_PTE_MASK) !=
                        (maskedentry & ARM_VM_PTE_MASK)) {
#endif
                    printf("pt_writemap: physaddr mismatch (0x%lx, 0x%lx); ",
                           (long)entry, (long)maskedentry);
                } else printf("phys ok; ");
                printf(" flags: found %s; ",
                       ptestr(pt->pt_pt[pde][pte]));
                printf(" masked %s; ",
                       ptestr(maskedentry));
                printf(" expected %s\n", ptestr(entry));
                printf("found 0x%x, wanted 0x%x\n",
                       pt->pt_pt[pde][pte], entry);
                ret = EFAULT;
                goto resume_exit;
            }
        } else {
            /* Write pagetable entry. */
            pt->pt_pt[pde][pte] = entry;
        }

        physaddr += VM_PAGE_SIZE;
        v += VM_PAGE_SIZE;
    }

resume_exit:

#ifdef CONFIG_SMP
    if (vminhibit_clear) {
        assert(vmp && vmp->vm_endpoint != NONE && vmp->vm_endpoint != VM_PROC_NR &&
               !(vmp->vm_flags & VMF_EXITING));
        sys_vmctl(vmp->vm_endpoint, VMCTL_VMINHIBIT_CLEAR, 0);
    }
#endif

    return ret;
}
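
A hedged usage sketch (not from the source): the two common pt_writemap() call shapes, using only flag names that appear in the listing above. A real caller would typically also set a user-access bit, whose name is architecture-specific; map_one_page() is a hypothetical helper.

/* Sketch: map one page writable, then clear the mapping again. */
static int map_one_page(struct vmproc *vmp, pt_t *pt,
	vir_bytes v, phys_bytes ph)
{
	int r;

	/* Present + writable; the asserts above require PRESENT
	 * whenever physaddr is not MAP_NONE.
	 */
	r = pt_writemap(vmp, pt, v, ph, VM_PAGE_SIZE,
		ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_RW, 0);
	if(r != OK)
		return r;

	/* Clearing: MAP_NONE with flags == 0 writes a not-present
	 * entry, per the comment in pt_writemap().
	 */
	return pt_writemap(vmp, pt, v, MAP_NONE, VM_PAGE_SIZE, 0, 0);
}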

/*===========================================================================*
 *				pt_checkrange		     		     *
 *===========================================================================*/
int pt_checkrange(pt_t *pt, vir_bytes v, size_t bytes,
                  int write)
{
    int p, pages;

    assert(!(bytes % VM_PAGE_SIZE));

    pages = bytes / VM_PAGE_SIZE;

    for(p = 0; p < pages; p++) {
        int pde = ARCH_VM_PDE(v);
        int pte = ARCH_VM_PTE(v);

        assert(!(v % VM_PAGE_SIZE));
        assert(pte >= 0 && pte < ARCH_VM_PT_ENTRIES);
        assert(pde >= 0 && pde < ARCH_VM_DIR_ENTRIES);

        /* Page table has to be there. */
        if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT))
            return EFAULT;

        /* Make sure page directory entry for this page table
         * is marked present and page table entry is available.
         */
        assert((pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) && pt->pt_pt[pde]);

        if(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) {
            return EFAULT;
        }

#if defined(__i386__)
        if(write && !(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RW)) {
#elif defined(__arm__)
        if(write && (pt->pt_pt[pde][pte] & ARCH_VM_PTE_RO)) {
#endif
            return EFAULT;
        }

        v += VM_PAGE_SIZE;
    }

    return OK;
}
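
A hedged sketch of how pt_checkrange() might guard a write into a process's memory. do_copy_out() is hypothetical; it assumes dst is page-aligned, since pt_checkrange() asserts page alignment of both the address and the size.

/* Sketch: validate a destination range before copying into it. */
static int do_copy_out(struct vmproc *vmp, vir_bytes dst, size_t len)
{
	/* Round the length up to whole pages. */
	size_t bytes = (len + VM_PAGE_SIZE - 1) & ~(size_t) (VM_PAGE_SIZE - 1);

	/* Require present, writable mappings for the whole range. */
	if(pt_checkrange(&vmp->vm_pt, dst, bytes, 1 /* write */) != OK)
		return EFAULT;

	/* ... perform the actual copy here ... */
	return OK;
}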

/*===========================================================================*
 *				pt_new			     		     *
 *===========================================================================*/
int pt_new(pt_t *pt)
{
    /* Allocate a pagetable root. Allocate a page-aligned page directory
     * and set all its entries to 0 (indicating no page tables are
     * allocated yet). Look up its physical address, as we'll need it
     * later. Verify that it's page-aligned.
     */
    int i, r;

    /* Don't ever re-allocate/re-move a certain process slot's
     * page directory once it's been created. This is a fraction
     * faster, but also avoids having to invalidate the page
     * mappings from in-kernel page tables pointing to
     * the page directories (the page_directories data).
     */
    if(!pt->pt_dir &&
            !(pt->pt_dir = vm_allocpages((phys_bytes *)&pt->pt_dir_phys,
                                         VMP_PAGEDIR, ARCH_PAGEDIR_SIZE/VM_PAGE_SIZE))) {
        return ENOMEM;
    }

    assert(!((u32_t)pt->pt_dir_phys % ARCH_PAGEDIR_SIZE));

    for(i = 0; i < ARCH_VM_DIR_ENTRIES; i++) {
        pt->pt_dir[i] = 0; /* invalid entry (PRESENT bit = 0) */
        pt->pt_pt[i] = NULL;
    }

    /* Where to start looking for free virtual address space? */
    pt->pt_virtop = 0;

    /* Map in kernel. */
    if((r=pt_mapkernel(pt)) != OK)
        return r;

    return OK;
}
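
Why this version allocates ARCH_PAGEDIR_SIZE/VM_PAGE_SIZE pages and asserts ARCH_PAGEDIR_SIZE alignment: on i386 a page directory is a single 4 KiB page, while the ARM (short-descriptor) first-level table is 16 KiB and must be 16 KiB-aligned. A standalone illustration of that arithmetic, using the architectural sizes rather than the MINIX macros:

#include <assert.h>

int main(void)
{
	unsigned page_size = 4096;          /* VM_PAGE_SIZE on both arches */
	unsigned i386_pagedir = 4096;       /* i386 page directory: 1 page */
	unsigned arm_pagedir = 4 * 4096;    /* ARM L1 table: 16 KiB, 4 pages */

	assert(i386_pagedir / page_size == 1);
	assert(arm_pagedir / page_size == 4);
	return 0;
}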
Example #3
/*===========================================================================*
 *                              pt_init                                      *
 *===========================================================================*/
PUBLIC void pt_init(phys_bytes usedlimit)
{
/* By default, the kernel gives us a data segment with pre-allocated
 * memory that then can't grow. We want to be able to allocate memory
 * dynamically, however. So here we copy the part of the page table
 * that's ours, so we get a private page table. Then we increase the
 * hardware segment size so we can allocate memory above our stack.
 */
	pt_t *newpt;
	int s, r;
	vir_bytes v;
	phys_bytes lo, hi;
	vir_bytes extra_clicks;
	u32_t moveup = 0;
	int global_bit_ok = 0;
	int free_pde;
	struct vm_ep_data ep_data;
	vir_bytes sparepages_mem;
	phys_bytes sparepages_ph;
	vir_bytes ptr;

        /* Shorthand. */
        newpt = &vmprocess->vm_pt;

        /* Get ourselves spare pages. Round the static buffer up to the
         * next page boundary (a no-op if it is already aligned).
         */
        ptr = (vir_bytes) static_sparepages;
        ptr = (ptr + I386_PAGE_SIZE - 1) & ~(vir_bytes) (I386_PAGE_SIZE - 1);
        if(!(sparepages_mem = ptr))
		panic("pt_init: aligning static spare pages failed");
        if((r=sys_umap(SELF, VM_D, (vir_bytes) sparepages_mem,
                I386_PAGE_SIZE*SPAREPAGES, &sparepages_ph)) != OK)
                panic("pt_init: sys_umap failed: %d", r);

        missing_spares = 0;
        assert(STATIC_SPAREPAGES < SPAREPAGES);
        for(s = 0; s < SPAREPAGES; s++) {
        	if(s >= STATIC_SPAREPAGES) {
        		sparepages[s].page = NULL;
        		missing_spares++;
        		continue;
        	}
        	sparepages[s].page = (void *) (sparepages_mem + s*I386_PAGE_SIZE);
        	sparepages[s].phys = sparepages_ph + s*I386_PAGE_SIZE;
        }

	/* global bit and 4MB pages available? */
	global_bit_ok = _cpufeature(_CPUF_I386_PGE);
	bigpage_ok = _cpufeature(_CPUF_I386_PSE);

	/* Set the global bit on PTEs and PDEs if available. */
	if(global_bit_ok)
		global_bit = I386_VM_GLOBAL;

	/* The kernel and boot time processes need an identity mapping.
	 * We use full PDE's for this without separate page tables.
	 * Figure out which pde we can start using for other purposes.
	 */
	id_map_high_pde = usedlimit / I386_BIG_PAGE_SIZE;

	/* We have to make mappings up till here. */
	free_pde = id_map_high_pde+1;

        /* Initial (current) range of our virtual address space. */
        lo = CLICK2ABS(vmprocess->vm_arch.vm_seg[T].mem_phys);
        hi = CLICK2ABS(vmprocess->vm_arch.vm_seg[S].mem_phys +
                vmprocess->vm_arch.vm_seg[S].mem_len);
                  
        assert(!(lo % I386_PAGE_SIZE)); 
        assert(!(hi % I386_PAGE_SIZE));
 
        if(lo < VM_PROCSTART) {
                moveup = VM_PROCSTART - lo;
                assert(!(VM_PROCSTART % I386_PAGE_SIZE));
                assert(!(lo % I386_PAGE_SIZE));
                assert(!(moveup % I386_PAGE_SIZE));
        }
        
        /* Make new page table for ourselves, partly copied
         * from the current one.
         */     
        if(pt_new(newpt) != OK)
                panic("pt_init: pt_new failed"); 

        /* Set up mappings for VM process. */
        for(v = lo; v < hi; v += I386_PAGE_SIZE)  {
                /* We have to write the new position in the PT,
                 * so we can move our segments.
                 */ 
                if(pt_writemap(vmprocess, newpt, v+moveup, v, I386_PAGE_SIZE,
                        I386_VM_PRESENT|I386_VM_WRITE|I386_VM_USER, 0) != OK)
                        panic("pt_init: pt_writemap failed");
        }
       
        /* Move segments up too. */
        vmprocess->vm_arch.vm_seg[T].mem_phys += ABS2CLICK(moveup);
        vmprocess->vm_arch.vm_seg[D].mem_phys += ABS2CLICK(moveup);
        vmprocess->vm_arch.vm_seg[S].mem_phys += ABS2CLICK(moveup);
       
	/* Allocate us a page table in which to remember page directory
	 * pointers.
	 */
	if(!(page_directories = vm_allocpage(&page_directories_phys,
		VMP_PAGETABLE)))
                panic("no virt addr for vm mappings");

	memset(page_directories, 0, I386_PAGE_SIZE);
       
        /* Increase our hardware data segment to create virtual address
         * space above our stack. We want to increase it to VM_DATATOP,
         * like regular processes have.
         */
        extra_clicks = ABS2CLICK(VM_DATATOP - hi);
        vmprocess->vm_arch.vm_seg[S].mem_len += extra_clicks;
       
        /* We pretend to the kernel we have a huge stack segment to
         * increase our data segment.
         */
        vmprocess->vm_arch.vm_data_top =
                (vmprocess->vm_arch.vm_seg[S].mem_vir +
                vmprocess->vm_arch.vm_seg[S].mem_len) << CLICK_SHIFT;
       
        /* Where our free virtual address space starts.
         * This is only a hint to the VM system.
         */
        newpt->pt_virtop = 0;

        /* Let other functions know VM now has a private page table. */
        vmprocess->vm_flags |= VMF_HASPT;

	/* Now reserve another pde for kernel's own mappings. */
	{
		int kernmap_pde;
		phys_bytes addr, len;
		int flags, index = 0;
		u32_t offset = 0;

		kernmap_pde = free_pde++;
		offset = kernmap_pde * I386_BIG_PAGE_SIZE;

		while(sys_vmctl_get_mapping(index, &addr, &len,
			&flags) == OK)  {
			vir_bytes vir;
			if(index >= MAX_KERNMAPPINGS)
                		panic("VM: too many kernel mappings: %d", index);
			kern_mappings[index].phys_addr = addr;
			kern_mappings[index].len = len;
			kern_mappings[index].lin_addr = offset;
			/* Replace the kernel-supplied flags with our own
			 * protection bits; of the originals, only
			 * VMMF_UNCACHED is honoured below.
			 */
			kern_mappings[index].flags =
				I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE |
				global_bit;
			if(flags & VMMF_UNCACHED)
				kern_mappings[index].flags |= PTF_NOCACHE;
			if(addr % I386_PAGE_SIZE)
				panic("VM: addr unaligned: 0x%lx", (unsigned long) addr);
			if(len % I386_PAGE_SIZE)
				panic("VM: len unaligned: 0x%lx", (unsigned long) len);
			vir = arch_map2vir(&vmproc[VMP_SYSTEM], offset);
			if(sys_vmctl_reply_mapping(index, vir) != OK)
                		panic("VM: reply failed");
			offset += len;
			index++;
			kernmappings++;
		}
	}

	/* Find a PDE below processes available for mapping in the
	 * page directories. Note that the entry is created writable
	 * (I386_VM_WRITE), since VM itself updates these pages.
	 */
	pagedir_pde = free_pde++;
	pagedir_pde_val = (page_directories_phys & I386_VM_ADDR_MASK) |
			I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;

	/* Tell kernel about free pde's. */
	while(free_pde*I386_BIG_PAGE_SIZE < VM_PROCSTART) {
		if((r=sys_vmctl(SELF, VMCTL_I386_FREEPDE, free_pde++)) != OK) {
			panic("VMCTL_I386_FREEPDE failed: %d", r);
		}
	}

	/* First PDE in use by processes. */
	proc_pde = free_pde;

        /* Give our process the new, copied, private page table. */
	pt_mapkernel(newpt);	/* didn't know about vm_dir pages earlier */
        pt_bind(newpt, vmprocess);
       
	/* new segment limit for the kernel after paging is enabled */
	ep_data.data_seg_limit = free_pde*I386_BIG_PAGE_SIZE;
	/* the memory map which must be installed after paging is enabled */
	ep_data.mem_map = vmprocess->vm_arch.vm_seg;

	/* Now actually enable paging. */
	if(sys_vmctl_enable_paging(&ep_data) != OK)
        	panic("pt_init: enable paging failed");

        /* Back to reality - this is where the stack actually is. */
        vmprocess->vm_arch.vm_seg[S].mem_len -= extra_clicks;

        /* Pretend VM stack top is the same as any regular process, not to
         * have discrepancies with new VM instances later on.
         */
        vmprocess->vm_stacktop = VM_STACKTOP;

        /* All OK. */
        return;
}
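
The PDE bookkeeping in pt_init() is plain arithmetic: each i386 PDE covers one 4 MiB "big page", the identity map ends at usedlimit / 4 MiB, and every PDE from there up to VM_PROCSTART is handed to the kernel as free. A standalone illustration with made-up example values (the real usedlimit and VM_PROCSTART come from the boot environment):

#include <stdio.h>

#define BIG_PAGE_SIZE (4UL * 1024 * 1024)	/* one i386 PDE: 4 MiB */

int main(void)
{
	unsigned long usedlimit = 32UL * 1024 * 1024;  /* example: 32 MiB used */
	unsigned long procstart = 256UL * 1024 * 1024; /* example VM_PROCSTART */
	int id_map_high_pde = usedlimit / BIG_PAGE_SIZE;
	int free_pde = id_map_high_pde + 1;
	int nfree = 0;

	/* Same loop shape as the VMCTL_I386_FREEPDE loop above. */
	while(free_pde * BIG_PAGE_SIZE < procstart) {
		free_pde++;
		nfree++;
	}
	printf("identity map through PDE %d; %d free PDEs below processes\n",
		id_map_high_pde, nfree);
	return 0;
}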
Example #4
/*===========================================================================*
 *                             pt_init_mem                                   *
 *===========================================================================*/
PUBLIC void pt_init_mem()
{
/* Architecture-specific memory initialization. Make sure all the pages
 * shared with the kernel and VM's page tables are mapped above the stack,
 * so that we can easily transfer existing mappings for new VM instances.
 */
        u32_t new_page_directories_phys, *new_page_directories;
        u32_t new_pt_dir_phys, *new_pt_dir;
        u32_t new_pt_phys, *new_pt; 
        pt_t *vmpt;
        int i;

        vmpt = &vmprocess->vm_pt;

        /* We should be running this when VM has been assigned a page
         * table and memory initialization has already been performed.
         */
        assert(vmprocess->vm_flags & VMF_HASPT);
        assert(meminit_done);

        /* Throw away static spare pages. */
	vm_checkspares();
	for(i = 0; i < SPAREPAGES; i++) {
		if(sparepages[i].page && (vir_bytes) sparepages[i].page
			< vmprocess->vm_stacktop) {
			sparepages[i].page = NULL;
			missing_spares++;
		}
	}
	vm_checkspares();

        /* Reallocate the page holding the page directory pointers. */
	if(!(new_page_directories = vm_allocpage(&new_page_directories_phys,
		VMP_PAGETABLE)))
                panic("unable to reallocate page for page dir ptrs");
	assert((vir_bytes) new_page_directories >= vmprocess->vm_stacktop);
	memcpy(new_page_directories, page_directories, I386_PAGE_SIZE);
	page_directories = new_page_directories;
	pagedir_pde_val = (new_page_directories_phys & I386_VM_ADDR_MASK) |
			(pagedir_pde_val & ~I386_VM_ADDR_MASK);

	/* Remap in kernel. */
	pt_mapkernel(vmpt);

	/* Reallocate VM's page directory. */
	if((vir_bytes) vmpt->pt_dir < vmprocess->vm_stacktop) {
		if(!(new_pt_dir= vm_allocpage(&new_pt_dir_phys, VMP_PAGEDIR))) {
			panic("unable to reallocate VM's page directory");
		}
		assert((vir_bytes) new_pt_dir >= vmprocess->vm_stacktop);
		memcpy(new_pt_dir, vmpt->pt_dir, I386_PAGE_SIZE);
		vmpt->pt_dir = new_pt_dir;
		vmpt->pt_dir_phys = new_pt_dir_phys;
		pt_bind(vmpt, vmprocess);
	}

	/* Reallocate VM's page tables. */
	for(i = proc_pde; i < I386_VM_DIR_ENTRIES; i++) {
		if(!(vmpt->pt_dir[i] & I386_VM_PRESENT)) {
			continue;
		}
		assert(vmpt->pt_pt[i]);
		if((vir_bytes) vmpt->pt_pt[i] >= vmprocess->vm_stacktop) {
			continue;
		}
		vm_checkspares();
		if(!(new_pt = vm_allocpage(&new_pt_phys, VMP_PAGETABLE)))
			panic("unable to reallocate VM's page table");
		assert((vir_bytes) new_pt >= vmprocess->vm_stacktop);
		memcpy(new_pt, vmpt->pt_pt[i], I386_PAGE_SIZE);
		vmpt->pt_pt[i] = new_pt;
		vmpt->pt_dir[i] = (new_pt_phys & I386_VM_ADDR_MASK) |
			(vmpt->pt_dir[i] & ~I386_VM_ADDR_MASK);
	}
}
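
The relocation test pt_init_mem() applies to each object is the same throughout: anything still below vm_stacktop lives in the static boot-time area and must be copied to a freshly allocated page above the stack. A hedged sketch of that predicate (needs_relocation() is a hypothetical helper, assuming the struct vmproc and vir_bytes types used above):

/* Sketch: does this page-table object still live in the static area? */
static int needs_relocation(struct vmproc *vmp, void *obj)
{
	/* Objects at or above the stack top were allocated dynamically
	 * and can stay where they are.
	 */
	return (vir_bytes) obj < vmp->vm_stacktop;
}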