Example #1
/*===========================================================================*
 *				pt_new			     		     *
 *===========================================================================*/
PUBLIC int pt_new(pt_t *pt)
{
/* Allocate a pagetable root. On i386, allocate a page-aligned page directory
 * and set its entries to 0 (indicating no page tables are allocated). Look up
 * its physical address, as we'll need that in the future. Verify it's
 * page-aligned.
 */
	int i;

	/* Never re-allocate or move a process slot's page directory once
	 * it has been created. This is marginally faster, but it also
	 * avoids having to invalidate the page mappings in the in-kernel
	 * page tables that point to the page directories (the
	 * page_directories data).
	 */
	if(!pt->pt_dir &&
	  !(pt->pt_dir = vm_allocpage(&pt->pt_dir_phys, VMP_PAGEDIR))) {
		return ENOMEM;
	}

	for(i = 0; i < I386_VM_DIR_ENTRIES; i++) {
		pt->pt_dir[i] = 0; /* invalid entry (I386_VM_PRESENT bit = 0) */
		pt->pt_pt[i] = NULL;
	}

	/* Where to start looking for free virtual address space? */
	pt->pt_virtop = 0;

	/* Map in kernel. */
	if(pt_mapkernel(pt) != OK)
		panic("pt_new: pt_mapkernel failed");

	return OK;
}
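A minimal usage sketch, assuming a caller that sets up a fresh process slot; the wrapper name and error handling are illustrative, not the actual call site:

/* Hypothetical caller: create (or re-initialize) the page table of a
 * process slot. pt_new() reuses an existing pt_dir, so calling it twice
 * on the same slot only clears the entries again.
 */
static int proc_pt_setup(struct vmproc *vmp)
{
	int r;

	if((r = pt_new(&vmp->vm_pt)) != OK)
		return r;	/* ENOMEM: no page for the directory */
	return OK;
}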
Example #2
struct slabdata *newslabdata(int list)
{
	struct slabdata *n;
	phys_bytes p;

	vm_assert(sizeof(*n) == VM_PAGE_SIZE);

	if(!(n = vm_allocpage(&p, VMP_SLAB))) {
		printf("newslabdata: vm_allocpage failed\n");
		return NULL;
	}
	memset(n->sdh.usebits, 0, sizeof(n->sdh.usebits));
	pages++;

	n->sdh.phys = p;
#if SANITYCHECKS
	n->sdh.magic1 = MAGIC1;
	n->sdh.magic2 = MAGIC2;
#endif
	n->sdh.nused = 0;
	n->sdh.freeguess = 0;
	n->sdh.list = list;

#if SANITYCHECKS
	n->sdh.writable = WRITABLE_HEADER;
	SLABDATAUNWRITABLE(n);
#endif

	return n;
}
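A brief usage sketch, assuming a caller that grows a size-class freelist; get_slab_for_class is a hypothetical wrapper, not code from the allocator:

static struct slabdata *get_slab_for_class(int list)
{
	struct slabdata *sd;

	if(!(sd = newslabdata(list)))
		return NULL;	/* out of pages; caller must fail the alloc */

	/* Fresh page: no objects in use and the use-bitmap is cleared,
	 * so the first allocation scan can start at freeguess 0.
	 */
	vm_assert(sd->sdh.nused == 0);
	return sd;
}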
Example #3
/*===========================================================================*
 *				pt_ptalloc		     		     *
 *===========================================================================*/
PRIVATE int pt_ptalloc(pt_t *pt, int pde, u32_t flags)
{
/* Allocate a page table and write its address into the page directory. */
	int i;
	u32_t pt_phys;

	/* Argument must make sense. */
	assert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);
	assert(!(flags & ~(PTF_ALLFLAGS)));

	/* We don't expect to overwrite a page directory entry, nor the
	 * storage for the page table.
	 */
	assert(!(pt->pt_dir[pde] & I386_VM_PRESENT));
	assert(!pt->pt_pt[pde]);

	/* Get storage for the page table. */
	if(!(pt->pt_pt[pde] = vm_allocpage(&pt_phys, VMP_PAGETABLE)))
		return ENOMEM;

	for(i = 0; i < I386_VM_PT_ENTRIES; i++)
		pt->pt_pt[pde][i] = 0;	/* Empty entry. */

	/* Make page directory entry.
	 * The PDE is always 'present,' 'writable,' and 'user accessible,'
	 * relying on the PTE for protection.
	 */
	pt->pt_dir[pde] = (pt_phys & I386_VM_ADDR_MASK) | flags
		| I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;

	return OK;
}
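A sketch of the lazy-allocation pattern around pt_ptalloc(); the wrapper is hypothetical, and I386_VM_PDE() is assumed to be the usual MINIX macro that extracts the directory index from a linear address:

static int ensure_ptalloc(pt_t *pt, vir_bytes v)
{
	int pde = I386_VM_PDE(v);

	if(pt->pt_dir[pde] & I386_VM_PRESENT)
		return OK;	/* covering page table already exists */

	/* No extra PTE flags; the PDE itself gets PRESENT|USER|WRITE. */
	return pt_ptalloc(pt, pde, 0);
}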
/*===========================================================================*
 *				pt_identity		     		     *
 *===========================================================================*/
PUBLIC int pt_identity(pt_t *pt)
{
    /* Allocate a pagetable that does a 1:1 mapping. */
    int i;

    /* Allocate page directory. */
    if(!pt->pt_dir &&
            !(pt->pt_dir = vm_allocpage(&pt->pt_dir_phys, VMP_PAGEDIR))) {
        return ENOMEM;
    }

    for(i = 0; i < I386_VM_DIR_ENTRIES; i++) {
        phys_bytes addr;
        addr = I386_BIG_PAGE_SIZE*i;
        pt->pt_dir[i] = (addr & I386_VM_ADDR_MASK_4MB) |
                        I386_VM_BIGPAGE|
                        I386_VM_USER|
                        I386_VM_PRESENT|I386_VM_WRITE;
        pt->pt_pt[i] = NULL;
    }

    /* Where to start looking for free virtual address space? */
    pt->pt_virtop = 0;

    return OK;
}
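A worked check of the 1:1 property, assuming 4 MB big pages: directory entry i covers [i*4MB, (i+1)*4MB), so e.g. entry 1 maps virtual 0x00500000 to physical 0x00500000. An illustrative self-check for one directory slot:

static void check_identity_pde(pt_t *pt, int i)
{
	phys_bytes addr = I386_BIG_PAGE_SIZE * i;

	/* addr is 4MB-aligned, so the masked entry must equal it. */
	assert((pt->pt_dir[i] & I386_VM_ADDR_MASK_4MB) == addr);
	assert(pt->pt_dir[i] & I386_VM_BIGPAGE);
	assert(pt->pt_dir[i] & I386_VM_PRESENT);
}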
Example #5
/*===========================================================================*
 *				vm_checkspares		     		     *
 *===========================================================================*/
PRIVATE void *vm_checkspares(void)
{
	int s, n = 0;
	static int total = 0, worst = 0;
	assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
	for(s = 0; s < SPAREPAGES && missing_spares > 0; s++)
	    if(!sparepages[s].page) {
		n++;
		if((sparepages[s].page = vm_allocpage(&sparepages[s].phys, 
			VMP_SPARE))) {
			missing_spares--;
			assert(missing_spares >= 0);
			assert(missing_spares <= SPAREPAGES);
		} else {
			printf("VM: warning: couldn't get new spare page\n");
		}
	}
	if(worst < n) worst = n;
	total += n;

	return NULL;
}
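A sketch of the consumer side that vm_checkspares() refills; this illustrates how an allocation might draw from sparepages[] before VM has a working page table, and is not the real vm_allocpage() path:

static void *take_spare(phys_bytes *physp)
{
	int s;

	for(s = 0; s < SPAREPAGES; s++) {
		if(sparepages[s].page) {
			void *p = sparepages[s].page;
			*physp = sparepages[s].phys;
			sparepages[s].page = NULL;
			missing_spares++;	/* vm_checkspares() refills */
			return p;
		}
	}
	return NULL;	/* no spares left */
}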
Example #6
/*===========================================================================*
 *				vm_addrok		     		     *
 *===========================================================================*/
int vm_addrok(void *vir, int writeflag)
{
    pt_t *pt = &vmprocess->vm_pt;
    int pde, pte;
    vir_bytes v = (vir_bytes) vir;

    pde = ARCH_VM_PDE(v);
    pte = ARCH_VM_PTE(v);

    if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) {
        printf("addr not ok: missing pde %d\n", pde);
        return 0;
    }

#if defined(__i386__)
    if(writeflag &&
            !(pt->pt_dir[pde] & ARCH_VM_PTE_RW)) {
        printf("addr not ok: pde %d present but pde unwritable\n", pde);
        return 0;
    }
#elif defined(__arm__)
    if(writeflag &&
            (pt->pt_dir[pde] & ARCH_VM_PTE_RO)) {
        printf("addr not ok: pde %d present but pde unwritable\n", pde);
        return 0;
    }

#endif
    if(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) {
        printf("addr not ok: missing pde %d / pte %d\n",
               pde, pte);
        return 0;
    }

#if defined(__i386__)
    if(writeflag &&
            !(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RW)) {
        printf("addr not ok: pde %d / pte %d present but unwritable\n",
               pde, pte);
#elif defined(__arm__)
    if(writeflag &&
            (pt->pt_pt[pde][pte] & ARCH_VM_PTE_RO)) {
        printf("addr not ok: pde %d / pte %d present but unwritable\n",
               pde, pte);
#endif
        return 0;
    }

    return 1;
}
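A usage sketch: validating a multi-page buffer before touching it. The page-rounding loop is an assumption about a possible caller, not code from VM:

static int vm_range_ok(void *buf, size_t len, int writeflag)
{
	vir_bytes v = (vir_bytes) buf;
	vir_bytes end = v + len;

	/* Round down to a page boundary, then test each covered page. */
	for(v -= v % VM_PAGE_SIZE; v < end; v += VM_PAGE_SIZE)
		if(!vm_addrok((void *) v, writeflag))
			return 0;
	return 1;
}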

/*===========================================================================*
 *				pt_ptalloc		     		     *
 *===========================================================================*/
static int pt_ptalloc(pt_t *pt, int pde, u32_t flags)
{
    /* Allocate a page table and write its address into the page directory. */
    int i;
    phys_bytes pt_phys;

    /* Argument must make sense. */
    assert(pde >= 0 && pde < ARCH_VM_DIR_ENTRIES);
    assert(!(flags & ~(PTF_ALLFLAGS)));

    /* We don't expect to overwrite a page directory entry, nor the
     * storage for the page table.
     */
    assert(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT));
    assert(!pt->pt_pt[pde]);

    /* Get storage for the page table. */
    if(!(pt->pt_pt[pde] = vm_allocpage(&pt_phys, VMP_PAGETABLE)))
        return ENOMEM;

    for(i = 0; i < ARCH_VM_PT_ENTRIES; i++)
        pt->pt_pt[pde][i] = 0;	/* Empty entry. */

    /* Make page directory entry.
     * The PDE is always 'present,' 'writable,' and 'user accessible,'
     * relying on the PTE for protection.
     */
#if defined(__i386__)
    pt->pt_dir[pde] = (pt_phys & ARCH_VM_ADDR_MASK) | flags
                      | ARCH_VM_PDE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW;
#elif defined(__arm__)
    pt->pt_dir[pde] = (pt_phys & ARCH_VM_PDE_MASK)
                      | ARCH_VM_PDE_PRESENT | ARM_VM_PDE_DOMAIN; //LSC FIXME
#endif

    return OK;
}
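A sketch of the on-demand call pattern a pt_writemap()-style routine might use, allocating any missing page tables across a virtual range; the loop is illustrative:

static int alloc_range_pts(pt_t *pt, vir_bytes v, size_t bytes, u32_t flags)
{
	int pde, last = ARCH_VM_PDE(v + bytes - 1);

	for(pde = ARCH_VM_PDE(v); pde <= last; pde++) {
		int r;

		if(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)
			continue;	/* already have this page table */
		if((r = pt_ptalloc(pt, pde, flags)) != OK)
			return r;	/* ENOMEM propagates to caller */
	}
	return OK;
}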
Example #7
/*===========================================================================*
 *                              pt_init                                      *
 *===========================================================================*/
void pt_init(void)
{
    pt_t *newpt;
    int s, r, p;
    vir_bytes sparepages_mem;
#if defined(__arm__)
    vir_bytes sparepagedirs_mem;
#endif
    static u32_t currentpagedir[ARCH_VM_DIR_ENTRIES];
    int m = kernel_boot_info.kern_mod;
#if defined(__i386__)
    int global_bit_ok = 0;
    u32_t mypdbr; /* Page Directory Base Register (cr3) value */
#elif defined(__arm__)
    u32_t myttbr;
#endif

    /* Find what the physical location of the kernel is. */
    assert(m >= 0);
    assert(m < kernel_boot_info.mods_with_kernel);
    assert(kernel_boot_info.mods_with_kernel < MULTIBOOT_MAX_MODS);
    kern_mb_mod = &kernel_boot_info.module_list[m];
    kern_size = kern_mb_mod->mod_end - kern_mb_mod->mod_start;
    assert(!(kern_mb_mod->mod_start % ARCH_BIG_PAGE_SIZE));
    assert(!(kernel_boot_info.vir_kern_start % ARCH_BIG_PAGE_SIZE));
    kern_start_pde = kernel_boot_info.vir_kern_start / ARCH_BIG_PAGE_SIZE;

    /* Get ourselves spare pages. */
    sparepages_mem = (vir_bytes) static_sparepages;
    assert(!(sparepages_mem % VM_PAGE_SIZE));

#if defined(__arm__)
    /* Get ourselves spare pagedirs. */
    sparepagedirs_mem = (vir_bytes) static_sparepagedirs;
    assert(!(sparepagedirs_mem % ARCH_PAGEDIR_SIZE));
#endif

    /* Spare pages are used to allocate memory before VM has its own page
     * table into which things (i.e. arbitrary physical memory) can be
     * mapped. We get them by pre-allocating space in our bss (allocated
     * and mapped in by the kernel) in static_sparepages. We also need the
     * physical addresses, though; we look them up now so they are ready
     * for use.
     */
#if defined(__arm__)
    missing_sparedirs = 0;
    assert(STATIC_SPAREPAGEDIRS <= SPAREPAGEDIRS);
    for(s = 0; s < SPAREPAGEDIRS; s++) {
        vir_bytes v = (sparepagedirs_mem + s*ARCH_PAGEDIR_SIZE);
        phys_bytes ph;
        if((r=sys_umap(SELF, VM_D, (vir_bytes) v,
                       ARCH_PAGEDIR_SIZE, &ph)) != OK)
            panic("pt_init: sys_umap failed: %d", r);
        if(s >= STATIC_SPAREPAGEDIRS) {
            sparepagedirs[s].pagedir = NULL;
            missing_sparedirs++;
            continue;
        }
        sparepagedirs[s].pagedir = (void *) v;
        sparepagedirs[s].phys = ph;
    }
#endif

    if(!(spare_pagequeue = reservedqueue_new(SPAREPAGES, 1, 1, 0)))
        panic("reservedqueue_new for single pages failed");

    assert(STATIC_SPAREPAGES < SPAREPAGES);
    for(s = 0; s < STATIC_SPAREPAGES; s++) {
        void *v = (void *) (sparepages_mem + s*VM_PAGE_SIZE);
        phys_bytes ph;
        if((r=sys_umap(SELF, VM_D, (vir_bytes) v,
                       VM_PAGE_SIZE*SPAREPAGES, &ph)) != OK)
            panic("pt_init: sys_umap failed: %d", r);
        reservedqueue_add(spare_pagequeue, v, ph);
    }

#if defined(__i386__)
    /* global bit and 4MB pages available? */
    global_bit_ok = _cpufeature(_CPUF_I386_PGE);
    bigpage_ok = _cpufeature(_CPUF_I386_PSE);

    /* Set bit for PTE's and PDE's if available. */
    if(global_bit_ok)
        global_bit = I386_VM_GLOBAL;
#endif

    /* Now reserve another pde for kernel's own mappings. */
    {
        int kernmap_pde;
        phys_bytes addr, len;
        int flags, index = 0;
        u32_t offset = 0;

        kernmap_pde = freepde();
        offset = kernmap_pde * ARCH_BIG_PAGE_SIZE;

        while(sys_vmctl_get_mapping(index, &addr, &len,
                                    &flags) == OK)  {
            int usedpde;
            vir_bytes vir;
            if(index >= MAX_KERNMAPPINGS)
                panic("VM: too many kernel mappings: %d", index);
            kern_mappings[index].phys_addr = addr;
            kern_mappings[index].len = len;
            kern_mappings[index].flags = flags;
            kern_mappings[index].vir_addr = offset;
            kern_mappings[index].flags =
                ARCH_VM_PTE_PRESENT;
            if(flags & VMMF_UNCACHED)
#if defined(__i386__)
                kern_mappings[index].flags |= PTF_NOCACHE;
#elif defined(__arm__)
                kern_mappings[index].flags |= ARM_VM_PTE_DEVICE;
#endif
            if(flags & VMMF_USER)
                kern_mappings[index].flags |= ARCH_VM_PTE_USER;
#if defined(__arm__)
            else
                kern_mappings[index].flags |= ARM_VM_PTE_SUPER;
#endif
            if(flags & VMMF_WRITE)
                kern_mappings[index].flags |= ARCH_VM_PTE_RW;
#if defined(__i386__)
            if(flags & VMMF_GLO)
                kern_mappings[index].flags |= I386_VM_GLOBAL;
#elif defined(__arm__)
            else
                kern_mappings[index].flags |= ARCH_VM_PTE_RO;
#endif
            if(addr % VM_PAGE_SIZE)
                panic("VM: addr unaligned: %d", addr);
            if(len % VM_PAGE_SIZE)
                panic("VM: len unaligned: %d", len);
            vir = offset;
            if(sys_vmctl_reply_mapping(index, vir) != OK)
                panic("VM: reply failed");
            offset += len;
            index++;
            kernmappings++;

            usedpde = ARCH_VM_PDE(offset);
            while(usedpde > kernmap_pde) {
                int newpde = freepde();
                assert(newpde == kernmap_pde+1);
                kernmap_pde = newpde;
            }
        }
    }

    /* Reserve PDEs available for mapping in the page directories. */
    {
        int pd;
        for(pd = 0; pd < MAX_PAGEDIR_PDES; pd++) {
            struct pdm *pdm = &pagedir_mappings[pd];
            phys_bytes ph;

            pdm->pdeno = freepde();

            /* Allocate us a page table in which to
             * remember page directory pointers.
             */
            if(!(pdm->page_directories =
                        vm_allocpage(&ph, VMP_PAGETABLE))) {
                panic("no virt addr for vm mappings");
            }
            memset(pdm->page_directories, 0, VM_PAGE_SIZE);
            pdm->phys = ph;

#if defined(__i386__)
            pdm->val = (ph & ARCH_VM_ADDR_MASK) |
                       ARCH_VM_PDE_PRESENT | ARCH_VM_PTE_RW;
#elif defined(__arm__)
            pdm->val = (ph & ARCH_VM_PDE_MASK)
                       | ARCH_VM_PDE_PRESENT
                       | ARM_VM_PDE_DOMAIN; //LSC FIXME
#endif
        }
    }

    /* All right. Now we have to make our own page directory and page
     * tables, which the kernel has already set up, accessible to us. It's
     * easier to understand if we just copy all the required pages (i.e.
     * page directory and page tables), and set up the pointers as if VM
     * had done it itself.
     *
     * This allocation happens without using any page table; it just
     * uses spare pages.
     */
    newpt = &vmprocess->vm_pt;
    if(pt_new(newpt) != OK)
        panic("vm pt_new failed");

    /* Get our current pagedir so we can see it. */
#if defined(__i386__)
    if(sys_vmctl_get_pdbr(SELF, &mypdbr) != OK)
#elif defined(__arm__)
    if(sys_vmctl_get_pdbr(SELF, &myttbr) != OK)
#endif
        panic("VM: sys_vmctl_get_pdbr failed");
#if defined(__i386__)
    if(sys_vircopy(NONE, mypdbr, SELF,
                   (vir_bytes) currentpagedir, VM_PAGE_SIZE) != OK)
#elif defined(__arm__)
    if(sys_vircopy(NONE, myttbr, SELF,
                   (vir_bytes) currentpagedir, ARCH_PAGEDIR_SIZE) != OK)
#endif
        panic("VM: sys_vircopy failed");

    /* We have mapped in kernel ourselves; now copy mappings for VM
     * that kernel made, including allocations for BSS. Skip identity
     * mapping bits; just map in VM.
     */
    for(p = 0; p < ARCH_VM_DIR_ENTRIES; p++) {
        u32_t entry = currentpagedir[p];
        phys_bytes ptaddr_kern, ptaddr_us;

        /* BIGPAGEs are kernel mapping (do ourselves) or boot
         * identity mapping (don't want).
         */
        if(!(entry & ARCH_VM_PDE_PRESENT)) continue;
        if((entry & ARCH_VM_BIGPAGE)) continue;

        if(pt_ptalloc(newpt, p, 0) != OK)
            panic("pt_ptalloc failed");
        assert(newpt->pt_dir[p] & ARCH_VM_PDE_PRESENT);

#if defined(__i386__)
        ptaddr_kern = entry & ARCH_VM_ADDR_MASK;
        ptaddr_us = newpt->pt_dir[p] & ARCH_VM_ADDR_MASK;
#elif defined(__arm__)
        ptaddr_kern = entry & ARCH_VM_PDE_MASK;
        ptaddr_us = newpt->pt_dir[p] & ARCH_VM_PDE_MASK;
#endif

        /* Copy kernel-initialized pagetable contents into our
         * normally accessible pagetable.
         */
        if(sys_abscopy(ptaddr_kern, ptaddr_us, VM_PAGE_SIZE) != OK)
            panic("pt_init: abscopy failed");
    }

    /* Inform the kernel that VM has a newly built page table. */
    assert(vmproc[VM_PROC_NR].vm_endpoint == VM_PROC_NR);
    pt_bind(newpt, &vmproc[VM_PROC_NR]);

    pt_init_done = 1;

    /* All OK. */
    return;
}
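A worked example of the kern_start_pde arithmetic near the top of pt_init(), assuming ARCH_BIG_PAGE_SIZE is 4 MB and a kernel linked at 0xF0000000 (the concrete base is an assumption for illustration):

/*
 * vir_kern_start = 0xF0000000
 * kern_start_pde = 0xF0000000 / 0x00400000 = 960
 *
 * so the kernel occupies directory slots 960 and up, leaving other slots
 * for freepde() to hand out for kernel mappings and the page directory
 * windows reserved above.
 */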
Example #8
/*===========================================================================*
 *                              pt_init                                      *
 *===========================================================================*/
PUBLIC void pt_init(phys_bytes usedlimit)
{
/* By default, the kernel gives us a data segment with pre-allocated
 * memory that then can't grow. We want to be able to allocate memory
 * dynamically, however. So here we copy the part of the page table
 * that's ours, so we get a private page table. Then we increase the
 * hardware segment size so we can allocate memory above our stack.
 */
        pt_t *newpt;
        int s, r;
        vir_bytes v;
        phys_bytes lo, hi; 
        vir_bytes extra_clicks;
        u32_t moveup = 0;
	int global_bit_ok = 0;
	int free_pde;
	int p;
	struct vm_ep_data ep_data;
	vir_bytes sparepages_mem;
	phys_bytes sparepages_ph;
	vir_bytes ptr;

        /* Shorthand. */
        newpt = &vmprocess->vm_pt;

        /* Get ourselves spare pages. */
        ptr = (vir_bytes) static_sparepages;
        ptr += I386_PAGE_SIZE - (ptr % I386_PAGE_SIZE);
        if(!(sparepages_mem = ptr))
		panic("pt_init: aalloc for spare failed");
        if((r=sys_umap(SELF, VM_D, (vir_bytes) sparepages_mem,
                I386_PAGE_SIZE*SPAREPAGES, &sparepages_ph)) != OK)
                panic("pt_init: sys_umap failed: %d", r);

        missing_spares = 0;
        assert(STATIC_SPAREPAGES < SPAREPAGES);
        for(s = 0; s < SPAREPAGES; s++) {
        	if(s >= STATIC_SPAREPAGES) {
        		sparepages[s].page = NULL;
        		missing_spares++;
        		continue;
        	}
        	sparepages[s].page = (void *) (sparepages_mem + s*I386_PAGE_SIZE);
        	sparepages[s].phys = sparepages_ph + s*I386_PAGE_SIZE;
        }

	/* global bit and 4MB pages available? */
	global_bit_ok = _cpufeature(_CPUF_I386_PGE);
	bigpage_ok = _cpufeature(_CPUF_I386_PSE);

	/* Set bit for PTE's and PDE's if available. */
	if(global_bit_ok)
		global_bit = I386_VM_GLOBAL;

	/* The kernel and boot time processes need an identity mapping.
	 * We use full PDE's for this without separate page tables.
	 * Figure out which pde we can start using for other purposes.
	 */
	id_map_high_pde = usedlimit / I386_BIG_PAGE_SIZE;

	/* We have to make mappings up till here. */
	free_pde = id_map_high_pde+1;

        /* Initial (current) range of our virtual address space. */
        lo = CLICK2ABS(vmprocess->vm_arch.vm_seg[T].mem_phys);
        hi = CLICK2ABS(vmprocess->vm_arch.vm_seg[S].mem_phys +
                vmprocess->vm_arch.vm_seg[S].mem_len);
                  
        assert(!(lo % I386_PAGE_SIZE)); 
        assert(!(hi % I386_PAGE_SIZE));
 
        if(lo < VM_PROCSTART) {
                moveup = VM_PROCSTART - lo;
                assert(!(VM_PROCSTART % I386_PAGE_SIZE));
                assert(!(lo % I386_PAGE_SIZE));
                assert(!(moveup % I386_PAGE_SIZE));
        }
        
        /* Make new page table for ourselves, partly copied
         * from the current one.
         */     
        if(pt_new(newpt) != OK)
                panic("pt_init: pt_new failed"); 

        /* Set up mappings for VM process. */
        for(v = lo; v < hi; v += I386_PAGE_SIZE)  {
                phys_bytes addr;
                u32_t flags; 
        
                /* We have to write the new position in the PT,
                 * so we can move our segments.
                 */ 
                if(pt_writemap(vmprocess, newpt, v+moveup, v, I386_PAGE_SIZE,
                        I386_VM_PRESENT|I386_VM_WRITE|I386_VM_USER, 0) != OK)
                        panic("pt_init: pt_writemap failed");
        }
       
        /* Move segments up too. */
        vmprocess->vm_arch.vm_seg[T].mem_phys += ABS2CLICK(moveup);
        vmprocess->vm_arch.vm_seg[D].mem_phys += ABS2CLICK(moveup);
        vmprocess->vm_arch.vm_seg[S].mem_phys += ABS2CLICK(moveup);
       
	/* Allocate us a page table in which to remember page directory
	 * pointers.
	 */
	if(!(page_directories = vm_allocpage(&page_directories_phys,
		VMP_PAGETABLE)))
                panic("no virt addr for vm mappings");

	memset(page_directories, 0, I386_PAGE_SIZE);
       
        /* Increase our hardware data segment to create virtual address
         * space above our stack. We want to increase it to VM_DATATOP,
         * like regular processes have.
         */
        extra_clicks = ABS2CLICK(VM_DATATOP - hi);
        vmprocess->vm_arch.vm_seg[S].mem_len += extra_clicks;
       
        /* We pretend to the kernel we have a huge stack segment to
         * increase our data segment.
         */
        vmprocess->vm_arch.vm_data_top =
                (vmprocess->vm_arch.vm_seg[S].mem_vir +
                vmprocess->vm_arch.vm_seg[S].mem_len) << CLICK_SHIFT;
       
        /* Where our free virtual address space starts.
         * This is only a hint to the VM system.
         */
        newpt->pt_virtop = 0;

        /* Let other functions know VM now has a private page table. */
        vmprocess->vm_flags |= VMF_HASPT;

	/* Now reserve another pde for kernel's own mappings. */
	{
		int kernmap_pde;
		phys_bytes addr, len;
		int flags, index = 0;
		u32_t offset = 0;

		kernmap_pde = free_pde++;
		offset = kernmap_pde * I386_BIG_PAGE_SIZE;

		while(sys_vmctl_get_mapping(index, &addr, &len,
			&flags) == OK)  {
			vir_bytes vir;
			if(index >= MAX_KERNMAPPINGS)
                		panic("VM: too many kernel mappings: %d", index);
			kern_mappings[index].phys_addr = addr;
			kern_mappings[index].len = len;
			kern_mappings[index].flags = flags;
			kern_mappings[index].lin_addr = offset;
			kern_mappings[index].flags =
				I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE |
				global_bit;
			if(flags & VMMF_UNCACHED)
				kern_mappings[index].flags |= PTF_NOCACHE;
			if(addr % I386_PAGE_SIZE)
                		panic("VM: addr unaligned: %d", addr);
			if(len % I386_PAGE_SIZE)
                		panic("VM: len unaligned: %d", len);
			vir = arch_map2vir(&vmproc[VMP_SYSTEM], offset);
			if(sys_vmctl_reply_mapping(index, vir) != OK)
                		panic("VM: reply failed");
			offset += len;
			index++;
			kernmappings++;
		}
	}

	/* Find a PDE below the process range that is available for mapping
	 * in the page directories (read-only).
	 */
	pagedir_pde = free_pde++;
	pagedir_pde_val = (page_directories_phys & I386_VM_ADDR_MASK) |
			I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;

	/* Tell kernel about free pde's. */
	while(free_pde*I386_BIG_PAGE_SIZE < VM_PROCSTART) {
		if((r=sys_vmctl(SELF, VMCTL_I386_FREEPDE, free_pde++)) != OK) {
			panic("VMCTL_I386_FREEPDE failed: %d", r);
		}
	}

	/* first pde in use by process. */
	proc_pde = free_pde;

        /* Give our process the new, copied, private page table. */
	pt_mapkernel(newpt);	/* didn't know about vm_dir pages earlier */
        pt_bind(newpt, vmprocess);
       
	/* new segment limit for the kernel after paging is enabled */
	ep_data.data_seg_limit = free_pde*I386_BIG_PAGE_SIZE;
	/* the memory map which must be installed after paging is enabled */
	ep_data.mem_map = vmprocess->vm_arch.vm_seg;

	/* Now actually enable paging. */
	if(sys_vmctl_enable_paging(&ep_data) != OK)
        	panic("pt_init: enable paging failed");

        /* Back to reality - this is where the stack actually is. */
        vmprocess->vm_arch.vm_seg[S].mem_len -= extra_clicks;

        /* Pretend VM stack top is the same as any regular process, not to
         * have discrepancies with new VM instances later on.
         */
        vmprocess->vm_stacktop = VM_STACKTOP;

        /* All OK. */
        return;
}
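A worked example of the moveup computation above, with illustrative addresses (assuming VM_PROCSTART = 0x800000 and 4 KB clicks, i.e. CLICK_SHIFT = 12):

/*
 * lo     = CLICK2ABS(vm_seg[T].mem_phys)  e.g. 0x00400000
 * moveup = VM_PROCSTART - lo            = 0x00400000
 *
 * Every VM page is then remapped at v+moveup, and each segment base is
 * shifted up by ABS2CLICK(moveup) = 0x00400000 >> 12 = 0x400 clicks.
 */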
Example #9
/*===========================================================================*
 *                             pt_init_mem                                   *
 *===========================================================================*/
PUBLIC void pt_init_mem()
{
/* Architecture-specific memory initialization. Make sure all the pages
 * shared with the kernel and VM's page tables are mapped above the stack,
 * so that we can easily transfer existing mappings for new VM instances.
 */
        u32_t new_page_directories_phys, *new_page_directories;
        u32_t new_pt_dir_phys, *new_pt_dir;
        u32_t new_pt_phys, *new_pt; 
        pt_t *vmpt;
        int i;

        vmpt = &vmprocess->vm_pt;

        /* We should be running this when VM has been assigned a page
         * table and memory initialization has already been performed.
         */
        assert(vmprocess->vm_flags & VMF_HASPT);
        assert(meminit_done);

        /* Throw away static spare pages. */
	vm_checkspares();
	for(i = 0; i < SPAREPAGES; i++) {
		if(sparepages[i].page && (vir_bytes) sparepages[i].page
			< vmprocess->vm_stacktop) {
			sparepages[i].page = NULL;
			missing_spares++;
		}
	}
	vm_checkspares();

        /* Reallocate the page for page directory pointers. */
	if(!(new_page_directories = vm_allocpage(&new_page_directories_phys,
		VMP_PAGETABLE)))
                panic("unable to reallocate page for page dir ptrs");
	assert((vir_bytes) new_page_directories >= vmprocess->vm_stacktop);
	memcpy(new_page_directories, page_directories, I386_PAGE_SIZE);
	page_directories = new_page_directories;
	pagedir_pde_val = (new_page_directories_phys & I386_VM_ADDR_MASK) |
			(pagedir_pde_val & ~I386_VM_ADDR_MASK);

	/* Remap in kernel. */
	pt_mapkernel(vmpt);

	/* Reallocate VM's page directory. */
	if((vir_bytes) vmpt->pt_dir < vmprocess->vm_stacktop) {
		if(!(new_pt_dir= vm_allocpage(&new_pt_dir_phys, VMP_PAGEDIR))) {
			panic("unable to reallocate VM's page directory");
		}
		assert((vir_bytes) new_pt_dir >= vmprocess->vm_stacktop);
		memcpy(new_pt_dir, vmpt->pt_dir, I386_PAGE_SIZE);
		vmpt->pt_dir = new_pt_dir;
		vmpt->pt_dir_phys = new_pt_dir_phys;
		pt_bind(vmpt, vmprocess);
	}

	/* Reallocate VM's page tables. */
	for(i = proc_pde; i < I386_VM_DIR_ENTRIES; i++) {
		if(!(vmpt->pt_dir[i] & I386_VM_PRESENT)) {
			continue;
		}
		assert(vmpt->pt_pt[i]);
		if((vir_bytes) vmpt->pt_pt[i] >= vmprocess->vm_stacktop) {
			continue;
		}
		vm_checkspares();
		if(!(new_pt = vm_allocpage(&new_pt_phys, VMP_PAGETABLE)))
			panic("unable to reallocate VM's page table");
		assert((vir_bytes) new_pt >= vmprocess->vm_stacktop);
		memcpy(new_pt, vmpt->pt_pt[i], I386_PAGE_SIZE);
		vmpt->pt_pt[i] = new_pt;
		vmpt->pt_dir[i] = (new_pt_phys & I386_VM_ADDR_MASK) |
			(vmpt->pt_dir[i] & ~I386_VM_ADDR_MASK);
	}
}
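The reallocation steps above all use the same mask trick: swap the physical frame behind an entry while preserving its flag bits. Isolated as a sketch (the helper name is ours):

/* Keep the low flag bits of the old entry, substitute the new frame. */
static u32_t replace_frame(u32_t entry, phys_bytes new_phys)
{
	return (new_phys & I386_VM_ADDR_MASK) | (entry & ~I386_VM_ADDR_MASK);
}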