Example #1
PRIVATE void fpu_init(void)
{
    unsigned short cw, sw;

    fninit();
    sw = fnstsw();
    fnstcw(&cw);

    if((sw & 0xff) == 0 &&
            (cw & 0x103f) == 0x3f) {
        /* We have some sort of FPU, but don't check exact model.
         * Set CR0_NE and CR0_MP to handle fpu exceptions
         * in native mode. */
        write_cr0(read_cr0() | CR0_MP_NE);
        fpu_presence = 1;
        if(_cpufeature(_CPUF_I386_FXSR)) {
            register struct proc *rp;
            phys_bytes aligned_fp_area;
            u32_t cr4 = read_cr4() | CR4_OSFXSR; /* Enable FXSR. */

            /* Set OSXMMEXCPT only if SSE is supported; the FXSR
             * feature can be available without SSE.
             */
            if(_cpufeature(_CPUF_I386_SSE))
                cr4 |= CR4_OSXMMEXCPT;

            write_cr4(cr4);
            osfxsr_feature = 1;

            for (rp = BEG_PROC_ADDR; rp < END_PROC_ADDR; ++rp) {
                /* FXSR requires 16-byte alignment of the memory
                 * image, but unfortunately some old tools
                 * (probably the linker) ignore ".balign 16"
                 * applied to our memory image.
                 * Thus we have to do manual alignment.
                 */
                aligned_fp_area =
                    (phys_bytes) &rp->p_fpu_state.fpu_image;
                if(aligned_fp_area % FPUALIGN) {
                    aligned_fp_area += FPUALIGN -
                                       (aligned_fp_area % FPUALIGN);
                }
                rp->p_fpu_state.fpu_save_area_p =
                    (void *) aligned_fp_area;
            }
        } else {
            osfxsr_feature = 0;
        }
    } else {
        /* No FPU present. */
        fpu_presence = 0;
        osfxsr_feature = 0;
        return;
    }
}
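
The manual FXSR alignment loop above is a plain round-up-to-multiple computation. A minimal stand-alone sketch of the same rounding follows; the helper name align_up and the test values are illustrative, not part of the MINIX sources.

#include <assert.h>

/* Round addr up to the next multiple of align (align > 0); this is the
 * same adjustment fpu_init() applies with FPUALIGN above. */
static unsigned long align_up(unsigned long addr, unsigned long align)
{
    if (addr % align)
        addr += align - (addr % align);
    return addr;
}

int main(void)
{
    assert(align_up(0x1000, 16) == 0x1000);  /* already aligned: unchanged */
    assert(align_up(0x1001, 16) == 0x1010);  /* rounded up to next multiple */
    return 0;
}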
Example #2
File: arch_system.c  Project: chanddu/MINIX
PUBLIC void fpu_init(void)
{
	unsigned short cw, sw;

	fninit();
	sw = fnstsw();
	fnstcw(&cw);

	if((sw & 0xff) == 0 &&
	   (cw & 0x103f) == 0x3f) {
		/* We have some sort of FPU, but don't check exact model.
		 * Set CR0_NE and CR0_MP to handle fpu exceptions
		 * in native mode. */
		write_cr0(read_cr0() | CR0_MP_NE);
		get_cpulocal_var(fpu_presence) = 1;
		if(_cpufeature(_CPUF_I386_FXSR)) {
			u32_t cr4 = read_cr4() | CR4_OSFXSR; /* Enable FXSR. */

			/* Set OSXMMEXCPT only if SSE is supported; the FXSR
			 * feature can be available without SSE.
			 */
			if(_cpufeature(_CPUF_I386_SSE))
				cr4 |= CR4_OSXMMEXCPT; 

			write_cr4(cr4);
			osfxsr_feature = 1;
		} else {
			osfxsr_feature = 0;
		}
	} else {
		/* No FPU present. */
		get_cpulocal_var(fpu_presence) = 0;
		osfxsr_feature = 0;
		return;
	}
}
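
Both fpu_init() versions rely on the same probe: fninit() loads the documented x87 power-on defaults (status word 0x0000, control word 0x037f), so a present FPU yields a zero status low byte and (cw & 0x103f) == 0x3f. Below is a hedged sketch of what the fninit()/fnstsw()/fnstcw() helpers amount to as GCC inline assembly; in MINIX proper they live in the kernel's assembly library, so these bodies are illustrative only.

#include <stdio.h>

static inline void fninit(void)
{
	__asm__ volatile("fninit");			/* reset FPU to default state */
}

static inline unsigned short fnstsw(void)
{
	unsigned short sw;
	__asm__ volatile("fnstsw %0" : "=a"(sw));	/* status word into AX */
	return sw;
}

static inline void fnstcw(unsigned short *cw)
{
	__asm__ volatile("fnstcw %0" : "=m"(*cw));	/* control word to memory */
}

int main(void)
{
	unsigned short cw, sw;

	fninit();
	sw = fnstsw();
	fnstcw(&cw);

	/* Same acceptance test as fpu_init() above. */
	printf("sw=%#x cw=%#x fpu=%d\n", sw, cw,
		(sw & 0xff) == 0 && (cw & 0x103f) == 0x3f);
	return 0;
}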
Example #3
File: memory.c  Project: Sciumo/minix
static void vm_enable_paging(void)
{
	u32_t cr0, cr4;
	int pgeok;

	psok = _cpufeature(_CPUF_I386_PSE);	/* psok: presumably a file-scope flag */
	pgeok = _cpufeature(_CPUF_I386_PGE);

	cr0= read_cr0();
	cr4= read_cr4();

	/* First clear PG and PGE flag, as PGE must be enabled after PG. */
	write_cr0(cr0 & ~I386_CR0_PG);
	write_cr4(cr4 & ~(I386_CR4_PGE | I386_CR4_PSE));

	cr0= read_cr0();
	cr4= read_cr4();

	/* Our first page table contains 4MB entries. */
	if(psok)
		cr4 |= I386_CR4_PSE;

	write_cr4(cr4);

	/* First enable paging, then enable global page flag. */
	cr0 |= I386_CR0_PG;
	write_cr0(cr0);
	cr0 |= I386_CR0_WP;
	write_cr0(cr0);

	/* May we enable these features? */
	if(pgeok)
		cr4 |= I386_CR4_PGE;

	write_cr4(cr4);
}
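
For reference, the CR0/CR4 bits vm_enable_paging() toggles sit at fixed architectural positions. A sketch of the definitions assumed above (values per the Intel manuals; the authoritative MINIX definitions live in its arch headers):

/* i386 control-register bits used by vm_enable_paging() (architectural
 * values; illustrative definitions, not copied from the MINIX headers). */
#define I386_CR0_PE	0x00000001	/* protected mode enable */
#define I386_CR0_WP	0x00010000	/* honor write protection in ring 0 */
#define I386_CR0_PG	0x80000000	/* enable paging */

#define I386_CR4_PSE	0x00000010	/* allow 4MB page-size extension */
#define I386_CR4_PGE	0x00000080	/* allow global (TLB-persistent) pages */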
Example #4
File: pg_utils.c  Project: mkatri/minix
void vm_enable_paging(void)
{
        u32_t cr0, cr4;
        int pgeok;

        pgeok = _cpufeature(_CPUF_I386_PGE);

        cr0= read_cr0();
        cr4= read_cr4();

	/* The boot loader should have put us in protected mode. */
	assert(cr0 & I386_CR0_PE);

        /* First clear PG and PGE flag, as PGE must be enabled after PG. */
        write_cr0(cr0 & ~I386_CR0_PG);
        write_cr4(cr4 & ~(I386_CR4_PGE | I386_CR4_PSE));

        cr0= read_cr0();
        cr4= read_cr4();

        /* Our page table contains 4MB entries. */
        cr4 |= I386_CR4_PSE;

        write_cr4(cr4);

        /* First enable paging, then enable global page flag. */
        cr0 |= I386_CR0_PG;
        write_cr0(cr0);
        cr0 |= I386_CR0_WP;
        write_cr0(cr0);

        /* May we enable these features? */
        if(pgeok)
                cr4 |= I386_CR4_PGE;

        write_cr4(cr4);
}
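
Both vm_enable_paging() variants depend on read_cr0()/write_cr0() and read_cr4()/write_cr4(). In MINIX these are kernel library routines written in assembly; a minimal sketch of equivalent GCC inline-assembly accessors is shown below (illustrative only, and usable only in ring 0 since control-register moves are privileged).

typedef unsigned int u32_t;	/* assumption: 32-bit kernel integer type */

static inline u32_t read_cr0(void)
{
	u32_t v;
	__asm__ volatile("mov %%cr0, %0" : "=r"(v));
	return v;
}

static inline void write_cr0(u32_t v)
{
	__asm__ volatile("mov %0, %%cr0" : : "r"(v));
}

static inline u32_t read_cr4(void)
{
	u32_t v;
	__asm__ volatile("mov %%cr4, %0" : "=r"(v));
	return v;
}

static inline void write_cr4(u32_t v)
{
	__asm__ volatile("mov %0, %%cr4" : : "r"(v));
}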
Example #5
/*===========================================================================*
 *                              pt_init                                      *
 *===========================================================================*/
void pt_init(void)
{
    pt_t *newpt;
    int s, r, p;
    vir_bytes sparepages_mem;
#if defined(__arm__)
    vir_bytes sparepagedirs_mem;
#endif
    static u32_t currentpagedir[ARCH_VM_DIR_ENTRIES];
    int m = kernel_boot_info.kern_mod;
#if defined(__i386__)
    int global_bit_ok = 0;
    u32_t mypdbr; /* Page Directory Base Register (cr3) value */
#elif defined(__arm__)
    u32_t myttbr;
#endif

    /* Find what the physical location of the kernel is. */
    assert(m >= 0);
    assert(m < kernel_boot_info.mods_with_kernel);
    assert(kernel_boot_info.mods_with_kernel < MULTIBOOT_MAX_MODS);
    kern_mb_mod = &kernel_boot_info.module_list[m];
    kern_size = kern_mb_mod->mod_end - kern_mb_mod->mod_start;
    assert(!(kern_mb_mod->mod_start % ARCH_BIG_PAGE_SIZE));
    assert(!(kernel_boot_info.vir_kern_start % ARCH_BIG_PAGE_SIZE));
    kern_start_pde = kernel_boot_info.vir_kern_start / ARCH_BIG_PAGE_SIZE;

    /* Get ourselves spare pages. */
    sparepages_mem = (vir_bytes) static_sparepages;
    assert(!(sparepages_mem % VM_PAGE_SIZE));

#if defined(__arm__)
    /* Get ourselves spare pagedirs. */
    sparepagedirs_mem = (vir_bytes) static_sparepagedirs;
    assert(!(sparepagedirs_mem % ARCH_PAGEDIR_SIZE));
#endif

    /* Spare pages are used to allocate memory before VM has its own page
     * table that things (i.e. arbitrary physical memory) can be mapped into.
     * We get it by pre-allocating it in our bss (allocated and mapped in by
     * the kernel) in static_sparepages. We also need the physical addresses
     * though; we look them up now so they are ready for use.
     */
#if defined(__arm__)
    missing_sparedirs = 0;
    assert(STATIC_SPAREPAGEDIRS <= SPAREPAGEDIRS);
    for(s = 0; s < SPAREPAGEDIRS; s++) {
        vir_bytes v = (sparepagedirs_mem + s*ARCH_PAGEDIR_SIZE);
        phys_bytes ph;
        if((r=sys_umap(SELF, VM_D, (vir_bytes) v,
                       ARCH_PAGEDIR_SIZE, &ph)) != OK)
            panic("pt_init: sys_umap failed: %d", r);
        if(s >= STATIC_SPAREPAGEDIRS) {
            sparepagedirs[s].pagedir = NULL;
            missing_sparedirs++;
            continue;
        }
        sparepagedirs[s].pagedir = (void *) v;
        sparepagedirs[s].phys = ph;
    }
#endif

    if(!(spare_pagequeue = reservedqueue_new(SPAREPAGES, 1, 1, 0)))
        panic("reservedqueue_new for single pages failed");

    assert(STATIC_SPAREPAGES < SPAREPAGES);
    for(s = 0; s < STATIC_SPAREPAGES; s++) {
        void *v = (void *) (sparepages_mem + s*VM_PAGE_SIZE);
        phys_bytes ph;
        if((r=sys_umap(SELF, VM_D, (vir_bytes) v,
                       VM_PAGE_SIZE*SPAREPAGES, &ph)) != OK)
            panic("pt_init: sys_umap failed: %d", r);
        reservedqueue_add(spare_pagequeue, v, ph);
    }

#if defined(__i386__)
    /* global bit and 4MB pages available? */
    global_bit_ok = _cpufeature(_CPUF_I386_PGE);
    bigpage_ok = _cpufeature(_CPUF_I386_PSE);

    /* Set bit for PTE's and PDE's if available. */
    if(global_bit_ok)
        global_bit = I386_VM_GLOBAL;
#endif

    /* Now reserve another pde for kernel's own mappings. */
    {
        int kernmap_pde;
        phys_bytes addr, len;
        int flags, index = 0;
        u32_t offset = 0;

        kernmap_pde = freepde();
        offset = kernmap_pde * ARCH_BIG_PAGE_SIZE;

        while(sys_vmctl_get_mapping(index, &addr, &len,
                                    &flags) == OK)  {
            int usedpde;
            vir_bytes vir;
            if(index >= MAX_KERNMAPPINGS)
                panic("VM: too many kernel mappings: %d", index);
            kern_mappings[index].phys_addr = addr;
            kern_mappings[index].len = len;
            kern_mappings[index].flags = flags;
            kern_mappings[index].vir_addr = offset;
            kern_mappings[index].flags =
                ARCH_VM_PTE_PRESENT;
            if(flags & VMMF_UNCACHED)
#if defined(__i386__)
                kern_mappings[index].flags |= PTF_NOCACHE;
#elif defined(__arm__)
                kern_mappings[index].flags |= ARM_VM_PTE_DEVICE;
#endif
            if(flags & VMMF_USER)
                kern_mappings[index].flags |= ARCH_VM_PTE_USER;
#if defined(__arm__)
            else
                kern_mappings[index].flags |= ARM_VM_PTE_SUPER;
#endif
            if(flags & VMMF_WRITE)
                kern_mappings[index].flags |= ARCH_VM_PTE_RW;
#if defined(__i386__)
            if(flags & VMMF_GLO)
                kern_mappings[index].flags |= I386_VM_GLOBAL;
#elif defined(__arm__)
            else
                kern_mappings[index].flags |= ARCH_VM_PTE_RO;
#endif
            if(addr % VM_PAGE_SIZE)
                panic("VM: addr unaligned: %d", addr);
            if(len % VM_PAGE_SIZE)
                panic("VM: len unaligned: %d", len);
            vir = offset;
            if(sys_vmctl_reply_mapping(index, vir) != OK)
                panic("VM: reply failed");
            offset += len;
            index++;
            kernmappings++;

            usedpde = ARCH_VM_PDE(offset);
            while(usedpde > kernmap_pde) {
                int newpde = freepde();
                assert(newpde == kernmap_pde+1);
                kernmap_pde = newpde;
            }
        }
    }

    /* Reserve PDEs available for mapping in the page directories. */
    {
        int pd;
        for(pd = 0; pd < MAX_PAGEDIR_PDES; pd++) {
            struct pdm *pdm = &pagedir_mappings[pd];
            pdm->pdeno = freepde();
            phys_bytes ph;

            /* Allocate us a page table in which to
             * remember page directory pointers.
             */
            if(!(pdm->page_directories =
                        vm_allocpage(&ph, VMP_PAGETABLE))) {
                panic("no virt addr for vm mappings");
            }
            memset(pdm->page_directories, 0, VM_PAGE_SIZE);
            pdm->phys = ph;

#if defined(__i386__)
            pdm->val = (ph & ARCH_VM_ADDR_MASK) |
                       ARCH_VM_PDE_PRESENT | ARCH_VM_PTE_RW;
#elif defined(__arm__)
            pdm->val = (ph & ARCH_VM_PDE_MASK)
                       | ARCH_VM_PDE_PRESENT
                       | ARM_VM_PDE_DOMAIN; //LSC FIXME
#endif
        }
    }

    /* All right. Now we have to make our own page directory and page tables,
     * which the kernel has already set up, accessible to us. It's easier to
     * understand if we just copy all the required pages (i.e. page directory
     * and page tables), and set up the pointers as if VM had done it itself.
     *
     * This allocation will happen without using any page table, and just
     * uses spare pages.
     */
    newpt = &vmprocess->vm_pt;
    if(pt_new(newpt) != OK)
        panic("vm pt_new failed");

    /* Get our current pagedir so we can see it. */
#if defined(__i386__)
    if(sys_vmctl_get_pdbr(SELF, &mypdbr) != OK)
#elif defined(__arm__)
    if(sys_vmctl_get_pdbr(SELF, &myttbr) != OK)
#endif
        panic("VM: sys_vmctl_get_pdbr failed");
#if defined(__i386__)
    if(sys_vircopy(NONE, mypdbr, SELF,
                   (vir_bytes) currentpagedir, VM_PAGE_SIZE) != OK)
#elif defined(__arm__)
    if(sys_vircopy(NONE, myttbr, SELF,
                   (vir_bytes) currentpagedir, ARCH_PAGEDIR_SIZE) != OK)
#endif
        panic("VM: sys_vircopy failed");

    /* We have mapped in kernel ourselves; now copy mappings for VM
     * that kernel made, including allocations for BSS. Skip identity
     * mapping bits; just map in VM.
     */
    for(p = 0; p < ARCH_VM_DIR_ENTRIES; p++) {
        u32_t entry = currentpagedir[p];
        phys_bytes ptaddr_kern, ptaddr_us;

        /* BIGPAGEs are kernel mapping (do ourselves) or boot
         * identity mapping (don't want).
         */
        if(!(entry & ARCH_VM_PDE_PRESENT)) continue;
        if((entry & ARCH_VM_BIGPAGE)) continue;

        if(pt_ptalloc(newpt, p, 0) != OK)
            panic("pt_ptalloc failed");
        assert(newpt->pt_dir[p] & ARCH_VM_PDE_PRESENT);

#if defined(__i386__)
        ptaddr_kern = entry & ARCH_VM_ADDR_MASK;
        ptaddr_us = newpt->pt_dir[p] & ARCH_VM_ADDR_MASK;
#elif defined(__arm__)
        ptaddr_kern = entry & ARCH_VM_PDE_MASK;
        ptaddr_us = newpt->pt_dir[p] & ARCH_VM_PDE_MASK;
#endif

        /* Copy kernel-initialized pagetable contents into our
         * normally accessible pagetable.
         */
        if(sys_abscopy(ptaddr_kern, ptaddr_us, VM_PAGE_SIZE) != OK)
            panic("pt_init: abscopy failed");
    }

    /* Inform the kernel that VM has a newly built page table. */
    assert(vmproc[VM_PROC_NR].vm_endpoint == VM_PROC_NR);
    pt_bind(newpt, &vmproc[VM_PROC_NR]);

    pt_init_done = 1;

    /* All OK. */
    return;
}
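
pt_init() walks the boot page directory one PDE at a time and converts offsets with ARCH_VM_PDE(). On i386 with 4KB pages that corresponds to the usual 10/10/12 split of a virtual address; the macros below are an illustrative sketch of that decomposition, assumed to match what the ARCH_VM_* names expand to on i386 (the real definitions are in the MINIX arch headers).

#include <stdint.h>

/* i386 two-level paging: 10-bit directory index, 10-bit table index,
 * 12-bit page offset (illustrative constants). */
#define I386_VM_DIR_SHIFT	22
#define I386_VM_PT_SHIFT	12
#define I386_VM_DIR_ENTRIES	1024
#define I386_VM_PT_ENTRIES	1024

#define I386_VM_PDE(v)	((uint32_t) (v) >> I386_VM_DIR_SHIFT)
#define I386_VM_PTE(v)	(((uint32_t) (v) >> I386_VM_PT_SHIFT) & (I386_VM_PT_ENTRIES - 1))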
Example #6
File: pagetable.c  Project: mwilbur/minix
/*===========================================================================*
 *                              pt_init                                      *
 *===========================================================================*/
PUBLIC void pt_init(phys_bytes usedlimit)
{
/* By default, the kernel gives us a data segment with pre-allocated
 * memory that then can't grow. We want to be able to allocate memory
 * dynamically, however. So here we copy the part of the page table
 * that's ours, so we get a private page table. Then we increase the
 * hardware segment size so we can allocate memory above our stack.
 */
        pt_t *newpt;
        int s, r;
        vir_bytes v;
        phys_bytes lo, hi; 
        vir_bytes extra_clicks;
        u32_t moveup = 0;
	int global_bit_ok = 0;
	int free_pde;
	int p;
	struct vm_ep_data ep_data;
	vir_bytes sparepages_mem;
	phys_bytes sparepages_ph;
	vir_bytes ptr;

        /* Shorthand. */
        newpt = &vmprocess->vm_pt;

        /* Get ourselves spare pages. */
        ptr = (vir_bytes) static_sparepages;
        ptr += I386_PAGE_SIZE - (ptr % I386_PAGE_SIZE);
        if(!(sparepages_mem = ptr))
		panic("pt_init: aalloc for spare failed");
        if((r=sys_umap(SELF, VM_D, (vir_bytes) sparepages_mem,
                I386_PAGE_SIZE*SPAREPAGES, &sparepages_ph)) != OK)
                panic("pt_init: sys_umap failed: %d", r);

        missing_spares = 0;
        assert(STATIC_SPAREPAGES < SPAREPAGES);
        for(s = 0; s < SPAREPAGES; s++) {
        	if(s >= STATIC_SPAREPAGES) {
        		sparepages[s].page = NULL;
        		missing_spares++;
        		continue;
        	}
        	sparepages[s].page = (void *) (sparepages_mem + s*I386_PAGE_SIZE);
        	sparepages[s].phys = sparepages_ph + s*I386_PAGE_SIZE;
        }

	/* global bit and 4MB pages available? */
	global_bit_ok = _cpufeature(_CPUF_I386_PGE);
	bigpage_ok = _cpufeature(_CPUF_I386_PSE);

	/* Set bit for PTE's and PDE's if available. */
	if(global_bit_ok)
		global_bit = I386_VM_GLOBAL;

	/* The kernel and boot time processes need an identity mapping.
	 * We use full PDE's for this without separate page tables.
	 * Figure out which pde we can start using for other purposes.
	 */
	id_map_high_pde = usedlimit / I386_BIG_PAGE_SIZE;

	/* We have to make mappings up till here. */
	free_pde = id_map_high_pde+1;

        /* Initial (current) range of our virtual address space. */
        lo = CLICK2ABS(vmprocess->vm_arch.vm_seg[T].mem_phys);
        hi = CLICK2ABS(vmprocess->vm_arch.vm_seg[S].mem_phys +
                vmprocess->vm_arch.vm_seg[S].mem_len);
                  
        assert(!(lo % I386_PAGE_SIZE)); 
        assert(!(hi % I386_PAGE_SIZE));
 
        if(lo < VM_PROCSTART) {
                moveup = VM_PROCSTART - lo;
                assert(!(VM_PROCSTART % I386_PAGE_SIZE));
                assert(!(lo % I386_PAGE_SIZE));
                assert(!(moveup % I386_PAGE_SIZE));
        }
        
        /* Make new page table for ourselves, partly copied
         * from the current one.
         */     
        if(pt_new(newpt) != OK)
                panic("pt_init: pt_new failed"); 

        /* Set up mappings for VM process. */
        for(v = lo; v < hi; v += I386_PAGE_SIZE)  {
                phys_bytes addr;
                u32_t flags; 
        
                /* We have to write the new position in the PT,
                 * so we can move our segments.
                 */ 
                if(pt_writemap(vmprocess, newpt, v+moveup, v, I386_PAGE_SIZE,
                        I386_VM_PRESENT|I386_VM_WRITE|I386_VM_USER, 0) != OK)
                        panic("pt_init: pt_writemap failed");
        }
       
        /* Move segments up too. */
        vmprocess->vm_arch.vm_seg[T].mem_phys += ABS2CLICK(moveup);
        vmprocess->vm_arch.vm_seg[D].mem_phys += ABS2CLICK(moveup);
        vmprocess->vm_arch.vm_seg[S].mem_phys += ABS2CLICK(moveup);
       
	/* Allocate us a page table in which to remember page directory
	 * pointers.
	 */
	if(!(page_directories = vm_allocpage(&page_directories_phys,
		VMP_PAGETABLE)))
                panic("no virt addr for vm mappings");

	memset(page_directories, 0, I386_PAGE_SIZE);
       
        /* Increase our hardware data segment to create virtual address
         * space above our stack. We want to increase it to VM_DATATOP,
         * like regular processes have.
         */
        extra_clicks = ABS2CLICK(VM_DATATOP - hi);
        vmprocess->vm_arch.vm_seg[S].mem_len += extra_clicks;
       
        /* We pretend to the kernel we have a huge stack segment to
         * increase our data segment.
         */
        vmprocess->vm_arch.vm_data_top =
                (vmprocess->vm_arch.vm_seg[S].mem_vir +
                vmprocess->vm_arch.vm_seg[S].mem_len) << CLICK_SHIFT;
       
        /* Where our free virtual address space starts.
         * This is only a hint to the VM system.
         */
        newpt->pt_virtop = 0;

        /* Let other functions know VM now has a private page table. */
        vmprocess->vm_flags |= VMF_HASPT;

	/* Now reserve another pde for kernel's own mappings. */
	{
		int kernmap_pde;
		phys_bytes addr, len;
		int flags, index = 0;
		u32_t offset = 0;

		kernmap_pde = free_pde++;
		offset = kernmap_pde * I386_BIG_PAGE_SIZE;

		while(sys_vmctl_get_mapping(index, &addr, &len,
			&flags) == OK)  {
			vir_bytes vir;
			if(index >= MAX_KERNMAPPINGS)
                		panic("VM: too many kernel mappings: %d", index);
			kern_mappings[index].phys_addr = addr;
			kern_mappings[index].len = len;
			kern_mappings[index].flags = flags;
			kern_mappings[index].lin_addr = offset;
			kern_mappings[index].flags =
				I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE |
				global_bit;
			if(flags & VMMF_UNCACHED)
				kern_mappings[index].flags |= PTF_NOCACHE;
			if(addr % I386_PAGE_SIZE)
                		panic("VM: addr unaligned: %d", addr);
			if(len % I386_PAGE_SIZE)
                		panic("VM: len unaligned: %d", len);
			vir = arch_map2vir(&vmproc[VMP_SYSTEM], offset);
			if(sys_vmctl_reply_mapping(index, vir) != OK)
                		panic("VM: reply failed");
			offset += len;
			index++;
			kernmappings++;
		}
	}

	/* Find a PDE below processes available for mapping in the
	 * page directories (readonly).
	 */
	pagedir_pde = free_pde++;
	pagedir_pde_val = (page_directories_phys & I386_VM_ADDR_MASK) |
			I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;

	/* Tell kernel about free pde's. */
	while(free_pde*I386_BIG_PAGE_SIZE < VM_PROCSTART) {
		if((r=sys_vmctl(SELF, VMCTL_I386_FREEPDE, free_pde++)) != OK) {
			panic("VMCTL_I386_FREEPDE failed: %d", r);
		}
	}

	/* first pde in use by process. */
	proc_pde = free_pde;

        /* Give our process the new, copied, private page table. */
	pt_mapkernel(newpt);	/* didn't know about vm_dir pages earlier */
        pt_bind(newpt, vmprocess);
       
	/* new segment limit for the kernel after paging is enabled */
	ep_data.data_seg_limit = free_pde*I386_BIG_PAGE_SIZE;
	/* the memory map which must be installed after paging is enabled */
	ep_data.mem_map = vmprocess->vm_arch.vm_seg;

	/* Now actually enable paging. */
	if(sys_vmctl_enable_paging(&ep_data) != OK)
        	panic("pt_init: enable paging failed");

        /* Back to reality - this is where the stack actually is. */
        vmprocess->vm_arch.vm_seg[S].mem_len -= extra_clicks;

        /* Pretend VM stack top is the same as any regular process, not to
         * have discrepancies with new VM instances later on.
         */
        vmprocess->vm_stacktop = VM_STACKTOP;

        /* All OK. */
        return;
}
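
The segment arithmetic in this older pt_init() is done in clicks, MINIX's allocation granularity. A sketch of the conversions the code assumes (illustrative; on i386 a click is conventionally one 4KB page, and the authoritative definitions are in minix/const.h):

#define CLICK_SHIFT	12			/* log2 of the click size */
#define CLICK_SIZE	(1 << CLICK_SHIFT)	/* 4096 bytes per click */

#define ABS2CLICK(a)	((a) >> CLICK_SHIFT)	/* bytes -> clicks */
#define CLICK2ABS(v)	((v) << CLICK_SHIFT)	/* clicks -> bytes */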