Example no. 1
0
/*
 * Dump the saved registers in ureg, followed by the processor control
 * registers, to the console.
 */
void
dumpregs(Ureg* ureg)
{
	vlong mcaddr, mctype;

	dumpregs2(ureg);

	/*
	 * CR0-CR3 always exist.  CR4 is only present when CPUID reports
	 * machine-check exceptions, TSC, page-size extensions or enhanced
	 * V86 extensions (mask 0x9A).  When both MCE and a usable RDMSR
	 * are reported (mask 0xA0), also dump the machine-check address
	 * and type MSRs.
	 */
	iprint("  CR0 %8.8lux CR2 %8.8lux CR3 %8.8lux",
		getcr0(), getcr2(), getcr3());
	if((m->cpuiddx & 0x9A) != 0){
		iprint(" CR4 %8.8lux", getcr4());
		if((m->cpuiddx & 0xA0) == 0xA0){
			rdmsr(0x00, &mcaddr);
			rdmsr(0x01, &mctype);
			iprint("\n  MCA %8.8llux MCT %8.8llux", mcaddr, mctype);
		}
	}
	iprint("\n  ur %#p up %#p\n", ureg, up);
}
Example no. 2
0
/*ARGSUSED*/
void
rmp_gdt_init(rm_platter_t *rm)
{

#if defined(__amd64)

	/*
	 * The real-mode trampoline loads CR3 as a 32-bit quantity, so the
	 * kernel's top-level page table must live below 4G in physical
	 * memory.
	 */
	if (getcr3() > 0xffffffffUL)
		panic("Cannot initialize CPUs; kernel's 64-bit page tables\n"
		    "located above 4G in physical memory (@ 0x%lx)", getcr3());

	/*
	 * Temporary IDT for real_mode_start(): a zero limit means any
	 * NMI taken with interrupts otherwise off triple-faults and
	 * resets the CPU -- as reasonable a fallback as any, though on
	 * some platforms it may reset the whole machine.
	 */
	rm->rm_temp_idt_lim = 0;
	rm->rm_temp_idt_base = 0;

	/*
	 * Temporary GDT for real_mode_start() only:
	 *
	 * GDT[0]:  NULL selector
	 * GDT[1]:  64-bit CS: Long = 1, Present = 1, bits 12, 11 = 1
	 */
	rm->rm_temp_gdt[0] = 0;
	rm->rm_temp_gdt[TEMPGDT_KCODE64] = 0x20980000000000ULL;
	rm->rm_temp_gdt_lim = (ushort_t)(sizeof (rm->rm_temp_gdt) - 1);
	rm->rm_temp_gdt_base = rm_platter_pa +
	    (uint32_t)offsetof(rm_platter_t, rm_temp_gdt);

	/*
	 * The jump into protected mode must use an identity-mapped
	 * address, so precompute it from the platter's physical base.
	 */
	rm->rm_longmode64_addr = rm_platter_pa +
	    ((uint32_t)long_mode_64 - (uint32_t)real_mode_start);
#endif	/* __amd64 */
}
Example no. 3
0
static void
sanity(void)
{
	uintptr cr3;

	cr3 = (uintptr)KADDR(getcr3());
	if (cr3 == 0)
		panic("zero cr3");
	if ((uintptr)m->pdb != cr3 || (uintptr)mach0pdb != cr3)
		panic("not all same: cr3 %#p m->pdb %#p mach0pdb %#p",
			cr3, m->pdb, mach0pdb);
	if (m != mach0m)
		panic("m %#p != mach0m %#p", m, mach0m);
	if (m->gdt != mach0gdt)
		panic("m->gdt %#p != mach0gdt %#p", m->gdt, mach0gdt);
	if (0)
		iprint("m->pdb %#p m %#p sp %#p m->gdt %#p\n",
			m->pdb, m, &cr3, m->gdt);
}
Example no. 4
0
/*
 * Print the saved registers (via dumpregs2) followed by the processor
 * control registers.
 */
void
dumpregs(Ureg* ureg)
{
	dumpregs2(ureg);

	/*
	 * Processor control registers.
	 * CR0-CR3 always exist; CR4 is only present when the CPUID feature
	 * flags report machine check exceptions, the time stamp counter,
	 * page size extensions or enhanced virtual 8086 mode extensions.
	 * If this trap was a machine check, dump the machine-check
	 * registers as well.
	 */
	iprint("  CR0 %8.8lux CR2 %8.8lux CR3 %8.8lux",
		getcr0(), getcr2(), getcr3());
	if(m->cpuiddx & (Mce|Tsc|Pse|Vmex)){
		iprint(" CR4 %8.8lux\n", getcr4());
		/* Vector 18 is the x86 machine-check exception (#MC). */
		if(ureg->trap == 18)
			dumpmcregs();
	}
	iprint("\n  ur %#p up %#p\n", ureg, up);
}
Example no. 5
0
/*
 * Build the initial user stack for proc in the conventional
 * argc / argv[] / NULL / envp[] / NULL entry layout.
 *
 * The environment and argument strings themselves live in fixed-size
 * slots at USER_ENV_VARIABLE_START and USER_ARGV_START (faulted in via
 * demand paging); only the slot addresses are pushed here.
 *
 * Temporarily switches CR3 to the process page table so the user stack
 * can be written directly, and restores the caller's CR3 before
 * returning.  Returns 0 (currently cannot fail).
 */
int setup_stack(process_t *proc,
                size_t argv_count,
                size_t envp_count)
{
    /* int64_t, not int: avoids truncating a large size_t count. */
    int64_t i;
    uint64_t save_cr3 = getcr3();

    setcr3((uint64_t)proc->page_table_base);

    /* envp[]: NULL sentinel first, then slot addresses high-to-low. */
    proc->registers.rsp -= 8;
    *(uint64_t*)(proc->registers.rsp) = 0;

    for (i = (int64_t)envp_count - 1; i >= 0; i--)
    {
        proc->registers.rsp -= 8;
        *(uint64_t*)(proc->registers.rsp) = USER_ENV_VARIABLE_START + (MAX_ENV_SIZE * i);
    }

    /* argv[]: same layout, NULL sentinel first. */
    proc->registers.rsp -= 8;
    *(uint64_t*)(proc->registers.rsp) = 0;

    for (i = (int64_t)argv_count - 1; i >= 0; i--)
    {
        proc->registers.rsp -= 8;
        *(uint64_t*)(proc->registers.rsp) = USER_ARGV_START + (MAX_ARGV_SIZE * i);
    }

    /* Finally argc, which the process entry code finds at the stack top. */
    proc->registers.rsp -= 8;
    *(uint64_t*)(proc->registers.rsp) = argv_count;

    setcr3(save_cr3);
    return 0;
}
Example no. 6
0
/*
 * Initialize a new process from a binary image plus its argv/envp.
 *
 * binary == NULL means the process is a fork child that inherits its
 * image, so no VMAs, stack page or argument copying are set up here.
 * On any allocation/setup failure, all partially-acquired resources are
 * released and NULL is returned.  On success the process is placed on
 * the runnable queue and returned.
 */
process_t* create_process_new(void* binary, pid_t ppid, int new_pid, 
                              const char *proc_name,
                              char * const argv[],
                              size_t argv_count,
                              char * const envp[],
                              size_t envp_count)
{
    uint64_t old_cr3 = 0;
    int32_t    rc   = 0;

    process_t *proc = allocate_process(new_pid);
    if (!proc)
    {
        #ifdef LOG
            printf("Failed to allocate memory for process\n");
        #endif
        return NULL;
    }
    
    // First process will be foreground... Need to change this logic
    // May be this will work fine when we will be running just shell
    if (foreground_proc == NULL)
    {
        proc->flags |= FOREGROUND_PROCESS;
        foreground_proc = proc;
    }

    strncpy(proc->name, proc_name, sizeof(proc->name));
    // strncpy does not NUL-terminate when proc_name fills the buffer.
    proc->name[sizeof(proc->name) - 1] = '\0';

    proc->kernel_stack = kmalloc(USER_KERNEL_STACK_SIZE);
    if(proc->kernel_stack == NULL)
    {
        reclaim_process_resources(proc);
        kfree(proc);
        // Was "#if LOG", inconsistent with the #ifdef used everywhere
        // else (and an error if LOG is defined empty).
        #ifdef LOG
            printf("failed to allocate memory for user-kern stack\n");
        #endif
        
        return NULL;
    }

    rc = setup_pagetable_proc(proc);
    if (rc)
    {
        reclaim_process_resources(proc);
        kfree(proc);
        #ifdef LOG
            printf("setup_pagetable_proc failed\n");
        #endif
        return NULL;
    }
    
    if (binary != NULL)
    {
        //If the process is child of some parent
        //So we don't need any binary
        rc = init_vmas_proc(proc, binary, argv_count, envp_count);
    }

    if (rc)
    {
        reclaim_process_resources(proc);
        kfree(proc);
        #ifdef LOG
            printf("init_vmas_proc failed\n");
        #endif
        return NULL;
    }

    //Setup registers for the process
    setup_registers_proc(proc);

    // Allocate a page for stack
    // Why we are not going for demand paging?
    //  We need to push env variables' addresses on stack while we are in kernel 
    //  and it's not a good idea to have a page fault in kernel
    if (binary != NULL)
    {   //Don't allocate page for stack because it's a child.
        allocate_memory((uint64_t)proc->page_table_base,
                    USER_STACK_TOP - PGSIZE,
                    PGSIZE,
                    PTE_P | PTE_U | PTE_W);
    }
    // This function should be last in this sequence since it uses rsp 
    // which we set in setup_registers_proc
    if (binary != NULL)
    {
        // NOTE(review): these memcpy calls copy the argv/envp POINTER
        // arrays into user memory, not the strings they point at --
        // verify the strings are installed elsewhere (demand paging?).
        if (argv_count > 0)
        {
            allocate_memory((uint64_t)proc->page_table_base,
                            USER_ARGV_START,
                            argv_count * sizeof(execve_argv[0]),
                            PTE_P | PTE_U | PTE_W);
            old_cr3 = getcr3();
            setcr3((uint64_t)proc->page_table_base);
            memcpy((void *)USER_ARGV_START,
                    argv,
                    argv_count * sizeof(execve_argv[0]));
            setcr3(old_cr3);
        }

        if (envp_count > 0)
        {
            allocate_memory((uint64_t)proc->page_table_base,
                            USER_ENV_VARIABLE_START,
                            envp_count * sizeof(execve_envp[0]),
                            PTE_P | PTE_U | PTE_W);
            old_cr3 = getcr3();
            setcr3((uint64_t)proc->page_table_base);
            memcpy((void *)USER_ENV_VARIABLE_START,
                    envp,
                    envp_count * sizeof(execve_envp[0]));
            setcr3(old_cr3);
        }

        setup_stack(proc, argv_count, envp_count);
    }

    add_process_to_queue(proc, PROCESS_RUNNABLE_QUEUE);
    proc->ppid = ppid;

    return proc;
}
Example no. 7
0
/*
 * Ensure the physical range [pa, pa+size) is mapped in the current
 * kernel page tables at virtual address va (or, when va == 0, at pa's
 * kernel alias via KADDR), creating uncached writable mappings for
 * anything not already present.  Uses 4MiB pages when the processor
 * supports PSE and alignment/size permit.  Returns the physical
 * address just past the last page processed.
 */
ulong
mmukmap(ulong pa, ulong va, int size)
{
	ulong pae, *table, *pdb, pgsz, *pte, x;
	int pse, sync;
	extern int cpuidax, cpuiddx;

	/* Virtual address of the current page directory (from CR3). */
	pdb = KADDR(getcr3());
	/* 4MiB pages need both the CPUID PSE feature bit and CR4.PSE. */
	if((cpuiddx & 0x08) && (getcr4() & 0x10))
		pse = 1;
	else
		pse = 0;
	sync = 0;	/* nonzero -> reload CR3 at the end to flush the TLB */

	/* Truncate both addresses to page boundaries. */
	pa = PPN(pa);
	if(va == 0)
		va = (ulong)KADDR(pa);
	else
		va = PPN(va);

	pae = pa + size;
	lock(&mmukmaplock);
	while(pa < pae){
		table = &pdb[PDX(va)];
		/*
		 * Possibly already mapped.
		 */
		if(*table & PTEVALID){
			if(*table & PTESIZE){
				/*
				 * Big page. Does it fit within?
				 * If it does, adjust pgsz so the correct end can be
				 * returned and get out.
				 * If not, adjust pgsz up to the next 4MiB boundary
				 * and continue.
				 * An existing big page must map exactly this pa,
				 * otherwise the tables are inconsistent.
				 */
				x = PPN(*table);
				if(x != pa)
					panic("mmukmap1: pa 0x%ux  entry 0x%ux\n",
						pa, *table);
				x += 4*MiB;
				if(pae <= x){
					pa = pae;
					break;
				}
				pgsz = x - pa;
				pa += pgsz;
				va += pgsz;

				continue;
			}
			else{
				/*
				 * Little page. Walk to the entry.
				 * If the entry is valid, set pgsz and continue.
				 * If not, make it so, set pgsz, sync and continue.
				 */
				pte = mmuwalk(pdb, va, 2, 0);
				if(pte && *pte & PTEVALID){
					x = PPN(*pte);
					if(x != pa)
						panic("mmukmap2: pa 0x%ux entry 0x%ux\n",
							pa, *pte);
					pgsz = BY2PG;
					pa += pgsz;
					va += pgsz;
					sync++;

					continue;
				}
			}
		}

		/*
		 * Not mapped. Check if it can be mapped using a big page -
		 * starts on a 4MiB boundary, size >= 4MiB and processor can do it.
		 * If not a big page, walk the walk, talk the talk.
		 * Sync is set.
		 */
		if(pse && (pa % (4*MiB)) == 0 && (pae >= pa+4*MiB)){
			*table = pa|PTESIZE|PTEWRITE|PTEUNCACHED|PTEVALID;
			pgsz = 4*MiB;
		}
		else{
			pte = mmuwalk(pdb, va, 2, 1);
			*pte = pa|PTEWRITE|PTEUNCACHED|PTEVALID;
			pgsz = BY2PG;
		}
		pa += pgsz;
		va += pgsz;
		sync++;
	}
	unlock(&mmukmaplock);

	/*
	 * If something was added
	 * then need to sync up:
	 * reloading CR3 flushes the TLB.
	 */
	if(sync)
		putcr3(PADDR(pdb));

	return pa;
}
Example no. 8
0
/*
 * Allocate and populate the startup context (stack, TSS, and real-mode
 * platter fields) for CPU cp.  Returns a pointer to the newly allocated
 * cpu_tables structure.
 */
void *
mach_cpucontext_alloc(struct cpu *cp)
{
	rm_platter_t *rm = (rm_platter_t *)rm_platter_va;
	struct cpu_tables *ct;
	struct tss *ntss;

	/*
	 * Allocate space for stack, tss, gdt and idt. We round the size
	 * allotted for cpu_tables up, so that the TSS is on a unique page.
	 * This is more efficient when running in virtual machines.
	 * (KM_SLEEP: the allocation blocks rather than fail.)
	 */
	ct = kmem_zalloc(P2ROUNDUP(sizeof (*ct), PAGESIZE), KM_SLEEP);
	if ((uintptr_t)ct & PAGEOFFSET)
		panic("mp_startup_init: cpu%d misaligned tables", cp->cpu_id);

	ntss = cp->cpu_tss = &ct->ct_tss;

#if defined(__amd64)

	/*
	 * #DF (double fault): use a dedicated IST stack, pointing at the
	 * top (highest address) of the context's stack area.
	 */
	ntss->tss_ist1 = (uint64_t)&ct->ct_stack[sizeof (ct->ct_stack)];

#elif defined(__i386)

	/* All privilege-level stacks start at the top of the stack area. */
	ntss->tss_esp0 = ntss->tss_esp1 = ntss->tss_esp2 = ntss->tss_esp =
	    (uint32_t)&ct->ct_stack[sizeof (ct->ct_stack)];

	ntss->tss_ss0 = ntss->tss_ss1 = ntss->tss_ss2 = ntss->tss_ss = KDS_SEL;

	/* Initial program counter: the CPU thread's entry point. */
	ntss->tss_eip = (uint32_t)cp->cpu_thread->t_pc;

	ntss->tss_cs = KCS_SEL;
	ntss->tss_ds = ntss->tss_es = KDS_SEL;
	ntss->tss_fs = KFS_SEL;
	ntss->tss_gs = KGS_SEL;

#endif	/* __i386 */

	/*
	 * Set I/O bit map offset equal to size of TSS segment limit
	 * for no I/O permission map. This will cause all user I/O
	 * instructions to generate #gp fault.
	 */
	ntss->tss_bitmapbase = sizeof (*ntss);

	/*
	 * Setup kernel tss.
	 */
	set_syssegd((system_desc_t *)&cp->cpu_gdt[GDT_KTSS], cp->cpu_tss,
	    sizeof (*cp->cpu_tss) - 1, SDT_SYSTSS, SEL_KPL);

	/*
	 * Now copy all that we've set up onto the real mode platter
	 * for the real mode code to digest as part of starting the cpu.
	 */

	rm->rm_idt_base = cp->cpu_idt;
	rm->rm_idt_lim = sizeof (*cp->cpu_idt) * NIDT - 1;
	rm->rm_gdt_base = cp->cpu_gdt;
	rm->rm_gdt_lim = sizeof (*cp->cpu_gdt) * NGDT - 1;

	/* Page directory base, CPU id, feature flags and CR4 for startup. */
	rm->rm_pdbr = getcr3();
	rm->rm_cpu = cp->cpu_id;
	rm->rm_x86feature = x86_feature;
	rm->rm_cr4 = getcr4();

	rmp_gdt_init(rm);

	return (ct);
}