void cpu_physwindow_init(int cpu) { cpu_data_t *cdp = cpu_data_ptr[cpu]; cpu_desc_index_t *cdi = &cdp->cpu_desc_index; vm_offset_t phys_window; if (vm_allocate(kernel_map, &phys_window, PAGE_SIZE, VM_FLAGS_ANYWHERE) != KERN_SUCCESS) panic("cpu_physwindow_init: couldn't allocate phys map window"); /* * make sure the page that encompasses the * pte pointer we're interested in actually * exists in the page table */ pmap_expand(kernel_pmap, phys_window); cdp->cpu_physwindow_base = phys_window; cdp->cpu_physwindow_ptep = vtopte(phys_window); cdi->cdi_gdt[sel_idx(PHYS_WINDOW_SEL)] = physwindow_desc_pattern; cdi->cdi_gdt[sel_idx(PHYS_WINDOW_SEL)].offset = phys_window; fix_desc(&cdi->cdi_gdt[sel_idx(PHYS_WINDOW_SEL)], 1); }
void cpu_userwindow_init(int cpu) { cpu_data_t *cdp = cpu_data_ptr[cpu]; cpu_desc_index_t *cdi = &cdp->cpu_desc_index; vm_offset_t user_window; vm_offset_t vaddr; int num_cpus; num_cpus = ml_get_max_cpus(); if (cpu >= num_cpus) panic("cpu_userwindow_init: cpu > num_cpus"); if (user_window_base == 0) { if (vm_allocate(kernel_map, &vaddr, (NBPDE * NCOPY_WINDOWS * num_cpus) + NBPDE, VM_FLAGS_ANYWHERE) != KERN_SUCCESS) panic("cpu_userwindow_init: " "couldn't allocate user map window"); /* * window must start on a page table boundary * in the virtual address space */ user_window_base = (vaddr + (NBPDE - 1)) & ~(NBPDE - 1); /* * get rid of any allocation leading up to our * starting boundary */ vm_deallocate(kernel_map, vaddr, user_window_base - vaddr); /* * get rid of tail that we don't need */ user_window = user_window_base + (NBPDE * NCOPY_WINDOWS * num_cpus); vm_deallocate(kernel_map, user_window, (vaddr + ((NBPDE * NCOPY_WINDOWS * num_cpus) + NBPDE)) - user_window); } user_window = user_window_base + (cpu * NCOPY_WINDOWS * NBPDE); cdp->cpu_copywindow_base = user_window; cdp->cpu_copywindow_pdp = pmap_pde(kernel_pmap, user_window); cdi->cdi_gdt[sel_idx(USER_WINDOW_SEL)] = userwindow_desc_pattern; cdi->cdi_gdt[sel_idx(USER_WINDOW_SEL)].offset = user_window; fix_desc(&cdi->cdi_gdt[sel_idx(USER_WINDOW_SEL)], 1); }
/*
 * thread_compose_cthread_desc
 *
 * Build a 32-bit user data-segment descriptor based at 'addr'
 * (limit field 1 with 4K granularity and SZ_32), store it in the
 * thread's pcb, and install it in this cpu's LDT at the
 * USER_CTHREAD slot.  Always returns KERN_SUCCESS.
 */
kern_return_t
thread_compose_cthread_desc(unsigned int addr, pcb_t pcb)
{
	struct real_descriptor		desc;
	extern struct fake_descriptor	*mp_ldt[];
	struct real_descriptor		*ldt_base;
	int				this_cpu = cpu_number();

	ldt_base = (struct real_descriptor *)mp_ldt[this_cpu];

	/* segment base split across the three descriptor base fields */
	desc.base_high   = (addr >> 24) & 0xff;
	desc.base_med    = (addr >> 16) & 0xff;
	desc.base_low    = addr & 0xffff;

	desc.limit_high  = 0;
	desc.limit_low   = 1;
	desc.access      = ACC_P|ACC_PL_U|ACC_DATA_W;
	desc.granularity = SZ_32|SZ_G;

	pcb->cthread_desc = desc;
	ldt_base[sel_idx(USER_CTHREAD)] = desc;

	return (KERN_SUCCESS);
}
/*
 * cpu_desc_init64
 *
 * Set up the descriptor tables (GDT, IDT, LDT, KTSS) and the
 * descriptor-index pointers for a cpu running in 64-bit mode.
 * The boot cpu uses the statically built master tables; every
 * other cpu gets private copies in its cpu_desc_table, with the
 * GDT system descriptors fixed up to point at those copies.
 */
void
cpu_desc_init64(
	cpu_data_t	*cdp,
	boolean_t	is_boot_cpu)
{
	cpu_desc_table64_t	*cdt = (cpu_desc_table64_t *)
					cdp->cpu_desc_tablep;
	cpu_desc_index_t	*cdi = &cdp->cpu_desc_index;

	if (is_boot_cpu) {
		/*
		 * Master CPU uses the tables built at boot time.
		 * Just set the index pointers to the low memory space.
		 * Note that in 64-bit mode these are addressed in the
		 * double-mapped window (uber-space).
		 */
		cdi->cdi_ktss = (struct i386_tss *) &master_ktss64;
		cdi->cdi_sstk = (vm_offset_t) &master_sstk.top;
		cdi->cdi_gdt = master_gdt;
		cdi->cdi_idt = (struct fake_descriptor *) &master_idt64;
		cdi->cdi_ldt = (struct fake_descriptor *) &master_ldt;

		/* Replace the expanded LDT and TSS slots in the GDT: */
		*(struct fake_descriptor64 *) &master_gdt[sel_idx(KERNEL_LDT)] =
			kernel_ldt_desc64;
		*(struct fake_descriptor64 *) &master_gdt[sel_idx(KERNEL_TSS)] =
			kernel_tss_desc64;

		/*
		 * Fix up the expanded descriptors for 64-bit.
		 */
		fix_desc64((void *) &master_idt64, IDTSZ);
		fix_desc64((void *) &master_gdt[sel_idx(KERNEL_LDT)], 1);
		fix_desc64((void *) &master_gdt[sel_idx(KERNEL_TSS)], 1);

		/*
		 * Set the double-fault stack as IST1 in the 64-bit TSS
		 */
		master_ktss64.ist1 = UBER64(df_task_stack_end);
	} else {
		/*
		 * Per-cpu GDT, IDT, KTSS descriptors are allocated in kernel
		 * heap (cpu_desc_table) and double-mapped in uber-space
		 * (over 4GB).
		 * LDT descriptors are mapped into a separate area.
		 */
		cdi->cdi_gdt = (struct fake_descriptor *)cdt->gdt;
		cdi->cdi_idt = (struct fake_descriptor *)cdt->idt;
		cdi->cdi_ktss = (struct i386_tss *)&cdt->ktss;
		cdi->cdi_sstk = (vm_offset_t)&cdt->sstk.top;
		cdi->cdi_ldt = cdp->cpu_ldtp;

		/*
		 * Copy the tables
		 */
		bcopy((char *)master_idt64,
		      (char *)cdt->idt,
		      sizeof(master_idt64));
		bcopy((char *)master_gdt,
		      (char *)cdt->gdt,
		      sizeof(master_gdt));
		bcopy((char *)master_ldt,
		      (char *)cdp->cpu_ldtp,
		      sizeof(master_ldt));
		bcopy((char *)&master_ktss64,
		      (char *)&cdt->ktss,
		      sizeof(struct x86_64_tss));

		/*
		 * Fix up the entries in the GDT to point to
		 * this LDT and this TSS.
		 * NOTE(review): kernel_ldt_desc64/kernel_tss_desc64 are
		 * global template descriptors mutated here; presumably
		 * secondary cpus are initialized serially — confirm.
		 */
		kernel_ldt_desc64.offset[0] = (vm_offset_t) cdi->cdi_ldt;
		*(struct fake_descriptor64 *) &cdt->gdt[sel_idx(KERNEL_LDT)] =
			kernel_ldt_desc64;
		fix_desc64(&cdt->gdt[sel_idx(KERNEL_LDT)], 1);

		/* USER_LDT aliases the same per-cpu LDT (same offset) */
		kernel_ldt_desc64.offset[0] = (vm_offset_t) cdi->cdi_ldt;
		*(struct fake_descriptor64 *) &cdt->gdt[sel_idx(USER_LDT)] =
			kernel_ldt_desc64;
		fix_desc64(&cdt->gdt[sel_idx(USER_LDT)], 1);

		kernel_tss_desc64.offset[0] = (vm_offset_t) cdi->cdi_ktss;
		*(struct fake_descriptor64 *) &cdt->gdt[sel_idx(KERNEL_TSS)] =
			kernel_tss_desc64;
		fix_desc64(&cdt->gdt[sel_idx(KERNEL_TSS)], 1);

		/* segment used for per-cpu data access */
		cdt->gdt[sel_idx(CPU_DATA_GS)] = cpudata_desc_pattern;
		cdt->gdt[sel_idx(CPU_DATA_GS)].offset = (vm_offset_t) cdp;
		fix_desc(&cdt->gdt[sel_idx(CPU_DATA_GS)], 1);

		/* Set double-fault stack as IST1 */
		cdt->ktss.ist1 = UBER64((unsigned long)cdt->dfstk +
					sizeof(cdt->dfstk));

		/*
		 * Allocate copyio windows.
		 */
		cpu_userwindow_init(cdp->cpu_number);
		cpu_physwindow_init(cdp->cpu_number);
	}

	/* Require that the top of the sysenter stack is 16-byte aligned */
	if ((cdi->cdi_sstk % 16) != 0)
		panic("cpu_desc_init64() sysenter stack not 16-byte aligned");
}
/*
 * cpu_desc_init
 *
 * 32-bit counterpart of cpu_desc_init64: set up the descriptor
 * tables and index pointers for a cpu.  The boot cpu points at
 * the boot-time master tables through the high shared-mapping
 * space; other cpus get private copies in their cpu_desc_table,
 * remapped into high shared space, with GDT entries fixed up to
 * reference them.
 */
void
cpu_desc_init(
	cpu_data_t	*cdp,
	boolean_t	is_boot_cpu)
{
	cpu_desc_table_t	*cdt = cdp->cpu_desc_tablep;
	cpu_desc_index_t	*cdi = &cdp->cpu_desc_index;

	if (is_boot_cpu) {
		/*
		 * Master CPU uses the tables built at boot time.
		 * Just set the index pointers to the high shared-mapping space.
		 * Note that the sysenter stack uses empty space above the ktss
		 * in the HIGH_FIXED_KTSS page. In this case we don't map the
		 * real master_sstk in low memory.
		 */
		cdi->cdi_ktss = (struct i386_tss *)
			pmap_index_to_virt(HIGH_FIXED_KTSS);
		/* sstk top = just past the ktss, offset like master_sstk.top */
		cdi->cdi_sstk = (vm_offset_t) (cdi->cdi_ktss + 1) +
				(vm_offset_t) &master_sstk.top -
				(vm_offset_t) &master_sstk;
#if	MACH_KDB
		cdi->cdi_dbtss = (struct i386_tss *)
			pmap_index_to_virt(HIGH_FIXED_DBTSS);
#endif	/* MACH_KDB */
		cdi->cdi_gdt = (struct fake_descriptor *)
			pmap_index_to_virt(HIGH_FIXED_GDT);
		cdi->cdi_idt = (struct fake_descriptor *)
			pmap_index_to_virt(HIGH_FIXED_IDT);
		cdi->cdi_ldt = (struct fake_descriptor *)
			pmap_index_to_virt(HIGH_FIXED_LDT_BEGIN);
	} else {
		vm_offset_t	cpu_hi_desc;

		cpu_hi_desc = pmap_cpu_high_shared_remap(cdp->cpu_number,
							 HIGH_CPU_DESC,
							 (vm_offset_t) cdt,
							 1);

		/*
		 * Per-cpu GDT, IDT, LDT, KTSS descriptors are allocated in one
		 * block (cpu_desc_table) and double-mapped into high shared
		 * space in one page window.
		 * Also, a transient stack for the fast sysenter path. The top
		 * of which is set at context switch time to point to the PCB
		 * using the high address.
		 */
		cdi->cdi_gdt = (struct fake_descriptor *) (cpu_hi_desc +
				offsetof(cpu_desc_table_t, gdt[0]));
		cdi->cdi_idt = (struct fake_descriptor *) (cpu_hi_desc +
				offsetof(cpu_desc_table_t, idt[0]));
		cdi->cdi_ktss = (struct i386_tss *) (cpu_hi_desc +
				offsetof(cpu_desc_table_t, ktss));
		cdi->cdi_sstk = cpu_hi_desc +
				offsetof(cpu_desc_table_t, sstk.top);

		/*
		 * LDT descriptors are mapped into a separate area.
		 */
		cdi->cdi_ldt = (struct fake_descriptor *)
			pmap_cpu_high_shared_remap(
				cdp->cpu_number,
				HIGH_CPU_LDT_BEGIN,
				(vm_offset_t) cdp->cpu_ldtp,
				HIGH_CPU_LDT_END - HIGH_CPU_LDT_BEGIN + 1);

		/*
		 * Copy the tables
		 */
		bcopy((char *)master_idt,
		      (char *)cdt->idt,
		      sizeof(master_idt));
		bcopy((char *)master_gdt,
		      (char *)cdt->gdt,
		      sizeof(master_gdt));
		bcopy((char *)master_ldt,
		      (char *)cdp->cpu_ldtp,
		      sizeof(master_ldt));
		bzero((char *)&cdt->ktss, sizeof(struct i386_tss));
#if	MACH_KDB
		cdi->cdi_dbtss = (struct i386_tss *) (cpu_hi_desc +
				offsetof(cpu_desc_table_t, dbtss));
		bcopy((char *)&master_dbtss,
		      (char *)&cdt->dbtss,
		      sizeof(struct i386_tss));
#endif	/* MACH_KDB */

		/*
		 * Fix up the entries in the GDT to point to
		 * this LDT and this TSS.
		 */
		cdt->gdt[sel_idx(KERNEL_LDT)] = ldt_desc_pattern;
		cdt->gdt[sel_idx(KERNEL_LDT)].offset =
			(vm_offset_t) cdi->cdi_ldt;
		fix_desc(&cdt->gdt[sel_idx(KERNEL_LDT)], 1);

		/* USER_LDT aliases the same per-cpu LDT */
		cdt->gdt[sel_idx(USER_LDT)] = ldt_desc_pattern;
		cdt->gdt[sel_idx(USER_LDT)].offset =
			(vm_offset_t) cdi->cdi_ldt;
		fix_desc(&cdt->gdt[sel_idx(USER_LDT)], 1);

		cdt->gdt[sel_idx(KERNEL_TSS)] = tss_desc_pattern;
		cdt->gdt[sel_idx(KERNEL_TSS)].offset =
			(vm_offset_t) cdi->cdi_ktss;
		fix_desc(&cdt->gdt[sel_idx(KERNEL_TSS)], 1);

		/* segment used for per-cpu data access */
		cdt->gdt[sel_idx(CPU_DATA_GS)] = cpudata_desc_pattern;
		cdt->gdt[sel_idx(CPU_DATA_GS)].offset = (vm_offset_t) cdp;
		fix_desc(&cdt->gdt[sel_idx(CPU_DATA_GS)], 1);
#if	MACH_KDB
		cdt->gdt[sel_idx(DEBUG_TSS)] = tss_desc_pattern;
		cdt->gdt[sel_idx(DEBUG_TSS)].offset =
			(vm_offset_t) cdi->cdi_dbtss;
		fix_desc(&cdt->gdt[sel_idx(DEBUG_TSS)], 1);

		/* debugger task stack/entry for this cpu */
		cdt->dbtss.esp0 = (int)(db_task_stack_store +
				(INTSTACK_SIZE * (cdp->cpu_number)) -
				sizeof (natural_t));
		cdt->dbtss.esp = cdt->dbtss.esp0;
		cdt->dbtss.eip = (int)&db_task_start;
#endif	/* MACH_KDB */

		cdt->ktss.ss0 = KERNEL_DS;
		cdt->ktss.io_bit_map_offset = 0x0FFF;	/* no IO bitmap */

		cpu_userwindow_init(cdp->cpu_number);
		cpu_physwindow_init(cdp->cpu_number);
	}
}
/*
 * mp_desc_init
 *
 * Initialize the descriptor tables for cpu 'mycpu'.  The master cpu
 * keeps the boot-time tables and only the per-cpu table pointers are
 * set (returns 0); other cpus get private copies in their
 * mp_desc_table with GDT entries fixed up to reference them (returns
 * the cpu's mp_desc_table pointer).
 */
struct mp_desc_table *
mp_desc_init(
	int	mycpu)
{
	register struct mp_desc_table	*mpt;

	if (mycpu == master_cpu) {
		/*
		 * Master CPU uses the tables built at boot time.
		 * Just set the TSS and GDT pointers.
		 */
		mp_ktss[mycpu] = &ktss;
#if	MACH_KDB
		mp_dbtss[mycpu] = &dbtss;
#endif	/* MACH_KDB */
		mp_gdt[mycpu] = gdt;
		mp_idt[mycpu] = idt;
		return 0;
	} else {
		mpt = mp_desc_table[mycpu];
		mp_ktss[mycpu] = &mpt->ktss;
		mp_gdt[mycpu] = mpt->gdt;
		mp_idt[mycpu] = mpt->idt;

		/*
		 * Copy the tables
		 */
		bcopy((char *)idt,
		      (char *)mpt->idt,
		      sizeof(idt));
		bcopy((char *)gdt,
		      (char *)mpt->gdt,
		      sizeof(gdt));
		bcopy((char *)ldt,
		      (char *)mpt->ldt,
		      sizeof(ldt));
		bzero((char *)&mpt->ktss,
		      sizeof(struct i386_tss));
		/* start this cpu's data area from a clean slate */
		bzero((char *)&cpu_data[mycpu],
		      sizeof(cpu_data_t));
#if	MACH_KDB
		mp_dbtss[mycpu] = &mpt->dbtss;
		bcopy((char *)&dbtss,
		      (char *)&mpt->dbtss,
		      sizeof(struct i386_tss));
#endif	/* MACH_KDB */

		/*
		 * Fix up the entries in the GDT to point to
		 * this LDT and this TSS.
		 * NOTE(review): bases are biased by LINEAR_KERNEL_ADDRESS —
		 * presumably converting kernel virtual addresses to linear
		 * addresses for the descriptors; confirm.
		 */
		mpt->gdt[sel_idx(KERNEL_LDT)] = ldt_desc_pattern;
		mpt->gdt[sel_idx(KERNEL_LDT)].offset = LINEAR_KERNEL_ADDRESS +
			(unsigned int) mpt->ldt;
		fix_desc(&mpt->gdt[sel_idx(KERNEL_LDT)], 1);

		mpt->gdt[sel_idx(KERNEL_TSS)] = tss_desc_pattern;
		mpt->gdt[sel_idx(KERNEL_TSS)].offset = LINEAR_KERNEL_ADDRESS +
			(unsigned int) &mpt->ktss;
		fix_desc(&mpt->gdt[sel_idx(KERNEL_TSS)], 1);

		mpt->gdt[sel_idx(CPU_DATA)] = cpudata_desc_pattern;
		mpt->gdt[sel_idx(CPU_DATA)].offset = LINEAR_KERNEL_ADDRESS +
			(unsigned int) &cpu_data[mycpu];
		fix_desc(&mpt->gdt[sel_idx(CPU_DATA)], 1);
#if	MACH_KDB
		mpt->gdt[sel_idx(DEBUG_TSS)] = tss_desc_pattern;
		mpt->gdt[sel_idx(DEBUG_TSS)].offset = LINEAR_KERNEL_ADDRESS +
			(unsigned int) &mpt->dbtss;
		fix_desc(&mpt->gdt[sel_idx(DEBUG_TSS)], 1);

		/* debugger task stack/entry for this cpu */
		mpt->dbtss.esp0 = (int)(db_task_stack_store +
				(INTSTACK_SIZE * (mycpu + 1)) -
				sizeof (natural_t));
		mpt->dbtss.esp = mpt->dbtss.esp0;
		mpt->dbtss.eip = (int)&db_task_start;
#endif	/* MACH_KDB */

		mpt->ktss.ss0 = KERNEL_DS;
		mpt->ktss.io_bit_map_offset = 0x0FFF;	/* no IO bitmap */

		return mpt;
	}
}