void
cpu_set_tss_gates(struct cpu_info *ci)
{
    struct segment_descriptor sd;

    ci->ci_doubleflt_stack = (char *)uvm_km_alloc(kernel_map, USPACE);
    cpu_init_tss(&ci->ci_doubleflt_tss, ci->ci_doubleflt_stack,
        IDTVEC(tss_trap08));
    setsegment(&sd, &ci->ci_doubleflt_tss, sizeof(struct i386tss) - 1,
        SDT_SYS386TSS, SEL_KPL, 0, 0);
    ci->ci_gdt[GTRAPTSS_SEL].sd = sd;

    setgate(&idt[8], NULL, 0, SDT_SYSTASKGT, SEL_KPL,
        GSEL(GTRAPTSS_SEL, SEL_KPL));

#if defined(DDB) && defined(MULTIPROCESSOR)
    /*
     * Set up separate handler for the DDB IPI, so that it doesn't
     * stomp on a possibly corrupted stack.
     *
     * XXX overwriting the gate set in db_machine_init.
     * Should rearrange the code so that it's set only once.
     */
    ci->ci_ddbipi_stack = (char *)uvm_km_alloc(kernel_map, USPACE);
    cpu_init_tss(&ci->ci_ddbipi_tss, ci->ci_ddbipi_stack, Xintrddbipi);
    setsegment(&sd, &ci->ci_ddbipi_tss, sizeof(struct i386tss) - 1,
        SDT_SYS386TSS, SEL_KPL, 0, 0);
    ci->ci_gdt[GIPITSS_SEL].sd = sd;

    setgate(&idt[ddb_vec], NULL, 0, SDT_SYSTASKGT, SEL_KPL,
        GSEL(GIPITSS_SEL, SEL_KPL));
#endif
}
void
trap_init(void)
{
    int i;
    extern uint32_t trap_handlers[];

    /* Install all 256 vectors as kernel-only (DPL 0) gates. */
    for (i = 0; i < 256; i++)
        setgate(idt[i], 0, _KERNEL_CS_, trap_handlers[i], 0);

    /* The system call gate must be reachable from user mode (DPL 3). */
    setgate(idt[T_SYSCALL], 0, _KERNEL_CS_, trap_handlers[T_SYSCALL], 3);

    trap_init_percpu();
}
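The loop above installs every vector with DPL 0; only idt[T_SYSCALL] is re-installed with DPL 3, which is what allows a software interrupt issued from ring 3 to enter the kernel instead of raising a general-protection fault. A hypothetical user-side stub illustrating that, assuming T_SYSCALL expands to a plain integer constant and that this kernel passes the call number in %eax (neither detail is taken from the source above):

/*
 * Hypothetical user-mode stub, for illustration only.  The DPL 3 gate on
 * idt[T_SYSCALL] is what permits this "int" from ring 3; any of the DPL 0
 * vectors would fault with #GP instead.  Assumes T_SYSCALL is a numeric
 * macro and that %eax carries the system call number and return value.
 */
#define XSTR_(x)    #x
#define XSTR(x)     XSTR_(x)

static inline int
syscall0(int num)
{
    int ret;

    __asm__ volatile ("int $" XSTR(T_SYSCALL)
        : "=a" (ret)
        : "a" (num)
        : "memory");
    return ret;
}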
void
idt_vec_set(int vec, void (*function)(void))
{
    KASSERT(mutex_owned(&cpu_lock) || !mp_online);
    KASSERT(idt_allocmap[vec] == 1);
    setgate(&idt[vec], function, 0, SDT_SYS386IGT, SEL_KPL,
        GSEL(GCODE_SEL, SEL_KPL));
}
void
idt_vec_set(int vec, void (*function)(void))
{
    /*
     * Vector should be allocated, so no locking needed.
     */
    KASSERT(idt_allocmap[vec] == 1);
    setgate(&idt[vec], function, 0, SDT_SYS386IGT, SEL_KPL,
        GSEL(GCODE_SEL, SEL_KPL));
}
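Both idt_vec_set() variants assert that the target slot was already reserved in idt_allocmap. A minimal sketch of that pairing against the locked variant, assuming an idt_vec_alloc() helper that scans the bitmap and claims a free slot (the 0x80-0xef range and the return convention are illustrative assumptions, not quoted from the source):

/*
 * Illustrative sketch, not part of the original source.  Assumes
 * idt_vec_alloc(low, high) marks a free idt_allocmap[] slot as used and
 * returns its vector number (0 if none was available).
 */
static int
example_install_handler(void (*handler)(void))
{
    int vec;

    mutex_enter(&cpu_lock);                 /* satisfies the KASSERT above */
    vec = idt_vec_alloc(0x80, 0xef);        /* sets idt_allocmap[vec] = 1 */
    if (vec != 0)
        idt_vec_set(vec, handler);
    mutex_exit(&cpu_lock);

    return vec;                             /* 0 means no free vector */
}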
/*
 * Fill in default interrupt table (in case of spurious interrupt
 * during configuration of kernel), setup interrupt control unit.
 */
void
intr_default_setup(void)
{
    int i;

    /* icu vectors */
    for (i = 0; i < NUM_LEGACY_IRQS; i++) {
        idt_allocmap[ICU_OFFSET + i] = 1;
        setgate(&idt[ICU_OFFSET + i], i8259_stubs[i].ist_entry,
            0, SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
    }

    /*
     * Eventually might want to check if it's actually there.
     */
    i8259_default_setup();
}
/*
 * Fill in default interrupt table (in case of spurious interrupt
 * during configuration of kernel), setup interrupt control unit.
 */
void
isa_defaultirq(void)
{
    int i;

    /* icu vectors */
    for (i = 0; i < ICU_LEN; i++)
        setgate(&idt[ICU_OFFSET + i], IDTVEC(intr)[i], 0,
            SDT_SYS386IGT, SEL_KPL, GICODE_SEL);

    /* initialize 8259's */
    outb(IO_ICU1, 0x11);                /* reset; program device, four bytes */
    outb(IO_ICU1+1, ICU_OFFSET);        /* starting at this vector index */
    outb(IO_ICU1+1, 1 << IRQ_SLAVE);    /* slave on line 2 */
#ifdef AUTO_EOI_1
    outb(IO_ICU1+1, 2 | 1);             /* auto EOI, 8086 mode */
#else
    outb(IO_ICU1+1, 1);                 /* 8086 mode */
#endif
    outb(IO_ICU1+1, 0xff);              /* leave interrupts masked */
    outb(IO_ICU1, 0x68);                /* special mask mode (if available) */
    outb(IO_ICU1, 0x0a);                /* Read IRR by default. */
#ifdef REORDER_IRQ
    outb(IO_ICU1, 0xc0 | (3 - 1));      /* pri order 3-7, 0-2 (com2 first) */
#endif

    outb(IO_ICU2, 0x11);                /* reset; program device, four bytes */
    outb(IO_ICU2+1, ICU_OFFSET+8);      /* starting at this vector index */
    outb(IO_ICU2+1, IRQ_SLAVE);
#ifdef AUTO_EOI_2
    outb(IO_ICU2+1, 2 | 1);             /* auto EOI, 8086 mode */
#else
    outb(IO_ICU2+1, 1);                 /* 8086 mode */
#endif
    outb(IO_ICU2+1, 0xff);              /* leave interrupts masked */
    outb(IO_ICU2, 0x68);                /* special mask mode (if available) */
    outb(IO_ICU2, 0x0a);                /* Read IRR by default. */
}
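After this initialization both PICs are left with every line masked (the 0xff writes to OCW1 at IO_ICU1+1 and IO_ICU2+1). A hypothetical helper, purely for illustration, showing how a single line would later be enabled by clearing its bit in that mask register:

/*
 * Hypothetical helper, not part of the original source: enable one ISA IRQ
 * by clearing its bit in the 8259 interrupt mask register (OCW1), which is
 * read and written at the base I/O port + 1 on both the master and slave.
 */
static void
i8259_unmask_irq(int irq)
{
    if (irq < 8)
        outb(IO_ICU1 + 1, inb(IO_ICU1 + 1) & ~(1 << irq));
    else
        outb(IO_ICU2 + 1, inb(IO_ICU2 + 1) & ~(1 << (irq - 8)));
}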
void
init_x86_64(paddr_t first_avail)
{
    extern void consinit(void);
    extern struct extent *iomem_ex;
    struct region_descriptor region;
    struct mem_segment_descriptor *ldt_segp;
    int x, first16q, ist;
    u_int64_t seg_start, seg_end;
    u_int64_t seg_start1, seg_end1;

    cpu_init_msrs(&cpu_info_primary);

    proc0.p_addr = proc0paddr;
    cpu_info_primary.ci_curpcb = &proc0.p_addr->u_pcb;

    x86_bus_space_init();

    consinit();     /* XXX SHOULD NOT BE DONE HERE */

    /*
     * Initialize PAGE_SIZE-dependent variables.
     */
    uvm_setpagesize();

#if 0
    uvmexp.ncolors = 2;
#endif

    /*
     * Boot arguments are in a single page specified by /boot.
     *
     * We require the "new" vector form, as well as memory ranges
     * to be given in bytes rather than KB.
     *
     * locore copies the data into bootinfo[] for us.
     */
    if ((bootapiver & (BAPIV_VECTOR | BAPIV_BMEMMAP)) ==
        (BAPIV_VECTOR | BAPIV_BMEMMAP)) {
        if (bootinfo_size >= sizeof(bootinfo))
            panic("boot args too big");
        getbootinfo(bootinfo, bootinfo_size);
    } else
        panic("invalid /boot");

    avail_start = PAGE_SIZE;    /* BIOS leaves data in low memory */
                                /* and VM system doesn't work with phys 0 */
#ifdef MULTIPROCESSOR
    if (avail_start < MP_TRAMPOLINE + PAGE_SIZE)
        avail_start = MP_TRAMPOLINE + PAGE_SIZE;
#endif

    /*
     * Call pmap initialization to make new kernel address space.
     * We must do this before loading pages into the VM system.
     */
    pmap_bootstrap(VM_MIN_KERNEL_ADDRESS,
        IOM_END + trunc_page(KBTOB(biosextmem)));

    if (avail_start != PAGE_SIZE)
        pmap_prealloc_lowmem_ptps();

    if (mem_cluster_cnt == 0) {
        /*
         * Allocate the physical addresses used by RAM from the iomem
         * extent map.  This is done before the addresses are
         * page rounded just to make sure we get them all.
         */
        if (extent_alloc_region(iomem_ex, 0, KBTOB(biosbasemem),
            EX_NOWAIT)) {
            /* XXX What should we do? */
            printf("WARNING: CAN'T ALLOCATE BASE MEMORY FROM "
                "IOMEM EXTENT MAP!\n");
        }
        mem_clusters[0].start = 0;
        mem_clusters[0].size = trunc_page(KBTOB(biosbasemem));
        physmem += atop(mem_clusters[0].size);

        if (extent_alloc_region(iomem_ex, IOM_END, KBTOB(biosextmem),
            EX_NOWAIT)) {
            /* XXX What should we do? */
            printf("WARNING: CAN'T ALLOCATE EXTENDED MEMORY FROM "
                "IOMEM EXTENT MAP!\n");
        }

#if 0
#if NISADMA > 0
        /*
         * Some motherboards/BIOSes remap the 384K of RAM that would
         * normally be covered by the ISA hole to the end of memory
         * so that it can be used.  However, on a 16M system, this
         * would cause bounce buffers to be allocated and used.
         * This is not desirable behaviour, as more than 384K of
         * bounce buffers might be allocated.  As a work-around,
         * we round memory down to the nearest 1M boundary if
         * we're using any isadma devices and the remapped memory
         * is what puts us over 16M.
         */
        if (biosextmem > (15*1024) && biosextmem < (16*1024)) {
            char pbuf[9];

            format_bytes(pbuf, sizeof(pbuf), biosextmem - (15*1024));
            printf("Warning: ignoring %s of remapped memory\n", pbuf);
            biosextmem = (15*1024);
        }
#endif
#endif

        mem_clusters[1].start = IOM_END;
        mem_clusters[1].size = trunc_page(KBTOB(biosextmem));
        physmem += atop(mem_clusters[1].size);

        mem_cluster_cnt = 2;

        avail_end = IOM_END + trunc_page(KBTOB(biosextmem));
    }

    /*
     * If we have 16M of RAM or less, just put it all on
     * the default free list.  Otherwise, put the first
     * 16M of RAM on a lower priority free list (so that
     * all of the ISA DMA'able memory won't be eaten up
     * first-off).
     */
    if (avail_end <= (16 * 1024 * 1024))
        first16q = VM_FREELIST_DEFAULT;
    else
        first16q = VM_FREELIST_FIRST16;

    /*
     * Make sure the end of the space used by the kernel is rounded.
     */
    first_avail = round_page(first_avail);
    kern_end = KERNBASE + first_avail;

    /*
     * Now, load the memory clusters (which have already been
     * rounded and truncated) into the VM system.
     *
     * NOTE: WE ASSUME THAT MEMORY STARTS AT 0 AND THAT THE KERNEL
     * IS LOADED AT IOM_END (1M).
     */
    for (x = 0; x < mem_cluster_cnt; x++) {
        seg_start = mem_clusters[x].start;
        seg_end = mem_clusters[x].start + mem_clusters[x].size;
        seg_start1 = 0;
        seg_end1 = 0;

        if (seg_start > 0xffffffffULL) {
            printf("skipping %lld bytes of memory above 4GB\n",
                seg_end - seg_start);
            continue;
        }
        if (seg_end > 0x100000000ULL) {
            printf("skipping %lld bytes of memory above 4GB\n",
                seg_end - 0x100000000ULL);
            seg_end = 0x100000000ULL;
        }

        /*
         * Skip memory before our available starting point.
         */
        if (seg_end <= avail_start)
            continue;

        if (avail_start >= seg_start && avail_start < seg_end) {
            if (seg_start != 0)
                panic("init_x86_64: memory doesn't start at 0");
            seg_start = avail_start;
            if (seg_start == seg_end)
                continue;
        }

        /*
         * If this segment contains the kernel, split it
         * in two, around the kernel.
         */
        if (seg_start <= IOM_END && first_avail <= seg_end) {
            seg_start1 = first_avail;
            seg_end1 = seg_end;
            seg_end = IOM_END;
        }

        /* First hunk */
        if (seg_start != seg_end) {
            if (seg_start <= (16 * 1024 * 1024) &&
                first16q != VM_FREELIST_DEFAULT) {
                u_int64_t tmp;

                if (seg_end > (16 * 1024 * 1024))
                    tmp = (16 * 1024 * 1024);
                else
                    tmp = seg_end;
#if DEBUG_MEMLOAD
                printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n",
                    (unsigned long long)seg_start,
                    (unsigned long long)tmp,
                    atop(seg_start), atop(tmp));
#endif
                uvm_page_physload(atop(seg_start), atop(tmp),
                    atop(seg_start), atop(tmp), first16q);
                seg_start = tmp;
            }

            if (seg_start != seg_end) {
#if DEBUG_MEMLOAD
                printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n",
                    (unsigned long long)seg_start,
                    (unsigned long long)seg_end,
                    atop(seg_start), atop(seg_end));
#endif
                uvm_page_physload(atop(seg_start), atop(seg_end),
                    atop(seg_start), atop(seg_end),
                    VM_FREELIST_DEFAULT);
            }
        }

        /* Second hunk */
        if (seg_start1 != seg_end1) {
            if (seg_start1 <= (16 * 1024 * 1024) &&
                first16q != VM_FREELIST_DEFAULT) {
                u_int64_t tmp;

                if (seg_end1 > (16 * 1024 * 1024))
                    tmp = (16 * 1024 * 1024);
                else
                    tmp = seg_end1;
#if DEBUG_MEMLOAD
                printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n",
                    (unsigned long long)seg_start1,
                    (unsigned long long)tmp,
                    atop(seg_start1), atop(tmp));
#endif
                uvm_page_physload(atop(seg_start1), atop(tmp),
                    atop(seg_start1), atop(tmp), first16q);
                seg_start1 = tmp;
            }

            if (seg_start1 != seg_end1) {
#if DEBUG_MEMLOAD
                printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n",
                    (unsigned long long)seg_start1,
                    (unsigned long long)seg_end1,
                    atop(seg_start1), atop(seg_end1));
#endif
                uvm_page_physload(atop(seg_start1), atop(seg_end1),
                    atop(seg_start1), atop(seg_end1),
                    VM_FREELIST_DEFAULT);
            }
        }
    }

    /*
     * Steal memory for the message buffer (at end of core).
     */
    {
        struct vm_physseg *vps = NULL;
        psize_t sz = round_page(MSGBUFSIZE);
        psize_t reqsz = sz;

        for (x = 0; x < vm_nphysseg; x++) {
            vps = &vm_physmem[x];
            if (ptoa(vps->avail_end) == avail_end)
                break;
        }
        if (x == vm_nphysseg)
            panic("init_x86_64: can't find end of memory");

        /* Shrink so it'll fit in the last segment. */
        if ((vps->avail_end - vps->avail_start) < atop(sz))
            sz = ptoa(vps->avail_end - vps->avail_start);

        vps->avail_end -= atop(sz);
        vps->end -= atop(sz);
        msgbuf_paddr = ptoa(vps->avail_end);

        /* Remove the last segment if it now has no pages. */
        if (vps->start == vps->end) {
            for (vm_nphysseg--; x < vm_nphysseg; x++)
                vm_physmem[x] = vm_physmem[x + 1];
        }

        /*
         * Now find where the new avail_end is.
         */
        for (avail_end = 0, x = 0; x < vm_nphysseg; x++)
            if (vm_physmem[x].avail_end > avail_end)
                avail_end = vm_physmem[x].avail_end;
        avail_end = ptoa(avail_end);

        /* Warn if the message buffer had to be shrunk. */
        if (sz != reqsz)
            printf("WARNING: %ld bytes not available for msgbuf "
                "in last cluster (%ld used)\n", reqsz, sz);
    }

    /*
     * XXXfvdl todo: acpi wakeup code.
     */

    pmap_growkernel(VM_MIN_KERNEL_ADDRESS + 32 * 1024 * 1024);

    pmap_kenter_pa(idt_vaddr, idt_paddr, VM_PROT_READ|VM_PROT_WRITE);
    pmap_kenter_pa(idt_vaddr + PAGE_SIZE, idt_paddr + PAGE_SIZE,
        VM_PROT_READ|VM_PROT_WRITE);
    pmap_kenter_pa(lo32_vaddr, lo32_paddr, VM_PROT_READ|VM_PROT_WRITE);

    idt = (struct gate_descriptor *)idt_vaddr;
    gdtstore = (char *)(idt + NIDT);
    ldtstore = gdtstore + DYNSEL_START;

    /* make gdt gates and memory segments */
    set_mem_segment(GDT_ADDR_MEM(gdtstore, GCODE_SEL), 0, 0xfffff,
        SDT_MEMERA, SEL_KPL, 1, 0, 1);

    set_mem_segment(GDT_ADDR_MEM(gdtstore, GDATA_SEL), 0, 0xfffff,
        SDT_MEMRWA, SEL_KPL, 1, 0, 1);

    set_sys_segment(GDT_ADDR_SYS(gdtstore, GLDT_SEL), ldtstore,
        LDT_SIZE - 1, SDT_SYSLDT, SEL_KPL, 0);

    set_mem_segment(GDT_ADDR_MEM(gdtstore, GUCODE_SEL), 0,
        atop(VM_MAXUSER_ADDRESS) - 1, SDT_MEMERA, SEL_UPL, 1, 0, 1);

    set_mem_segment(GDT_ADDR_MEM(gdtstore, GUDATA_SEL), 0,
        atop(VM_MAXUSER_ADDRESS) - 1, SDT_MEMRWA, SEL_UPL, 1, 0, 1);

    /* make ldt gates and memory segments */
    setgate((struct gate_descriptor *)(ldtstore + LSYS5CALLS_SEL),
        &IDTVEC(oosyscall), 0, SDT_SYS386CGT, SEL_UPL,
        GSEL(GCODE_SEL, SEL_KPL));

    *(struct mem_segment_descriptor *)(ldtstore + LUCODE_SEL) =
        *GDT_ADDR_MEM(gdtstore, GUCODE_SEL);
    *(struct mem_segment_descriptor *)(ldtstore + LUDATA_SEL) =
        *GDT_ADDR_MEM(gdtstore, GUDATA_SEL);

    /*
     * 32 bit GDT entries.
     */
    set_mem_segment(GDT_ADDR_MEM(gdtstore, GUCODE32_SEL), 0,
        atop(VM_MAXUSER_ADDRESS) - 1, SDT_MEMERA, SEL_UPL, 1, 1, 0);

    set_mem_segment(GDT_ADDR_MEM(gdtstore, GUDATA32_SEL), 0,
        atop(VM_MAXUSER_ADDRESS) - 1, SDT_MEMRWA, SEL_UPL, 1, 1, 0);

    /*
     * 32 bit LDT entries.
     */
    ldt_segp = (struct mem_segment_descriptor *)(ldtstore + LUCODE32_SEL);
    set_mem_segment(ldt_segp, 0, atop(VM_MAXUSER_ADDRESS32) - 1,
        SDT_MEMERA, SEL_UPL, 1, 1, 0);
    ldt_segp = (struct mem_segment_descriptor *)(ldtstore + LUDATA32_SEL);
    set_mem_segment(ldt_segp, 0, atop(VM_MAXUSER_ADDRESS32) - 1,
        SDT_MEMRWA, SEL_UPL, 1, 1, 0);

    /*
     * Other entries.
     */
    memcpy((struct gate_descriptor *)(ldtstore + LSOL26CALLS_SEL),
        (struct gate_descriptor *)(ldtstore + LSYS5CALLS_SEL),
        sizeof (struct gate_descriptor));
    memcpy((struct gate_descriptor *)(ldtstore + LBSDICALLS_SEL),
        (struct gate_descriptor *)(ldtstore + LSYS5CALLS_SEL),
        sizeof (struct gate_descriptor));

    /* exceptions */
    for (x = 0; x < 32; x++) {
        ist = (x == 8) ? 1 : 0;
        setgate(&idt[x], IDTVEC(exceptions)[x], ist, SDT_SYS386IGT,
            (x == 3 || x == 4) ? SEL_UPL : SEL_KPL,
            GSEL(GCODE_SEL, SEL_KPL));
        idt_allocmap[x] = 1;
    }

    /* new-style interrupt gate for syscalls */
    setgate(&idt[128], &IDTVEC(osyscall), 0, SDT_SYS386IGT, SEL_UPL,
        GSEL(GCODE_SEL, SEL_KPL));
    idt_allocmap[128] = 1;

    setregion(&region, gdtstore, DYNSEL_START - 1);
    lgdt(&region);

    cpu_init_idt();

#ifdef DDB
    db_machine_init();
    ddb_init();
    if (boothowto & RB_KDB)
        Debugger();
#endif
#ifdef KGDB
    kgdb_port_init();
    if (boothowto & RB_KDB) {
        kgdb_debug_init = 1;
        kgdb_connect(1);
    }
#endif

    intr_default_setup();
    softintr_init();

    splraise(IPL_IPI);
    enable_intr();

    /* Make sure maxproc is sane */
    if (maxproc > cpu_maxproc())
        maxproc = cpu_maxproc();
}
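Near the end of init_x86_64(), setregion() and lgdt() hand the freshly built GDT to the CPU by packing its base and limit into the pseudo-descriptor that the LGDT instruction expects. A plausible sketch of setregion() under that assumption; the struct region_descriptor field names are illustrative guesses, not quoted from the tree:

/*
 * Sketch only: build the long-mode pseudo-descriptor (16-bit limit plus
 * 64-bit linear base) that LGDT/LIDT consume.  Field names are assumed.
 */
static inline void
setregion(struct region_descriptor *rd, void *base, uint16_t limit)
{
    rd->rd_limit = limit;           /* table size in bytes, minus one */
    rd->rd_base = (uint64_t)base;   /* linear address of the table */
}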