void
cpu_set_tss_gates(struct cpu_info *ci)
{
	struct segment_descriptor sd;

	ci->ci_doubleflt_stack = (char *)uvm_km_alloc(kernel_map, USPACE);
	cpu_init_tss(&ci->ci_doubleflt_tss, ci->ci_doubleflt_stack,
	    IDTVEC(tss_trap08));
	setsegment(&sd, &ci->ci_doubleflt_tss, sizeof(struct i386tss) - 1,
	    SDT_SYS386TSS, SEL_KPL, 0, 0);
	ci->ci_gdt[GTRAPTSS_SEL].sd = sd;
	setgate(&idt[8], NULL, 0, SDT_SYSTASKGT, SEL_KPL,
	    GSEL(GTRAPTSS_SEL, SEL_KPL));

#if defined(DDB) && defined(MULTIPROCESSOR)
	/*
	 * Set up separate handler for the DDB IPI, so that it doesn't
	 * stomp on a possibly corrupted stack.
	 *
	 * XXX overwriting the gate set in db_machine_init.
	 * Should rearrange the code so that it's set only once.
	 */
	ci->ci_ddbipi_stack = (char *)uvm_km_alloc(kernel_map, USPACE);
	cpu_init_tss(&ci->ci_ddbipi_tss, ci->ci_ddbipi_stack, Xintrddbipi);
	setsegment(&sd, &ci->ci_ddbipi_tss, sizeof(struct i386tss) - 1,
	    SDT_SYS386TSS, SEL_KPL, 0, 0);
	ci->ci_gdt[GIPITSS_SEL].sd = sd;
	setgate(&idt[ddb_vec], NULL, 0, SDT_SYSTASKGT, SEL_KPL,
	    GSEL(GIPITSS_SEL, SEL_KPL));
#endif
}
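/*
 * For context, a minimal sketch (not the actual cpu_init_tss()) of the
 * state such a helper has to fill in for the task gates above to work:
 * a fresh stack, a kernel page table and the handler entry point.  Field
 * names assume the i386 <machine/tss.h> layout; the real helper sets
 * more state than shown here.
 */
static void
tss_fill_minimal(struct i386tss *tss, char *stack, void *func)
{
	memset(tss, 0, sizeof(*tss));
	tss->tss_esp0 = tss->tss_esp = (int)(stack + USPACE - 16);
	tss->tss_ss0 = tss->tss_ss = GSEL(GDATA_SEL, SEL_KPL);
	tss->tss_cs = GSEL(GCODE_SEL, SEL_KPL);
	tss->tss_ds = tss->tss_es = GSEL(GDATA_SEL, SEL_KPL);
	tss->tss_cr3 = rcr3();		/* hardware task switch reloads %cr3 */
	tss->tss_eip = (int)func;	/* e.g. IDTVEC(tss_trap08) */
	tss->tss_eflags = PSL_MBO;	/* interrupts stay off in the handler */
	tss->tss_ioopt = sizeof(struct i386tss) << 16;	/* no I/O bitmap */
}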
/*
 * Allocate shadow GDT for a slave CPU.
 */
void
gdt_alloc_cpu(struct cpu_info *ci)
{
	int max_len = MAXGDTSIZ * sizeof(gdt[0]);
	int min_len = MINGDTSIZ * sizeof(gdt[0]);
	struct vm_page *pg;
	vaddr_t va;

	ci->ci_gdt = (union descriptor *)uvm_km_alloc(kernel_map, max_len,
	    0, UVM_KMF_VAONLY);
	for (va = (vaddr_t)ci->ci_gdt; va < (vaddr_t)ci->ci_gdt + min_len;
	    va += PAGE_SIZE) {
		while ((pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO))
		    == NULL) {
			uvm_wait("gdt_alloc_cpu");
		}
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ | VM_PROT_WRITE);
	}
	pmap_update(pmap_kernel());
	memset(ci->ci_gdt, 0, min_len);
	memcpy(ci->ci_gdt, gdt, gdt_count[0] * sizeof(gdt[0]));
	setsegment(&ci->ci_gdt[GCPU_SEL].sd, ci, 0xfffff,
	    SDT_MEMRWA, SEL_KPL, 1, 1);
}
/*
 * Initialize the GDT subsystem.  Called from autoconf().
 */
void
gdt_init(void)
{
	struct vm_page *pg;
	vaddr_t va;
	struct cpu_info *ci = &cpu_info_primary;

	gdt_next = NGDT;
	gdt_free = GNULL_SEL;

	gdt = (union descriptor *)uvm_km_valloc(kernel_map, MAXGDTSIZ);
	for (va = (vaddr_t)gdt; va < (vaddr_t)gdt + MAXGDTSIZ;
	    va += PAGE_SIZE) {
		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
		if (pg == NULL)
			panic("gdt_init: no pages");
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    PROT_READ | PROT_WRITE);
	}
	bcopy(bootstrap_gdt, gdt, NGDT * sizeof(union descriptor));
	ci->ci_gdt = gdt;
	setsegment(&ci->ci_gdt[GCPU_SEL].sd, ci, sizeof(struct cpu_info)-1,
	    SDT_MEMRWA, SEL_KPL, 0, 0);

	gdt_init_cpu(ci);
}
/*
 * Initialize the GDT subsystem.  Called from autoconf().
 */
void
gdt_init(void)
{
	size_t max_len, min_len;
	struct vm_page *pg;
	vaddr_t va;
	struct cpu_info *ci = &cpu_info_primary;

	simple_lock_init(&gdt_simplelock);
	lockinit(&gdt_lock_store, PZERO, "gdtlck", 0, 0);

	max_len = MAXGDTSIZ * sizeof(union descriptor);
	min_len = MINGDTSIZ * sizeof(union descriptor);

	gdt_size = MINGDTSIZ;
	gdt_count = NGDT;
	gdt_next = NGDT;
	gdt_free = GNULL_SEL;

	gdt = (union descriptor *)uvm_km_valloc(kernel_map, max_len);
	for (va = (vaddr_t)gdt; va < (vaddr_t)gdt + min_len;
	    va += PAGE_SIZE) {
		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
		if (pg == NULL)
			panic("gdt_init: no pages");
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ | VM_PROT_WRITE);
	}
	bcopy(bootstrap_gdt, gdt, NGDT * sizeof(union descriptor));
	ci->ci_gdt = gdt;
	setsegment(&ci->ci_gdt[GCPU_SEL].sd, ci, sizeof(struct cpu_info)-1,
	    SDT_MEMRWA, SEL_KPL, 0, 0);

	gdt_init_cpu(ci);
}
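/*
 * Sketch of the follow-up step that gdt_init() ends with but which is not
 * shown in this excerpt: gdt_init_cpu() points the CPU at its table.
 * Assuming the usual setregion()/lgdt() helpers from the i386 ports, it
 * amounts to roughly the following; the exact limit computation varies
 * between versions.
 */
void
gdt_init_cpu(struct cpu_info *ci)
{
	struct region_descriptor region;

	/* describe the whole (maximum-size) table and load it into GDTR */
	setregion(&region, ci->ci_gdt,
	    MAXGDTSIZ * sizeof(union descriptor) - 1);
	lgdt(&region);
}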
/*
 * push_gdt
 *
 * Allocates and populates a page in the guest phys memory space to hold
 * the boot-time GDT.  Since vmd(8) is acting as the bootloader, we need
 * to create the same GDT that a real bootloader would have created.
 * This is loaded into the guest phys RAM space at address GDT_PAGE.
 */
static void
push_gdt(void)
{
	uint8_t gdtpage[PAGE_SIZE];
	struct mem_segment_descriptor *sd;

	memset(&gdtpage, 0, sizeof(gdtpage));

	sd = (struct mem_segment_descriptor *)&gdtpage;

	/*
	 * Create three segment descriptors:
	 *
	 * GDT[0] : null descriptor. "Created" via memset above.
	 * GDT[1] (selector @ 0x8): Executable segment, for CS
	 * GDT[2] (selector @ 0x10): RW Data segment, for DS/ES/SS
	 */
	setsegment(&sd[1], 0, 0xffffffff, SDT_MEMERA, SEL_KPL, 1, 1);
	setsegment(&sd[2], 0, 0xffffffff, SDT_MEMRWA, SEL_KPL, 1, 1);

	write_mem(GDT_PAGE, gdtpage, PAGE_SIZE);
}
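/*
 * Illustrative sketch, not part of vmd: the pseudo-descriptor that pairs
 * with the table written by push_gdt().  vmd supplies the guest's initial
 * GDTR separately; a real bootloader would load a descriptor like this
 * with lgdt.  Struct and macro names below are made up for illustration.
 */
struct boot_gdtr {
	uint16_t limit;		/* size of the table in bytes, minus one */
	uint32_t base;		/* guest-physical address of the table */
} __packed;

static const struct boot_gdtr boot_gdtr = {
	.limit = 3 * 8 - 1,	/* three 8-byte descriptors */
	.base = GDT_PAGE,	/* where push_gdt() wrote the table */
};

/* Selectors matching the layout created by push_gdt(). */
#define BOOT_CS	0x08		/* GDT[1]: executable segment */
#define BOOT_DS	0x10		/* GDT[2]: read/write data segment */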
/* XXX needs spinlocking if we ever mean to go finegrained. */
void
setgdt(int sel, void *base, size_t limit, int type, int dpl, int def32,
    int gran)
{
	struct segment_descriptor *sd = &gdt[sel].sd;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	setsegment(sd, base, limit, type, dpl, def32, gran);
	CPU_INFO_FOREACH(cii, ci)
		if (ci->ci_gdt != NULL && ci->ci_gdt != gdt)
			ci->ci_gdt[sel].sd = *sd;
}
void
kvm86_init(void)
{
	size_t vmdsize;
	char *buf;
	struct kvm86_data *vmd;
	struct pcb *pcb;
	paddr_t pa;
	int i;

	vmdsize = round_page(sizeof(struct kvm86_data)) + PAGE_SIZE;

	if ((buf = km_alloc(vmdsize, &kv_any, &kp_zero, &kd_waitok)) == NULL)
		return;

	/* first page is stack */
	vmd = (struct kvm86_data *)(buf + PAGE_SIZE);
	pcb = &vmd->pcb;

	/*
	 * derive pcb and TSS from proc0;
	 * we want to access all IO ports, so we need a full-size
	 * permission bitmap
	 * XXX do we really need the pcb or just the TSS?
	 */
	memcpy(pcb, &proc0.p_addr->u_pcb, sizeof(struct pcb));
	pcb->pcb_tss.tss_esp0 = (int)vmd;
	pcb->pcb_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	for (i = 0; i < sizeof(vmd->iomap) / 4; i++)
		vmd->iomap[i] = 0;
	pcb->pcb_tss.tss_ioopt =
	    ((caddr_t)vmd->iomap - (caddr_t)&pcb->pcb_tss) << 16;

	/* setup TSS descriptor (including our iomap) */
	setsegment(&vmd->sd, &pcb->pcb_tss,
	    sizeof(struct pcb) + sizeof(vmd->iomap) - 1,
	    SDT_SYS386TSS, SEL_KPL, 0, 0);

	/* prepare VM for BIOS calls */
	kvm86_mapbios(vmd);

	if ((bioscallscratchpage = km_alloc(PAGE_SIZE, &kv_any, &kp_dirty,
	    &kd_waitok)) == NULL)
		return;

	pmap_extract(pmap_kernel(), (vaddr_t)bioscallscratchpage, &pa);
	kvm86_map(vmd, pa, BIOSCALLSCRATCHPAGE_VMVA);
	bioscallvmd = vmd;
	bioscalltmpva = (vaddr_t)km_alloc(PAGE_SIZE, &kv_any, &kp_none,
	    &kd_waitok);
	mtx_init(&kvm86_mp_mutex, IPL_IPI);
}
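/*
 * Illustrative sketch, not part of kvm86_init(): the I/O permission bitmap
 * is located by the 16-bit offset stored in the high half of tss_ioopt,
 * measured from the start of the TSS (which is what the shift-by-16 above
 * computes).  One bit per port, and a clear bit permits access, so the
 * all-zero bitmap opens every port to the vm86 task.  The check the CPU
 * performs is roughly:
 */
static int
ioport_permitted(const uint8_t *iomap, uint16_t port)
{
	/* clear bit => access allowed, set bit => #GP */
	return (iomap[port / 8] & (1 << (port % 8))) == 0;
}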
/*
 * Allocate shadow GDT for a slave cpu.
 */
void
gdt_alloc_cpu(struct cpu_info *ci)
{
	int max_len = MAXGDTSIZ * sizeof(union descriptor);
	int min_len = MINGDTSIZ * sizeof(union descriptor);

	ci->ci_gdt = (union descriptor *)uvm_km_valloc(kernel_map, max_len);
	uvm_map_pageable(kernel_map, (vaddr_t)ci->ci_gdt,
	    (vaddr_t)ci->ci_gdt + min_len, FALSE, FALSE);
	bzero(ci->ci_gdt, min_len);
	bcopy(gdt, ci->ci_gdt, gdt_count * sizeof(union descriptor));
	setsegment(&ci->ci_gdt[GCPU_SEL].sd, ci, sizeof(struct cpu_info)-1,
	    SDT_MEMRWA, SEL_KPL, 0, 0);
}
/* XXX needs spinlocking if we ever mean to go finegrained. */
void
setgdt(int sel, void *base, size_t limit, int type, int dpl, int def32,
    int gran)
{
	struct segment_descriptor *sd = &gdt[sel].sd;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	if (type == SDT_SYS386TSS)
		panic("TSS descriptor not supported in GDT");
	setsegment(sd, base, limit, type, dpl, def32, gran);
	CPU_INFO_FOREACH(cii, ci)
		if (ci->ci_gdt != NULL && ci->ci_gdt != gdt)
			xen_update_descriptor(&ci->ci_gdt[sel],
			    (union descriptor *)sd);
}
/*
 * Initialize the GDT subsystem.  Called from autoconf().
 */
void
gdt_init(void)
{
	size_t max_len, min_len;
	union descriptor *old_gdt;
	struct vm_page *pg;
	vaddr_t va;
	struct cpu_info *ci = &cpu_info_primary;

	mutex_init(&gdt_lock_store, MUTEX_DEFAULT, IPL_NONE);

	max_len = MAXGDTSIZ * sizeof(gdt[0]);
	min_len = MINGDTSIZ * sizeof(gdt[0]);

	gdt_size[0] = MINGDTSIZ;
	gdt_count[0] = NGDT;
	gdt_next[0] = NGDT;
	gdt_free[0] = GNULL_SEL;
#ifdef XEN
	max_len = max_len * 2;
	gdt_size[1] = 0;
	gdt_count[1] = MAXGDTSIZ;
	gdt_next[1] = MAXGDTSIZ;
	gdt_free[1] = GNULL_SEL;
#endif

	old_gdt = gdt;
	gdt = (union descriptor *)uvm_km_alloc(kernel_map, max_len,
	    0, UVM_KMF_VAONLY);
	for (va = (vaddr_t)gdt; va < (vaddr_t)gdt + min_len;
	    va += PAGE_SIZE) {
		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
		if (pg == NULL) {
			panic("gdt_init: no pages");
		}
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ | VM_PROT_WRITE);
	}
	pmap_update(pmap_kernel());
	memcpy(gdt, old_gdt, NGDT * sizeof(gdt[0]));
	ci->ci_gdt = gdt;
	setsegment(&ci->ci_gdt[GCPU_SEL].sd, ci, 0xfffff,
	    SDT_MEMRWA, SEL_KPL, 1, 1);

	gdt_init_cpu(ci);
}
void
setgdt(int sel, const void *base, size_t limit, int type, int dpl,
    int def32, int gran)
{
	struct segment_descriptor *sd = &gdt[sel].sd;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

#ifdef XEN
	if (type == SDT_SYS386TSS) {
		/* printk("XXX TSS descriptor not supported in GDT\n"); */
		return;
	}
#endif
	setsegment(sd, base, limit, type, dpl, def32, gran);
	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci->ci_gdt != NULL)
			update_descriptor(&ci->ci_gdt[sel],
			    (union descriptor *)sd);
	}
}
/*
 * Allocate shadow GDT for a slave cpu.
 */
void
gdt_alloc_cpu(struct cpu_info *ci)
{
	struct vm_page *pg;
	vaddr_t va;

	ci->ci_gdt = (union descriptor *)uvm_km_valloc(kernel_map, MAXGDTSIZ);
	uvm_map_pageable(kernel_map, (vaddr_t)ci->ci_gdt,
	    (vaddr_t)ci->ci_gdt + MAXGDTSIZ, FALSE, FALSE);
	for (va = (vaddr_t)ci->ci_gdt; va < (vaddr_t)ci->ci_gdt + MAXGDTSIZ;
	    va += PAGE_SIZE) {
		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
		if (pg == NULL)
			panic("gdt_alloc_cpu: no pages");
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    PROT_READ | PROT_WRITE);
	}
	bzero(ci->ci_gdt, MAXGDTSIZ);
	bcopy(gdt, ci->ci_gdt, MAXGDTSIZ);
	setsegment(&ci->ci_gdt[GCPU_SEL].sd, ci, sizeof(struct cpu_info)-1,
	    SDT_MEMRWA, SEL_KPL, 0, 0);
}
int
i386_set_threadbase(struct proc *p, uint32_t base, int which)
{
	struct segment_descriptor *sdp;

	/*
	 * We can't place a limit on the segment used by the library
	 * thread register (%gs) because the ELF ABI for i386 places
	 * data structures both before and after the base pointer, using
	 * negative offsets for some bits (the static (load-time)
	 * TLS slots) and non-negative offsets for others (the TCB block,
	 * including the pointer to the TLS dynamic thread vector).
	 * Protection must be provided by the paging subsystem.
	 */
	sdp = &p->p_addr->u_pcb.pcb_threadsegs[which];
	setsegment(sdp, (void *)base, 0xfffff, SDT_MEMRWA, SEL_UPL, 1, 1);

	if (p == curproc) {
		curcpu()->ci_gdt[which == TSEG_FS ? GUFS_SEL : GUGS_SEL].sd
		    = *sdp;
	}
	return 0;
}
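/*
 * Illustrative sketch, not from the kernel source: once %gs is backed by
 * the descriptor installed above, userland reaches the TCB at %gs:0 (the
 * first word of the TCB is a self-pointer on i386) and static TLS at
 * negative offsets from the same base.  The helper name is made up.
 */
static inline void *
my_get_tcb(void)
{
	void *tcb;

	/* the TCB pointer lives at offset 0 from the thread register base */
	__asm volatile("movl %%gs:0, %0" : "=r" (tcb));
	return tcb;
}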