/** Install I/O Permission bitmap.
 *
 * The current task's I/O permission bitmap, if any, is installed
 * in the current CPU's TSS.
 *
 * Interrupts must be disabled prior to this call.
 *
 */
void io_perm_bitmap_install(void)
{
    /* First, copy the I/O Permission Bitmap. */
    irq_spinlock_lock(&TASK->lock, false);

    size_t ver = TASK->arch.iomapver;
    size_t elements = TASK->arch.iomap.elements;

    if (elements > 0) {
        ASSERT(TASK->arch.iomap.bits);

        bitmap_t iomap;
        bitmap_initialize(&iomap, TSS_IOMAP_SIZE * 8,
            CPU->arch.tss->iomap);
        bitmap_copy(&iomap, &TASK->arch.iomap, elements);

        /*
         * Set the trailing bits in the last byte of the map to disable
         * I/O access.
         */
        bitmap_set_range(&iomap, elements,
            ALIGN_UP(elements, 8) - elements);

        /*
         * It is safe to set the trailing eight bits because of the
         * extra convenience byte in TSS_IOMAP_SIZE.
         */
        bitmap_set_range(&iomap, ALIGN_UP(elements, 8), 8);
    }

    irq_spinlock_unlock(&TASK->lock, false);

    /*
     * Second, adjust the TSS segment limit.
     * Take the extra ending byte with all bits set into account.
     */
    ptr_16_64_t cpugdtr;
    gdtr_store(&cpugdtr);

    descriptor_t *gdt_p = (descriptor_t *) cpugdtr.base;
    size_t size = bitmap_size(elements);
    gdt_tss_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE + size);
    gdtr_load(&cpugdtr);

    /*
     * Before we load the new TSS limit, the current TSS descriptor
     * type must be changed to describe an inactive TSS.
     */
    tss_descriptor_t *tss_desc = (tss_descriptor_t *) &gdt_p[TSS_DES];
    tss_desc->type = AR_TSS;
    tr_load(GDT_SELECTOR(TSS_DES));

    /*
     * Update the generation count so that faults caused by
     * early accesses can be serviced.
     */
    CPU->arch.iomapver_copy = ver;
}
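/*
 * For illustration only, a minimal sketch of the check the CPU performs
 * in hardware against the bitmap installed above: when code running with
 * CPL > IOPL executes an I/O instruction, the processor consults one bit
 * per port in the TSS I/O permission bitmap. A clear bit grants access;
 * a set bit, or a port beyond the TSS limit, raises a general protection
 * fault. The function name and parameters below are hypothetical and not
 * part of the kernel.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool io_port_accessible(const uint8_t *iomap, size_t iomap_bytes,
    uint16_t port)
{
    size_t byte = port / 8;    /* each bitmap byte covers eight ports */
    unsigned bit = port % 8;

    /* Ports past the end of the bitmap are always denied. */
    if (byte >= iomap_bytes)
        return false;

    return (iomap[byte] & (1u << bit)) == 0;
}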
/** Initialize segmentation - code/data/idt tables
 *
 */
void pm_init(void)
{
    descriptor_t *gdt_p = (descriptor_t *) gdtr.base;
    ptr_16_32_t idtr;

    /*
     * Update addresses in GDT and IDT to their virtual counterparts.
     */
    idtr.limit = sizeof(idt);
    idtr.base = (uintptr_t) idt;
    gdtr_load(&gdtr);
    idtr_load(&idtr);

    /*
     * Each CPU has its private GDT and TSS.
     * All CPUs share one IDT.
     */
    if (config.cpu_active == 1) {
        idt_init();
        /*
         * NOTE: The bootstrap CPU has a statically allocated TSS,
         * because the heap hasn't been initialized so far.
         */
        tss_p = &tss0;
    } else {
        tss_p = (tss_t *) malloc(sizeof(tss_t), FRAME_ATOMIC);
        if (!tss_p)
            panic("Cannot allocate TSS.");
    }

    tss_initialize(tss_p);

    gdt_p[TSS_DES].access = AR_PRESENT | AR_TSS | DPL_KERNEL;
    gdt_p[TSS_DES].special = 1;
    gdt_p[TSS_DES].granularity = 0;

    gdt_setbase(&gdt_p[TSS_DES], (uintptr_t) tss_p);
    gdt_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE - 1);

    /*
     * As of this moment, the current CPU has its own GDT pointing
     * to its own TSS. We just need to load the TR register.
     */
    tr_load(GDT_SELECTOR(TSS_DES));

    /* Disable I/O on nonprivileged levels and clear the NT flag. */
    write_eflags(read_eflags() & ~(EFLAGS_IOPL | EFLAGS_NT));

    /* Disable alignment check. */
    write_cr0(read_cr0() & ~CR0_AM);
}
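/*
 * The masks used above correspond to architectural bit positions in
 * EFLAGS and CR0; the values are fixed by the IA-32 architecture, though
 * the kernel's own macro definitions live elsewhere, so the definitions
 * below are a sketch rather than the kernel's headers. With IOPL forced
 * to zero, I/O instructions executed at CPL 3 are permitted only when
 * the TSS I/O permission bitmap - see io_perm_bitmap_install() above -
 * grants the port.
 */
#define EFLAGS_IOPL  (0x3 << 12)  /* EFLAGS bits 12-13: I/O privilege level */
#define EFLAGS_NT    (0x1 << 14)  /* EFLAGS bit 14: nested task flag */
#define CR0_AM       (0x1 << 18)  /* CR0 bit 18: alignment mask */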
/** Initialize segmentation - code/data/idt tables
 *
 */
void pm_init(void)
{
    descriptor_t *gdt_p = (descriptor_t *) gdtr.base;
    tss_descriptor_t *tss_desc;

    /*
     * Each CPU has its private GDT and TSS.
     * All CPUs share one IDT.
     */
    if (config.cpu_active == 1) {
        idt_init();
        /*
         * NOTE: The bootstrap CPU has a statically allocated TSS,
         * because the heap hasn't been initialized so far.
         */
        tss_p = &tss;
    } else {
        /*
         * We are going to use malloc, which may return a pointer
         * that is not boot-mapped, so initialize the CR3 register
         * ahead of page_init.
         */
        write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);

        tss_p = (tss_t *) malloc(sizeof(tss_t), FRAME_ATOMIC);
        if (!tss_p)
            panic("Cannot allocate TSS.");
    }

    tss_initialize(tss_p);

    tss_desc = (tss_descriptor_t *) &gdt_p[TSS_DES];
    tss_desc->present = 1;
    tss_desc->type = AR_TSS;
    tss_desc->dpl = PL_KERNEL;

    gdt_tss_setbase(&gdt_p[TSS_DES], (uintptr_t) tss_p);
    gdt_tss_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE - 1);

    gdtr_load(&gdtr);
    idtr_load(&idtr);

    /*
     * As of this moment, the current CPU has its own GDT pointing
     * to its own TSS. We just need to load the TR register.
     */
    tr_load(GDT_SELECTOR(TSS_DES));
}
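/*
 * For reference, a sketch of the 16-byte long-mode TSS descriptor whose
 * present/type/dpl fields are filled in above. The layout follows the
 * AMD64 system-segment descriptor format; the field names here are
 * illustrative and need not match the kernel's tss_descriptor_t.
 */
#include <stdint.h>

typedef struct {
    unsigned limit_0_15 : 16;   /* segment limit, low bits */
    unsigned base_0_15 : 16;    /* TSS base address, low bits */
    unsigned base_16_23 : 8;
    unsigned type : 4;          /* 0x9 = available 64-bit TSS */
    unsigned : 1;               /* S bit is zero for system segments */
    unsigned dpl : 2;           /* descriptor privilege level */
    unsigned present : 1;
    unsigned limit_16_19 : 4;
    unsigned available : 1;
    unsigned : 2;
    unsigned granularity : 1;
    unsigned base_24_31 : 8;
    uint32_t base_32_63;        /* upper half of the 64-bit base */
    uint32_t reserved;
} __attribute__((packed)) tss_descriptor_sketch_t;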