/** Initialize protected mode tables (GDT, IDT, TSS) for the current CPU. */
void pm_init(void)
{
	descriptor_t *gdt_p = (descriptor_t *) gdtr.base;
	ptr_16_32_t idtr;

	/*
	 * Update addresses in GDT and IDT to their virtual counterparts.
	 */
	idtr.limit = sizeof(idt);
	idtr.base = (uintptr_t) idt;
	gdtr_load(&gdtr);
	idtr_load(&idtr);

	/*
	 * Each CPU has its private GDT and TSS.
	 * All CPUs share one IDT.
	 */
	if (config.cpu_active == 1) {
		idt_init();
		/*
		 * NOTE: The bootstrap CPU has a statically allocated TSS,
		 * because the heap has not been initialized yet.
		 */
		tss_p = &tss0;
	} else {
		tss_p = (tss_t *) malloc(sizeof(tss_t), FRAME_ATOMIC);
		if (!tss_p)
			panic("Cannot allocate TSS.");
	}

	tss_initialize(tss_p);

	gdt_p[TSS_DES].access = AR_PRESENT | AR_TSS | DPL_KERNEL;
	gdt_p[TSS_DES].special = 1;
	gdt_p[TSS_DES].granularity = 0;

	gdt_setbase(&gdt_p[TSS_DES], (uintptr_t) tss_p);
	gdt_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE - 1);

	/*
	 * As of this moment, the current CPU has its own GDT pointing
	 * to its own TSS. We just need to load the TR register.
	 */
	tr_load(GDT_SELECTOR(TSS_DES));

	/* Disable I/O on nonprivileged levels and clear the NT flag. */
	write_eflags(read_eflags() & ~(EFLAGS_IOPL | EFLAGS_NT));

	/* Disable alignment checking. */
	write_cr0(read_cr0() & ~CR0_AM);
}
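/*
 * Illustrative sketch, not part of the original source: one plausible
 * implementation of the gdt_setbase()/gdt_setlimit() helpers used above.
 * The split bitfield names (base_0_15, base_16_23, base_24_31, limit_0_15,
 * limit_16_19) are assumptions about descriptor_t, inferred from the direct
 * accesses to .access, .special and .granularity in pm_init() and from the
 * standard ia32 segment descriptor layout; the actual definitions live
 * elsewhere in the tree.
 */
static inline void gdt_setbase_sketch(descriptor_t *d, uintptr_t base)
{
	/* Scatter the 32-bit base across the three descriptor fields. */
	d->base_0_15 = base & 0xffff;
	d->base_16_23 = (base >> 16) & 0xff;
	d->base_24_31 = (base >> 24) & 0xff;
}

static inline void gdt_setlimit_sketch(descriptor_t *d, uint32_t limit)
{
	/* Scatter the 20-bit limit across the two descriptor fields. */
	d->limit_0_15 = limit & 0xffff;
	d->limit_16_19 = (limit >> 16) & 0xf;
}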
/** Install the I/O Permission bitmap.
 *
 * The current task's I/O permission bitmap, if any, is installed
 * in the current CPU's TSS.
 *
 * Interrupts must be disabled prior to this call.
 *
 */
void io_perm_bitmap_install(void)
{
	/* First, copy the I/O Permission Bitmap. */
	irq_spinlock_lock(&TASK->lock, false);

	size_t ver = TASK->arch.iomapver;
	size_t bits = TASK->arch.iomap.bits;

	if (bits) {
		ASSERT(TASK->arch.iomap.map);

		bitmap_t iomap;
		bitmap_initialize(&iomap, CPU->arch.tss->iomap,
		    TSS_IOMAP_SIZE * 8);
		bitmap_copy(&iomap, &TASK->arch.iomap, bits);

		/*
		 * Set the trailing bits in the last byte of the map to
		 * disable I/O access.
		 */
		bitmap_set_range(&iomap, bits, ALIGN_UP(bits, 8) - bits);

		/*
		 * It is safe to set the trailing eight bits because of the
		 * extra convenience byte in TSS_IOMAP_SIZE.
		 */
		bitmap_set_range(&iomap, ALIGN_UP(bits, 8), 8);
	}

	irq_spinlock_unlock(&TASK->lock, false);

	/*
	 * Second, adjust the TSS segment limit.
	 * Take the extra ending byte with all bits set into account.
	 */
	ptr_16_32_t cpugdtr;
	gdtr_store(&cpugdtr);

	descriptor_t *gdt_p = (descriptor_t *) cpugdtr.base;
	gdt_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE + BITS2BYTES(bits));
	gdtr_load(&cpugdtr);

	/*
	 * Before we reload TR with the new TSS limit, the current TSS
	 * descriptor type must be changed back to describe an inactive
	 * (non-busy) TSS; ltr faults on a busy TSS descriptor.
	 */
	gdt_p[TSS_DES].access = AR_PRESENT | AR_TSS | DPL_KERNEL;
	tr_load(GDT_SELECTOR(TSS_DES));

	/*
	 * Update the generation count so that faults caused by
	 * early accesses can be serviced.
	 */
	CPU->arch.iomapver_copy = ver;
}
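/*
 * Usage sketch, an assumption about the call site rather than code from
 * this file: io_perm_bitmap_install() is intended to run with interrupts
 * disabled just before a task starts executing on this CPU, e.g. from an
 * architecture-specific scheduler hook along the lines of the hypothetical
 * one below. The generation count (iomapver/iomapver_copy) then lets a
 * general protection fault handler detect and lazily service accesses made
 * through a stale bitmap.
 */
void before_task_runs_arch_sketch(void)
{
	/* Interrupts are assumed to be disabled by the scheduler here. */
	io_perm_bitmap_install();
}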