/*
 * this function sets up the machine-dependent environment for a server's
 * address space: a fresh page directory, plus mappings — at the same
 * virtual addresses as in the kernel — of the structures the processor
 * needs reachable from every address space (TSS, GDT, IDT and the
 * low-level handler code/data sections).
 */
t_status architecture_environment_server(i_as id)
{
  i_segment segment;
  i_region region;
  o_as* as;
  at_pd pd;
  o_region* r;
  o_segment* s;

  /*
   * 1) retrieve the address space object associated with the identifier.
   */

  if (as_get(id, &as) != STATUS_OK)
    MACHINE_ESCAPE("unable to retrieve the address space object");

  /*
   * 2) reserve a system segment of one page to hold the address space's
   *    page directory, and retrieve its object to learn its physical
   *    address.
   */

  if (segment_reserve(as->id, ___kaneton$pagesz, PERMISSION_READ | PERMISSION_WRITE, SEGMENT_OPTION_SYSTEM, &segment) != STATUS_OK)
    MACHINE_ESCAPE("unable to reserve a segment");

  if (segment_get(segment, &s) != STATUS_OK)
    MACHINE_ESCAPE("unable to retrieve the segment object");

  /*
   * 3) record the segment's physical address as this address space's
   *    page directory.
   */

  as->machine.pd = s->address;

  /*
   * 4) temporarily map the page directory, initialize it, then unmap it.
   */

  if (architecture_pd_map(as->machine.pd, &pd) != STATUS_OK)
    MACHINE_ESCAPE("unable to map the page directory");

  if (architecture_pd_build(pd) != STATUS_OK)
    MACHINE_ESCAPE("unable to build the page directory");

  if (architecture_pd_unmap(pd) != STATUS_OK)
    MACHINE_ESCAPE("unable to unmap the page directory");

  /*
   * 5) locate, in the kernel address space, the region holding the TSS
   *    and reserve a region over the same segment, at the same virtual
   *    address, in the server's address space.
   */

  if (region_locate(_kernel.as, _thread.machine.tss, &region) == FALSE)
    MACHINE_ESCAPE("unable to locate the region in which the TSS lies");

  if (region_get(_kernel.as, region, &r) != STATUS_OK)
    MACHINE_ESCAPE("unable to retrieve the region object");

  if (region_reserve(as->id, r->segment, 0x0, REGION_OPTION_FORCE | REGION_OPTION_NONE, _thread.machine.tss, r->size, &region) != STATUS_OK)
    MACHINE_ESCAPE("unable to reserve the region mapping the TSS");

  /*
   * 6) do the same for the GDT: map the page holding the GDT table at
   *    the same virtual address in the server's address space.
   */

  if (region_locate(_kernel.as, (t_vaddr)_segment.machine.gdt.table, &region) == FALSE)
    MACHINE_ESCAPE("unable to locate the region in which the GDT lies");

  if (region_get(_kernel.as, region, &r) != STATUS_OK)
    MACHINE_ESCAPE("unable to retrieve the region object");

  if (region_reserve(as->id, r->segment, 0x0, REGION_OPTION_FORCE | REGION_OPTION_NONE, (t_vaddr)_segment.machine.gdt.table, ___kaneton$pagesz, &region) != STATUS_OK)
    MACHINE_ESCAPE("unable to reserve the region mapping the GDT");

  /*
   * 7) likewise for the IDT table's page.
   */

  if (region_locate(_kernel.as, (t_vaddr)_event.machine.idt.table, &region) == FALSE)
    MACHINE_ESCAPE("unable to locate the region in which the IDT lies");

  if (region_get(_kernel.as, region, &r) != STATUS_OK)
    MACHINE_ESCAPE("unable to retrieve the region object");

  if (region_reserve(as->id, r->segment, 0x0, REGION_OPTION_FORCE | REGION_OPTION_NONE, (t_vaddr)_event.machine.idt.table, ___kaneton$pagesz, &region) != STATUS_OK)
    MACHINE_ESCAPE("unable to reserve the region mapping the IDT");

  /*
   * 8) map the handler code section, taken from the kernel code segment
   *    at the offset given by the linker symbols, at its linked virtual
   *    address.
   */

  if (region_reserve(as->id, _init->kcode, ARCHITECTURE_LINKER_SYMBOL(_handler_code_begin) - _init->kcode, REGION_OPTION_FORCE, ARCHITECTURE_LINKER_SYMBOL(_handler_code_begin), ARCHITECTURE_LINKER_SYMBOL(_handler_code_end) - ARCHITECTURE_LINKER_SYMBOL(_handler_code_begin), &region) != STATUS_OK)
    MACHINE_ESCAPE("unable to map the handler code section");

  /*
   * 9) map the handler data section the same way.
   *    NOTE(review): the segment used is _init->kcode, as for the code
   *    section — presumably the handler data is linked inside the kernel
   *    code image; confirm against the linker script.
   */

  if (region_reserve(as->id, _init->kcode, ARCHITECTURE_LINKER_SYMBOL(_handler_data_begin) - _init->kcode, REGION_OPTION_FORCE, ARCHITECTURE_LINKER_SYMBOL(_handler_data_begin), ARCHITECTURE_LINKER_SYMBOL(_handler_data_end) - ARCHITECTURE_LINKER_SYMBOL(_handler_data_begin), &region) != STATUS_OK)
    MACHINE_ESCAPE("unable to map the handler data section");

  MACHINE_LEAVE();
}
t_error ia32_region_reserve(i_as asid, i_segment segid, t_paddr offset, t_opts opts, t_vaddr address, t_vsize size, i_region* regid) { t_ia32_pde pde_start; t_ia32_pde pde_end; t_ia32_pte pte_start; t_ia32_pte pte_end; t_ia32_table table; t_ia32_directory pd; o_as* oas; t_ia32_page page; t_paddr base; t_paddr ram_paddr; t_vaddr pd_addr; t_paddr pt_addr; o_segment* oseg; o_region* oreg; int i = 0; int j = 0; int x = 0; int clear = 0; REGION_ENTER(region); if (as_get(asid, &oas) != ERROR_NONE) REGION_LEAVE(region, ERROR_UNKNOWN); if (region_get(asid, *regid, &oreg) != ERROR_NONE) REGION_LEAVE(region, ERROR_UNKNOWN); if (segment_get(segid, &oseg) != ERROR_NONE) REGION_LEAVE(region, ERROR_UNKNOWN); ram_paddr = oseg->address; pd = oas->machdep.pd; base = MK_BASE(pd); // Mapping PD into Kernel map_page(base, &pd_addr); /* printf("pd %x\n", pd_addr); */ /* printf("%x\n", oreg->address); */ pde_start = PDE_ENTRY(oreg->address); pde_end = PDE_ENTRY(oreg->address + size); pte_start = PTE_ENTRY(oreg->address); pte_end = PTE_ENTRY(oreg->address + size); for (i = pde_start; i <= pde_end; i++) { if (pd_get_table((t_ia32_directory *) &pd_addr, i, &table) == ERROR_UNKNOWN) { segment_reserve(asid, PAGESZ, PERM_READ | PERM_WRITE, &segid); if (segment_get(segid, &oseg) != ERROR_NONE) REGION_LEAVE(region, ERROR_UNKNOWN); table.rw = PDE_FLAG_RW; table.present = 1; table.user = (opts & REGION_OPT_USER) ? PT_USER : PT_PRIVILEGED; table.writeback = PT_CACHED; table.cached = 1; pt_build(oseg->address, &table, 0); pd_add_table((t_ia32_directory *) &pd_addr, i, table); clear = 1; } else clear = 0; map_page(table.entries, &pt_addr); table.entries = pt_addr; if (clear) memset((void*)pt_addr, '\0', PAGESZ); for (j = (i == pde_start ? pte_start : 0); j <= (i == pde_end ? pte_end : 1023); j++) { page.addr = x + (offset + ram_paddr); page.present = 1; page.rw = (oseg->perms & PERM_WRITE) ? PG_WRITABLE : PG_READONLY; page.present = 1; page.user = (opts & REGION_OPT_USER) ? 
PG_USER : PG_PRIVILEGED; page.cached = PG_CACHED; pt_add_page(&table, j, page); x += PAGESZ; } unmap_page(&pt_addr); } tlb_flush(); unmap_page(&pd_addr); REGION_LEAVE(region, ERROR_NONE); }
/*
 * this function tears down the page table entries covering the region
 * identified by 'regid' in the given address space.
 *
 * it walks every page directory entry spanned by the region and deletes
 * each present page table entry, then flushes the TLB.
 *
 * fixes over the previous revision:
 *  - REGION_ENTER() was invoked twice (once before the declarations,
 *    once after); a single invocation remains;
 *  - a page directory slot without a page table is now skipped; the old
 *    code fell through into the inner loop with an uninitialized
 *    'table' and unmapped a page that had never been mapped.
 *
 * NOTE(review): an error escape from the inner loop leaves the page
 * directory/table mapped — pre-existing behavior, kept as-is; confirm
 * whether unmapping on the error path is required.
 */
t_error ia32_region_release(i_as asid, i_region regid)
{
  t_ia32_pde pde_start;
  t_ia32_pde pde_end;
  t_ia32_pte pte_start;
  t_ia32_pte pte_end;
  t_ia32_table table;
  t_ia32_directory pd;
  o_as* oas;
  t_vsize size;
  t_paddr base;
  t_vaddr pd_addr;
  t_paddr pt_addr;
  o_region* oreg;
  int i = 0;
  int j = 0;
  t_ia32_pte* t;

  REGION_ENTER(region);

  if (as_get(asid, &oas) != ERROR_NONE)
    REGION_LEAVE(region, ERROR_UNKNOWN);

  if (region_get(asid, regid, &oreg) != ERROR_NONE)
    REGION_LEAVE(region, ERROR_UNKNOWN);

  size = oreg->size;

  pd = oas->machdep.pd;
  base = MK_BASE(pd);

  /* temporarily map the page directory into the kernel address space. */
  map_page(base, &pd_addr);

  pde_start = PDE_ENTRY(oreg->address);
  pde_end = PDE_ENTRY(oreg->address + size);
  pte_start = PTE_ENTRY(oreg->address);
  pte_end = PTE_ENTRY(oreg->address + size);

  for (i = pde_start; i <= pde_end; i++)
    {
      /* nothing to release if this slot has no page table. */
      if (pd_get_table((t_ia32_directory *) &pd_addr, i, &table) == ERROR_UNKNOWN)
        continue;

      /* map the page table so its entries can be inspected and deleted. */
      map_page(table.entries, &pt_addr);
      table.entries = pt_addr;

      for (j = (i == pde_start ? pte_start : 0); j <= (i == pde_end ? pte_end : 1023); j++)
        {
          t = (t_ia32_pte*)table.entries;

          if (t[j] != 0)
            if (pt_delete_page(&table, j) != ERROR_NONE)
              REGION_LEAVE(region, ERROR_UNKNOWN);
        }

      unmap_page(&pt_addr);
    }

  tlb_flush();

  unmap_page(&pd_addr);

  REGION_LEAVE(region, ERROR_NONE);
}
t_status architecture_handler_setup(void) { t_uint16 selector; i_segment segment; i_region region; as_idt idt; o_region* o; t_uint32 i; /* * 1) */ if (segment_reserve(_kernel.as, ___kaneton$pagesz, PERMISSION_READ | PERMISSION_WRITE, SEGMENT_OPTION_SYSTEM, &segment) != STATUS_OK) MACHINE_ESCAPE("unable to reserve a segment"); if (region_reserve(_kernel.as, segment, 0x0, REGION_OPTION_NONE, 0x0, ___kaneton$pagesz, ®ion) != STATUS_OK) MACHINE_ESCAPE("unable to reserve a region for the segment"); if (region_get(_kernel.as, region, &o) != STATUS_OK) MACHINE_ESCAPE("unable to retrieve the region object"); /* * 2) */ if (architecture_idt_build(o->address, ARCHITECTURE_IDT_SIZE, &idt) != STATUS_OK) MACHINE_ESCAPE("unable to build the IDT at the given address"); if (architecture_idt_import(&idt) != STATUS_OK) MACHINE_ESCAPE("unable to import the built IDT"); /* * 3) */ if (architecture_gdt_selector(ARCHITECTURE_GDT_INDEX_KERNEL_CODE, ARCHITECTURE_PRIVILEGE_KERNEL, &selector) != STATUS_OK) MACHINE_ESCAPE("unable to build the kernel code segment selector"); /* * 4) */ for (i = ARCHITECTURE_IDT_EXCEPTION_BASE; i < ARCHITECTURE_IDT_EXCEPTION_BASE + ARCHITECTURE_IDT_EXCEPTION_SIZE; i++) { if (architecture_idt_insert(i, (t_vaddr)_architecture_handler_shells[i], selector, ARCHITECTURE_IDTE_DPL_SET( ARCHITECTURE_PRIVILEGE_RING0) | ARCHITECTURE_IDTE_32BIT | ARCHITECTURE_IDTE_INTERRUPT) != STATUS_OK) MACHINE_ESCAPE("unable to register the exception handler '%u'", i); } /* * 5) */ for (i = ARCHITECTURE_IDT_IRQ_BASE; i < ARCHITECTURE_IDT_IRQ_BASE + ARCHITECTURE_IDT_IRQ_SIZE; i++) { if (architecture_idt_insert(i, (t_vaddr)_architecture_handler_shells[i], selector, ARCHITECTURE_IDTE_DPL_SET( ARCHITECTURE_PRIVILEGE_RING0) | ARCHITECTURE_IDTE_32BIT | ARCHITECTURE_IDTE_INTERRUPT) != STATUS_OK) MACHINE_ESCAPE("unable to register the exception handler '%u'", i); } /* * 6) */ for (i = ARCHITECTURE_IDT_SYSCALL_BASE; i < ARCHITECTURE_IDT_SYSCALL_BASE + ARCHITECTURE_IDT_SYSCALL_SIZE; i++) { if 
(architecture_idt_insert(i, (t_vaddr)_architecture_handler_shells[i], selector, ARCHITECTURE_IDTE_DPL_SET( ARCHITECTURE_PRIVILEGE_RING3) | ARCHITECTURE_IDTE_32BIT | ARCHITECTURE_IDTE_INTERRUPT) != STATUS_OK) MACHINE_ESCAPE("unable to register the exception handler '%u'", i); } MACHINE_LEAVE(); }