/*
 * this function displays the page directory address attached to the
 * given address space.
 */
t_status		glue_as_show(i_as		id,
				     mt_margin		margin)
{
  o_as*			object;

  /*
   * retrieve the address space object from its identifier.
   */

  if (as_get(id, &object) != STATUS_OK)
    MACHINE_ESCAPE("unable to retrieve the address space object");

  /*
   * report the machine-specific page directory through the console module.
   */

  module_call(console, message,
	      '#',
	      MODULE_CONSOLE_MARGIN_FORMAT " machine: pd(0x%x)\n",
	      MODULE_CONSOLE_MARGIN_VALUE(margin),
	      object->machine.pd);

  MACHINE_LEAVE();
}
t_error ia32_thread_init(void) { THREAD_ENTER(thread); o_as* as; t_vaddr int_stack; /* * 1) */ if (as_get(kasid, &as) != ERROR_NONE) THREAD_LEAVE(thread, ERROR_UNKNOWN); /* * 2) */ if (map_reserve(kasid, MAP_OPT_PRIVILEGED, 3 * PAGESZ, PERM_READ | PERM_WRITE, (t_vaddr*)&thread->machdep.tss) != ERROR_NONE) THREAD_LEAVE(thread, ERROR_UNKNOWN); memset(thread->machdep.tss, 0x0, sizeof(t_ia32_tss)); /* * 3) */ if (map_reserve(kasid, MAP_OPT_PRIVILEGED, 2 * PAGESZ, PERM_READ | PERM_WRITE, &int_stack) != ERROR_NONE) THREAD_LEAVE(thread, ERROR_UNKNOWN); /* * 4) */ if (tss_load(thread->machdep.tss, SEGSEL(PMODE_GDT_CORE_DS, PRIV_RING0), int_stack + 2 * PAGESZ - 16, 0x68) != ERROR_NONE) THREAD_LEAVE(thread, ERROR_UNKNOWN); gl_stack_int = int_stack + 2 * PAGESZ - 16; /* * 5) */ if (tss_init(thread->machdep.tss) != ERROR_NONE) THREAD_LEAVE(thread, ERROR_UNKNOWN); THREAD_LEAVE(thread, ERROR_NONE); }
/*
 * this function handles the as_attribute_task system call: it fills
 * the reply with the task identifier attached to the requested
 * address space, or with STATUS_ERROR if the lookup fails.
 */
t_status		interface_as_attribute_task(o_syscall*	message)
{
  o_as*			as;

  if (as_get(message->u.request.u.as_attribute_task.arg1,
	     &as) == STATUS_OK)
    {
      message->u.reply.error = STATUS_OK;
      message->u.reply.u.as_attribute_task.result1 = as->task;
    }
  else
    {
      message->u.reply.error = STATUS_ERROR;
    }

  return (STATUS_OK);
}
/*
 * this function fills the machine-dependent part of a freshly
 * reserved thread, recording the owning address space's page
 * directory as the thread's CR3 value.
 *
 * NOTE(review): the 'tsk' parameter is never used — the task is
 * looked up through oth->taskid instead; confirm whether 'tsk' can
 * be dropped from the interface.
 *
 * NOTE(review): the error paths mix THREAD_LEAVE, TASK_LEAVE and
 * AS_LEAVE although no matching THREAD_ENTER/TASK_ENTER/AS_ENTER is
 * performed in this function — verify these macros are balanced.
 */
t_error ia32_thread_reserve(i_task tsk, i_thread* th)
{
  o_thread *oth;
  ao_thread_named *src;
  o_task* otsk;
  o_as* oas;

  /* retrieve the thread object from its identifier. */
  if (thread_get(*th, &oth) != ERROR_NONE)
    THREAD_LEAVE(thread, ERROR_UNKNOWN);

  /* the machine-dependent record to fill in. */
  src = &(oth->machdep.named);

  /* retrieve the task owning the thread. */
  if (task_get(oth->taskid, &otsk) != ERROR_NONE)
    TASK_LEAVE(task, ERROR_UNKNOWN);

  /* retrieve the task's address space. */
  if (as_get(otsk->asid, &oas) != ERROR_NONE)
    AS_LEAVE(as, ERROR_UNKNOWN);

  /* record the address space's page directory as the thread's CR3. */
  src->cr3 = (t_uint32) oas->machdep.pd;

  return ERROR_NONE;
}
/*
 * this function unmaps a single page of the kernel address space:
 * the page table entry covering *vaddr is deleted and the TLB line
 * invalidated.
 *
 * returns ERROR_NONE on success (including when no page table covers
 * the address, in which case there is nothing to unmap).
 */
t_error			unmap_page(t_vaddr		*vaddr)
{
  o_as*			kas;
  t_ia32_pde		pde;
  t_ia32_pte		pte;	/* fixed: was mistyped as t_ia32_pde */
  t_ia32_directory	dir;
  t_ia32_table		table;

  REGION_ENTER(region);

  /* retrieve the kernel address space. */
  if (as_get(kasid, &kas) != ERROR_NONE)
    REGION_LEAVE(region, ERROR_UNKNOWN);

  /*
   * page directory: locate the table covering *vaddr.
   * NOTE(review): success is tested with '!= ERROR_UNKNOWN', matching
   * the convention used elsewhere in this file.
   */
  dir = kas->machdep.pd;
  pde = PDE_ENTRY(*vaddr);

  if (pd_get_table(&dir, pde, &table) != ERROR_UNKNOWN)
    {
      /* access the table through the page-directory mirror. */
      table.entries = ENTRY_ADDR(PD_MIRROR, pde);
    }
  else
    REGION_LEAVE(region, ERROR_NONE);

  /* page table: delete the entry and invalidate the TLB line. */
  pte = PTE_ENTRY(*vaddr);

  if (pt_delete_page(&table, pte) != ERROR_NONE)
    REGION_LEAVE(region, ERROR_UNKNOWN);

  tlb_invalidate(*vaddr);

  REGION_LEAVE(region, ERROR_NONE);
}
/*
 * this function prepares the hardware environment of a server
 * address space: it reserves and builds a page directory, then maps
 * into the address space the structures the processor relies on —
 * the TSS, the GDT, the IDT and the handler code/data sections — at
 * the same virtual addresses as in the kernel address space.
 */
t_status		architecture_environment_server(i_as	id)
{
  o_as*			as;
  o_segment*		seg;
  o_region*		reg;
  at_pd			pd;
  i_segment		segment;
  i_region		region;

  /*
   * retrieve the address space object.
   */

  if (as_get(id, &as) != STATUS_OK)
    MACHINE_ESCAPE("unable to retrieve the address space object");

  /*
   * reserve a system segment to hold the page directory and record
   * its physical address in the address space object.
   */

  if (segment_reserve(as->id,
		      ___kaneton$pagesz,
		      PERMISSION_READ | PERMISSION_WRITE,
		      SEGMENT_OPTION_SYSTEM,
		      &segment) != STATUS_OK)
    MACHINE_ESCAPE("unable to reserve a segment");

  if (segment_get(segment, &seg) != STATUS_OK)
    MACHINE_ESCAPE("unable to retrieve the segment object");

  as->machine.pd = seg->address;

  /*
   * map, build and unmap the page directory.
   */

  if (architecture_pd_map(as->machine.pd, &pd) != STATUS_OK)
    MACHINE_ESCAPE("unable to map the page directory");

  if (architecture_pd_build(pd) != STATUS_OK)
    MACHINE_ESCAPE("unable to build the page directory");

  if (architecture_pd_unmap(pd) != STATUS_OK)
    MACHINE_ESCAPE("unable to unmap the page directory");

  /*
   * map the TSS at its kernel virtual address.
   */

  if (region_locate(_kernel.as, _thread.machine.tss, &region) == FALSE)
    MACHINE_ESCAPE("unable to locate the region in which the TSS lies");

  if (region_get(_kernel.as, region, &reg) != STATUS_OK)
    MACHINE_ESCAPE("unable to retrieve the region object");

  if (region_reserve(as->id,
		     reg->segment,
		     0x0,
		     REGION_OPTION_FORCE | REGION_OPTION_NONE,
		     _thread.machine.tss,
		     reg->size,
		     &region) != STATUS_OK)
    MACHINE_ESCAPE("unable to reserve the region mapping the TSS");

  /*
   * likewise for the GDT.
   */

  if (region_locate(_kernel.as,
		    (t_vaddr)_segment.machine.gdt.table,
		    &region) == FALSE)
    MACHINE_ESCAPE("unable to locate the region in which the GDT lies");

  if (region_get(_kernel.as, region, &reg) != STATUS_OK)
    MACHINE_ESCAPE("unable to retrieve the region object");

  if (region_reserve(as->id,
		     reg->segment,
		     0x0,
		     REGION_OPTION_FORCE | REGION_OPTION_NONE,
		     (t_vaddr)_segment.machine.gdt.table,
		     ___kaneton$pagesz,
		     &region) != STATUS_OK)
    MACHINE_ESCAPE("unable to reserve the region mapping the GDT");

  /*
   * likewise for the IDT.
   */

  if (region_locate(_kernel.as,
		    (t_vaddr)_event.machine.idt.table,
		    &region) == FALSE)
    MACHINE_ESCAPE("unable to locate the region in which the IDT lies");

  if (region_get(_kernel.as, region, &reg) != STATUS_OK)
    MACHINE_ESCAPE("unable to retrieve the region object");

  if (region_reserve(as->id,
		     reg->segment,
		     0x0,
		     REGION_OPTION_FORCE | REGION_OPTION_NONE,
		     (t_vaddr)_event.machine.idt.table,
		     ___kaneton$pagesz,
		     &region) != STATUS_OK)
    MACHINE_ESCAPE("unable to reserve the region mapping the IDT");

  /*
   * finally, map the handler code and data sections at their kernel
   * virtual addresses.
   */

  if (region_reserve(as->id,
		     _init->kcode,
		     ARCHITECTURE_LINKER_SYMBOL(_handler_code_begin) -
		     _init->kcode,
		     REGION_OPTION_FORCE,
		     ARCHITECTURE_LINKER_SYMBOL(_handler_code_begin),
		     ARCHITECTURE_LINKER_SYMBOL(_handler_code_end) -
		     ARCHITECTURE_LINKER_SYMBOL(_handler_code_begin),
		     &region) != STATUS_OK)
    MACHINE_ESCAPE("unable to map the handler code section");

  if (region_reserve(as->id,
		     _init->kcode,
		     ARCHITECTURE_LINKER_SYMBOL(_handler_data_begin) -
		     _init->kcode,
		     REGION_OPTION_FORCE,
		     ARCHITECTURE_LINKER_SYMBOL(_handler_data_begin),
		     ARCHITECTURE_LINKER_SYMBOL(_handler_data_end) -
		     ARCHITECTURE_LINKER_SYMBOL(_handler_data_begin),
		     &region) != STATUS_OK)
    MACHINE_ESCAPE("unable to map the handler data section");

  MACHINE_LEAVE();
}
/*
 * this function sets up the kernel address space by taking over the
 * page directory installed by the boot loader:
 *
 *  1) retrieve the address space object.
 *  2) record the boot loader's page directory as the kernel's.
 *  3) build the CR3/PDBR value referencing this directory.
 *  4) the directory is identity-mapped at this stage, so its physical
 *     address is usable directly as a pointer.
 *  5) import the directory into the paging manager.
 *  6) install the mirroring entry: directory slot
 *     ARCHITECTURE_PD_MIRROR points back at the directory itself so
 *     page tables remain accessible through virtual addresses.
 *  7) inject a region covering the mirrored area so the region
 *     manager never hands it out.
 *  8) walk the directory and delete every present page table entry
 *     lying outside the boot-loader-provided init regions.
 *  9) flush the TLB so the deletions take effect.
 * 10) save the PDBR for later use.
 */
t_status architecture_environment_kernel(i_as id)
{
  /* page-directory walk bounds and cursor. */
  struct
  {
    at_pdei start;
    at_pdei end;
    at_pdei index;
  } pde;

  /* page-table walk bounds and cursor. */
  struct
  {
    at_ptei start;
    at_ptei end;
    at_ptei index;
  } pte;

  i_region useless;
  at_cr3 pdbr;
  o_as* as;
  at_pd pd;
  at_pt pt;
  o_region* r;
  t_uint32 i;

  /*
   * 1)
   */

  if (as_get(id, &as) != STATUS_OK)
    MACHINE_ESCAPE("unable to retrieve the address space object");

  /*
   * 2)
   */

  as->machine.pd = _init->machine.pd;

  /*
   * 3)
   */

  if (architecture_paging_pdbr(as->machine.pd,
                               ARCHITECTURE_REGISTER_CR3_PCE |
                               ARCHITECTURE_REGISTER_CR3_PWB,
                               &pdbr) != STATUS_OK)
    MACHINE_ESCAPE("unable to build the CR3 register's content");

  /*
   * 4)
   */

  pd = (at_pd)as->machine.pd;

  /*
   * 5)
   */

  if (architecture_paging_import(pd, pdbr) != STATUS_OK)
    MACHINE_ESCAPE("unable to import the kernel page directory");

  /*
   * 6)
   */

  if (architecture_pd_insert(pd,
                             ARCHITECTURE_PD_MIRROR,
                             as->machine.pd,
                             ARCHITECTURE_PDE_PRESENT |
                             ARCHITECTURE_PDE_RW |
                             ARCHITECTURE_PDE_SUPERVISOR |
                             ARCHITECTURE_PDE_PWB |
                             ARCHITECTURE_PDE_PCE) != STATUS_OK)
    MACHINE_ESCAPE("unable to insert the mirroring directory entry");

  /*
   * 7)
   */

  if ((r = malloc(sizeof (o_region))) == NULL)
    MACHINE_ESCAPE("unable to allocate memory for the region object");

  r->address = ARCHITECTURE_PAGING_ADDRESS(ARCHITECTURE_PD_MIRROR, 0);
  r->segment = ID_UNUSED;
  r->offset = 0x0;
  r->size = ARCHITECTURE_PT_SIZE * ___kaneton$pagesz;
  r->options = REGION_OPTION_NONE;

  if (region_inject(as->id, r, &useless) != STATUS_OK)
    MACHINE_ESCAPE("unable to inject the mirroring region");

  /*
   * 8) the loop iterates over the gaps between init regions; the
   *    extra iteration (i == nregions) covers the area following the
   *    last region.
   */

  pde.start = 0;
  pte.start = 0;

  for (i = 0; i < (_init->nregions + 1); i++)
    {
      /*
       * a) compute the upper bound of the current gap: either the
       *    start of the next init region or the end of the directory.
       *    note the pte bound is exclusive (the inner loop uses '<'),
       *    hence no '- 1' as for the inclusive pde bound.
       */

      if (i != _init->nregions)
        {
          pde.end = ARCHITECTURE_PD_INDEX(_init->regions[i].address);
          pte.end = ARCHITECTURE_PT_INDEX(_init->regions[i].address);
        }
      else
        {
          pde.end = ARCHITECTURE_PD_SIZE - 1;
          pte.end = ARCHITECTURE_PT_SIZE;
        }

      /*
       * b) walk the directory entries covering the gap.
       */

      for (pde.index = pde.start; pde.index <= pde.end; pde.index++)
        {
          /*
           * i) skip the mirroring entry and absent page tables.
           */

          if ((pde.index != ARCHITECTURE_PD_MIRROR) &&
              (pd[pde.index] & ARCHITECTURE_PDE_PRESENT))
            {
              /*
               * #1) retrieve the page table's address.
               */

              pt = (at_pt)ARCHITECTURE_PDE_ADDRESS(pd[pde.index]);

              /*
               * #2) walk the table entries within the gap.
               */

              for (pte.index = (pde.index == pde.start ? pte.start : 0);
                   pte.index < (pde.index == pde.end ?
                                pte.end : ARCHITECTURE_PT_SIZE);
                   pte.index++)
                {
                  /*
                   * #a) delete the entry if present.
                   */

                  if (pt[pte.index] & ARCHITECTURE_PTE_PRESENT)
                    {
                      if (architecture_pt_delete(pt,
                                                 pte.index) != STATUS_OK)
                        MACHINE_ESCAPE("unable to delete the page "
                                       "table entry");
                    }
                }
            }
        }

      /*
       * c) the next gap starts right after the current init region.
       */

      if (i != _init->nregions)
        {
          pde.start = ARCHITECTURE_PD_INDEX(_init->regions[i].address +
                                            _init->regions[i].size);
          pte.start = ARCHITECTURE_PT_INDEX(_init->regions[i].address +
                                            _init->regions[i].size);
        }
    }

  /*
   * 9)
   */

  if (architecture_tlb_flush() != STATUS_OK)
    MACHINE_ESCAPE("unable to flush the TLB");

  /*
   * 10)
   */

  _architecture.kernel.pdbr = pdbr;

  MACHINE_LEAVE();
}
// FIXME: lot of code has been removed here t_error map_page(t_paddr paddr, t_vaddr *vaddr) { o_as* kas; t_ia32_pde pde; t_ia32_pde pte; t_ia32_directory dir; t_ia32_table table; t_ia32_page page; o_region* oreg; i_segment segid; o_segment* segment; int clear = 0; REGION_ENTER(region); if (as_get(kasid, &kas) != ERROR_NONE) REGION_LEAVE(region, ERROR_UNKNOWN); if (region_space(kas, PAGESZ, vaddr) != ERROR_NONE) REGION_LEAVE(region, ERROR_UNKNOWN); if ((oreg = malloc(sizeof(o_region))) == NULL) REGION_LEAVE(region, ERROR_UNKNOWN); oreg->address = *vaddr; oreg->regid = (i_region)oreg->address; oreg->opts = REGION_OPT_PRIVILEGED; oreg->size = PAGESZ; oreg->offset = 0; oreg->segid = (i_segment) paddr; if (region_inject(kasid, oreg) != ERROR_NONE) REGION_LEAVE(region, ERROR_UNKNOWN); // Page Dir dir = kas->machdep.pd; pde = PDE_ENTRY(*vaddr); if (pd_get_table(&dir, pde, &table) == ERROR_UNKNOWN) { segment_reserve(kasid, PAGESZ, PERM_READ | PERM_WRITE, &segid); if (segment_get(segid, &segment) != ERROR_NONE) REGION_LEAVE(region, ERROR_UNKNOWN); table.rw = PT_WRITABLE; pt_build(segment->address, &table, 0); pd_add_table(&dir, pde, table); clear = 1; } table.entries = ENTRY_ADDR(PD_MIRROR, pde); //fixme if (clear) memset((void *)ENTRY_ADDR(PD_MIRROR, pde), '\0', PAGESZ); // Page Table pte = PTE_ENTRY(*vaddr); page.present = 1; page.addr = paddr; page.rw = PG_WRITABLE; pt_add_page(&table, pte, page); tlb_invalidate(*vaddr); REGION_LEAVE(region, ERROR_NONE); }
/*
 * this function releases all the page mappings covered by a region,
 * cleaning the page tables of the address space 'asid'.
 *
 * fixes over the previous version:
 *  - REGION_ENTER(region) was executed twice; once is enough.
 *  - when pd_get_table() reported no page table for a directory slot,
 *    the code fell through and iterated over an uninitialized 'table'
 *    (and later unmapped an uninitialized 'pt_addr') — undefined
 *    behavior; such slots are now skipped.
 *
 * NOTE(review): the map_page() return values are still unchecked, as
 * in the original.
 */
t_error			ia32_region_release(i_as	asid,
					    i_region	regid)
{
  t_ia32_pde		pde_start;
  t_ia32_pde		pde_end;
  t_ia32_pte		pte_start;
  t_ia32_pte		pte_end;
  t_ia32_table		table;
  t_ia32_directory	pd;
  o_as*			oas;
  t_vsize		size;
  t_paddr		base;
  t_vaddr		pd_addr;
  t_paddr		pt_addr;
  o_region*		oreg;
  int			i;
  int			j;
  t_ia32_pte*		t;

  REGION_ENTER(region);

  if (as_get(asid, &oas) != ERROR_NONE)
    REGION_LEAVE(region, ERROR_UNKNOWN);

  if (region_get(asid, regid, &oreg) != ERROR_NONE)
    REGION_LEAVE(region, ERROR_UNKNOWN);

  size = oreg->size;
  pd = oas->machdep.pd;
  base = MK_BASE(pd);

  /* temporarily map the page directory into the kernel. */
  map_page(base, &pd_addr);

  pde_start = PDE_ENTRY(oreg->address);
  pde_end = PDE_ENTRY(oreg->address + size);
  pte_start = PTE_ENTRY(oreg->address);
  pte_end = PTE_ENTRY(oreg->address + size);

  for (i = pde_start; i <= pde_end; i++)
    {
      /* nothing to release under a directory slot with no table. */
      if (pd_get_table((t_ia32_directory *) &pd_addr, i, &table) ==
	  ERROR_UNKNOWN)
	continue;

      /* temporarily map the page table into the kernel. */
      map_page(table.entries, &pt_addr);
      table.entries = pt_addr;

      for (j = (i == pde_start ? pte_start : 0);
	   j <= (i == pde_end ? pte_end : 1023);
	   j++)
	{
	  t = (t_ia32_pte*)table.entries;

	  if (t[j] != 0)
	    if (pt_delete_page(&table, j) != ERROR_NONE)
	      REGION_LEAVE(region, ERROR_UNKNOWN);
	}

      unmap_page(&pt_addr);
    }

  tlb_flush();

  unmap_page(&pd_addr);

  REGION_LEAVE(region, ERROR_NONE);
}
t_error ia32_region_reserve(i_as asid, i_segment segid, t_paddr offset, t_opts opts, t_vaddr address, t_vsize size, i_region* regid) { t_ia32_pde pde_start; t_ia32_pde pde_end; t_ia32_pte pte_start; t_ia32_pte pte_end; t_ia32_table table; t_ia32_directory pd; o_as* oas; t_ia32_page page; t_paddr base; t_paddr ram_paddr; t_vaddr pd_addr; t_paddr pt_addr; o_segment* oseg; o_region* oreg; int i = 0; int j = 0; int x = 0; int clear = 0; REGION_ENTER(region); if (as_get(asid, &oas) != ERROR_NONE) REGION_LEAVE(region, ERROR_UNKNOWN); if (region_get(asid, *regid, &oreg) != ERROR_NONE) REGION_LEAVE(region, ERROR_UNKNOWN); if (segment_get(segid, &oseg) != ERROR_NONE) REGION_LEAVE(region, ERROR_UNKNOWN); ram_paddr = oseg->address; pd = oas->machdep.pd; base = MK_BASE(pd); // Mapping PD into Kernel map_page(base, &pd_addr); /* printf("pd %x\n", pd_addr); */ /* printf("%x\n", oreg->address); */ pde_start = PDE_ENTRY(oreg->address); pde_end = PDE_ENTRY(oreg->address + size); pte_start = PTE_ENTRY(oreg->address); pte_end = PTE_ENTRY(oreg->address + size); for (i = pde_start; i <= pde_end; i++) { if (pd_get_table((t_ia32_directory *) &pd_addr, i, &table) == ERROR_UNKNOWN) { segment_reserve(asid, PAGESZ, PERM_READ | PERM_WRITE, &segid); if (segment_get(segid, &oseg) != ERROR_NONE) REGION_LEAVE(region, ERROR_UNKNOWN); table.rw = PDE_FLAG_RW; table.present = 1; table.user = (opts & REGION_OPT_USER) ? PT_USER : PT_PRIVILEGED; table.writeback = PT_CACHED; table.cached = 1; pt_build(oseg->address, &table, 0); pd_add_table((t_ia32_directory *) &pd_addr, i, table); clear = 1; } else clear = 0; map_page(table.entries, &pt_addr); table.entries = pt_addr; if (clear) memset((void*)pt_addr, '\0', PAGESZ); for (j = (i == pde_start ? pte_start : 0); j <= (i == pde_end ? pte_end : 1023); j++) { page.addr = x + (offset + ram_paddr); page.present = 1; page.rw = (oseg->perms & PERM_WRITE) ? PG_WRITABLE : PG_READONLY; page.present = 1; page.user = (opts & REGION_OPT_USER) ? 
PG_USER : PG_PRIVILEGED; page.cached = PG_CACHED; pt_add_page(&table, j, page); x += PAGESZ; } unmap_page(&pt_addr); } tlb_flush(); unmap_page(&pd_addr); REGION_LEAVE(region, ERROR_NONE); }