/** Create PTL0.
 *
 * The PTL0 of the 4-level page table hierarchy is created for each
 * address space.
 *
 * @param flags Flags that can specify whether the PTL0 is for the kernel
 *              address space.
 *
 * @return Physical address of the new PTL0.
 *
 */
pte_t *ptl0_create(unsigned int flags)
{
    pte_t *dst_ptl0 = (pte_t *) PA2KA(frame_alloc(PTL0_FRAMES,
        FRAME_LOWMEM, PTL0_SIZE - 1));

    if (flags & FLAG_AS_KERNEL)
        memsetb(dst_ptl0, PTL0_SIZE, 0);
    else {
        /*
         * Copy the kernel address space portion to the new PTL0.
         */
        mutex_lock(&AS_KERNEL->lock);

        pte_t *src_ptl0 =
            (pte_t *) PA2KA((uintptr_t) AS_KERNEL->genarch.page_table);

        uintptr_t src = (uintptr_t)
            &src_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];
        uintptr_t dst = (uintptr_t)
            &dst_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];

        memsetb(dst_ptl0, PTL0_SIZE, 0);
        memcpy((void *) dst, (void *) src,
            PTL0_SIZE - (src - (uintptr_t) src_ptl0));

        mutex_unlock(&AS_KERNEL->lock);
    }

    return (pte_t *) KA2PA((uintptr_t) dst_ptl0);
}
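/*
 * A minimal usage sketch (hypothetical caller): a new user address space
 * receives its own PTL0 with the kernel portion already copied in. Note
 * that the return value is the *physical* address of the table (see the
 * KA2PA() in the return statement above), so it must be run through
 * PA2KA() before the kernel can dereference it.
 */
static pte_t *ptl0_virt_for_new_as(void)
{
    pte_t *ptl0_phys = ptl0_create(0);    /* 0 => user address space */
    return (pte_t *) PA2KA((uintptr_t) ptl0_phys);
}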
void acpi_init(void)
{
    uint8_t *addr[2] = { NULL, (uint8_t *) PA2KA(0xe0000) };
    unsigned int i;
    unsigned int j;
    unsigned int length[2] = { 1024, 128 * 1024 };
    uint64_t *sig = (uint64_t *) RSDP_SIGNATURE;

    /*
     * Find the Root System Description Pointer:
     * 1. search the first 1K of the EBDA
     * 2. search the 128K starting at 0xe0000
     */

    addr[0] = (uint8_t *) PA2KA(ebda);

    for (i = (ebda ? 0 : 1); i < 2; i++) {
        for (j = 0; j < length[i]; j += 16) {
            if ((*((uint64_t *) &addr[i][j]) == *sig) &&
                (rsdp_check(&addr[i][j]))) {
                acpi_rsdp = (struct acpi_rsdp *) &addr[i][j];
                goto rsdp_found;
            }
        }
    }

    return;

rsdp_found:
    LOG("%p: ACPI Root System Description Pointer", acpi_rsdp);

    uintptr_t acpi_rsdt_p = (uintptr_t) acpi_rsdp->rsdt_address;
    uintptr_t acpi_xsdt_p = 0;

    if (acpi_rsdp->revision)
        acpi_xsdt_p = (uintptr_t) acpi_rsdp->xsdt_address;

    if (acpi_rsdt_p)
        acpi_rsdt = (struct acpi_rsdt *) map_sdt(
            (struct acpi_sdt_header *) acpi_rsdt_p);

    if (acpi_xsdt_p)
        acpi_xsdt = (struct acpi_xsdt *) map_sdt(
            (struct acpi_sdt_header *) acpi_xsdt_p);

    if ((acpi_rsdt) && (!acpi_sdt_check((uint8_t *) acpi_rsdt))) {
        printf("RSDT: bad checksum\n");
        return;
    }

    if ((acpi_xsdt) && (!acpi_sdt_check((uint8_t *) acpi_xsdt))) {
        printf("XSDT: bad checksum\n");
        return;
    }

    if (acpi_xsdt)
        configure_via_xsdt();
    else if (acpi_rsdt)
        configure_via_rsdt();
}
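/*
 * For orientation, a sketch of the test rsdp_check() is assumed to
 * perform (an assumption; the actual implementation is not shown here).
 * The ACPI specification requires the 20-byte ACPI 1.0 RSDP to sum to
 * zero modulo 256; revision 2+ adds an extended checksum covering the
 * whole structure.
 */
static bool rsdp_checksum_ok(const uint8_t *data, size_t len)
{
    uint8_t sum = 0;
    size_t i;

    for (i = 0; i < len; i++)
        sum += data[i];

    return (sum == 0);
}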
/** Create a temporary page.
 *
 * The page is mapped read/write to a newly allocated frame of physical
 * memory. The page must be returned back to the system by a call to
 * km_temporary_page_put().
 *
 * @param[out] framep Pointer to a variable which will receive the physical
 *                    address of the allocated frame.
 * @param[in]  flags  Frame allocation flags. FRAME_NONE, FRAME_NO_RESERVE
 *                    and FRAME_ATOMIC bits are allowed.
 * @return Virtual address of the allocated frame, or NULL on failure.
 */
uintptr_t km_temporary_page_get(uintptr_t *framep, frame_flags_t flags)
{
    uintptr_t frame;
    uintptr_t page;

    ASSERT(THREAD);
    ASSERT(framep);
    ASSERT(!(flags & ~(FRAME_NO_RESERVE | FRAME_ATOMIC)));

    /*
     * Allocate a frame, preferably from high memory.
     */
    frame = (uintptr_t) frame_alloc(ONE_FRAME,
        FRAME_HIGHMEM | FRAME_ATOMIC | flags);
    if (frame) {
        page = km_map(frame, PAGE_SIZE,
            PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE);
        ASSERT(page);  // FIXME
    } else {
        frame = (uintptr_t) frame_alloc(ONE_FRAME,
            FRAME_LOWMEM | flags);
        if (!frame)
            return (uintptr_t) NULL;

        page = PA2KA(frame);
    }

    *framep = frame;
    return page;
}
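/*
 * A minimal usage sketch (hypothetical caller): the temporary page must
 * always be handed back via km_temporary_page_put() with the same virtual
 * address this call returned.
 */
static void temporary_page_demo(void)
{
    uintptr_t frame;
    uintptr_t page = km_temporary_page_get(&frame, FRAME_NO_RESERVE);

    if (page) {
        /* Access the frame through the temporary mapping. */
        memsetb((void *) page, PAGE_SIZE, 0);
        km_temporary_page_put(page);
    }
}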
/** Initialize page tables.
 *
 * A 1:1 virtual-to-physical mapping is created in the kernel address
 * space. A mapping for the exception vector table is also created.
 */
void page_arch_init(void)
{
    int flags = PAGE_CACHEABLE;
    page_mapping_operations = &pt_mapping_operations;

    page_table_lock(AS_KERNEL, true);

    uintptr_t cur;

    /* Kernel identity mapping */
    for (cur = PHYSMEM_START_ADDR;
        cur < min(config.identity_size, config.physmem_end);
        cur += FRAME_SIZE)
        page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);

#ifdef HIGH_EXCEPTION_VECTORS
    /* Create mapping for the exception table at the high offset */
    uintptr_t ev_frame = (uintptr_t) frame_alloc(ONE_FRAME, FRAME_NONE);
    page_mapping_insert(AS_KERNEL, EXC_BASE_ADDRESS, ev_frame, flags);
#else
#error "Only high exception vector supported now"
#endif

    page_table_unlock(AS_KERNEL, true);

    as_switch(NULL, AS_KERNEL);
    boot_page_table_free();
}
void page_arch_init(void)
{
    if (config.cpu_active > 1) {
        write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
        return;
    }

    uintptr_t cur;
    unsigned int identity_flags =
        PAGE_GLOBAL | PAGE_CACHEABLE | PAGE_EXEC | PAGE_WRITE | PAGE_READ;

    page_mapping_operations = &pt_mapping_operations;

    page_table_lock(AS_KERNEL, true);

    /*
     * PA2KA(identity) mapping for all low-memory frames.
     */
    for (cur = 0; cur < min(config.identity_size, config.physmem_end);
        cur += FRAME_SIZE)
        page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, identity_flags);

    page_table_unlock(AS_KERNEL, true);

    exc_register(14, "page_fault", true, (iroutine_t) page_fault);
    write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
}
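/*
 * For context, the typical shape of the PA2KA()/KA2PA() pair used
 * throughout these snippets (a sketch; the exact offset is architecture-
 * specific, and 0x80000000 is an assumption here): the kernel lives in a
 * linear identity window, so the translation is a constant offset. This
 * is also why PA2KA() only works for low memory that falls inside that
 * window.
 */
#define KERNEL_OFFSET  UINT32_C(0x80000000)
#define PA2KA(x)  (((uintptr_t) (x)) + KERNEL_OFFSET)
#define KA2PA(x)  (((uintptr_t) (x)) - KERNEL_OFFSET)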
static slab_cache_t *
slab_cache_alloc()
{
    slab_t *slab;
    void *obj;
    u32_t *p;

    DBG("%s\n", __FUNCTION__);

    if (list_empty(&slab_cache_cache.partial_slabs)) {
        // spinlock_unlock(&cache->slablock);
        // slab = slab_create();
        void *data;
        unsigned int i;

        /* Test the physical address before PA2KA(): PA2KA(0) is a
           valid-looking kernel address, not NULL. */
        addr_t frame = alloc_page();
        if (!frame) {
            return NULL;
        }
        data = (void *) PA2KA(frame);

        slab = (slab_t *) ((u32_t) data + PAGE_SIZE - sizeof(slab_t));

        /* Fill in slab structures */
        frame_set_parent(ADDR2PFN(KA2PA(data)), slab);

        slab->start = data;
        slab->available = slab_cache_cache.objects;
        slab->nextavail = (void *) data;
        slab->cache = &slab_cache_cache;

        /* Thread the free list through the objects themselves: each
           free object's first word holds the address of the next one. */
        for (i = 0, p = (u32_t *) slab->start;
            i < slab_cache_cache.objects; i++) {
            *p = (u32_t) p + slab_cache_cache.size;
            p = (u32_t *) ((u32_t) p + slab_cache_cache.size);
        }

        atomic_inc(&slab_cache_cache.allocated_slabs);
        // spinlock_lock(&cache->slablock);
    } else {
        slab = list_get_instance(slab_cache_cache.partial_slabs.next,
            slab_t, link);
        list_remove(&slab->link);
    }

    obj = slab->nextavail;
    slab->nextavail = *((void **) obj);
    slab->available--;

    if (!slab->available)
        list_prepend(&slab->link, &slab_cache_cache.full_slabs);
    else
        list_prepend(&slab->link, &slab_cache_cache.partial_slabs);

    // spinlock_unlock(&cache->slablock);

    return (slab_cache_t *) obj;
}
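/*
 * An illustrative, self-contained sketch (hypothetical userspace demo;
 * buf, obj_size, and nobj are assumptions for the example): threading a
 * free list through a raw buffer the same way the loop above does, then
 * popping one object off the head.
 */
#include <stdint.h>
#include <stdlib.h>

int main(void)
{
    size_t obj_size = 32, nobj = 8;
    uint8_t *buf = malloc(obj_size * nobj);
    if (!buf)
        return 1;

    /* Each free object's first word points to the next object. */
    uint8_t *p = buf;
    for (size_t i = 0; i < nobj; i++) {
        *(void **) p = p + obj_size;
        p += obj_size;
    }

    /* Pop the head of the free list, as slab_cache_alloc() does. */
    void *nextavail = buf;
    void *obj = nextavail;
    nextavail = *(void **) obj;
    (void) nextavail;

    free(buf);
    return 0;
}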
uintptr_t vhpt_set_up(void)
{
    uintptr_t vhpt_frame =
        frame_alloc(SIZE2FRAMES(VHPT_SIZE), FRAME_ATOMIC, 0);
    if (!vhpt_frame)
        panic("Kernel configured with VHPT but no memory for table.");

    vhpt_base = (vhpt_entry_t *) PA2KA(vhpt_frame);
    vhpt_invalidate_all();
    return (uintptr_t) vhpt_base;
}
thr_t* __fastcall create_systhread(addr_t entry_ptr)
{
    static count_t thr_cnt = 0;
    static count_t slot = 1;

    thr_t *thr;
    addr_t thr_stack;

    DBG("%s\n", __FUNCTION__);

    thr = (thr_t *) slab_alloc(thr_slab, 0);
    thr_stack = PA2KA(frame_alloc(2));

    thr_cnt++;

    thr->eax = (thr_cnt << 8) | slot;
    thr->tid = (thr_cnt << 8) | slot;
    thr->slot = slot;
    slot++;

    thr->pdir = KA2PA(&sys_pdbr);

    thr->ebx = 0;
    thr->edi = 0;
    thr->esi = 0;
    thr->ebp = 0;
    thr->edx = 0;
    thr->ecx = 0;

    thr->cs = sel_srv_code;
    thr->eflags = EFL_IOPL1;

    thr->esp = thr_stack + 8192;
    thr->ss = sel_srv_stack;

    thr->thr_flags = 0;

    thr->ticks_left = 8;
    thr->quantum_size = 8;

    thr->eip = entry_ptr;

    // lock_enqueue(thr_ptr);  /* add to scheduling queues */

    return thr;
}
void frame_low_arch_init(void)
{
    if (config.cpu_active > 1)
        return;

    frame_common_arch_init(true);

    /*
     * On sparc64, physical memory can start at a non-zero address.
     * The generic frame_init() only marks PFN 0 as not free, so we
     * must explicitly mark the physically first frame as not free
     * here, no matter what its address is.
     */
    frame_mark_unavailable(ADDR2PFN(KA2PA(PFN2ADDR(0))), 1);

    /* PA2KA will work only on low memory, so compute the exclusive
       end of the identity-mapped range from the last low frame. */
    end_of_identity = PA2KA(config.physmem_end - FRAME_SIZE) + PAGE_SIZE;
}
void frame_low_arch_init(void)
{
    if (config.cpu_active > 1)
        return;

    frame_common_arch_init(true);

    /*
     * Blacklist ROM regions.
     */
    frame_mark_unavailable(ADDR2PFN(ROM_BASE), SIZE2FRAMES(ROM_SIZE));

    frame_mark_unavailable(ADDR2PFN(KERNEL_RESERVED_AREA_BASE),
        SIZE2FRAMES(KERNEL_RESERVED_AREA_SIZE));

    /* PA2KA will work only on low memory. */
    end_of_identity = PA2KA(config.physmem_end - FRAME_SIZE) + PAGE_SIZE;
}
int as_constructor_arch(as_t *as, unsigned int flags)
{
#ifdef CONFIG_TSB
    uintptr_t tsb_phys = frame_alloc(SIZE2FRAMES(
        (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * sizeof(tsb_entry_t)),
        flags, 0);
    if (!tsb_phys)
        return -1;

    tsb_entry_t *tsb = (tsb_entry_t *) PA2KA(tsb_phys);

    as->arch.itsb = tsb;
    as->arch.dtsb = tsb + ITSB_ENTRY_COUNT;

    memsetb(as->arch.itsb,
        (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * sizeof(tsb_entry_t), 0);
#endif

    return 0;
}
/**
 * Allocate frames for slab space and initialize it.
 *
 */
static slab_t *
slab_space_alloc(slab_cache_t *cache, int flags)
{
    void *data;
    slab_t *slab;
    size_t fsize;
    unsigned int i;
    u32_t p;

    DBG("%s order %d\n", __FUNCTION__, cache->order);

    /* Test the physical address before PA2KA(): PA2KA(0) is a
       valid-looking kernel address, not NULL. */
    addr_t frame = frame_alloc(1 << cache->order);
    if (!frame) {
        return NULL;
    }
    data = (void *) PA2KA(frame);

    slab = (slab_t *) slab_create();
    if (!slab) {
        frame_free(KA2PA(data));
        return NULL;
    }

    /* Fill in slab structures */
    for (i = 0; i < ((u32_t) 1 << cache->order); i++)
        frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab);

    slab->start = data;
    slab->available = cache->objects;
    slab->nextavail = (void *) data;
    slab->cache = cache;

    /* Thread the free list through the objects themselves. */
    for (i = 0, p = (u32_t) slab->start; i < cache->objects; i++) {
        *(addr_t *) p = p + cache->size;
        p = p + cache->size;
    }

    atomic_inc(&cache->allocated_slabs);
    return slab;
}
void page_arch_init(void)
{
    int flags = PAGE_CACHEABLE | PAGE_EXEC;
    page_mapping_operations = &pt_mapping_operations;

    page_table_lock(AS_KERNEL, true);

    /* Kernel identity mapping */
    // FIXME:
    // We need to consider the possibility that
    // identity_base > identity_size and physmem_end.
    // This might lead to overflow if identity_size is too big.
    for (uintptr_t cur = PHYSMEM_START_ADDR;
        cur < min(KA2PA(config.identity_base) + config.identity_size,
        config.physmem_end);
        cur += FRAME_SIZE)
        page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);

    page_table_unlock(AS_KERNEL, true);

    as_switch(NULL, AS_KERNEL);

    /* Switch the MMU to the new context table */
    asi_u32_write(ASI_MMUREGS, MMU_CONTEXT_TABLE,
        KA2PA(as_context_table) >> 4);
}
void bootstrap(void)
{
    mmu_start();
    version_print();

    printf("\nMemory statistics\n");
    printf(" %p|%p: bootstrap stack\n", &boot_stack, &boot_stack);
    printf(" %p|%p: bootstrap page table\n", &boot_pt, &boot_pt);
    printf(" %p|%p: boot info structure\n", &bootinfo, &bootinfo);
    printf(" %p|%p: kernel entry point\n",
        (void *) PA2KA(BOOT_OFFSET), (void *) BOOT_OFFSET);

    size_t i;
    for (i = 0; i < COMPONENTS; i++)
        printf(" %p|%p: %s image (%u/%u bytes)\n",
            components[i].start, components[i].start,
            components[i].name, components[i].inflated,
            components[i].size);

    void *dest[COMPONENTS];
    size_t top = 0;
    size_t cnt = 0;
    bootinfo.cnt = 0;

    for (i = 0; i < min(COMPONENTS, TASKMAP_MAX_RECORDS); i++) {
        top = ALIGN_UP(top, PAGE_SIZE);

        if (i > 0) {
            bootinfo.tasks[bootinfo.cnt].addr = TOP2ADDR(top);
            bootinfo.tasks[bootinfo.cnt].size = components[i].inflated;

            str_cpy(bootinfo.tasks[bootinfo.cnt].name,
                BOOTINFO_TASK_NAME_BUFLEN, components[i].name);

            bootinfo.cnt++;
        }

        dest[i] = TOP2ADDR(top);
        top += components[i].inflated;
        cnt++;
    }

    printf("\nInflating components ... ");

    for (i = cnt; i > 0; i--) {
        void *tail = components[i - 1].start + components[i - 1].size;
        if (tail >= dest[i - 1]) {
            printf("\n%s: Image too large to fit (%p >= %p), halting.\n",
                components[i - 1].name, tail, dest[i - 1]);
            halt();
        }

        printf("%s ", components[i - 1].name);

        int err = inflate(components[i - 1].start, components[i - 1].size,
            dest[i - 1], components[i - 1].inflated);

        if (err != EOK) {
            printf("\n%s: Inflating error %d\n",
                components[i - 1].name, err);
            halt();
        }
    }

    printf(".\n");
    printf("Booting the kernel... \n");
    jump_to_kernel((void *) PA2KA(BOOT_OFFSET), &bootinfo);
}
        return pte;
    }

    return NULL;
}

static void pht_insert(const uintptr_t vaddr, const pte_t *pte)
{
    uint32_t page = (vaddr >> 12) & 0xffff;
    uint32_t api = (vaddr >> 22) & 0x3f;

    uint32_t vsid = sr_get(vaddr);
    uint32_t sdr1 = sdr1_get();

    // FIXME: compute size of PHT exactly
    phte_t *phte = (phte_t *) PA2KA(sdr1 & 0xffff0000);

    /* Primary hash (xor) */
    uint32_t h = 0;
    uint32_t hash = vsid ^ page;
    uint32_t base = (hash & 0x3ff) << 3;
    uint32_t i;
    bool found = false;

    /* Find colliding PTE in PTEG */
    for (i = 0; i < 8; i++) {
        if ((phte[base + i].v) && (phte[base + i].vsid == vsid) &&
            (phte[base + i].api == api) && (phte[base + i].h == 0)) {
            found = true;
/** Merge zones z1 and z2.
 *
 * The merged zones must be two adjacent zones with no zone existing in
 * between (which means that z2 == z1 + 1). Both zones must be available
 * zones with the same flags.
 *
 * When you create a new zone, the frame allocator configuration does
 * not have to be of 2^order size. Once the allocator is running this is
 * no longer possible, since the merged configuration data occupies more
 * space :-/
 *
 */
bool zone_merge(size_t z1, size_t z2)
{
    irq_spinlock_lock(&zones.lock, true);

    bool ret = true;

    /*
     * We can join only two zones with no zone existing in between;
     * the zones have to be available and have the same set of flags.
     */
    if ((z1 >= zones.count) || (z2 >= zones.count) || (z2 - z1 != 1) ||
        (zones.info[z1].flags != zones.info[z2].flags)) {
        ret = false;
        goto errout;
    }

    pfn_t cframes = SIZE2FRAMES(zone_conf_size(
        zones.info[z2].base - zones.info[z1].base + zones.info[z2].count));

    /* Allocate merged zone data inside one of the zones */
    pfn_t pfn;
    if (zone_can_alloc(&zones.info[z1], cframes, 0)) {
        pfn = zones.info[z1].base +
            zone_frame_alloc(&zones.info[z1], cframes, 0);
    } else if (zone_can_alloc(&zones.info[z2], cframes, 0)) {
        pfn = zones.info[z2].base +
            zone_frame_alloc(&zones.info[z2], cframes, 0);
    } else {
        ret = false;
        goto errout;
    }

    /* Preserve original data from z1 */
    zone_t old_z1 = zones.info[z1];

    /* Do zone merging */
    zone_merge_internal(z1, z2, &old_z1, (void *) PA2KA(PFN2ADDR(pfn)));

    /* Subtract zone information from busy frames */
    zones.info[z1].busy_count -= cframes;

    /* Free old zone information */
    return_config_frames(z1,
        ADDR2PFN(KA2PA((uintptr_t) old_z1.frames)), old_z1.count);
    return_config_frames(z1,
        ADDR2PFN(KA2PA((uintptr_t) zones.info[z2].frames)),
        zones.info[z2].count);

    /* Move zones down */
    for (size_t i = z2 + 1; i < zones.count; i++)
        zones.info[i - 1] = zones.info[i];

    zones.count--;

errout:
    irq_spinlock_unlock(&zones.lock, true);

    return ret;
}
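/*
 * A minimal usage sketch (an assumption: a helper along these lines,
 * mirroring what a zone_merge_all() routine would do): keep merging
 * each zone with its right-hand neighbour, and only advance when a
 * merge fails, because a successful merge shifts the following zones
 * down by one index.
 */
static void zone_merge_all_sketch(void)
{
    size_t i = 1;

    while (i < zones.count) {
        if (!zone_merge(i - 1, i))
            i++;
    }
}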
/** Create and add zone to system.
 *
 * @param start      First frame number (absolute).
 * @param count      Size of zone in frames.
 * @param confframe  Where configuration frames are supposed to be.
 *                   Automatically checks that we will not disturb the
 *                   kernel and possibly init. If confframe is given
 *                   _outside_ this zone, it is expected that the area is
 *                   already marked BUSY and big enough to contain
 *                   zone_conf_size() amount of data. If the confframe is
 *                   inside the area, the zone free frame information is
 *                   modified not to include it.
 * @param flags      Zone flags.
 *
 * @return Zone number or -1 on error.
 *
 */
size_t zone_create(pfn_t start, size_t count, pfn_t confframe,
    zone_flags_t flags)
{
    irq_spinlock_lock(&zones.lock, true);

    if (flags & ZONE_AVAILABLE) {  /* Create available zone */
        /*
         * Theoretically we could have NULL here; practically, make
         * sure nobody tries to do that. If some platform requires it,
         * remove the assert.
         */
        ASSERT(confframe != ADDR2PFN((uintptr_t) NULL));

        /* Update the known end of physical memory. */
        config.physmem_end = max(config.physmem_end,
            PFN2ADDR(start + count));

        /*
         * If confframe is supposed to be inside our zone, then make
         * sure it does not span the kernel & init.
         */
        size_t confcount = SIZE2FRAMES(zone_conf_size(count));

        if ((confframe >= start) && (confframe < start + count)) {
            for (; confframe < start + count; confframe++) {
                uintptr_t addr = PFN2ADDR(confframe);
                if (overlaps(addr, PFN2ADDR(confcount),
                    KA2PA(config.base), config.kernel_size))
                    continue;

                if (overlaps(addr, PFN2ADDR(confcount),
                    KA2PA(config.stack_base), config.stack_size))
                    continue;

                bool overlap = false;
                for (size_t i = 0; i < init.cnt; i++) {
                    if (overlaps(addr, PFN2ADDR(confcount),
                        init.tasks[i].paddr, init.tasks[i].size)) {
                        overlap = true;
                        break;
                    }
                }

                if (overlap)
                    continue;

                break;
            }

            if (confframe >= start + count)
                panic("Cannot find configuration data for zone.");
        }

        size_t znum = zones_insert_zone(start, count, flags);
        if (znum == (size_t) -1) {
            irq_spinlock_unlock(&zones.lock, true);
            return (size_t) -1;
        }

        void *confdata = (void *) PA2KA(PFN2ADDR(confframe));
        zone_construct(&zones.info[znum], start, count, flags, confdata);

        /* If confdata is inside the zone, mark it unavailable */
        if ((confframe >= start) && (confframe < start + count)) {
            for (size_t i = confframe; i < confframe + confcount; i++)
                zone_mark_unavailable(&zones.info[znum],
                    i - zones.info[znum].base);
        }

        irq_spinlock_unlock(&zones.lock, true);
        return znum;
    }

    /* Non-available zone */
    size_t znum = zones_insert_zone(start, count, flags);
    if (znum == (size_t) -1) {
        irq_spinlock_unlock(&zones.lock, true);
        return (size_t) -1;
    }

    zone_construct(&zones.info[znum], start, count, flags, NULL);

    irq_spinlock_unlock(&zones.lock, true);
    return znum;
}
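/*
 * A minimal usage sketch (hypothetical platform code; base and size are
 * assumptions for the example): register one available zone covering
 * [base, base + size) and keep its configuration frames inside the zone
 * itself.
 */
static void register_ram_zone(uintptr_t base, size_t size)
{
    pfn_t start = ADDR2PFN(ALIGN_UP(base, FRAME_SIZE));
    size_t count = SIZE2FRAMES(ALIGN_DOWN(size, FRAME_SIZE));

    /* Passing a confframe inside the zone lets zone_create() pick a
       spot that avoids the kernel image and the init tasks. */
    size_t znum = zone_create(start, count, start, ZONE_AVAILABLE);
    if (znum == (size_t) -1)
        panic("Cannot create memory zone.");
}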
void bootstrap(void)
{
    version_print();
    ofw_memmap(&bootinfo.memmap);

    void *bootinfo_pa = ofw_translate(&bootinfo);
    void *real_mode_pa = ofw_translate(&real_mode);
    void *loader_address_pa = ofw_translate((void *) LOADER_ADDRESS);

    printf("\nMemory statistics (total %llu MB)\n",
        bootinfo.memmap.total >> 20);
    printf(" %p|%p: real mode trampoline\n", &real_mode, real_mode_pa);
    printf(" %p|%p: boot info structure\n", &bootinfo, bootinfo_pa);
    printf(" %p|%p: kernel entry point\n",
        (void *) PA2KA(BOOT_OFFSET), (void *) BOOT_OFFSET);
    printf(" %p|%p: loader entry point\n",
        (void *) LOADER_ADDRESS, loader_address_pa);

    size_t i;
    for (i = 0; i < COMPONENTS; i++)
        printf(" %p|%p: %s image (%zu/%zu bytes)\n",
            components[i].start, ofw_translate(components[i].start),
            components[i].name, components[i].inflated,
            components[i].size);

    size_t dest[COMPONENTS];
    size_t top = 0;
    size_t cnt = 0;
    bootinfo.taskmap.cnt = 0;

    for (i = 0; i < min(COMPONENTS, TASKMAP_MAX_RECORDS); i++) {
        top = ALIGN_UP(top, PAGE_SIZE);

        if (i > 0) {
            bootinfo.taskmap.tasks[bootinfo.taskmap.cnt].addr =
                (void *) PA2KA(top);
            bootinfo.taskmap.tasks[bootinfo.taskmap.cnt].size =
                components[i].inflated;

            str_cpy(bootinfo.taskmap.tasks[bootinfo.taskmap.cnt].name,
                BOOTINFO_TASK_NAME_BUFLEN, components[i].name);

            bootinfo.taskmap.cnt++;
        }

        dest[i] = top;
        top += components[i].inflated;
        cnt++;
    }

    void *balloc_base;
    void *balloc_base_pa;
    ofw_alloc("boot allocator area", &balloc_base, &balloc_base_pa,
        BALLOC_MAX_SIZE, loader_address_pa);
    printf(" %p|%p: boot allocator area\n", balloc_base, balloc_base_pa);

    void *inflate_base;
    void *inflate_base_pa;
    ofw_alloc("inflate area", &inflate_base, &inflate_base_pa, top,
        loader_address_pa);
    printf(" %p|%p: inflate area\n", inflate_base, inflate_base_pa);

    uintptr_t balloc_start = ALIGN_UP(top, PAGE_SIZE);
    size_t pages = (balloc_start + ALIGN_UP(BALLOC_MAX_SIZE, PAGE_SIZE))
        >> PAGE_WIDTH;

    void *transtable;
    void *transtable_pa;
    ofw_alloc("translate table", &transtable, &transtable_pa,
        pages * sizeof(void *), loader_address_pa);
    printf(" %p|%p: translate table\n", transtable, transtable_pa);

    check_overlap("boot allocator area", balloc_base_pa, pages);
    check_overlap("inflate area", inflate_base_pa, pages);
    check_overlap("translate table", transtable_pa, pages);

    printf("\nInflating components ... ");

    for (i = cnt; i > 0; i--) {
        printf("%s ", components[i - 1].name);

        int err = inflate(components[i - 1].start, components[i - 1].size,
            inflate_base + dest[i - 1], components[i - 1].inflated);

        if (err != EOK) {
            printf("\n%s: Inflating error %d, halting.\n",
                components[i - 1].name, err);
            halt();
        }
    }

    printf(".\n");

    printf("Setting up boot allocator ...\n");
    balloc_init(&bootinfo.ballocs, balloc_base, PA2KA(balloc_start),
        BALLOC_MAX_SIZE);

    printf("Setting up screens ...\n");
    ofw_setup_screens();

    printf("Canonizing OpenFirmware device tree ...\n");
    bootinfo.ofw_root = ofw_tree_build();

    printf("Setting up translate table ...\n");
    for (i = 0; i < pages; i++) {
        uintptr_t off = i << PAGE_WIDTH;
        void *phys;

        if (off < balloc_start)
            phys = ofw_translate(inflate_base + off);
        else
            phys = ofw_translate(balloc_base + off - balloc_start);

        ((void **) transtable)[i] = phys;
    }

    printf("Booting the kernel...\n");
    jump_to_kernel(bootinfo_pa, transtable_pa, pages, real_mode_pa);
}