/** Initialize physical memory management. */
void frame_init(void)
{
    if (config.cpu_active == 1) {
        zones.count = 0;
        irq_spinlock_initialize(&zones.lock, "frame.zones.lock");
        mutex_initialize(&mem_avail_mtx, MUTEX_ACTIVE);
        condvar_initialize(&mem_avail_cv);
    }

    /* Tell the architecture to create some memory */
    frame_low_arch_init();

    if (config.cpu_active == 1) {
        frame_mark_unavailable(ADDR2PFN(KA2PA(config.base)),
            SIZE2FRAMES(config.kernel_size));
        frame_mark_unavailable(ADDR2PFN(KA2PA(config.stack_base)),
            SIZE2FRAMES(config.stack_size));

        for (size_t i = 0; i < init.cnt; i++)
            frame_mark_unavailable(ADDR2PFN(init.tasks[i].paddr),
                SIZE2FRAMES(init.tasks[i].size));

        if (ballocs.size)
            frame_mark_unavailable(ADDR2PFN(KA2PA(ballocs.base)),
                SIZE2FRAMES(ballocs.size));

        /*
         * Blacklist first frame, as allocating NULL would
         * fail in some places
         */
        frame_mark_unavailable(0, 1);
    }

    frame_high_arch_init();
}
/** Perform sparc64-specific initialization before main_bsp() is called. */
void arch_pre_main(bootinfo_t *bootinfo)
{
    /* Copy init task info. */
    init.cnt = min3(bootinfo->taskmap.cnt, TASKMAP_MAX_RECORDS,
        CONFIG_INIT_TASKS);

    size_t i;
    for (i = 0; i < init.cnt; i++) {
        init.tasks[i].paddr = KA2PA(bootinfo->taskmap.tasks[i].addr);
        init.tasks[i].size = bootinfo->taskmap.tasks[i].size;
        str_cpy(init.tasks[i].name, CONFIG_TASK_NAME_BUFLEN,
            bootinfo->taskmap.tasks[i].name);
    }

    /* Copy physical memory map. */
    memmap.total = bootinfo->memmap.total;
    memmap.cnt = min(bootinfo->memmap.cnt, MEMMAP_MAX_RECORDS);
    for (i = 0; i < memmap.cnt; i++) {
        memmap.zones[i].start = bootinfo->memmap.zones[i].start;
        memmap.zones[i].size = bootinfo->memmap.zones[i].size;
    }

    /* Copy boot allocations info. */
    ballocs.base = bootinfo->ballocs.base;
    ballocs.size = bootinfo->ballocs.size;

    ofw_tree_init(bootinfo->ofw_root);
}
/** Create PTL0.
 *
 * PTL0 of 4-level page table will be created for each address space.
 *
 * @param flags Flags can specify whether ptl0 is for the kernel address space.
 *
 * @return New PTL0.
 *
 */
pte_t *ptl0_create(unsigned int flags)
{
    pte_t *dst_ptl0 = (pte_t *) PA2KA(frame_alloc(PTL0_FRAMES,
        FRAME_LOWMEM, PTL0_SIZE - 1));

    if (flags & FLAG_AS_KERNEL)
        memsetb(dst_ptl0, PTL0_SIZE, 0);
    else {
        /*
         * Copy the kernel address space portion to new PTL0.
         */
        mutex_lock(&AS_KERNEL->lock);

        pte_t *src_ptl0 =
            (pte_t *) PA2KA((uintptr_t) AS_KERNEL->genarch.page_table);

        uintptr_t src =
            (uintptr_t) &src_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];
        uintptr_t dst =
            (uintptr_t) &dst_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];

        memsetb(dst_ptl0, PTL0_SIZE, 0);
        memcpy((void *) dst, (void *) src,
            PTL0_SIZE - (src - (uintptr_t) src_ptl0));

        mutex_unlock(&AS_KERNEL->lock);
    }

    return (pte_t *) KA2PA((uintptr_t) dst_ptl0);
}
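/*
 * Hypothetical usage sketch (example_as_constructor is not part of the source
 * above): ptl0_create() hands back the *physical* address of the new table,
 * so the returned value can be stored where the paging hardware expects it,
 * while any CPU access to the table must go through PA2KA() again, exactly as
 * done for AS_KERNEL->genarch.page_table inside ptl0_create() itself.
 */
static int example_as_constructor(as_t *as, unsigned int flags)
{
    /* Remember the physical address of the fresh PTL0 for this space. */
    as->genarch.page_table = ptl0_create(flags);
    return 0;
}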
/* Allocate a slab_cache_t descriptor from the dedicated cache-of-caches
   (slab_cache_cache). */
static slab_cache_t *slab_cache_alloc(void)
{
    slab_t *slab;
    void *obj;
    u32_t *p;

    DBG("%s\n", __FUNCTION__);

    if (list_empty(&slab_cache_cache.partial_slabs)) {
        // spinlock_unlock(&cache->slablock);
        // slab = slab_create();

        void *data;
        unsigned int i;

        data = (void *) PA2KA(alloc_page());
        if (!data) {
            return NULL;
        }

        /* The slab descriptor lives at the end of the page itself. */
        slab = (slab_t *) ((u32_t) data + PAGE_SIZE - sizeof(slab_t));

        /* Fill in slab structures */
        frame_set_parent(ADDR2PFN(KA2PA(data)), slab);

        slab->start = data;
        slab->available = slab_cache_cache.objects;
        slab->nextavail = (void *) data;
        slab->cache = &slab_cache_cache;

        /* Thread the free list through the first word of each object. */
        for (i = 0, p = (u32_t *) slab->start;
            i < slab_cache_cache.objects; i++) {
            *p = (u32_t) p + slab_cache_cache.size;
            p = (u32_t *) ((u32_t) p + slab_cache_cache.size);
        }

        atomic_inc(&slab_cache_cache.allocated_slabs);
        // spinlock_lock(&cache->slablock);
    } else {
        slab = list_get_instance(slab_cache_cache.partial_slabs.next,
            slab_t, link);
        list_remove(&slab->link);
    }

    obj = slab->nextavail;
    slab->nextavail = *((void **) obj);
    slab->available--;

    if (!slab->available)
        list_prepend(&slab->link, &slab_cache_cache.full_slabs);
    else
        list_prepend(&slab->link, &slab_cache_cache.partial_slabs);

    // spinlock_unlock(&cache->slablock);

    return (slab_cache_t *) obj;
}
/**
 * Deallocate space associated with slab
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
    frame_free(KA2PA(slab->start));

    if (!(cache->flags & SLAB_CACHE_SLINSIDE))
        slab_free(slab_cache, slab);

    // atomic_dec(&cache->allocated_slabs);

    return 1 << cache->order;
}
int as_destructor_arch(as_t *as)
{
#ifdef CONFIG_TSB
    size_t frames = SIZE2FRAMES((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
        sizeof(tsb_entry_t));
    frame_free(KA2PA((uintptr_t) as->arch.itsb), frames);

    return frames;
#else
    return 0;
#endif
}
/** Create memory zones according to information stored in memmap.
 *
 * Walk the memory map and create frame zones according to it.
 */
static void frame_common_arch_init(bool low)
{
    unsigned int i;

    for (i = 0; i < memmap.cnt; i++) {
        uintptr_t base;
        size_t size;

        /*
         * The memmap is created by HelenOS boot loader.
         * It already contains no holes.
         */

        /* To be safe, make the available zone possibly smaller */
        base = ALIGN_UP((uintptr_t) memmap.zones[i].start, FRAME_SIZE);
        size = ALIGN_DOWN(memmap.zones[i].size -
            (base - ((uintptr_t) memmap.zones[i].start)), FRAME_SIZE);

        if (!frame_adjust_zone_bounds(low, &base, &size))
            continue;

        pfn_t confdata;
        pfn_t pfn = ADDR2PFN(base);
        size_t count = SIZE2FRAMES(size);

        if (low) {
            confdata = pfn;
            if (confdata == ADDR2PFN(KA2PA(PFN2ADDR(0))))
                confdata = ADDR2PFN(KA2PA(PFN2ADDR(2)));

            zone_create(pfn, count, confdata,
                ZONE_AVAILABLE | ZONE_LOWMEM);
        } else {
            confdata = zone_external_conf_alloc(count);

            if (confdata != 0)
                zone_create(pfn, count, confdata,
                    ZONE_AVAILABLE | ZONE_HIGHMEM);
        }
    }
}
/**
 * Allocate frames for slab space and initialize
 *
 */
static slab_t *slab_space_alloc(slab_cache_t *cache, int flags)
{
    void *data;
    slab_t *slab;
    size_t fsize;
    unsigned int i;
    u32_t p;

    DBG("%s order %d\n", __FUNCTION__, cache->order);

    data = (void *) PA2KA(frame_alloc(1 << cache->order));
    if (!data) {
        return NULL;
    }

    slab = (slab_t *) slab_create();
    if (!slab) {
        frame_free(KA2PA(data));
        return NULL;
    }

    /* Fill in slab structures */
    for (i = 0; i < ((u32_t) 1 << cache->order); i++)
        frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab);

    slab->start = data;
    slab->available = cache->objects;
    slab->nextavail = (void *) data;
    slab->cache = cache;

    for (i = 0, p = (u32_t) slab->start; i < cache->objects; i++) {
        *(addr_t *) p = p + cache->size;
        p = p + cache->size;
    }

    atomic_inc(&cache->allocated_slabs);

    return slab;
}
thr_t *__fastcall create_systhread(addr_t entry_ptr)
{
    static count_t thr_cnt = 0;
    static count_t slot = 1;

    thr_t *thr;
    addr_t thr_stack;

    DBG("%s\n", __FUNCTION__);

    thr = (thr_t *) slab_alloc(thr_slab, 0);
    thr_stack = PA2KA(frame_alloc(2));

    thr_cnt++;

    thr->eax = (thr_cnt << 8) | slot;
    thr->tid = (thr_cnt << 8) | slot;
    thr->slot = slot;
    slot++;

    thr->pdir = KA2PA(&sys_pdbr);

    thr->ebx = 0;
    thr->edi = 0;
    thr->esi = 0;
    thr->ebp = 0;
    thr->edx = 0;
    thr->ecx = 0;

    thr->cs = sel_srv_code;
    thr->eflags = EFL_IOPL1;
    thr->esp = thr_stack + 8192;
    thr->ss = sel_srv_stack;

    thr->thr_flags = 0;

    thr->ticks_left = 8;
    thr->quantum_size = 8;

    thr->eip = entry_ptr;

    // lock_enqueue(thr_ptr);   /* add to scheduling queues */

    return thr;
}
void frame_low_arch_init(void)
{
    if (config.cpu_active > 1)
        return;

    frame_common_arch_init(true);

    /*
     * On sparc64, physical memory can start on a non-zero address.
     * The generic frame_init() only marks PFN 0 as not free, so we
     * must mark the physically first frame not free explicitly
     * here, no matter what its address is.
     */
    frame_mark_unavailable(ADDR2PFN(KA2PA(PFN2ADDR(0))), 1);

    /* PA2KA will work only on low memory. */
    end_of_identity = PA2KA(config.physmem_end - FRAME_SIZE) + PAGE_SIZE;
}
/** Adjust bounds of physical memory region according to low/high memory split.
 *
 * @param low[in]      If true, the adjustment is performed to make the region
 *                     fit in the low memory. Otherwise the adjustment is
 *                     performed to make the region fit in the high memory.
 * @param basep[inout] Pointer to a variable which contains the region's base
 *                     address and which may receive the adjusted base address.
 * @param sizep[inout] Pointer to a variable which contains the region's size
 *                     and which may receive the adjusted size.
 *
 * @return True if the region still exists even after the adjustment.
 * @return False otherwise.
 *
 */
bool frame_adjust_zone_bounds(bool low, uintptr_t *basep, size_t *sizep)
{
    uintptr_t limit = KA2PA(config.identity_base) + config.identity_size;

    if (low) {
        if (*basep > limit)
            return false;

        if (*basep + *sizep > limit)
            *sizep = limit - *basep;
    } else {
        if (*basep + *sizep <= limit)
            return false;

        if (*basep <= limit) {
            *sizep -= limit - *basep;
            *basep = limit;
        }
    }

    return true;
}
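/*
 * Hypothetical illustration (example_split_region is not part of the source
 * above): a region straddling the low/high split can be clipped into its two
 * halves by calling frame_adjust_zone_bounds() twice on private copies of the
 * bounds, which is essentially what frame_common_arch_init() does in its low
 * and high passes.
 */
static void example_split_region(uintptr_t base, size_t size)
{
    uintptr_t low_base = base;
    size_t low_size = size;

    /* Clip against low (identity-mapped) memory. */
    if (frame_adjust_zone_bounds(true, &low_base, &low_size)) {
        /* [low_base, low_base + low_size) now lies below the limit. */
    }

    uintptr_t high_base = base;
    size_t high_size = size;

    /* Clip against high memory. */
    if (frame_adjust_zone_bounds(false, &high_base, &high_size)) {
        /* [high_base, high_base + high_size) now lies above the limit. */
    }
}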
void page_arch_init(void)
{
    int flags = PAGE_CACHEABLE | PAGE_EXEC;
    page_mapping_operations = &pt_mapping_operations;

    page_table_lock(AS_KERNEL, true);

    /* Kernel identity mapping */
    // FIXME:
    // We need to consider the possibility that
    // identity_base > identity_size and physmem_end.
    // This might lead to overflow if identity_size is too big.
    for (uintptr_t cur = PHYSMEM_START_ADDR;
        cur < min(KA2PA(config.identity_base) + config.identity_size,
        config.physmem_end);
        cur += FRAME_SIZE)
        page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);

    page_table_unlock(AS_KERNEL, true);

    as_switch(NULL, AS_KERNEL);

    /* Switch MMU to new context table */
    asi_u32_write(ASI_MMUREGS, MMU_CONTEXT_TABLE,
        KA2PA(as_context_table) >> 4);
}
/** Create and add zone to system.
 *
 * @param start     First frame number (absolute).
 * @param count     Size of zone in frames.
 * @param confframe Where configuration frames are supposed to be.
 *                  Automatically checks that we will not disturb the
 *                  kernel and possibly init. If confframe is given
 *                  _outside_ this zone, it is expected that the area is
 *                  already marked BUSY and big enough to contain
 *                  zone_conf_size() amount of data. If the confframe is
 *                  inside the area, the zone free frame information is
 *                  modified not to include it.
 * @param flags     Zone flags.
 *
 * @return Zone number or -1 on error.
 *
 */
size_t zone_create(pfn_t start, size_t count, pfn_t confframe,
    zone_flags_t flags)
{
    irq_spinlock_lock(&zones.lock, true);

    if (flags & ZONE_AVAILABLE) {  /* Create available zone */
        /*
         * Theoretically we could have NULL here, practically make sure
         * nobody tries to do that. If some platform requires, remove
         * the assert.
         */
        ASSERT(confframe != ADDR2PFN((uintptr_t) NULL));

        /* Update the known end of physical memory. */
        config.physmem_end = max(config.physmem_end,
            PFN2ADDR(start + count));

        /*
         * If confframe is supposed to be inside our zone, then make sure
         * it does not span kernel & init.
         */
        size_t confcount = SIZE2FRAMES(zone_conf_size(count));

        if ((confframe >= start) && (confframe < start + count)) {
            for (; confframe < start + count; confframe++) {
                uintptr_t addr = PFN2ADDR(confframe);
                if (overlaps(addr, PFN2ADDR(confcount),
                    KA2PA(config.base), config.kernel_size))
                    continue;

                if (overlaps(addr, PFN2ADDR(confcount),
                    KA2PA(config.stack_base), config.stack_size))
                    continue;

                bool overlap = false;
                for (size_t i = 0; i < init.cnt; i++) {
                    if (overlaps(addr, PFN2ADDR(confcount),
                        init.tasks[i].paddr, init.tasks[i].size)) {
                        overlap = true;
                        break;
                    }
                }

                if (overlap)
                    continue;

                break;
            }

            if (confframe >= start + count)
                panic("Cannot find configuration data for zone.");
        }

        size_t znum = zones_insert_zone(start, count, flags);
        if (znum == (size_t) -1) {
            irq_spinlock_unlock(&zones.lock, true);
            return (size_t) -1;
        }

        void *confdata = (void *) PA2KA(PFN2ADDR(confframe));
        zone_construct(&zones.info[znum], start, count, flags, confdata);

        /* If confdata is in the zone, mark it unavailable */
        if ((confframe >= start) && (confframe < start + count)) {
            for (size_t i = confframe; i < confframe + confcount; i++)
                zone_mark_unavailable(&zones.info[znum],
                    i - zones.info[znum].base);
        }

        irq_spinlock_unlock(&zones.lock, true);
        return znum;
    }

    /* Non-available zone */
    size_t znum = zones_insert_zone(start, count, flags);
    if (znum == (size_t) -1) {
        irq_spinlock_unlock(&zones.lock, true);
        return (size_t) -1;
    }

    zone_construct(&zones.info[znum], start, count, flags, NULL);

    irq_spinlock_unlock(&zones.lock, true);
    return znum;
}
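/*
 * Hypothetical usage sketch (example_register_bank, paddr and size are
 * illustrative and not part of the source above): a platform with a single
 * frame-aligned memory bank could register it as an available low-memory zone
 * and let the zone host its own configuration frames, mirroring what
 * frame_common_arch_init() does for each sparc64 memmap entry.
 */
static void example_register_bank(uintptr_t paddr, size_t size)
{
    pfn_t pfn = ADDR2PFN(paddr);
    size_t count = SIZE2FRAMES(size);

    /* Offer the zone's own first frames as configuration frames. */
    size_t znum = zone_create(pfn, count, pfn,
        ZONE_AVAILABLE | ZONE_LOWMEM);
    if (znum == (size_t) -1)
        panic("Cannot register physical memory bank.");
}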
/** Merge zones z1 and z2.
 *
 * The merged zones must be 2 zones with no zone existing in between
 * (which means that z2 = z1 + 1). Both zones must be available zones
 * with the same flags.
 *
 * When you create a new zone, the frame allocator configuration does
 * not have to be 2^order in size. Once the allocator is running this is
 * no longer possible, as the merged configuration data occupies more
 * space :-/
 *
 */
bool zone_merge(size_t z1, size_t z2)
{
    irq_spinlock_lock(&zones.lock, true);

    bool ret = true;

    /*
     * We can join only 2 zones with none existing in between,
     * the zones have to be available and with the same
     * set of flags.
     */
    if ((z1 >= zones.count) || (z2 >= zones.count) || (z2 - z1 != 1) ||
        (zones.info[z1].flags != zones.info[z2].flags)) {
        ret = false;
        goto errout;
    }

    pfn_t cframes = SIZE2FRAMES(zone_conf_size(
        zones.info[z2].base - zones.info[z1].base + zones.info[z2].count));

    /* Allocate merged zone data inside one of the zones */
    pfn_t pfn;
    if (zone_can_alloc(&zones.info[z1], cframes, 0)) {
        pfn = zones.info[z1].base +
            zone_frame_alloc(&zones.info[z1], cframes, 0);
    } else if (zone_can_alloc(&zones.info[z2], cframes, 0)) {
        pfn = zones.info[z2].base +
            zone_frame_alloc(&zones.info[z2], cframes, 0);
    } else {
        ret = false;
        goto errout;
    }

    /* Preserve original data from z1 */
    zone_t old_z1 = zones.info[z1];

    /* Do zone merging */
    zone_merge_internal(z1, z2, &old_z1, (void *) PA2KA(PFN2ADDR(pfn)));

    /* Subtract zone information from busy frames */
    zones.info[z1].busy_count -= cframes;

    /* Free old zone information */
    return_config_frames(z1,
        ADDR2PFN(KA2PA((uintptr_t) old_z1.frames)), old_z1.count);
    return_config_frames(z1,
        ADDR2PFN(KA2PA((uintptr_t) zones.info[z2].frames)),
        zones.info[z2].count);

    /* Move zones down */
    for (size_t i = z2 + 1; i < zones.count; i++)
        zones.info[i - 1] = zones.info[i];

    zones.count--;

errout:
    irq_spinlock_unlock(&zones.lock, true);

    return ret;
}
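/*
 * Minimal sketch (example_merge_all_zones is not taken from the source above):
 * repeatedly merging each zone with its right neighbour coalesces every
 * mergeable pair; zone_merge() itself rejects pairs that are not adjacent or
 * that differ in flags, so the loop simply advances past them.
 */
static void example_merge_all_zones(void)
{
    size_t i = 1;

    while (i < zones.count) {
        /* On success zones.count drops and zone i becomes the old i + 1. */
        if (!zone_merge(i - 1, i))
            i++;
    }
}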
/** Map object to slab structure */
static slab_t *obj2slab(void *obj)
{
    return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)));
}
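/*
 * Hypothetical illustration (example_object_release is not part of the source
 * above; it ignores locking and cache accounting): because every frame of a
 * slab records the slab as its parent, returning an object to its free list
 * needs nothing but the object's address.
 */
static void example_object_release(void *obj)
{
    slab_t *slab = obj2slab(obj);

    /* Link the object back at the head of the slab's free list. */
    *((void **) obj) = slab->nextavail;
    slab->nextavail = obj;
    slab->available++;
}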
/** Perform sparc64-specific tasks when an address space becomes active on the
 * processor.
 *
 * Install ASID and map TSBs.
 *
 * @param as Address space.
 */
void as_install_arch(as_t *as)
{
    tlb_context_reg_t ctx;

    /*
     * Note that we don't and may not lock the address space. That's ok
     * since we only read members that are currently read-only.
     *
     * Moreover, the as->asid is protected by asidlock, which is being held.
     */

    /*
     * Write ASID to secondary context register. The primary context
     * register has to be set from TL>0 so it will be filled from the
     * secondary context register from the TL=1 code just before switch to
     * userspace.
     */
    ctx.v = 0;
    ctx.context = as->asid;
    mmu_secondary_context_write(ctx.v);

#ifdef CONFIG_TSB
    uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);

    ASSERT(as->arch.itsb && as->arch.dtsb);

    uintptr_t tsb = (uintptr_t) as->arch.itsb;

    if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
        /*
         * TSBs were allocated from memory not covered
         * by the locked 4M kernel DTLB entry. We need
         * to map both TSBs explicitly.
         */
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
        dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true);
    }

    /*
     * Setup TSB Base registers.
     */
    tsb_base_reg_t tsb_base;
    tsb_base.value = 0;
    tsb_base.size = TSB_SIZE;
    tsb_base.split = 0;

    tsb_base.base = ((uintptr_t) as->arch.itsb) >> MMU_PAGE_WIDTH;
    itsb_base_write(tsb_base.value);
    tsb_base.base = ((uintptr_t) as->arch.dtsb) >> MMU_PAGE_WIDTH;
    dtsb_base_write(tsb_base.value);

#if defined (US3)
    /*
     * Clear the extension registers.
     * In Einherjar, primary and secondary context registers contain
     * equal values and kernel misses (context 0, ie. the nucleus context)
     * are excluded from the TSB miss handler, so it makes no sense
     * to have separate TSBs for primary, secondary and nucleus contexts.
     * Clearing the extension registers will ensure that the value of the
     * TSB Base register will be used as an address of TSB, making the code
     * compatible with the US port.
     */
    itsb_primary_extension_write(0);
    itsb_nucleus_extension_write(0);
    dtsb_primary_extension_write(0);
    dtsb_secondary_extension_write(0);
    dtsb_nucleus_extension_write(0);
#endif
#endif
}