Example No. 1
/** Initialize physical memory management.
 *
 */
void frame_init(void)
{
	if (config.cpu_active == 1) {
		zones.count = 0;
		irq_spinlock_initialize(&zones.lock, "frame.zones.lock");
		mutex_initialize(&mem_avail_mtx, MUTEX_ACTIVE);
		condvar_initialize(&mem_avail_cv);
	}
	
	/* Tell the architecture to create some memory */
	frame_low_arch_init();
	
	if (config.cpu_active == 1) {
		frame_mark_unavailable(ADDR2PFN(KA2PA(config.base)),
		    SIZE2FRAMES(config.kernel_size));
		frame_mark_unavailable(ADDR2PFN(KA2PA(config.stack_base)),
		    SIZE2FRAMES(config.stack_size));
		
		for (size_t i = 0; i < init.cnt; i++)
			frame_mark_unavailable(ADDR2PFN(init.tasks[i].paddr),
			    SIZE2FRAMES(init.tasks[i].size));
		
		if (ballocs.size)
			frame_mark_unavailable(ADDR2PFN(KA2PA(ballocs.base)),
			    SIZE2FRAMES(ballocs.size));
		
		/*
		 * Blacklist first frame, as allocating NULL would
		 * fail in some places
		 */
		frame_mark_unavailable(0, 1);
	}
	
	frame_high_arch_init();
}
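All of these examples lean on the same handful of conversion helpers: SIZE2FRAMES() turns a byte count into a number of physical frames, ADDR2PFN()/PFN2ADDR() convert between physical addresses and frame numbers, and KA2PA()/PA2KA() translate between the kernel identity mapping and physical addresses. A minimal sketch of the shapes assumed throughout this listing (illustration only; the real definitions are per-architecture and live in the kernel headers):

/* Illustrative shapes only; the real macros are defined per architecture. */
#define FRAME_WIDTH  12                         /* example: 4 KiB frames */
#define FRAME_SIZE   (1 << FRAME_WIDTH)

typedef uintptr_t pfn_t;                        /* physical frame number */

/* Byte count -> frame count, rounded up. */
#define SIZE2FRAMES(size) \
	(((size) == 0) ? 0 : ((((size) - 1) >> FRAME_WIDTH) + 1))

/* Physical address <-> physical frame number. */
#define ADDR2PFN(addr)  ((pfn_t) ((addr) >> FRAME_WIDTH))
#define PFN2ADDR(pfn)   ((uintptr_t) (pfn) << FRAME_WIDTH)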
Example No. 2
static void frame_common_arch_init(bool low)
{
	uintptr_t base;
	size_t size;

	machine_get_memory_extents(&base, &size);
	base = ALIGN_UP(base, FRAME_SIZE);
	size = ALIGN_DOWN(size, FRAME_SIZE);
	
	if (!frame_adjust_zone_bounds(low, &base, &size))
		return;

	if (low) {
		zone_create(ADDR2PFN(base), SIZE2FRAMES(size),
		    BOOT_PAGE_TABLE_START_FRAME +
		    BOOT_PAGE_TABLE_SIZE_IN_FRAMES,
		    ZONE_AVAILABLE | ZONE_LOWMEM);
	} else {
		pfn_t conf = zone_external_conf_alloc(SIZE2FRAMES(size));
		if (conf != 0)
			zone_create(ADDR2PFN(base), SIZE2FRAMES(size), conf,
			    ZONE_AVAILABLE | ZONE_HIGHMEM);
	}
}
Example No. 3
/** Allocate external configuration frames from low memory. */
pfn_t zone_external_conf_alloc(size_t count)
{
	size_t frames = SIZE2FRAMES(zone_conf_size(count));
	
	return ADDR2PFN((uintptr_t)
	    frame_alloc(frames, FRAME_LOWMEM | FRAME_ATOMIC, 0));
}
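Because FRAME_ATOMIC is passed, frame_alloc() fails and returns 0 instead of waiting when low memory cannot supply the configuration frames, so callers must check the result. A minimal caller-side sketch of the pattern used in Examples No. 2, 4 and 12 (base and size as in those examples):

/* A zero pfn means no low memory was available for the configuration
 * frames; in that case the high-memory zone is simply not created. */
pfn_t conf = zone_external_conf_alloc(SIZE2FRAMES(size));
if (conf != 0)
	zone_create(ADDR2PFN(base), SIZE2FRAMES(size), conf,
	    ZONE_AVAILABLE | ZONE_HIGHMEM);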
Example No. 4
static void frame_common_arch_init(bool low)
{
	unsigned int i;

	for (i = 0; i < bootinfo->memmap_items; i++) {
		if (bootinfo->memmap[i].type != MEMMAP_FREE_MEM)
			continue;

		uintptr_t base = bootinfo->memmap[i].base;
		size_t size = bootinfo->memmap[i].size;
		uintptr_t abase = ALIGN_UP(base, FRAME_SIZE);

		if (size > FRAME_SIZE)
			size -= abase - base;

		if (!frame_adjust_zone_bounds(low, &abase, &size))
			continue;

		if (size > MIN_ZONE_SIZE) {
			pfn_t pfn = ADDR2PFN(abase);
			size_t count = SIZE2FRAMES(size);

			if (low) {
				zone_create(pfn, count, max(MINCONF, pfn),
				    ZONE_AVAILABLE | ZONE_LOWMEM);
			} else {
				pfn_t conf = zone_external_conf_alloc(count);
				if (conf != 0)
					zone_create(pfn, count, conf,
					    ZONE_AVAILABLE | ZONE_HIGHMEM);
			}
		}
	}
}
Example No. 5
/** Initialize allocated memory as a slab cache */
static void _slab_cache_create(slab_cache_t *cache, size_t size, size_t align,
    int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj),
    int flags)
{
	int pages;
	// ipl_t ipl;
	
	// memsetb((uintptr_t) cache, sizeof(*cache), 0);
	// cache->name = name;
	
	// if (align < sizeof(unative_t))
	//     align = sizeof(unative_t);
	// size = ALIGN_UP(size, align);
	
	cache->size = size;
	
	// cache->constructor = constructor;
	// cache->destructor = destructor;
	cache->flags = flags;
	
	list_initialize(&cache->full_slabs);
	list_initialize(&cache->partial_slabs);
	list_initialize(&cache->magazines);
	// spinlock_initialize(&cache->slablock, "slab_lock");
	// spinlock_initialize(&cache->maglock, "slab_maglock");
	// if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
	//     make_magcache(cache);
	
	/* Compute slab sizes, object counts in slabs etc. */
	
	/* Minimum slab order */
	pages = SIZE2FRAMES(cache->size);
	/* We need the 2^order >= pages */
	if (pages <= 1)
		cache->order = 0;
	else
		cache->order = fnzb(pages - 1) + 1;
	
	while (badness(cache) > SLAB_MAX_BADNESS(cache))
		cache->order += 1;
	
	cache->objects = comp_objects(cache);
	
	/* Add cache to cache list */
	// ipl = interrupts_disable();
	// spinlock_lock(&slab_cache_lock);
	
	list_append(&cache->link, &slab_cache_list);
	
	// spinlock_unlock(&slab_cache_lock);
	// interrupts_restore(ipl);
}
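The order computation above is just a ceiling log2: the smallest order such that 2^order frames cover the slab. A standalone sketch of that step, assuming fnzb() returns the bit index of the most significant set bit (as the code above uses it):

/* Sketch: smallest 'order' with (1 << order) >= pages. */
static unsigned int min_slab_order(size_t pages)
{
	if (pages <= 1)
		return 0;
	
	/* e.g. pages == 5: fnzb(4) == 2, so order == 3 and 2^3 == 8 >= 5 */
	return fnzb(pages - 1) + 1;
}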
Example No. 6
void frame_low_arch_init(void)
{
	if (config.cpu_active > 1)
		return;
	
	frame_common_arch_init(true);
	
	/*
	 * Blacklist ROM regions.
	 */
	frame_mark_unavailable(ADDR2PFN(ROM_BASE),
	    SIZE2FRAMES(ROM_SIZE));

	frame_mark_unavailable(ADDR2PFN(KERNEL_RESERVED_AREA_BASE),
	    SIZE2FRAMES(KERNEL_RESERVED_AREA_SIZE));

	/* PA2KA will work only on low-memory. */
	end_of_identity = PA2KA(config.physmem_end - FRAME_SIZE) + PAGE_SIZE;
}
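The matching high-memory hook on this kind of platform is typically just the same CPU guard plus a call with low == false. A hedged sketch modeled on the example above (not taken from this listing; a real port may blacklist further regions):

/* Sketch of the high-memory counterpart to frame_low_arch_init() above. */
void frame_high_arch_init(void)
{
	if (config.cpu_active > 1)
		return;
	
	frame_common_arch_init(false);
}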
Example No. 7
uintptr_t vhpt_set_up(void)
{
	uintptr_t vhpt_frame =
	    frame_alloc(SIZE2FRAMES(VHPT_SIZE), FRAME_ATOMIC, 0);
	if (!vhpt_frame)
		panic("Kernel configured with VHPT but no memory for table.");
	
	vhpt_base = (vhpt_entry_t *) PA2KA(vhpt_frame);
	vhpt_invalidate_all();
	return (uintptr_t) vhpt_base;
}
Example No. 8
int as_destructor_arch(as_t *as)
{
#ifdef CONFIG_TSB
	size_t frames = SIZE2FRAMES((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
	    sizeof(tsb_entry_t));
	frame_free(KA2PA((uintptr_t) as->arch.itsb), frames);
	
	return frames;
#else
	return 0;
#endif
}
Example No. 9
/** Return old configuration frames into the zone.
 *
 * We have two cases:
 * - The configuration data is outside the zone
 *   -> do nothing (perhaps call frame_free?)
 * - The configuration data was created by zone_create
 *   or updated by reduce_region -> free every frame
 *
 * @param znum  The actual zone where freeing should occur.
 * @param pfn   Old zone configuration frame.
 * @param count Old zone frame count.
 *
 */
NO_TRACE static void return_config_frames(size_t znum, pfn_t pfn, size_t count)
{
	ASSERT(zones.info[znum].flags & ZONE_AVAILABLE);
	
	size_t cframes = SIZE2FRAMES(zone_conf_size(count));
	
	if ((pfn < zones.info[znum].base) ||
	    (pfn >= zones.info[znum].base + zones.info[znum].count))
		return;
	
	for (size_t i = 0; i < cframes; i++)
		(void) zone_frame_free(&zones.info[znum],
		    pfn - zones.info[znum].base + i);
}
Example No. 10
/** RAM disk initialization routine
 *
 * The information about the RAM disk is provided as sysinfo
 * values to the uspace tasks.
 *
 */
void init_rd(void *data, size_t size)
{
    uintptr_t base = (uintptr_t) data;
    ASSERT((base % FRAME_SIZE) == 0);

    rd_parea.pbase = base;
    rd_parea.frames = SIZE2FRAMES(size);
    rd_parea.unpriv = false;
    rd_parea.mapped = false;
    ddi_parea_register(&rd_parea);

    sysinfo_set_item_val("rd", NULL, true);
    sysinfo_set_item_val("rd.size", NULL, size);
    sysinfo_set_item_val("rd.address.physical", NULL, (sysarg_t) base);

    printf("RAM disk at %p (size %zu bytes)\n", (void *) base, size);
}
Example No. 11
int as_constructor_arch(as_t *as, unsigned int flags)
{
#ifdef CONFIG_TSB
	uintptr_t tsb_phys =
	    frame_alloc(SIZE2FRAMES((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
	    sizeof(tsb_entry_t)), flags, 0);
	if (!tsb_phys)
		return -1;
	
	tsb_entry_t *tsb = (tsb_entry_t *) PA2KA(tsb_phys);
	
	as->arch.itsb = tsb;
	as->arch.dtsb = tsb + ITSB_ENTRY_COUNT;
	
	memsetb(as->arch.itsb, (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
	    sizeof(tsb_entry_t), 0);
#endif
	
	return 0;
}
Example No. 12
/** Create memory zones according to information stored in memmap.
 *
 * Walk the memory map and create frame zones according to it.
 */
static void frame_common_arch_init(bool low)
{
	unsigned int i;
	
	for (i = 0; i < memmap.cnt; i++) {
		uintptr_t base;
		size_t size;

		/*
		 * The memmap is created by the HelenOS boot loader
		 * and contains no holes.
		 */

		/* To be safe, make the available zone possibly smaller */
		base = ALIGN_UP((uintptr_t) memmap.zones[i].start, FRAME_SIZE);
		size = ALIGN_DOWN(memmap.zones[i].size -
		    (base - ((uintptr_t) memmap.zones[i].start)), FRAME_SIZE);
		
		if (!frame_adjust_zone_bounds(low, &base, &size))
			continue;

		pfn_t confdata;
		pfn_t pfn = ADDR2PFN(base);
		size_t count = SIZE2FRAMES(size);

		if (low) {
			confdata = pfn;
			if (confdata == ADDR2PFN(KA2PA(PFN2ADDR(0))))
				confdata = ADDR2PFN(KA2PA(PFN2ADDR(2)));
			
			zone_create(pfn, count, confdata,
			    ZONE_AVAILABLE | ZONE_LOWMEM);
		} else {
			confdata = zone_external_conf_alloc(count);
			if (confdata != 0)
				zone_create(pfn, count, confdata,
				    ZONE_AVAILABLE | ZONE_HIGHMEM);
		}
	}
}
Example No. 13
/** Create and add zone to system.
 *
 * @param start     First frame number (absolute).
 * @param count     Size of zone in frames.
 * @param confframe Where configuration frames are supposed to be.
 *                  Automatically checks that we will not disturb the
 *                  kernel and possibly init. If confframe is given
 *                  _outside_ this zone, it is expected that the area is
 *                  already marked BUSY and big enough to contain
 *                  zone_conf_size() amount of data. If the confframe is
 *                  inside the area, the zone free frame information is
 *                  modified not to include it.
 *
 * @return Zone number or -1 on error.
 *
 */
size_t zone_create(pfn_t start, size_t count, pfn_t confframe,
    zone_flags_t flags)
{
	irq_spinlock_lock(&zones.lock, true);
	
	if (flags & ZONE_AVAILABLE) {  /* Create available zone */
		/*
		 * Theoretically we could have NULL here, but in practice we
		 * make sure nobody tries to do that. If some platform requires
		 * it, remove the assert.
		 */
		ASSERT(confframe != ADDR2PFN((uintptr_t) NULL));
		
		/* Update the known end of physical memory. */
		config.physmem_end = max(config.physmem_end, PFN2ADDR(start + count));
		
		/*
		 * If confframe is supposed to be inside our zone, then make sure
		 * it does not span kernel & init
		 */
		size_t confcount = SIZE2FRAMES(zone_conf_size(count));
		
		if ((confframe >= start) && (confframe < start + count)) {
			for (; confframe < start + count; confframe++) {
				uintptr_t addr = PFN2ADDR(confframe);
				if (overlaps(addr, PFN2ADDR(confcount),
				    KA2PA(config.base), config.kernel_size))
					continue;
				
				if (overlaps(addr, PFN2ADDR(confcount),
				    KA2PA(config.stack_base), config.stack_size))
					continue;
				
				bool overlap = false;
				for (size_t i = 0; i < init.cnt; i++) {
					if (overlaps(addr, PFN2ADDR(confcount),
					    init.tasks[i].paddr,
					    init.tasks[i].size)) {
						overlap = true;
						break;
					}
				}
				
				if (overlap)
					continue;
				
				break;
			}
			
			if (confframe >= start + count)
				panic("Cannot find configuration data for zone.");
		}
		
		size_t znum = zones_insert_zone(start, count, flags);
		if (znum == (size_t) -1) {
			irq_spinlock_unlock(&zones.lock, true);
			return (size_t) -1;
		}
		
		void *confdata = (void *) PA2KA(PFN2ADDR(confframe));
		zone_construct(&zones.info[znum], start, count, flags, confdata);
		
		/* If confdata in zone, mark as unavailable */
		if ((confframe >= start) && (confframe < start + count)) {
			for (size_t i = confframe; i < confframe + confcount; i++)
				zone_mark_unavailable(&zones.info[znum],
				    i - zones.info[znum].base);
		}
		
		irq_spinlock_unlock(&zones.lock, true);
		
		return znum;
	}
	
	/* Non-available zone */
	size_t znum = zones_insert_zone(start, count, flags);
	if (znum == (size_t) -1) {
		irq_spinlock_unlock(&zones.lock, true);
		return (size_t) -1;
	}
	
	zone_construct(&zones.info[znum], start, count, flags, NULL);
	
	irq_spinlock_unlock(&zones.lock, true);
	
	return znum;
}
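The non-available branch at the end is how reserved or firmware-owned regions are registered without any configuration data. A one-line hedged sketch; fw_base, fw_size and the ZONE_FIRMWARE flag name are assumptions used for illustration only:

/* Non-available zones carry no configuration frames, so confframe is 0.
 * fw_base, fw_size and ZONE_FIRMWARE are illustrative assumptions. */
zone_create(ADDR2PFN(fw_base), SIZE2FRAMES(fw_size), 0, ZONE_FIRMWARE);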
Example No. 14
/** Merge zones z1 and z2.
 *
 * The merged zones must be 2 zones with no zone existing in between
 * (which means that z2 = z1 + 1). Both zones must be available zones
 * with the same flags.
 *
 * When a new zone is created, the frame allocator configuration does
 * not have to be 2^order in size. Once the allocator is running this is
 * no longer possible, because merged configuration data occupies more
 * space :-/
 *
 */
bool zone_merge(size_t z1, size_t z2)
{
	irq_spinlock_lock(&zones.lock, true);
	
	bool ret = true;
	
	/*
	 * We can join only two zones with no zone existing in between;
	 * the zones have to be available and have the same
	 * set of flags.
	 */
	if ((z1 >= zones.count) || (z2 >= zones.count) || (z2 - z1 != 1) ||
	    (zones.info[z1].flags != zones.info[z2].flags)) {
		ret = false;
		goto errout;
	}
	
	pfn_t cframes = SIZE2FRAMES(zone_conf_size(
	    zones.info[z2].base - zones.info[z1].base
	    + zones.info[z2].count));
	
	/* Allocate merged zone data inside one of the zones */
	pfn_t pfn;
	if (zone_can_alloc(&zones.info[z1], cframes, 0)) {
		pfn = zones.info[z1].base +
		    zone_frame_alloc(&zones.info[z1], cframes, 0);
	} else if (zone_can_alloc(&zones.info[z2], cframes, 0)) {
		pfn = zones.info[z2].base +
		    zone_frame_alloc(&zones.info[z2], cframes, 0);
	} else {
		ret = false;
		goto errout;
	}
	
	/* Preserve original data from z1 */
	zone_t old_z1 = zones.info[z1];
	
	/* Do zone merging */
	zone_merge_internal(z1, z2, &old_z1, (void *) PA2KA(PFN2ADDR(pfn)));
	
	/* Subtract zone information from busy frames */
	zones.info[z1].busy_count -= cframes;
	
	/* Free old zone information */
	return_config_frames(z1,
	    ADDR2PFN(KA2PA((uintptr_t) old_z1.frames)), old_z1.count);
	return_config_frames(z1,
	    ADDR2PFN(KA2PA((uintptr_t) zones.info[z2].frames)),
	    zones.info[z2].count);
	
	/* Move zones down */
	for (size_t i = z2 + 1; i < zones.count; i++)
		zones.info[i - 1] = zones.info[i];
	
	zones.count--;
	
errout:
	irq_spinlock_unlock(&zones.lock, true);
	
	return ret;
}
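A typical boot-time caller walks adjacent pairs and, after a successful merge, retries the same index because the zones above have shifted down by one. A sketch of that loop, following the constraints stated in the comment above (HelenOS provides a zone_merge_all() helper along these lines):

/* Sketch: try to coalesce every mergeable pair of adjacent zones. */
void merge_all_zones(void)
{
	size_t i = 1;
	
	while (i < zones.count) {
		if (!zone_merge(i - 1, i))
			i++;
	}
}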