Example no. 1
/** Initialize physical memory management.
 *
 */
void frame_init(void)
{
	if (config.cpu_active == 1) {
		zones.count = 0;
		irq_spinlock_initialize(&zones.lock, "frame.zones.lock");
		mutex_initialize(&mem_avail_mtx, MUTEX_ACTIVE);
		condvar_initialize(&mem_avail_cv);
	}
	
	/* Tell the architecture to create some memory */
	frame_low_arch_init();
	
	if (config.cpu_active == 1) {
		frame_mark_unavailable(ADDR2PFN(KA2PA(config.base)),
		    SIZE2FRAMES(config.kernel_size));
		frame_mark_unavailable(ADDR2PFN(KA2PA(config.stack_base)),
		    SIZE2FRAMES(config.stack_size));
		
		for (size_t i = 0; i < init.cnt; i++)
			frame_mark_unavailable(ADDR2PFN(init.tasks[i].paddr),
			    SIZE2FRAMES(init.tasks[i].size));
		
		if (ballocs.size)
			frame_mark_unavailable(ADDR2PFN(KA2PA(ballocs.base)),
			    SIZE2FRAMES(ballocs.size));
		
		/*
		 * Blacklist first frame, as allocating NULL would
		 * fail in some places
		 */
		frame_mark_unavailable(0, 1);
	}
	
	frame_high_arch_init();
}
Example no. 2
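/** Create the low memory or high memory zone from the machine's memory extents.
 *
 * @param low If true, create the low memory zone, otherwise the high memory zone.
 */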
static void frame_common_arch_init(bool low)
{
	uintptr_t base;
	size_t size;

	machine_get_memory_extents(&base, &size);
	base = ALIGN_UP(base, FRAME_SIZE);
	size = ALIGN_DOWN(size, FRAME_SIZE);
	
	if (!frame_adjust_zone_bounds(low, &base, &size))
		return;

	if (low) {
		zone_create(ADDR2PFN(base), SIZE2FRAMES(size),
		    BOOT_PAGE_TABLE_START_FRAME +
		    BOOT_PAGE_TABLE_SIZE_IN_FRAMES,
		    ZONE_AVAILABLE | ZONE_LOWMEM);
	} else {
		pfn_t conf = zone_external_conf_alloc(SIZE2FRAMES(size));
		if (conf != 0)
			zone_create(ADDR2PFN(base), SIZE2FRAMES(size), conf,
			    ZONE_AVAILABLE | ZONE_HIGHMEM);
	}
}
Example no. 3
/** Allocate external configuration frames from low memory.
 *
 * @return PFN of the first configuration frame, or 0 on failure.
 */
pfn_t zone_external_conf_alloc(size_t count)
{
	size_t frames = SIZE2FRAMES(zone_conf_size(count));
	
	return ADDR2PFN((uintptr_t)
	    frame_alloc(frames, FRAME_LOWMEM | FRAME_ATOMIC, 0));
}
Example no. 4
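/** Create zones for every free area in the boot memory map.
 *
 * @param low If true, create low memory zones, otherwise high memory zones.
 */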
static void frame_common_arch_init(bool low)
{
	unsigned int i;

	for (i = 0; i < bootinfo->memmap_items; i++) {
		if (bootinfo->memmap[i].type != MEMMAP_FREE_MEM)
			continue;

		uintptr_t base = bootinfo->memmap[i].base;
		size_t size = bootinfo->memmap[i].size;
		uintptr_t abase = ALIGN_UP(base, FRAME_SIZE);

		if (size > FRAME_SIZE)
			size -= abase - base;

		if (!frame_adjust_zone_bounds(low, &abase, &size))
			continue;

		if (size > MIN_ZONE_SIZE) {
			pfn_t pfn = ADDR2PFN(abase);
			size_t count = SIZE2FRAMES(size);

			if (low) {
				zone_create(pfn, count, max(MINCONF, pfn),
				    ZONE_AVAILABLE | ZONE_LOWMEM);
			} else {
				pfn_t conf = zone_external_conf_alloc(count);
				if (conf != 0)
					zone_create(pfn, count, conf,
					    ZONE_AVAILABLE | ZONE_HIGHMEM);
			}
		}
	}
}
Example no. 5
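/** Allocate a slab cache descriptor from the internal slab_cache_cache. */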
static slab_cache_t * slab_cache_alloc(void)
{
    slab_t *slab;
    void *obj;
    u32_t *p;

    DBG("%s\n", __FUNCTION__);

    if (list_empty(&slab_cache_cache.partial_slabs))
    {
//    spinlock_unlock(&cache->slablock);
//    slab = slab_create();

        void *data;
        unsigned int i;

        data = (void*)(PA2KA(alloc_page()));
        if (!data) {
            return NULL;
        }

        /* The slab header lives at the end of the page; objects occupy the rest. */
        slab = (slab_t*)((u32_t)data + PAGE_SIZE - sizeof(slab_t));

    /* Fill in slab structures */
        frame_set_parent(ADDR2PFN(KA2PA(data)), slab);

        slab->start = data;
        slab->available = slab_cache_cache.objects;
        slab->nextavail = (void*)data;
        slab->cache = &slab_cache_cache;

        /* Thread the free list through the objects: each free object stores the address of the next. */
        for (i = 0, p = (u32_t*)slab->start; i < slab_cache_cache.objects; i++)
        {
            *p = (u32_t)p + slab_cache_cache.size;
            p = (u32_t*)((u32_t)p + slab_cache_cache.size);
        }

        atomic_inc(&slab_cache_cache.allocated_slabs);
//    spinlock_lock(&cache->slablock);
    }
    else {
        slab = list_get_instance(slab_cache_cache.partial_slabs.next, slab_t, link);
        list_remove(&slab->link);
    }
    obj = slab->nextavail;
    slab->nextavail = *((void**)obj);
    slab->available--;

    if (!slab->available)
        list_prepend(&slab->link, &slab_cache_cache.full_slabs);
    else
        list_prepend(&slab->link, &slab_cache_cache.partial_slabs);

//  spinlock_unlock(&cache->slablock);

    return (slab_cache_t*)obj;
}
Example no. 6
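/** Prepare the EntryLo part of a TLB entry.
 *
 * @param lo   EntryLo structure to fill in.
 * @param g    Global bit.
 * @param v    Valid bit.
 * @param d    Dirty bit.
 * @param c    If true, mark the mapping cacheable, otherwise uncached.
 * @param addr Physical address of the frame the entry maps to.
 */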
void tlb_prepare_entry_lo(entry_lo_t *lo, bool g, bool v, bool d,
    bool c, uintptr_t addr)
{
	lo->value = 0;
	lo->g = g;
	lo->v = v;
	lo->d = d;
	lo->c = c ? PAGE_CACHEABLE_EXC_WRITE : PAGE_UNCACHED;
	lo->pfn = ADDR2PFN(addr);
}
Example no. 7
void frame_low_arch_init(void)
{
	if (config.cpu_active > 1)
		return;
	
	frame_common_arch_init(true);
	
	/*
	 * Blacklist ROM regions.
	 */
	frame_mark_unavailable(ADDR2PFN(ROM_BASE),
	    SIZE2FRAMES(ROM_SIZE));

	frame_mark_unavailable(ADDR2PFN(KERNEL_RESERVED_AREA_BASE),
	    SIZE2FRAMES(KERNEL_RESERVED_AREA_SIZE));

	/* PA2KA will work only on low-memory. */
	end_of_identity = PA2KA(config.physmem_end - FRAME_SIZE) + PAGE_SIZE;
}
Example no. 8
/** Create memory zones according to information stored in memmap.
 *
 * Walk the memory map and create frame zones according to it.
 */
static void frame_common_arch_init(bool low)
{
	unsigned int i;
	
	for (i = 0; i < memmap.cnt; i++) {
		uintptr_t base;
		size_t size;

		/*
		 * The memmap is created by the HelenOS boot loader.
		 * It contains no holes.
		 */

		/* To be safe, make the available zone possibly smaller */
		base = ALIGN_UP((uintptr_t) memmap.zones[i].start, FRAME_SIZE);
		size = ALIGN_DOWN(memmap.zones[i].size -
		    (base - ((uintptr_t) memmap.zones[i].start)), FRAME_SIZE);
		
		if (!frame_adjust_zone_bounds(low, &base, &size))
			continue;
 
		pfn_t confdata;
		pfn_t pfn = ADDR2PFN(base);
		size_t count = SIZE2FRAMES(size);

		if (low) {
			confdata = pfn;
			if (confdata == ADDR2PFN(KA2PA(PFN2ADDR(0))))
				confdata = ADDR2PFN(KA2PA(PFN2ADDR(2)));
			
			zone_create(pfn, count, confdata,
			    ZONE_AVAILABLE | ZONE_LOWMEM);
		} else {
			confdata = zone_external_conf_alloc(count);
			if (confdata != 0)
				zone_create(pfn, count, confdata,
				    ZONE_AVAILABLE | ZONE_HIGHMEM);
		}
	}
}
Example no. 9
/** Free frames of physical memory.
 *
 * Find respective frame structures for supplied physical frames.
 * Decrement each frame reference count. If it drops to zero, mark
 * the frames as available.
 *
 * @param start Physical Address of the first frame to be freed.
 * @param count Number of frames to free.
 * @param flags Flags to control memory reservation.
 *
 */
void frame_free_generic(uintptr_t start, size_t count, frame_flags_t flags)
{
	size_t freed = 0;
	
	irq_spinlock_lock(&zones.lock, true);
	
	for (size_t i = 0; i < count; i++) {
		/*
		 * First, find host frame zone for addr.
		 */
		pfn_t pfn = ADDR2PFN(start) + i;
		size_t znum = find_zone(pfn, 1, 0);
		
		ASSERT(znum != (size_t) -1);
		
		freed += zone_frame_free(&zones.info[znum],
		    pfn - zones.info[znum].base);
	}
	
	irq_spinlock_unlock(&zones.lock, true);
	
	/*
	 * Signal that some memory has been freed.
	 * Since the mem_avail_mtx is an active mutex,
	 * we need to disable interrupts to prevent deadlock
	 * with TLB shootdown.
	 */
	
	ipl_t ipl = interrupts_disable();
	mutex_lock(&mem_avail_mtx);
	
	if (mem_avail_req > 0)
		mem_avail_req -= min(mem_avail_req, freed);
	
	if (mem_avail_req == 0) {
		mem_avail_gen++;
		condvar_broadcast(&mem_avail_cv);
	}
	
	mutex_unlock(&mem_avail_mtx);
	interrupts_restore(ipl);
	
	if (!(flags & FRAME_NO_RESERVE))
		reserve_free(freed);
}
Example no. 10
void frame_low_arch_init(void)
{
	if (config.cpu_active > 1)
		return;
	
	frame_common_arch_init(true);
	
	/*
	 * On sparc64, physical memory can start on a non-zero address.
	 * The generic frame_init() only marks PFN 0 as not free, so we
	 * must explicitly mark the physically first frame as not free
	 * here, no matter what its address is.
	 */
	frame_mark_unavailable(ADDR2PFN(KA2PA(PFN2ADDR(0))), 1);

	/* PA2KA will work only on low-memory. */
	end_of_identity = PA2KA(config.physmem_end - FRAME_SIZE) + PAGE_SIZE;
}
Example no. 11
/**
 * Allocate frames for slab space and initialize it.
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
    void *data;
    slab_t *slab;
    unsigned int i;
    u32_t p;

    DBG("%s order %d\n", __FUNCTION__, cache->order);

    data = (void*)PA2KA(frame_alloc(1 << cache->order));
    if (!data) {
        return NULL;
    }
    slab = (slab_t*)slab_create();
    if (!slab) {
        frame_free(KA2PA(data));
        return NULL;
    }

  /* Fill in slab structures */
    for (i = 0; i < ((u32_t) 1 << cache->order); i++)
        frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab);

    slab->start = data;
    slab->available = cache->objects;
    slab->nextavail = (void*)data;
    slab->cache = cache;

    /* Thread the free list through the objects of the new slab. */
    for (i = 0, p = (u32_t)slab->start; i < cache->objects; i++)
    {
        *(addr_t *)p = p + cache->size;
        p = p + cache->size;
    }
    atomic_inc(&cache->allocated_slabs);
    return slab;
}
Example no. 12
/** Allocate frames of physical memory.
 *
 * @param count      Number of continuous frames to allocate.
 * @param flags      Flags for host zone selection and address processing.
 * @param constraint Indication of physical address bits that cannot be
 *                   set in the address of the first allocated frame.
 * @param pzone      Preferred zone.
 *
 * @return Physical address of the allocated frame.
 *
 */
uintptr_t frame_alloc_generic(size_t count, frame_flags_t flags,
    uintptr_t constraint, size_t *pzone)
{
	ASSERT(count > 0);
	
	size_t hint = pzone ? (*pzone) : 0;
	pfn_t frame_constraint = ADDR2PFN(constraint);
	
	/*
	 * If not told otherwise, we must first reserve the memory.
	 */
	if (!(flags & FRAME_NO_RESERVE))
		reserve_force_alloc(count);
	
loop:
	irq_spinlock_lock(&zones.lock, true);
	
	/*
	 * First, find suitable frame zone.
	 */
	size_t znum = find_free_zone(count, FRAME_TO_ZONE_FLAGS(flags),
	    frame_constraint, hint);
	
	/*
	 * If no memory, reclaim some slab memory,
	 * if it does not help, reclaim all.
	 */
	if ((znum == (size_t) -1) && (!(flags & FRAME_NO_RECLAIM))) {
		irq_spinlock_unlock(&zones.lock, true);
		size_t freed = slab_reclaim(0);
		irq_spinlock_lock(&zones.lock, true);
		
		if (freed > 0)
			znum = find_free_zone(count, FRAME_TO_ZONE_FLAGS(flags),
			    frame_constraint, hint);
		
		if (znum == (size_t) -1) {
			irq_spinlock_unlock(&zones.lock, true);
			freed = slab_reclaim(SLAB_RECLAIM_ALL);
			irq_spinlock_lock(&zones.lock, true);
			
			if (freed > 0)
				znum = find_free_zone(count, FRAME_TO_ZONE_FLAGS(flags),
				    frame_constraint, hint);
		}
	}
	
	if (znum == (size_t) -1) {
		if (flags & FRAME_ATOMIC) {
			irq_spinlock_unlock(&zones.lock, true);
			
			if (!(flags & FRAME_NO_RESERVE))
				reserve_free(count);
			
			return 0;
		}
		
		size_t avail = frame_total_free_get_internal();
		
		irq_spinlock_unlock(&zones.lock, true);
		
		if (!THREAD)
			panic("Cannot wait for %zu frames to become available "
			    "(%zu available).", count, avail);
		
		/*
		 * Sleep until some frames are available again.
		 */
		
#ifdef CONFIG_DEBUG
		log(LF_OTHER, LVL_DEBUG,
		    "Thread %" PRIu64 " waiting for %zu frames "
		    "(%zu available).", THREAD->tid, count, avail);
#endif
		
		/*
		 * Since the mem_avail_mtx is an active mutex, we need to
		 * disable interrupts to prevent deadlock with TLB shootdown.
		 */
		ipl_t ipl = interrupts_disable();
		mutex_lock(&mem_avail_mtx);
		
		if (mem_avail_req > 0)
			mem_avail_req = min(mem_avail_req, count);
		else
			mem_avail_req = count;
		
		size_t gen = mem_avail_gen;
		
		while (gen == mem_avail_gen)
			condvar_wait(&mem_avail_cv, &mem_avail_mtx);
		
		mutex_unlock(&mem_avail_mtx);
		interrupts_restore(ipl);
		
#ifdef CONFIG_DEBUG
		log(LF_OTHER, LVL_DEBUG, "Thread %" PRIu64 " woken up.",
		    THREAD->tid);
#endif
		
		goto loop;
	}
	
	pfn_t pfn = zone_frame_alloc(&zones.info[znum], count,
	    frame_constraint) + zones.info[znum].base;
	
	irq_spinlock_unlock(&zones.lock, true);
	
	if (pzone)
		*pzone = znum;
	
	return PFN2ADDR(pfn);
}
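
The doc comment above spells out the frame_alloc_generic() contract: a count of contiguous frames, zone-selection flags, and a constraint mask of physical address bits that must stay clear in the first frame's address. The sketch below is not taken from the HelenOS tree; the 4-frame count, the 16 MiB constraint mask, and the function name are illustrative assumptions. It pairs the allocation with frame_free_generic() from example no. 9.

static void frame_alloc_usage_sketch(void)
{
	/*
	 * Hypothetical caller: request 4 contiguous low-memory frames whose
	 * physical addresses stay below 16 MiB, i.e. no bit above bit 23 may
	 * be set in the first frame's address. FRAME_ATOMIC makes the call
	 * return 0 instead of sleeping when no suitable frames are available.
	 */
	uintptr_t phys = frame_alloc_generic(4, FRAME_LOWMEM | FRAME_ATOMIC,
	    ~(uintptr_t) 0xffffff, NULL);
	if (phys == 0)
		return;

	/* ... the frames are reachable through the identity map via PA2KA(phys) ... */

	frame_free_generic(phys, 4, 0);
}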
Example no. 13
/** Create and add zone to system.
 *
 * @param start     First frame number (absolute).
 * @param count     Size of zone in frames.
 * @param confframe Where configuration frames are supposed to be.
 *                  Automatically checks that we will not disturb the
 *                  kernel and possibly init. If confframe is given
 *                  _outside_ this zone, it is expected that the area is
 *                  already marked BUSY and big enough to contain
 *                  zone_conf_size() amount of data. If the confframe is
 *                  inside the area, the zone free frame information is
 *                  modified not to include it.
 *
 * @return Zone number or -1 on error.
 *
 */
size_t zone_create(pfn_t start, size_t count, pfn_t confframe,
    zone_flags_t flags)
{
	irq_spinlock_lock(&zones.lock, true);
	
	if (flags & ZONE_AVAILABLE) {  /* Create available zone */
		/*
		 * Theoretically we could have NULL here; in practice, make sure
		 * nobody tries to do that. If some platform requires it, remove
		 * the assert.
		 */
		ASSERT(confframe != ADDR2PFN((uintptr_t) NULL));
		
		/* Update the known end of physical memory. */
		config.physmem_end = max(config.physmem_end, PFN2ADDR(start + count));
		
		/*
		 * If confframe is supposed to be inside our zone, then make sure
		 * it does not span kernel & init
		 */
		size_t confcount = SIZE2FRAMES(zone_conf_size(count));
		
		if ((confframe >= start) && (confframe < start + count)) {
			for (; confframe < start + count; confframe++) {
				uintptr_t addr = PFN2ADDR(confframe);
				if (overlaps(addr, PFN2ADDR(confcount),
				    KA2PA(config.base), config.kernel_size))
					continue;
				
				if (overlaps(addr, PFN2ADDR(confcount),
				    KA2PA(config.stack_base), config.stack_size))
					continue;
				
				bool overlap = false;
				for (size_t i = 0; i < init.cnt; i++) {
					if (overlaps(addr, PFN2ADDR(confcount),
					    init.tasks[i].paddr,
					    init.tasks[i].size)) {
						overlap = true;
						break;
					}
				}
				
				if (overlap)
					continue;
				
				break;
			}
			
			if (confframe >= start + count)
				panic("Cannot find configuration data for zone.");
		}
		
		size_t znum = zones_insert_zone(start, count, flags);
		if (znum == (size_t) -1) {
			irq_spinlock_unlock(&zones.lock, true);
			return (size_t) -1;
		}
		
		void *confdata = (void *) PA2KA(PFN2ADDR(confframe));
		zone_construct(&zones.info[znum], start, count, flags, confdata);
		
		/* If confdata in zone, mark as unavailable */
		if ((confframe >= start) && (confframe < start + count)) {
			for (size_t i = confframe; i < confframe + confcount; i++)
				zone_mark_unavailable(&zones.info[znum],
				    i - zones.info[znum].base);
		}
		
		irq_spinlock_unlock(&zones.lock, true);
		
		return znum;
	}
	
	/* Non-available zone */
	size_t znum = zones_insert_zone(start, count, flags);
	if (znum == (size_t) -1) {
		irq_spinlock_unlock(&zones.lock, true);
		return (size_t) -1;
	}
	
	zone_construct(&zones.info[znum], start, count, flags, NULL);
	
	irq_spinlock_unlock(&zones.lock, true);
	
	return znum;
}
Example no. 14
/** Merge zones z1 and z2.
 *
 * The merged zones must be 2 zones with no zone existing in between
 * (which means that z2 = z1 + 1). Both zones must be available zones
 * with the same flags.
 *
 * When you create a new zone, the frame allocator configuration does
 * not have to be of 2^order size. Once the allocator is running this is no
 * longer possible, because merged configuration data occupies more space :-/
 *
 */
bool zone_merge(size_t z1, size_t z2)
{
	irq_spinlock_lock(&zones.lock, true);
	
	bool ret = true;
	
	/*
	 * We can only join 2 zones with no other zone in between;
	 * the zones have to be available and have the same
	 * set of flags.
	 */
	if ((z1 >= zones.count) || (z2 >= zones.count) || (z2 - z1 != 1) ||
	    (zones.info[z1].flags != zones.info[z2].flags)) {
		ret = false;
		goto errout;
	}
	
	pfn_t cframes = SIZE2FRAMES(zone_conf_size(
	    zones.info[z2].base - zones.info[z1].base
	    + zones.info[z2].count));
	
	/* Allocate merged zone data inside one of the zones */
	pfn_t pfn;
	if (zone_can_alloc(&zones.info[z1], cframes, 0)) {
		pfn = zones.info[z1].base +
		    zone_frame_alloc(&zones.info[z1], cframes, 0);
	} else if (zone_can_alloc(&zones.info[z2], cframes, 0)) {
		pfn = zones.info[z2].base +
		    zone_frame_alloc(&zones.info[z2], cframes, 0);
	} else {
		ret = false;
		goto errout;
	}
	
	/* Preserve original data from z1 */
	zone_t old_z1 = zones.info[z1];
	
	/* Do zone merging */
	zone_merge_internal(z1, z2, &old_z1, (void *) PA2KA(PFN2ADDR(pfn)));
	
	/* Subtract zone information from busy frames */
	zones.info[z1].busy_count -= cframes;
	
	/* Free old zone information */
	return_config_frames(z1,
	    ADDR2PFN(KA2PA((uintptr_t) old_z1.frames)), old_z1.count);
	return_config_frames(z1,
	    ADDR2PFN(KA2PA((uintptr_t) zones.info[z2].frames)),
	    zones.info[z2].count);
	
	/* Move zones down */
	for (size_t i = z2 + 1; i < zones.count; i++)
		zones.info[i - 1] = zones.info[i];
	
	zones.count--;
	
errout:
	irq_spinlock_unlock(&zones.lock, true);
	
	return ret;
}
Example no. 15
/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
    return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)));
}