Example #1
// Release a block previously returned by kmalloc(), merging it with
// adjacent free blocks where possible.
void kfree(void* address)
{
	kdebug("< kfree %d", address);

	// step back from the payload pointer to the start of the block
	// header (hand-rolled offsetof(malloc_block_t, data))
	malloc_block_t* block = address - (uint32_t)&(((malloc_block_t*)0)->data);
	check_block(block);
	
	int size = block->size;
	int flags = block->flags & ~MALLOC_USED;

	// if next block is free, merge with it
	if ((block->flags&MALLOC_LAST)==0) {
		malloc_block_t* next = BLOCK_NEXT(block);
		if ((next->flags&MALLOC_USED)==0) {
			size += next->size;
			flags |= (next->flags&MALLOC_LAST);
		}
	}
	// if previous block is free, merge with it
	if ((block->flags&MALLOC_FIRST)==0) {
		malloc_block_t* prev = BLOCK_PREVIOUS(block);
		if ((prev->flags&MALLOC_USED)==0) {
			size += prev->size;
			flags |= (prev->flags&MALLOC_FIRST);
			block = prev;
		}
	}

	setup_block(block, flags, size);
	kmalloc_print();
	kdebug(">");
}
Example #2
// Best-fit allocation: scan the block chain for the smallest free block
// that can hold 'payload' bytes, splitting it when worthwhile.
void* kmalloc(size_t payload)
{
	malloc_block_t* block;
	malloc_block_t* ptr;

	kdebug("< kmalloc %d", payload);
	
	block = 0;
	ptr = first_block;
	while (1) {
		check_block(ptr);
		// free and big enough ?
		if (((ptr->flags&MALLOC_USED)==0) && (PAYLOAD_SIZE(ptr)>=payload)) {
			// better than the current fit (if any) ?
			if ((block==0) || (ptr->size<block->size)) {
				block = ptr;
			}
		}
		if (ptr->flags&MALLOC_LAST) {
			break;
		}
		ptr = BLOCK_NEXT(ptr);
	}

	// no free block was large enough
	if (block==0) {
		kpanic("could not allocate %d", payload);
		return 0;
	}

	// split only if the remainder can hold its own header and footer
	int split_block = (block->size >= (payload + 2*BLOCK_OVERHEAD));
	if (split_block) {
		int last = block->flags&MALLOC_LAST;
		size_t size     = payload + BLOCK_OVERHEAD;
		size_t rem_size = block->size - size;
		setup_block(block, (block->flags&~MALLOC_LAST)|MALLOC_USED, size);
		setup_block(BLOCK_NEXT(block), last, rem_size);
	} else {
		block->flags |= MALLOC_USED;
	}
	kmalloc_print();
	kdebug(">");
	return (void*)block->data;
}
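For orientation, here is a minimal usage sketch of the kmalloc()/kfree() pair above; kmalloc_demo() and the 128-byte request are hypothetical and not part of the original source.

/* Hypothetical caller of the allocator shown in Examples #1 and #2 */
void kmalloc_demo(void)
{
	void* buf = kmalloc(128);	/* best-fit search over the block chain */
	if (buf != 0) {
		/* ... use up to 128 bytes of payload ... */
		kfree(buf);		/* coalesces with adjacent free blocks */
	}
}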
Example #3
void kmalloc_print()
{
	malloc_block_t* ptr = first_block;
	while (1) {
		kdebug("  @%d, size=%d, flags=%b", ptr, ptr->size, ptr->flags);
		if ((ptr->flags&MALLOC_LAST)!=0) {
			break;
		}
		ptr = BLOCK_NEXT(ptr);
	}
}
Example #4
void check_block(malloc_block_t* block)
{
	if (  (block->magic!=MALLOC_MAGIC)
		|| (BLOCK_END(block)->magic!=MALLOC_MAGIC)
		|| (BLOCK_END(block)->backlink!=block))
	{
		kpanic("corrupted malloc data");
	}
	if ((block->flags&MALLOC_LAST)==0) {
		malloc_block_t* next = BLOCK_NEXT(block);
		if ((next->magic!=MALLOC_MAGIC) || (next->flags&MALLOC_FIRST)) {
			kpanic("corrupted malloc chain");
		}
	}
	if ((block->flags&MALLOC_FIRST)==0) {
		malloc_block_t* prev = BLOCK_PREVIOUS(block);
		if ((prev->magic!=MALLOC_MAGIC) || (prev->flags&MALLOC_LAST)) {
			kpanic("corrupted malloc chain");
		}
	}
}
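All four examples above rely on a malloc_block_t layout and helper macros that are not shown. The sketch below is a hypothetical reconstruction, consistent with how the code uses them; the field widths, flag values and the magic constant are assumptions.

#include <stdint.h>

/* Assumed flag bits and magic value (not from the original source). */
#define MALLOC_MAGIC	0xBEEF
#define MALLOC_USED	0x1	/* block is allocated */
#define MALLOC_FIRST	0x2	/* first block of the heap */
#define MALLOC_LAST	0x4	/* last block of the heap */

typedef struct malloc_block {
	uint16_t magic;		/* MALLOC_MAGIC, checked by check_block() */
	uint16_t flags;		/* MALLOC_USED | MALLOC_FIRST | MALLOC_LAST */
	uint32_t size;		/* whole block: header + payload + footer */
	uint8_t  data[];	/* start of the user payload */
} malloc_block_t;

typedef struct malloc_footer {
	malloc_block_t* backlink;	/* points back to this block's header */
	uint16_t magic;			/* MALLOC_MAGIC */
} malloc_footer_t;

/* Blocks sit back to back, so the chain can be walked in both directions. */
#define BLOCK_OVERHEAD	(sizeof(malloc_block_t) + sizeof(malloc_footer_t))
#define PAYLOAD_SIZE(b)	((b)->size - BLOCK_OVERHEAD)
#define BLOCK_NEXT(b)	((malloc_block_t*)((uint8_t*)(b) + (b)->size))
#define BLOCK_END(b)	((malloc_footer_t*)((uint8_t*)(b) + (b)->size) - 1)
#define BLOCK_PREVIOUS(b)	(((malloc_footer_t*)(b) - 1)->backlink)

/* Head of the block chain, set up by the heap initialisation code. */
extern malloc_block_t* first_block;

/* Assumed to (re)write the header and footer of a block of 'size' bytes. */
void setup_block(malloc_block_t* block, int flags, int size);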
/*
 * Free the block identified by <page, offset>
 */
void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
{
	void *page_start;
	struct block_header *block, *tmpblock;

	offset -= XV_ALIGN;

	spin_lock(&pool->lock);

	page_start = get_ptr_atomic(page, 0);
	block = (struct block_header *)((char *)page_start + offset);

	/* Catch double free bugs */
	BUG_ON(test_flag(block, BLOCK_FREE));

	block->size = ALIGN(block->size, XV_ALIGN);

	tmpblock = BLOCK_NEXT(block);
	if (offset + block->size + XV_ALIGN == PAGE_SIZE)
		tmpblock = NULL;

	/* Merge the next block if it's free */
	if (tmpblock && test_flag(tmpblock, BLOCK_FREE)) {
		/*
		 * Blocks smaller than XV_MIN_ALLOC_SIZE
		 * are not inserted in any free list.
		 */
		if (tmpblock->size >= XV_MIN_ALLOC_SIZE) {
			remove_block(pool, page,
				    offset + block->size + XV_ALIGN, tmpblock,
				    get_index_for_insert(tmpblock->size));
		}
		block->size += tmpblock->size + XV_ALIGN;
	}

	/* Merge the previous block if it's free */
	if (test_flag(block, PREV_FREE)) {
		tmpblock = (struct block_header *)((char *)(page_start) +
						get_blockprev(block));
		offset = offset - tmpblock->size - XV_ALIGN;

		if (tmpblock->size >= XV_MIN_ALLOC_SIZE)
			remove_block(pool, page, offset, tmpblock,
				    get_index_for_insert(tmpblock->size));

		tmpblock->size += block->size + XV_ALIGN;
		block = tmpblock;
	}

	/* No used objects in this page. Free it. */
	if (block->size == PAGE_SIZE - XV_ALIGN) {
		put_ptr_atomic(page_start);
		spin_unlock(&pool->lock);

		__free_page(page);
		stat_dec(&pool->total_pages);
		return;
	}

	set_flag(block, BLOCK_FREE);
	if (block->size >= XV_MIN_ALLOC_SIZE)
		insert_block(pool, page, offset, block);

	if (offset + block->size + XV_ALIGN != PAGE_SIZE) {
		tmpblock = BLOCK_NEXT(block);
		set_flag(tmpblock, PREV_FREE);
		set_blockprev(tmpblock, offset);
	}

	put_ptr_atomic(page_start);
	spin_unlock(&pool->lock);
}
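The helpers test_flag(), set_flag(), clear_flag(), set_blockprev() and get_blockprev() are defined elsewhere in xvmalloc and are not shown here. The sketch below is an assumption about their shape: the flag bits are packed into the low bits of the stored previous-block offset, which works because every block offset is a multiple of XV_ALIGN; the enum and mask names are likewise assumed.

/*
 * Sketch only (assumed, not quoted from xvmalloc): struct block_header
 * is taken to carry a u16 'prev' field whose low bits double as flags,
 * since real block offsets are always XV_ALIGN-aligned.
 */
enum blockflags {
	BLOCK_FREE,
	PREV_FREE,
	__NR_BLOCKFLAGS,
};

#define FLAGS_MASK	(XV_ALIGN - 1)
#define PREV_MASK	(~FLAGS_MASK)

static void set_flag(struct block_header *block, enum blockflags flag)
{
	block->prev |= BIT(flag);
}

static void clear_flag(struct block_header *block, enum blockflags flag)
{
	block->prev &= ~BIT(flag);
}

static u32 test_flag(struct block_header *block, enum blockflags flag)
{
	return block->prev & BIT(flag);
}

static void set_blockprev(struct block_header *block, u16 new_offset)
{
	block->prev = new_offset | (block->prev & FLAGS_MASK);
}

static u16 get_blockprev(struct block_header *block)
{
	return block->prev & PREV_MASK;
}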
/**
 * xv_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 * @page: page that holds the allocated object
 * @offset: location of object within page
 *
 * On success, <page, offset> identifies the allocated block and 0 is
 * returned. On failure, <page, offset> is set to <NULL, 0> and -ENOMEM
 * is returned.
 *
 * Allocation requests with size > XV_MAX_ALLOC_SIZE will fail.
 */
int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
		u32 *offset, gfp_t flags)
{
	int error;
	u32 index, tmpsize, origsize, tmpoffset;
	struct block_header *block, *tmpblock;

	*page = NULL;
	*offset = 0;
	origsize = size;

	if (unlikely(!size || size > XV_MAX_ALLOC_SIZE))
		return -ENOMEM;

	size = ALIGN(size, XV_ALIGN);

	spin_lock(&pool->lock);

	index = find_block(pool, size, page, offset);

	if (!*page) {
		spin_unlock(&pool->lock);
		if (flags & GFP_NOWAIT)
			return -ENOMEM;
		error = grow_pool(pool, flags);
		if (unlikely(error))
			return error;

		spin_lock(&pool->lock);
		index = find_block(pool, size, page, offset);
	}

	if (!*page) {
		spin_unlock(&pool->lock);
		return -ENOMEM;
	}

	block = get_ptr_atomic(*page, *offset);

	remove_block(pool, *page, *offset, block, index);

	/* Split the block if required */
	tmpoffset = *offset + size + XV_ALIGN;
	tmpsize = block->size - size;
	tmpblock = (struct block_header *)((char *)block + size + XV_ALIGN);
	if (tmpsize) {
		tmpblock->size = tmpsize - XV_ALIGN;
		set_flag(tmpblock, BLOCK_FREE);
		clear_flag(tmpblock, PREV_FREE);

		set_blockprev(tmpblock, *offset);
		if (tmpblock->size >= XV_MIN_ALLOC_SIZE)
			insert_block(pool, *page, tmpoffset, tmpblock);

		if (tmpoffset + XV_ALIGN + tmpblock->size != PAGE_SIZE) {
			tmpblock = BLOCK_NEXT(tmpblock);
			set_blockprev(tmpblock, tmpoffset);
		}
	} else {
		/* This block is exact fit */
		if (tmpoffset != PAGE_SIZE)
			clear_flag(tmpblock, PREV_FREE);
	}

	block->size = origsize;
	clear_flag(block, BLOCK_FREE);

	put_ptr_atomic(block);
	spin_unlock(&pool->lock);

	*offset += XV_ALIGN;

	return 0;
}
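The kerneldoc above gives the contract but no caller. Below is a hedged usage sketch of how a client such as zram might drive xv_malloc()/xv_free(); example_store() is hypothetical, the pool is assumed to come from xv_create_pool() in xvmalloc.h, and the page is mapped with the kernel's kmap_atomic()/kunmap_atomic() (single-argument form).

#include <linux/highmem.h>	/* kmap_atomic / kunmap_atomic */
#include <linux/string.h>	/* memcpy */
#include "xvmalloc.h"

/* Hypothetical caller: copy 'len' bytes into a freshly allocated object. */
static int example_store(struct xv_pool *pool, const void *src, u32 len)
{
	struct page *page;
	u32 offset;
	void *base;
	int err;

	err = xv_malloc(pool, len, &page, &offset, GFP_NOIO);
	if (err)
		return err;	/* -ENOMEM if no block fit and the pool could not grow */

	base = kmap_atomic(page);		/* map the page holding the object */
	memcpy((char *)base + offset, src, len);
	kunmap_atomic(base);

	/* ... later, when the object is no longer needed ... */
	xv_free(pool, page, offset);
	return 0;
}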