Example #1
/*
 * Remove block from freelist. Index 'slindex' identifies the freelist.
 */
static void remove_block(struct xv_pool *pool, struct page *page, u32 offset,
                         struct block_header *block, u32 slindex)
{
    struct block_header *tmpblock;

    if (pool->freelist[slindex].page == page
            && pool->freelist[slindex].offset == offset) {
        remove_block_head(pool, block, slindex);
        return;
    }

    if (block->link.prev_page) {
        tmpblock = get_ptr_atomic(block->link.prev_page,
                                  block->link.prev_offset, KM_USER1);
        tmpblock->link.next_page = block->link.next_page;
        tmpblock->link.next_offset = block->link.next_offset;
        put_ptr_atomic(tmpblock, KM_USER1);
    }

    if (block->link.next_page) {
        tmpblock = get_ptr_atomic(block->link.next_page,
                                  block->link.next_offset, KM_USER1);
        tmpblock->link.prev_page = block->link.prev_page;
        tmpblock->link.prev_offset = block->link.prev_offset;
        put_ptr_atomic(tmpblock, KM_USER1);
    }
}
/*
 * Remove block from freelist. Index 'slindex' identifies the freelist.
 */
static void remove_block(struct xv_pool *pool, struct page *page, u32 offset,
			struct block_header *block, u32 slindex)
{
	u32 flindex = slindex / BITS_PER_LONG;
	struct block_header *tmpblock;

	if (block->link.prev_page) {
		tmpblock = get_ptr_atomic(block->link.prev_page,
				block->link.prev_offset, KM_USER1);
		tmpblock->link.next_page = block->link.next_page;
		tmpblock->link.next_offset = block->link.next_offset;
		put_ptr_atomic(tmpblock, KM_USER1);
	}

	if (block->link.next_page) {
		tmpblock = get_ptr_atomic(block->link.next_page,
				block->link.next_offset, KM_USER1);
		tmpblock->link.prev_page = block->link.prev_page;
		tmpblock->link.prev_offset = block->link.prev_offset;
		put_ptr_atomic(tmpblock, KM_USER1);
	}

	/* Is this block at the head of the freelist? */
	if (pool->freelist[slindex].page == page
	   && pool->freelist[slindex].offset == offset) {

		pool->freelist[slindex].page = block->link.next_page;
		pool->freelist[slindex].offset = block->link.next_offset;

		if (pool->freelist[slindex].page) {
			tmpblock = get_ptr_atomic(pool->freelist[slindex].page,
					pool->freelist[slindex].offset,
					KM_USER1);
			tmpblock->link.prev_page = NULL;
			tmpblock->link.prev_offset = 0;
			put_ptr_atomic(tmpblock, KM_USER1);
		} else {
			/* This freelist bucket is empty */
			__clear_bit(slindex % BITS_PER_LONG,
				    &pool->slbitmap[flindex]);
			if (!pool->slbitmap[flindex])
				__clear_bit(flindex, &pool->flbitmap);
		}
	}

	block->link.prev_page = NULL;
	block->link.prev_offset = 0;
	block->link.next_page = NULL;
	block->link.next_offset = 0;
}
/*
 * Allocate a page and add it to freelist of given pool.
 */
static int grow_pool(struct xv_pool *pool, gfp_t flags)
{
	struct page *page;
	struct block_header *block;

	page = alloc_page(flags);
	if (unlikely(!page))
		return -ENOMEM;

	stat_inc(&pool->total_pages);

	spin_lock(&pool->lock);
	block = get_ptr_atomic(page, 0);

	block->size = PAGE_SIZE - XV_ALIGN;
	set_flag(block, BLOCK_FREE);
	clear_flag(block, PREV_FREE);
	set_blockprev(block, 0);

	insert_block(pool, page, 0, block);

	put_ptr_atomic(block);
	spin_unlock(&pool->lock);

	return 0;
}
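
grow_pool() and the routines above reach into blocks through get_ptr_atomic()/put_ptr_atomic(). A minimal sketch of what this pair is assumed to do: wrap kmap_atomic()/kunmap_atomic() so a <page, offset> pair becomes a dereferencable pointer for the duration of the critical section (the earlier examples pass an explicit KM_USER1 slot; the variant used by grow_pool() does not).

/*
 * Sketch only: assumed implementation of the <page, offset> mapping helpers
 * used throughout; matches the argument-less variant seen in grow_pool().
 */
static void *get_ptr_atomic(struct page *page, u32 offset)
{
	unsigned char *base;

	/* Map the page at a temporary kernel virtual address. */
	base = kmap_atomic(page);
	return base + offset;
}

static void put_ptr_atomic(void *ptr)
{
	/* Undo the temporary mapping set up by get_ptr_atomic(). */
	kunmap_atomic(ptr);
}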
/*
 * Insert block at <page, offset> in freelist of given pool.
 * The freelist used depends on the block size.
 */
static void insert_block(struct xv_pool *pool, struct page *page, u32 offset,
			struct block_header *block)
{
	u32 flindex, slindex;
	struct block_header *nextblock;

	slindex = get_index_for_insert(block->size);
	flindex = slindex / BITS_PER_LONG;

	block->link.prev_page = NULL;
	block->link.prev_offset = 0;
	block->link.next_page = pool->freelist[slindex].page;
	block->link.next_offset = pool->freelist[slindex].offset;
	pool->freelist[slindex].page = page;
	pool->freelist[slindex].offset = offset;

	if (block->link.next_page) {
		nextblock = get_ptr_atomic(block->link.next_page,
					block->link.next_offset);
		nextblock->link.prev_page = page;
		nextblock->link.prev_offset = offset;
		put_ptr_atomic(nextblock);
		/* If there was a next page then the free bits are set. */
		return;
	}

	__set_bit(slindex % BITS_PER_LONG, &pool->slbitmap[flindex]);
	__set_bit(flindex, &pool->flbitmap);
}
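
insert_block() picks its bucket with get_index_for_insert(). A plausible sketch of that mapping, assuming blocks are binned in fixed FL_DELTA-byte steps between XV_MIN_ALLOC_SIZE and XV_MAX_ALLOC_SIZE; the constants and the companion get_index() helper used on the allocation path are assumptions, not quoted from the source.

/* Assumed bucket granularity; the real values may differ. */
#define FL_DELTA_SHIFT	3
#define FL_DELTA	(1 << FL_DELTA_SHIFT)

/* Freelist index for inserting a free block: round size DOWN to a bucket. */
static u32 get_index_for_insert(u32 size)
{
	if (unlikely(size > XV_MAX_ALLOC_SIZE))
		size = XV_MAX_ALLOC_SIZE;
	size &= ~(FL_DELTA - 1);
	return (size - XV_MIN_ALLOC_SIZE) >> FL_DELTA_SHIFT;
}

/* Freelist index for searching: round size UP so any block found fits. */
static u32 get_index(u32 size)
{
	if (unlikely(size < XV_MIN_ALLOC_SIZE))
		size = XV_MIN_ALLOC_SIZE;
	size = ALIGN(size, FL_DELTA);
	return (size - XV_MIN_ALLOC_SIZE) >> FL_DELTA_SHIFT;
}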
Example #5
/*
 * Remove block from head of freelist. Index 'slindex' identifies the freelist.
 */
static void remove_block_head(struct xv_pool *pool,
                              struct block_header *block, u32 slindex)
{
    struct block_header *tmpblock;
    u32 flindex = slindex / BITS_PER_LONG;

    pool->freelist[slindex].page = block->link.next_page;
    pool->freelist[slindex].offset = block->link.next_offset;
    block->link.prev_page = NULL;
    block->link.prev_offset = 0;

    if (!pool->freelist[slindex].page) {
        __clear_bit(slindex % BITS_PER_LONG, &pool->slbitmap[flindex]);
        if (!pool->slbitmap[flindex])
            __clear_bit(flindex, &pool->flbitmap);
    } else {
        /*
         * DEBUG ONLY: We need not reinitialize freelist head previous
         * pointer to 0 - we never depend on its value. But just for
         * sanity, let's do it.
         */
        tmpblock = get_ptr_atomic(pool->freelist[slindex].page,
                                  pool->freelist[slindex].offset, KM_USER1);
        tmpblock->link.prev_page = NULL;
        tmpblock->link.prev_offset = 0;
        put_ptr_atomic(tmpblock, KM_USER1);
    }
}
Example #6
/*
 * Insert block at <pagenum, offset> in freelist of given pool.
 * The freelist used depends on the block size.
 */
static void insert_block(struct xv_pool *pool, u32 pagenum, u32 offset,
			struct block_header *block)
{
	u32 flindex, slindex;
	struct block_header *nextblock;

	slindex = get_index_for_insert(block->size);
	flindex = slindex / BITS_PER_LONG;

	block->link.prev_pagenum = 0;
	block->link.prev_offset = 0;
	block->link.next_pagenum = pool->freelist[slindex].pagenum;
	block->link.next_offset = pool->freelist[slindex].offset;
	pool->freelist[slindex].pagenum = pagenum;
	pool->freelist[slindex].offset = offset;

	if (block->link.next_pagenum) {
		nextblock = get_ptr_atomic(block->link.next_pagenum,
					block->link.next_offset, KM_USER1);
		nextblock->link.prev_pagenum = pagenum;
		nextblock->link.prev_offset = offset;
		put_ptr_atomic(nextblock, KM_USER1);
	}

	__set_bit(slindex % BITS_PER_LONG, &pool->slbitmap[flindex]);
	__set_bit(flindex, &pool->flbitmap);
}
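
xv_free() below (and grow_pool() above) drive per-block state through set_flag()/clear_flag()/test_flag() and set_blockprev()/get_blockprev(). A sketch of one plausible encoding, assuming the block header's 16-bit 'prev' field keeps the XV_ALIGN-aligned offset of the previous block in its high bits and the BLOCK_FREE/PREV_FREE flags in its low bits; the actual field layout is an assumption.

enum blockflags {
	BLOCK_FREE,	/* this block is on a freelist */
	PREV_FREE,	/* the physically preceding block is free */
	__NR_BLOCKFLAGS,
};

/* Offsets are XV_ALIGN-aligned, so the low bits of 'prev' can carry flags. */
#define FLAGS_MASK	(XV_ALIGN - 1)

static void set_flag(struct block_header *block, enum blockflags flag)
{
	block->prev |= BIT(flag);
}

static void clear_flag(struct block_header *block, enum blockflags flag)
{
	block->prev &= ~BIT(flag);
}

static int test_flag(struct block_header *block, enum blockflags flag)
{
	return block->prev & BIT(flag);
}

static void set_blockprev(struct block_header *block, u16 new_offset)
{
	/* Keep the flag bits, replace the offset bits. */
	block->prev = new_offset | (block->prev & FLAGS_MASK);
}

static u32 get_blockprev(struct block_header *block)
{
	return block->prev & ~FLAGS_MASK;
}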
/*
 * Free block identified with <page, offset>
 */
void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
{
	void *page_start;
	struct block_header *block, *tmpblock;

	offset -= XV_ALIGN;

	spin_lock(&pool->lock);

	page_start = get_ptr_atomic(page, 0);
	block = (struct block_header *)((char *)page_start + offset);

	/* Catch double free bugs */
	BUG_ON(test_flag(block, BLOCK_FREE));

	block->size = ALIGN(block->size, XV_ALIGN);

	tmpblock = BLOCK_NEXT(block);
	if (offset + block->size + XV_ALIGN == PAGE_SIZE)
		tmpblock = NULL;

	/* Merge next block if it's free */
	if (tmpblock && test_flag(tmpblock, BLOCK_FREE)) {
		/*
		 * Blocks smaller than XV_MIN_ALLOC_SIZE
		 * are not inserted in any free list.
		 */
		if (tmpblock->size >= XV_MIN_ALLOC_SIZE) {
			remove_block(pool, page,
				    offset + block->size + XV_ALIGN, tmpblock,
				    get_index_for_insert(tmpblock->size));
		}
		block->size += tmpblock->size + XV_ALIGN;
	}

	/* Merge previous block if it's free */
	if (test_flag(block, PREV_FREE)) {
		tmpblock = (struct block_header *)((char *)(page_start) +
						get_blockprev(block));
		offset = offset - tmpblock->size - XV_ALIGN;

		if (tmpblock->size >= XV_MIN_ALLOC_SIZE)
			remove_block(pool, page, offset, tmpblock,
				    get_index_for_insert(tmpblock->size));

		tmpblock->size += block->size + XV_ALIGN;
		block = tmpblock;
	}

	/* No used objects in this page. Free it. */
	if (block->size == PAGE_SIZE - XV_ALIGN) {
		put_ptr_atomic(page_start);
		spin_unlock(&pool->lock);

		__free_page(page);
		stat_dec(&pool->total_pages);
		return;
	}

	set_flag(block, BLOCK_FREE);
	if (block->size >= XV_MIN_ALLOC_SIZE)
		insert_block(pool, page, offset, block);

	if (offset + block->size + XV_ALIGN != PAGE_SIZE) {
		tmpblock = BLOCK_NEXT(block);
		set_flag(tmpblock, PREV_FREE);
		set_blockprev(tmpblock, offset);
	}

	put_ptr_atomic(page_start);
	spin_unlock(&pool->lock);
}
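
xv_malloc() below locates a suitable free block with find_block(). The bits set and cleared by insert_block()/remove_block() form a two-level index: flbitmap records which slbitmap words contain non-empty buckets, and each slbitmap bit marks one freelist. A minimal sketch of that lookup, reusing the assumed get_index() rounding helper from the earlier sketch; the details are assumptions rather than the verbatim source.

static u32 find_block(struct xv_pool *pool, u32 size,
			struct page **page, u32 *offset)
{
	ulong flbits, slbits;
	u32 flindex, slindex;

	/* No bit set anywhere: the pool has no free blocks at all. */
	if (!pool->flbitmap)
		return 0;

	/* First bucket whose blocks are guaranteed to be >= size. */
	slindex = get_index(size);
	flindex = slindex / BITS_PER_LONG;

	/* Look for a non-empty bucket in the same second-level word. */
	slbits = pool->slbitmap[flindex] >> (slindex % BITS_PER_LONG);
	if (slbits) {
		slindex += __ffs(slbits);
	} else {
		/* Ask the first-level bitmap for the next non-empty word. */
		flbits = pool->flbitmap >> (flindex + 1);
		if (!flbits)
			return 0;	/* caller sees *page == NULL */
		flindex += 1 + __ffs(flbits);
		slindex = flindex * BITS_PER_LONG +
				__ffs(pool->slbitmap[flindex]);
	}

	*page = pool->freelist[slindex].page;
	*offset = pool->freelist[slindex].offset;
	return slindex;
}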
/**
 * xv_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 * @page: page that holds the object
 * @offset: location of object within page
 *
 * On success, <page, offset> identifies the allocated block
 * and 0 is returned. On failure, <page, offset> is set to
 * <NULL, 0> and -ENOMEM is returned.
 *
 * Allocation requests with size == 0 or size > XV_MAX_ALLOC_SIZE will fail.
 */
int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
		u32 *offset, gfp_t flags)
{
	int error;
	u32 index, tmpsize, origsize, tmpoffset;
	struct block_header *block, *tmpblock;

	*page = NULL;
	*offset = 0;
	origsize = size;

	if (unlikely(!size || size > XV_MAX_ALLOC_SIZE))
		return -ENOMEM;

	size = ALIGN(size, XV_ALIGN);

	spin_lock(&pool->lock);

	index = find_block(pool, size, page, offset);

	if (!*page) {
		spin_unlock(&pool->lock);
		if (flags & GFP_NOWAIT)
			return -ENOMEM;
		error = grow_pool(pool, flags);
		if (unlikely(error))
			return error;

		spin_lock(&pool->lock);
		index = find_block(pool, size, page, offset);
	}

	if (!*page) {
		spin_unlock(&pool->lock);
		return -ENOMEM;
	}

	block = get_ptr_atomic(*page, *offset);

	remove_block(pool, *page, *offset, block, index);

	/* Split the block if required */
	tmpoffset = *offset + size + XV_ALIGN;
	tmpsize = block->size - size;
	tmpblock = (struct block_header *)((char *)block + size + XV_ALIGN);
	if (tmpsize) {
		tmpblock->size = tmpsize - XV_ALIGN;
		set_flag(tmpblock, BLOCK_FREE);
		clear_flag(tmpblock, PREV_FREE);

		set_blockprev(tmpblock, *offset);
		if (tmpblock->size >= XV_MIN_ALLOC_SIZE)
			insert_block(pool, *page, tmpoffset, tmpblock);

		if (tmpoffset + XV_ALIGN + tmpblock->size != PAGE_SIZE) {
			tmpblock = BLOCK_NEXT(tmpblock);
			set_blockprev(tmpblock, tmpoffset);
		}
	} else {
		/* This block is an exact fit */
		if (tmpoffset != PAGE_SIZE)
			clear_flag(tmpblock, PREV_FREE);
	}

	block->size = origsize;
	clear_flag(block, BLOCK_FREE);

	put_ptr_atomic(block);
	spin_unlock(&pool->lock);

	*offset += XV_ALIGN;

	return 0;
}
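
For context, a short usage sketch: xv_create_pool() and xv_destroy_pool() are the pool constructor and destructor from the same API, while the error handling and the kmap_atomic() access pattern shown here are illustrative assumptions rather than code from the source.

static int xvmalloc_usage_sketch(void)
{
	struct xv_pool *pool;
	struct page *page;
	u32 offset;
	void *base;

	pool = xv_create_pool();
	if (!pool)
		return -ENOMEM;

	/* Allocate a 100-byte object; <page, offset> locates it on success. */
	if (xv_malloc(pool, 100, &page, &offset, GFP_KERNEL)) {
		xv_destroy_pool(pool);
		return -ENOMEM;
	}

	/* Objects are addressed indirectly: map the page to touch the data. */
	base = kmap_atomic(page);
	memset(base + offset, 0, 100);
	kunmap_atomic(base);

	xv_free(pool, page, offset);
	xv_destroy_pool(pool);
	return 0;
}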