Example #1
static void free_object(struct kmem_cache *cache, void *object)
{
    struct slab_page *slab = VIRT_TO_PAGE(object);
    if (slab->cache != cache)
        panic("[slab] - Fatal error, free object in wrong cache.");

    slab->free[slab->avail++] = object;

    if (slab->avail == 1 && slab->avail < slab->limit)
    {
        /* Slab was full (avail was 0): move it to the partial list.
         * The avail < limit guard lets single-object slabs skip the
         * partial list and go straight to the free list below. */
        slab_list_remove(slab);
        slab_list_insert(&cache->partial_slab, slab);
    }
    else if (slab->avail == slab->limit)
    {
        /* Every object is free again: move the slab to the free list
         * (straight from the full list when limit == 1). */
        slab_list_remove(slab);
        slab_list_insert(&cache->free_slab, slab);
    }
}
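
None of these snippets defines the structures they manipulate. A minimal sketch that is consistent with the field accesses in Examples #1-#3 (the field names come from the code; PAGE_SIZE, the list linkage, and the exact layout are assumptions) could look like this:

struct kmem_cache {
    size_t size;                    /* object size served by this cache */
    struct slab_page *free_slab;    /* slabs with every object free */
    struct slab_page *partial_slab; /* slabs with some objects free */
    struct slab_page *full_slab;    /* slabs with no free object */
};

struct slab_page {
    struct kmem_cache *cache;       /* owning cache, checked on free */
    struct slab_page *next;         /* linkage for slab_list_insert() */
    struct slab_page **pprev;       /* back-link for O(1) slab_list_remove() */
    char *object_base;              /* first object, packed at the page end */
    size_t limit;                   /* total objects this slab can hold */
    size_t avail;                   /* objects currently free */
    void *free[1];                  /* stack of free objects; grows past the
                                     * header, which is why Example #3 adds
                                     * sizeof(void *) back to the payload */
};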
Example #2
static inline void * alloc_object_from_slab(struct slab_page *slab)
{
    void *object = NULL;
    if (slab->avail == 0)
        panic("[slab] - Fatal error, alloc object from full slab.");

    object = slab->free[--slab->avail];

    if (slab->avail == 0)
    {
        /* No free objects left: move the slab to the full list
         * (straight from the free list when limit == 1). */
        slab_list_remove(slab);
        slab_list_insert(&slab->cache->full_slab, slab);
    }
    else if (slab->avail + 1 == slab->limit)
    {
        /* Move from free slab list into partial slab list. */
        slab_list_remove(slab);
        slab_list_insert(&slab->cache->partial_slab, slab);
    }

    return object;
}
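
Neither snippet shows the entry point that chooses a slab. A hypothetical kmem_cache_alloc() (not part of the examples; alloc_slab_page() is an assumed helper returning one fresh, page-aligned page) might drive them like this:

static void *kmem_cache_alloc(struct kmem_cache *cache)
{
    struct slab_page *slab = cache->partial_slab;

    /* Prefer partially used slabs, then completely free ones. */
    if (slab == NULL)
        slab = cache->free_slab;

    /* Out of slabs: set up a fresh page as in Example #3. */
    if (slab == NULL) {
        slab = alloc_slab_page();   /* assumed page allocator */
        if (slab == NULL)
            return NULL;
        init_slab_page(cache, slab);
    }

    return alloc_object_from_slab(slab);
}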
Example #3
static inline void init_slab_page(struct kmem_cache *cache,
                                  struct slab_page *slab)
{
    /* Page space left after the header (the first free[] entry inside
     * sizeof(*slab) is handed back to the payload). */
    size_t payload = PAGE_SIZE - (sizeof(*slab) - sizeof(void *));

    /* Capacity: each object costs its size plus one freelist slot. */
    slab->limit = payload / (cache->size + sizeof(void *));
    slab->object_base = (char *)slab + (PAGE_SIZE - (slab->limit * cache->size));
    slab->cache = cache;
    slab->avail = 0;

    /* Push the address of every object onto the free stack. */
    for (size_t i = 0; i < slab->limit; ++i)
        slab->free[slab->avail++] = slab->object_base + i * cache->size;

    /* Insert into free_slab list */
    slab_list_insert(&cache->free_slab, slab);
}
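
To make the capacity arithmetic concrete, here is a worked example; every number below (page size, header size, object size) is illustrative, not taken from the source:

/* Assume PAGE_SIZE = 4096, sizeof(*slab) = 40 (a 32-byte header plus
 * the one built-in free[] entry), and cache->size = 64. Then:
 *
 *   payload     = 4096 - (40 - 8)        = 4064 bytes
 *   limit       = 4064 / (64 + 8)        = 56 objects
 *   object_base = slab + 4096 - 56 * 64  = slab + 512
 *
 * The free stack ends at offset 32 + 56 * 8 = 480, below object_base
 * at 512, so the pointer stack and the objects never overlap.
 */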
Example #4
slab_t* slab_create(slab_cache_t* cache)
{
	const size_t size = SLAB_SZ;

	slab_t* slab = slab_depot_alloc(cache->bufsize);

	if (knot_unlikely(slab == 0)) {
		dbg_mem("%s: failed to allocate aligned memory block\n",
		          __func__);
		return 0;
	}

	/* Initialize slab. */
	slab->magic = SLAB_MAGIC;
	slab->cache = cache;
	slab_list_insert(&cache->slabs_free, slab);
#ifdef MEM_SLAB_CAP
	++cache->empty;
#endif

	/* Already initialized with this buffer size? (Slabs recycled from
	 * the depot keep their old layout and freelist.) */
	if (slab->bufsize == cache->bufsize) {
		return slab;
	} else {
		slab->bufsize = cache->bufsize;
	}

	/* Ensure the item size can hold at least a pointer. */
	size_t item_size = slab->bufsize;
	if (knot_unlikely(item_size < SLAB_MIN_BUFLEN)) {
		item_size = SLAB_MIN_BUFLEN;
	}

	/* Ensure at least some space for coloring */
	size_t data_size = size - sizeof(slab_t);
#ifdef MEM_COLORING
	size_t free_space = data_size % item_size;
	if (knot_unlikely(free_space < SLAB_MINCOLOR)) {
		free_space = SLAB_MINCOLOR;
	}

	/* Pick this slab's color offset; note the increment below is not
	 * thread-safe (an atomic __sync_fetch_and_add(&cache->color, 1)
	 * would be needed under concurrent callers). */
	unsigned short color = (cache->color += sizeof(void*));
	color = color % free_space;
#else
	const unsigned short color = 0;
#endif

	/* Calculate usable data size */
	data_size -= color;
	slab->bufs_count = data_size / item_size;
	slab->bufs_free = slab->bufs_count;

	// Save first item as next free
	slab->base = (char*)slab + sizeof(slab_t) + color;
	slab->head = (void**)slab->base;

	// Create freelist, skip last member, which is set to NULL
	// (i + 1 < count avoids unsigned underflow when bufs_count == 0)
	char* item = (char*)slab->head;
	for(unsigned i = 0; i + 1 < slab->bufs_count; ++i) {
		*((void**)item) = item + item_size;
		item += item_size;
	}

	// Set last buf to NULL (tail)
	*((void**)item) = (void*)0;

	dbg_mem("%s: created slab (%p, %p) (%zu B)\n",
	          __func__, slab, (char*)slab + size, size);
	return slab;
}
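
Two details in slab_create() deserve a note. The MEM_COLORING block offsets each new slab's data area by a different multiple of sizeof(void*) ("cache coloring"), so the first objects of successive slabs do not all land on the same CPU cache lines. And the freelist it builds is intrusive: each free buffer's first word stores the address of the next free buffer. A minimal sketch of popping that freelist (this is not Knot's actual allocation path, just an illustration under the same layout):

/* Pop one buffer from the intrusive freelist built by slab_create().
 * Sketch only; the real allocator also has to track list membership. */
static void *slab_freelist_pop(slab_t *slab)
{
	void **buf = slab->head;
	if (buf == 0)
		return 0;              /* slab exhausted */

	slab->head = (void**)*buf;     /* next free buffer, or NULL at tail */
	--slab->bufs_free;
	return buf;
}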
Example #5
/*! \brief Move slab from one linked list to another. */
static inline void slab_list_move(slab_t** target, slab_t* slab)
{
	slab_list_remove(slab);
	slab_list_insert(target, slab);
}
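
With this helper, each remove/insert pair seen earlier collapses to a single call. For instance (list names per Example #4; a cache->slabs_full list is an assumption):

	/* Slab just ran out of buffers: park it on the full list. */
	if (slab->bufs_free == 0)
		slab_list_move(&slab->cache->slabs_full, slab);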