Example #1
void* slab_alloc(slab_t* slab)
{
	// Fetch first free item
	void **item = 0;
	{
		if((item = slab->head)) {
			slab->head = (void**)*item;
			--slab->bufs_free;
		} else {
			// No more free items
			return 0;
		}
	}

#ifdef MEM_DEBUG
	// Increment statistics
	__sync_add_and_fetch(&slab->cache->stat_allocs, 1);
#endif

	// Move to full?
	if (knot_unlikely(slab->bufs_free == 0)) {
		slab_list_move(&slab->cache->slabs_full, slab);
	} else {
#ifdef MEM_SLAB_CAP
		// Mark not empty?
		if (knot_unlikely(slab->bufs_free == slab->bufs_count - 1)) {
			--slab->cache->empty;
		}
#endif
	}

	return item;
}
Example #2
void slab_free(void* ptr)
{
	// Null pointer check
	if (knot_unlikely(!ptr)) {
		return;
	}

	// Get slab start address
	slab_t* slab = slab_from_ptr(ptr);
	assert(slab);

	// Check if it exists in directory
	if (slab->magic == SLAB_MAGIC) {

		// Return buf to slab
		*((void**)ptr) = (void*)slab->head;
		slab->head = (void**)ptr;
		++slab->bufs_free;

#ifdef MEM_DEBUG
		// Increment statistics
		__sync_add_and_fetch(&slab->cache->stat_frees, 1);
#endif

		// Return to partial
		if(knot_unlikely(slab->bufs_free == 1)) {
			slab_list_move(&slab->cache->slabs_free, slab);
		} else {
#ifdef MEM_SLAB_CAP
		// Recycle if empty
			if(knot_unlikely(slab_isempty(slab))) {
				if(slab->cache->empty == MEM_SLAB_CAP) {
					slab_destroy(&slab);
				} else {
					++slab->cache->empty;
				}
			}
#endif
		}

	} else {

		// Pointer is not a slab
		// Presuming it's a large block
		slab_obj_t* bs = (slab_obj_t*)ptr - 1;

#ifdef MEM_POISON
		// Remove memory barrier
		mprotect(ptr + bs->size, sizeof(int), PROT_READ|PROT_WRITE);
#endif

		// Unmap
		dbg_mem("%s: unmapping large block of %zu bytes at %p\n",
		          __func__, bs->size, ptr);
		free(bs);
	}
}
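A minimal usage sketch (not part of the original source), assuming the allocator's header is included and that the slab_t* comes from slab_create() as in Example #7 below: slab_alloc() hands out one fixed-size buffer or NULL when the slab's freelist is exhausted, and slab_free() silently ignores NULL.

#include <string.h>

/* Sketch only: `slab` is assumed to come from slab_create() on an
 * initialized cache; the slab allocator header is assumed included. */
static void use_one_buffer(slab_t *slab)
{
	void *buf = slab_alloc(slab);
	if (buf == NULL) {
		return; /* No free items left in this slab. */
	}

	/* The buffer is cache->bufsize bytes large. */
	memset(buf, 0, slab->cache->bufsize);

	slab_free(buf);  /* Returns the buffer to its owning slab. */
	slab_free(NULL); /* NULL is a harmless no-op. */
}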
Example #3
uint8_t log_levels(int facility, logsrc_t src)
{
	// Check facility
	if (knot_unlikely(!LOG_FCL_SIZE || facility >= LOG_FCL_SIZE)) {
		return 0;
	}

	return *(LOG_FCL + (facility << LOG_SRC_BITS) + src);
}
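A hedged usage sketch: log_levels() returns a per-facility, per-source bitmask of enabled levels. The sketch assumes the mask follows the standard <syslog.h> LOG_MASK() bit layout (an assumption, not stated above) and uses a hypothetical LOG_SERVER source constant.

#include <stdint.h>
#include <syslog.h>

/* Sketch: check whether LOG_ERR messages are enabled for a facility.
 * `facility` is an index as used above; LOG_SERVER is a placeholder source. */
static int error_enabled(int facility)
{
	uint8_t mask = log_levels(facility, LOG_SERVER);
	return (mask & LOG_MASK(LOG_ERR)) != 0;
}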
Example #4
int slab_cache_init(slab_cache_t* cache, size_t bufsize)
{
	if (knot_unlikely(!bufsize)) {
		return -1;
	}

	memset(cache, 0, sizeof(slab_cache_t));
	cache->bufsize = bufsize;
	dbg_mem("%s: created cache of size %zu\n",
	          __func__, bufsize);

	return 0;
}
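A short sketch of the call: the caller provides the slab_cache_t storage, and the only failure mode is a zero bufsize.

/* Sketch: initialize a cache of 64-byte buffers in caller-owned storage. */
static int make_cache(slab_cache_t *cache)
{
	if (slab_cache_init(cache, 64) != 0) {
		return -1; /* Rejected only when bufsize == 0. */
	}
	return 0;
}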
Example #5
int log_open_file(const char* filename)
{
	// Check facility
	if (knot_unlikely(!LOG_FCL_SIZE || LOGT_FILE + LOG_FDS_OPEN >= LOG_FCL_SIZE)) {
		return KNOT_ERROR;
	}

	// Open file
	LOG_FDS[LOG_FDS_OPEN] = fopen(filename, "a");
	if (!LOG_FDS[LOG_FDS_OPEN]) {
		return KNOT_EINVAL;
	}

	// Disable buffering
	setvbuf(LOG_FDS[LOG_FDS_OPEN], (char *)0, _IONBF, 0);

	return LOGT_FILE + LOG_FDS_OPEN++;
}
Example #6
int log_levels_set(int facility, logsrc_t src, uint8_t levels)
{
	// Check facility
	if (knot_unlikely(!LOG_FCL_SIZE || facility >= LOG_FCL_SIZE)) {
		return KNOT_EINVAL;
	}

	// Get facility pointer from offset
	uint8_t *lp = LOG_FCL + (facility << LOG_SRC_BITS);

	// Assign level if not multimask
	if (src != LOG_ANY) {
		*(lp + src) = levels;
	} else {
		// Any == set to all sources
		for (int i = 0; i <= LOG_ANY; ++i) {
			*(lp + i) = levels;
		}
	}

	return KNOT_EOK;
}
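Examples #5 and #6 chain naturally: the facility id returned by log_open_file() can be passed straight to log_levels_set(). A sketch, assuming the KNOT_* error codes are negative and that the level mask follows <syslog.h> conventions (the LOG_UPTO() value here is an assumption, not taken from the excerpt):

#include <syslog.h>

/* Sketch: open a log file and enable messages up to LOG_INFO
 * from all sources for the new facility. */
static int open_and_enable(const char *path)
{
	int facility = log_open_file(path);
	if (facility < 0) {
		return facility; /* KNOT_ERROR or KNOT_EINVAL. */
	}

	return log_levels_set(facility, LOG_ANY, LOG_UPTO(LOG_INFO));
}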
Example #7
slab_t* slab_create(slab_cache_t* cache)
{
	const size_t size = SLAB_SZ;

	slab_t* slab = slab_depot_alloc(cache->bufsize);

	if (knot_unlikely(slab == 0)) {
		dbg_mem("%s: failed to allocate aligned memory block\n",
		          __func__);
		return 0;
	}

	/* Initialize slab. */
	slab->magic = SLAB_MAGIC;
	slab->cache = cache;
	slab_list_insert(&cache->slabs_free, slab);
#ifdef MEM_SLAB_CAP
	++cache->empty;
#endif

	/* Already initialized? */
	if (slab->bufsize == cache->bufsize) {
		return slab;
	} else {
		slab->bufsize = cache->bufsize;
	}

	/* Ensure the item size can hold at least a size of ptr. */
	size_t item_size = slab->bufsize;
	if (knot_unlikely(item_size < SLAB_MIN_BUFLEN)) {
		item_size = SLAB_MIN_BUFLEN;
	}

	/* Ensure at least some space for coloring */
	size_t data_size = size - sizeof(slab_t);
#ifdef MEM_COLORING
	size_t free_space = data_size % item_size;
	if (knot_unlikely(free_space < SLAB_MINCOLOR)) {
		free_space = SLAB_MINCOLOR;
	}

	/// unsigned short color = __sync_fetch_and_add(&cache->color, 1);
	unsigned short color = (cache->color += sizeof(void*));
	color = color % free_space;
#else
	const unsigned short color = 0;
#endif

	/* Calculate useable data size */
	data_size -= color;
	slab->bufs_count = data_size / item_size;
	slab->bufs_free = slab->bufs_count;

	// Save first item as next free
	slab->base = (char*)slab + sizeof(slab_t) + color;
	slab->head = (void**)slab->base;

	// Create freelist, skip last member, which is set to NULL
	char* item = (char*)slab->head;
	for(unsigned i = 0; i < slab->bufs_count - 1; ++i) {
		*((void**)item) = item + item_size;
		item += item_size;
	}

	// Set last buf to NULL (tail)
	*((void**)item) = (void*)0;

	dbg_mem("%s: created slab (%p, %p) (%zu B)\n",
	          __func__, slab, (char*)slab + size, size);
	return slab;
}