Exemple #1
0
/*
 * Dump the card's security memory bytes to the debug channel.
 *
 * sm    - pointer to the security memory buffer.
 * debug - debug context; nothing is printed unless debug->active is set.
 *
 * With DEBUG_SECRET_PIN defined all 4 bytes (including the PIN bytes)
 * are dumped, otherwise only the first byte.
 */
void debug_secmem(uint8_t *sm, struct debug_t *debug)
{
	if (!debug->active)
		return;

	debug_print_P(PSTR("-> Security Memory Bytes ----\n"), debug);
#ifdef DEBUG_SECRET_PIN
	dbg_mem(sm, 4, debug);
#else
	dbg_mem(sm, 1, debug);
#endif
}
Exemple #2
0
/*
 * Dump all 256 bytes of the card's main memory to the debug channel.
 *
 * mm    - pointer to the main memory buffer.
 * debug - debug context; nothing is printed unless debug->active is set.
 */
void debug_memory(uint8_t *mm, struct debug_t *debug)
{
	if (!debug->active)
		return;

	debug_print_P(PSTR("-> Main Memory Bytes ----\n"), debug);
	dbg_mem(mm, 256, debug);
}
Exemple #3
0
/*
 * Dump the 4-byte Answer-To-Reset (ATR) buffer to the debug channel.
 *
 * atr   - pointer to the ATR bytes.
 * debug - debug context; nothing is printed unless debug->active is set.
 */
void debug_atr(uint8_t *atr, struct debug_t *debug)
{
	if (!debug->active)
		return;

	debug_print_P(PSTR("-> ATR Bytes ----\n"), debug);
	dbg_mem(atr, 4, debug);
}
Exemple #4
0
/*
 * Dump the 5 authentication-processing count bytes to the debug channel.
 *
 * pc    - pointer to the processing-count buffer.
 * debug - debug context; nothing is printed unless debug->active is set.
 *
 * Fix: corrected the misspelled "Proccessing" in the debug banner.
 */
void debug_proc_counts(uint8_t *pc, struct debug_t *debug)
{
	if (debug->active) {
		debug_print_P(PSTR("-> Auth Processing times ----\n"), debug);
		dbg_mem(pc, 5, debug);
	}
}
Exemple #5
0
/*
 * Dump the 4 protected memory bytes to the debug channel.
 *
 * pm    - pointer to the protected memory buffer.
 * debug - debug context; nothing is printed unless debug->active is set.
 */
void debug_prt_memory(uint8_t *pm, struct debug_t *debug)
{
	if (!debug->active)
		return;

	debug_print_P(PSTR("-> Protected Memory Bytes ----\n"), debug);
	dbg_mem(pm, 4, debug);
}
Exemple #6
0
/*
 * Return a buffer to its owning slab, or free a large out-of-slab block.
 *
 * ptr - pointer previously handed out by the allocator; NULL is a no-op.
 *
 * The owning slab is located by masking the pointer (slab_from_ptr); the
 * slab's magic field distinguishes a real slab buffer from a large block
 * that was malloc'd with a slab_obj_t header prepended.
 */
void slab_free(void* ptr)
{
	// Null pointer check
	if (knot_unlikely(!ptr)) {
		return;
	}

	// Get slab start address
	slab_t* slab = slab_from_ptr(ptr);
	assert(slab);

	// Check if it exists in directory
	if (slab->magic == SLAB_MAGIC) {

		// Return buf to slab: push it onto the slab's freelist head.
		*((void**)ptr) = (void*)slab->head;
		slab->head = (void**)ptr;
		++slab->bufs_free;

#ifdef MEM_DEBUG
		// Increment statistics
		__sync_add_and_fetch(&slab->cache->stat_frees, 1);
#endif

		// Return to partial: a slab that just went from full to
		// one-free moves from the full list back to slabs_free.
		if(knot_unlikely(slab->bufs_free == 1)) {
			slab_list_move(&slab->cache->slabs_free, slab);
		} else {
#ifdef MEM_SLAB_CAP
		// Recycle if empty, but keep up to MEM_SLAB_CAP empty slabs
		// cached per slab cache to avoid churn.
			if(knot_unlikely(slab_isempty(slab))) {
				if(slab->cache->empty == MEM_SLAB_CAP) {
					slab_destroy(&slab);
				} else {
					++slab->cache->empty;
				}
			}
#endif
		}

	} else {

		// Pointer is not a slab
		// Presuming it's a large block
		slab_obj_t* bs = (slab_obj_t*)ptr - 1;

#ifdef MEM_POISON
		// Remove memory barrier: re-enable access to the guard word
		// placed past the block before the pages are unmapped.
		// NOTE(review): arithmetic on void* is a GNU extension — confirm
		// the build always uses gcc/clang.
		mprotect(ptr + bs->size, sizeof(int), PROT_READ|PROT_WRITE);
#endif

		// Unmap: free the header, which is the true allocation start.
		dbg_mem("%s: unmapping large block of %zu bytes at %p\n",
		          __func__, bs->size, ptr);
		free(bs);
	}
}
Exemple #7
0
/*
 * Destroy a slab: unlink it from its list, release its memory, and
 * NULL the caller's pointer to guard against reuse.
 *
 * slab - address of the slab pointer; *slab must be valid on entry
 *        and is set to 0 on return.
 *
 * Fix: the debug log used to run after slab_depot_free(*slab), reading
 * an indeterminate (freed) pointer value; log before freeing instead.
 */
void slab_destroy(slab_t** slab)
{
	/* Disconnect from the list */
	slab_list_remove(*slab);

	/* Log while the pointer value is still valid to use. */
	dbg_mem("%s: deleted slab %p\n", __func__, *slab);

	/* Free slab */
	slab_depot_free(*slab);

	/* Invalidate pointer. */
	*slab = 0;
}
Exemple #8
0
/*
 * Initialize a slab cache that will serve buffers of 'bufsize' bytes.
 *
 * cache   - cache structure to initialize (fully zeroed first).
 * bufsize - buffer size in bytes; must be non-zero.
 *
 * Returns 0 on success, -1 when bufsize is zero.
 */
int slab_cache_init(slab_cache_t* cache, size_t bufsize)
{
	if (knot_unlikely(bufsize == 0)) {
		return -1;
	}

	memset(cache, 0, sizeof(*cache));
	cache->bufsize = bufsize;

	dbg_mem("%s: created cache of size %zu\n", __func__, bufsize);
	return 0;
}
Exemple #9
0
/*
 * Tear down a slab cache: release every slab on both lists and reset
 * the cache fields so further use is detectable.
 */
void slab_cache_destroy(slab_cache_t* cache) {

	/* Release all slabs on the free/partial and full lists. */
	unsigned n_free = slab_cache_free_slabs(cache->slabs_free);
	unsigned n_full = slab_cache_free_slabs(cache->slabs_full);
#ifndef MEM_DEBUG
	UNUSED(n_free);
	UNUSED(n_full);
#else
	dbg_mem("%s: %u empty/partial, %u full caches\n",
	          __func__, n_free, n_full);
#endif

	/* Invalidate the cache. */
	cache->bufsize = 0;
	cache->slabs_free = cache->slabs_full = 0;
}
Exemple #10
0
/*
 * Allocate and initialize a new slab for the given cache.
 *
 * cache - owning slab cache; its bufsize determines the item size.
 *
 * Returns the new slab linked into cache->slabs_free, or 0 on
 * allocation failure. Recycled depot slabs that already match the
 * cache's bufsize skip freelist reconstruction.
 *
 * Fix: the final debug log computed the slab end as `slab + size`,
 * which advances by size * sizeof(slab_t) bytes (slab is a slab_t*);
 * the intended end address is `(char*)slab + size`.
 */
slab_t* slab_create(slab_cache_t* cache)
{
	const size_t size = SLAB_SZ;

	slab_t* slab = slab_depot_alloc(cache->bufsize);

	if (knot_unlikely(slab == 0)) {
		dbg_mem("%s: failed to allocate aligned memory block\n",
		          __func__);
		return 0;
	}

	/* Initialize slab. */
	slab->magic = SLAB_MAGIC;
	slab->cache = cache;
	slab_list_insert(&cache->slabs_free, slab);
#ifdef MEM_SLAB_CAP
	++cache->empty;
#endif

	/* Already initialized? A depot-recycled slab with a matching
	 * bufsize still has a valid freelist, so reuse it as-is. */
	if (slab->bufsize == cache->bufsize) {
		return slab;
	} else {
		slab->bufsize = cache->bufsize;
	}

	/* Ensure the item size can hold at least a size of ptr,
	 * since each free buffer stores the next-free pointer in-place. */
	size_t item_size = slab->bufsize;
	if (knot_unlikely(item_size < SLAB_MIN_BUFLEN)) {
		item_size = SLAB_MIN_BUFLEN;
	}

	/* Ensure at least some space for coloring */
	size_t data_size = size - sizeof(slab_t);
#ifdef MEM_COLORING
	size_t free_space = data_size % item_size;
	if (knot_unlikely(free_space < SLAB_MINCOLOR)) {
		free_space = SLAB_MINCOLOR;
	}


	/* Advance the per-cache color offset so consecutive slabs start
	 * their data at different cache-line offsets.
	 * NOTE(review): the non-atomic increment below tolerates races on
	 * cache->color (the commented-out atomic suggests this is deliberate
	 * best-effort) — confirm. */
	/// unsigned short color = __sync_fetch_and_add(&cache->color, 1);
	unsigned short color = (cache->color += sizeof(void*));
	color = color % free_space;
#else
	const unsigned short color = 0;
#endif

	/* Calculate useable data size */
	data_size -= color;
	slab->bufs_count = data_size / item_size;
	slab->bufs_free = slab->bufs_count;

	/* Save first item as next free.
	 * NOTE(review): assumes data_size >= item_size so bufs_count >= 1;
	 * bufs_count == 0 would make the unsigned loop bound below wrap. */
	slab->base = (char*)slab + sizeof(slab_t) + color;
	slab->head = (void**)slab->base;

	/* Create freelist: each free buffer holds a pointer to the next. */
	char* item = (char*)slab->head;
	for(unsigned i = 0; i < slab->bufs_count - 1; ++i) {
		*((void**)item) = item + item_size;
		item += item_size;
	}

	/* Set last buf to NULL (tail). */
	*((void**)item) = (void*)0;

	/* Log the byte range covered by the new slab. */
	dbg_mem("%s: created slab (%p, %p) (%zu B)\n",
	          __func__, slab, (char*)slab + size, size);
	return slab;
}