Example no. 1
0
/**
 * ksize - report the usable size of a kmalloc()'ed chunk
 * @p: pointer previously returned by kmalloc().
 *
 * __kmalloc() stores a back-pointer to the owning kmem cache (or NULL
 * for large allocations) in the word immediately before the chunk it
 * hands out; read it back here.
 *
 * Returns the object size of the backing cache, or 0 when the chunk
 * did not come from a cache.  The old code returned -1, which a
 * size_t silently turns into SIZE_MAX — a bogus "huge" size for any
 * caller that uses the result arithmetically.
 */
size_t ksize(const void *p)
{
	/* back-pointer written by __kmalloc() lives one word before p */
	struct kmem_cache *cache = (struct kmem_cache *)*((void**)p - 1);

	if (cache)
		return kmem_cache_size(cache);

	/* no cache: actual size unknown here; 0 signals "don't know" */
	return 0;
}
Example no. 2
0
/**
 * Allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * in the kernel.
 *
 * A back-pointer to the originating kmem cache (NULL for large
 * allocations) is stored in the word in front of the returned chunk
 * so kfree()/ksize() can recover it later.
 */
void *__kmalloc(size_t size, gfp_t flags)
{
	/* add space for back-pointer */
	size += sizeof(void *);

	/* find appropriate cache */
	struct kmem_cache *cache = find_cache(size);

	void **p;
	if (cache)
		/* allocate from cache */
		p = kmem_cache_alloc(cache, flags);
	else
		/* no cache for this size - use ddekit malloc */
		p = ddekit_large_malloc(size);

	/* %zu: size is size_t — the old "%d" is a format/argument
	 * mismatch (undefined behavior on LP64 targets) */
	ddekit_log(DEBUG_MALLOC, "size=%zu, cache=%p (%d) => %p",
	           size, cache, cache ? kmem_cache_size(cache) : 0, p);

	/* return pointer to actual chunk */
	if (p) {
		/* record owning cache just in front of the chunk */
		*p = cache;
		p++;

		/* Need to zero out mem?  Only the payload, not the
		 * back-pointer word. */
		if (flags & __GFP_ZERO)
			memset(p, 0, size - sizeof(void *));
	}

	return p;
}
Example no. 3
0
/*
 * Wrapper around kmem_cache_alloc_notrace() that reports every
 * successful allocation to the leak checker (klc_add_alloc) along
 * with the object size and the configured stack depth.
 */
static void *
repl_kmem_cache_alloc_notrace(struct kmem_cache *mc, gfp_t flags) 
{
    void *obj = kmem_cache_alloc_notrace(mc, flags);

    /* failed allocations are not tracked */
    if (obj != NULL)
        klc_add_alloc(obj, (size_t)kmem_cache_size(mc), stack_depth);

    return obj;
}
/*
 * Module init: create "my_cache" (32-byte objects, hardware
 * cache-line aligned) and report its parameters.
 *
 * Returns 0 on success, -ENOMEM when the cache could not be created.
 * The original returned 0 unconditionally, which would let the module
 * finish loading with my_cachep == NULL and crash on first use.
 */
int __init kmem_cache_create_init(void)
{
	my_cachep = kmem_cache_create("my_cache", 32, 0, SLAB_HWCACHE_ALIGN, NULL);
	if (my_cachep == NULL) {
		printk(KERN_ERR "kmem_cache_create failed!\n");
		return -ENOMEM;
	}

	printk(KERN_INFO "Cache size is: %d\n", kmem_cache_size(my_cachep));
	printk(KERN_INFO "Cache name is: %s\n", kmem_cache_name(my_cachep));
	return 0;
}
Example no. 5
0
/*
 * Allocate one object from @psCache; when memory-allocation debugging
 * is compiled in, also record the allocation together with its call
 * site (@pszFileName, @ui32Line) in the debug allocation table.
 */
void *_KMemCacheAllocWrapper(struct kmem_cache *psCache,
				 gfp_t Flags,
				 char *pszFileName, u32 ui32Line)
{
	void *pvObject = kmem_cache_alloc(psCache, Flags);

#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
	DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE,
			       pvObject, pvObject, 0, psCache,
			       kmem_cache_size(psCache),
			       pszFileName, ui32Line);
#endif

	return pvObject;
}
Example no. 6
0
/**
 * Free previously allocated memory
 * @objp: pointer returned by kmalloc.
 *
 * If @objp is NULL, no operation is performed.
 *
 * Don't free memory not originally allocated by kmalloc()
 * or you will run into trouble.
 */
void kfree(const void *objp)
{
	void **chunk;

	if (!objp)
		return;

	/* the owning cache (or NULL) sits one word before the chunk */
	chunk = (void **)objp - 1;

	ddekit_log(DEBUG_MALLOC, "objp=%p cache=%p (%d)",
	           chunk, *chunk, *chunk ? kmem_cache_size(*chunk) : 0);

	if (*chunk)
		/* free from cache */
		kmem_cache_free(*chunk, chunk);
	else
		/* no cache for this size - use ddekit free */
		ddekit_large_free(chunk);
}
Example no. 7
0
File: test.c Project: sidwubf/bq
/*
 * Smoke-test my_cachep: print its name and object size, then
 * allocate one object and immediately release it.  Always returns 0.
 */
int slab_test( void )
{
  void *obj;

  printk( "Cache name is %s\n", kmem_cache_name( my_cachep ) );
  printk( "Cache object size is %d\n", kmem_cache_size( my_cachep ) );

  obj = kmem_cache_alloc( my_cachep, GFP_KERNEL );
  if (obj)
    kmem_cache_free( my_cachep, obj );

  return 0;
}
Example no. 8
0
/**
 * lc_create - prepares to track objects in an active set
 * @name: descriptive name only used in lc_seq_printf_stats and lc_seq_dump_details
 * @e_count: number of elements allowed to be active simultaneously
 * @e_size: size of the tracked objects
 * @e_off: offset to the &struct lc_element member in a tracked object
 *
 * Returns a pointer to a newly initialized struct lru_cache on success,
 * or NULL on (allocation) failure.
 */
struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
		unsigned e_count, size_t e_size, size_t e_off)
{
	struct hlist_head *slot = NULL;
	struct lc_element **element = NULL;
	struct lru_cache *lc;
	struct lc_element *e;
	unsigned cache_obj_size = kmem_cache_size(cache);
	unsigned i;

	WARN_ON(cache_obj_size < e_size);
	if (cache_obj_size < e_size)
		return NULL;

	/* e_count too big; would probably fail the allocation below anyways.
	 * for typical use cases, e_count should be few thousand at most. */
	if (e_count > LC_MAX_ACTIVE)
		return NULL;

	slot = kzalloc(e_count * sizeof(struct hlist_head*), GFP_KERNEL);
	if (!slot)
		goto out_fail;
	element = kzalloc(e_count * sizeof(struct lc_element *), GFP_KERNEL);
	if (!element)
		goto out_fail;

	lc = kzalloc(sizeof(*lc), GFP_KERNEL);
	if (!lc)
		goto out_fail;

	INIT_LIST_HEAD(&lc->in_use);
	INIT_LIST_HEAD(&lc->lru);
	INIT_LIST_HEAD(&lc->free);

	lc->name = name;
	lc->element_size = e_size;
	lc->element_off = e_off;
	lc->nr_elements = e_count;
	lc->new_number = LC_FREE;
	lc->lc_cache = cache;
	lc->lc_element = element;
	lc->lc_slot = slot;

	/* preallocate all objects */
	for (i = 0; i < e_count; i++) {
		void *p = kmem_cache_alloc(cache, GFP_KERNEL);
		if (!p)
			break;
		memset(p, 0, lc->element_size);
		e = p + e_off;
		e->lc_index = i;
		e->lc_number = LC_FREE;
		list_add(&e->list, &lc->free);
		element[i] = e;
	}
	if (i == e_count)
		return lc;

	/* else: could not allocate all elements, give up */
	for (i--; i; i--) {
		void *p = element[i];
		kmem_cache_free(cache, p - e_off);
	}
	kfree(lc);
out_fail:
	kfree(element);
	kfree(slot);
	return NULL;
}