Example 1
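An early version of kasan_slab_free() from the Linux kernel's KASAN code (apparently the quarantine-era implementation from around v4.6): each slab object carries an explicit life-cycle state in its per-object metadata. Freeing a live object moves it into the quarantine; freeing an object already in KASAN_STATE_QUARANTINE or KASAN_STATE_FREE is reported as a double free.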
bool kasan_slab_free(struct kmem_cache *cache, void *object)
{
#ifdef CONFIG_SLAB
	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return false;

	if (likely(cache->flags & SLAB_KASAN)) {
		struct kasan_alloc_meta *alloc_info =
			get_alloc_info(cache, object);
		struct kasan_free_meta *free_info =
			get_free_info(cache, object);

		switch (alloc_info->state) {
		case KASAN_STATE_ALLOC:
			alloc_info->state = KASAN_STATE_QUARANTINE;
			quarantine_put(free_info, cache);
			set_track(&free_info->track, GFP_NOWAIT);
			kasan_poison_slab_free(cache, object);
			return true;
		case KASAN_STATE_QUARANTINE:
		case KASAN_STATE_FREE:
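			/* Object already quarantined or freed: report a double free */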
			pr_err("Double free");
			dump_stack();
			break;
		default:
			break;
		}
	}
	return false;
#else
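	/* No quarantine without CONFIG_SLAB in this version: just poison the object */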
	kasan_poison_slab_free(cache, object);
	return false;
#endif
}
Example 2
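kasan_poison_kfree() poisons the shadow memory backing a pointer that is being freed. The pointer may refer either to a slab object or, for large kmalloc() allocations, to pages obtained directly from the page allocator, and the two cases are poisoned differently.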
void kasan_poison_kfree(void *ptr)
{
	struct page *page;

	page = virt_to_head_page(ptr);

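	/*
	 * Large kmalloc() allocations come straight from the page allocator
	 * and are not slab objects, so poison the whole compound page in
	 * that case.
	 */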
	if (unlikely(!PageSlab(page)))
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				KASAN_FREE_PAGE);
	else
		kasan_poison_slab_free(page->slab_cache, ptr);
}
Example 3
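A later rewrite of kasan_slab_free() (seemingly the v4.9-era version that removed the explicit per-object state field used in Example 1): instead of consulting metadata, it reads the object's shadow byte to decide whether the object is still live. A shadow value outside the live range means the object was already freed or redzoned, which is reported as a double free.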
bool kasan_slab_free(struct kmem_cache *cache, void *object)
{
	s8 shadow_byte;

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return false;

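	/*
	 * For a live object the shadow byte is 0 (fully accessible) or
	 * 1..KASAN_SHADOW_SCALE_SIZE-1 (only the first N bytes of the last
	 * granule accessible); any other value means the object is redzoned
	 * or already freed.
	 */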
	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
		kasan_report_double_free(cache, object, shadow_byte);
		return true;
	}

	kasan_poison_slab_free(cache, object);

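	/* Caches without KASAN metadata cannot track or quarantine objects */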
	if (unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
	quarantine_put(get_free_info(cache, object), cache);
	return true;
}