Example #1
0
/*
 * Report diagnostic information about a slab object: its address, the
 * cache it belongs to, and — when the cache carries KASAN metadata —
 * its allocation state together with the recorded allocation (and, if
 * already freed, deallocation) stack traces.
 *
 * @unused_reason is accepted for interface compatibility but ignored.
 */
static void object_err(struct kmem_cache *cache, struct page *page,
			void *object, char *unused_reason)
{
	struct kasan_alloc_meta *info = get_alloc_info(cache, object);
	struct kasan_free_meta *finfo;

	dump_stack();
	pr_err("Object at %p, in cache %s\n", object, cache->name);

	/* Without SLAB_KASAN there is no per-object metadata to print. */
	if (!(cache->flags & SLAB_KASAN))
		return;

	if (info->state == KASAN_STATE_INIT) {
		pr_err("Object not allocated yet\n");
	} else if (info->state == KASAN_STATE_ALLOC) {
		pr_err("Object allocated with size %u bytes.\n",
		       info->alloc_size);
		pr_err("Allocation:\n");
		print_track(&info->track);
	} else if (info->state == KASAN_STATE_FREE) {
		pr_err("Object freed, allocated with size %u bytes\n",
		       info->alloc_size);
		finfo = get_free_info(cache, object);
		pr_err("Allocation:\n");
		print_track(&info->track);
		pr_err("Deallocation:\n");
		print_track(&finfo->track);
	}
}
Example #2
0
/*
 * KASAN hook for kmalloc()-style allocations served from a slab cache.
 *
 * Unpoisons the first @size bytes of @object (the part handed to the
 * caller) and poisons the remainder of the slot, up to
 * cache->object_size, as a KMALLOC redzone so out-of-bounds accesses
 * past the requested size are caught.
 *
 * Redzone boundaries are rounded up to KASAN_SHADOW_SCALE_SIZE because
 * shadow memory tracks granules of that size, not individual bytes.
 *
 * A NULL @object (failed allocation) is a no-op.
 */
void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
		   gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

#ifdef CONFIG_SLAB
	/*
	 * Use reclaim-capable allocations as an opportunity to drain
	 * the quarantine (only SLAB uses the quarantine here).
	 */
	if (flags & __GFP_RECLAIM)
		quarantine_reduce();
#endif

	if (unlikely(object == NULL))
		return;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(object, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);
#ifdef CONFIG_SLAB
	/* Record state and the allocation stack in the object's metadata. */
	if (cache->flags & SLAB_KASAN) {
		struct kasan_alloc_meta *alloc_info =
			get_alloc_info(cache, object);

		alloc_info->state = KASAN_STATE_ALLOC;
		alloc_info->alloc_size = size;
		set_track(&alloc_info->track, flags);
	}
#endif
}
Example #3
0
/*
 * KASAN hook invoked when an object is freed back to a slab cache.
 *
 * Returns true when KASAN has taken ownership of the object (it was
 * poisoned and placed in the quarantine) and the caller must NOT free
 * it immediately; false when the caller should proceed with the real
 * free.
 *
 * Fix: the "Double free" message now ends with '\n' — kernel printk
 * messages must be newline-terminated, otherwise the next log line can
 * be appended to this one.
 */
bool kasan_slab_free(struct kmem_cache *cache, void *object)
{
#ifdef CONFIG_SLAB
	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return false;

	if (likely(cache->flags & SLAB_KASAN)) {
		struct kasan_alloc_meta *alloc_info =
			get_alloc_info(cache, object);
		struct kasan_free_meta *free_info =
			get_free_info(cache, object);

		switch (alloc_info->state) {
		case KASAN_STATE_ALLOC:
			/*
			 * Defer the real free: record the free stack,
			 * poison the object, and park it in quarantine.
			 */
			alloc_info->state = KASAN_STATE_QUARANTINE;
			quarantine_put(free_info, cache);
			set_track(&free_info->track, GFP_NOWAIT);
			kasan_poison_slab_free(cache, object);
			return true;
		case KASAN_STATE_QUARANTINE:
		case KASAN_STATE_FREE:
			/* Freeing an already-freed/quarantined object. */
			pr_err("Double free\n");
			dump_stack();
			break;
		default:
			break;
		}
	}
	return false;
#else
	/* No quarantine without CONFIG_SLAB: just poison and free now. */
	kasan_poison_slab_free(cache, object);
	return false;
#endif
}
Example #4
0
/*
 * Common slab-free hook. Returns true when KASAN keeps the object
 * (invalid free reported, or object parked in quarantine) and the
 * caller must not free it; false when the caller should free it now.
 *
 * @ip: caller address used in reports.
 * @quarantine: whether eligible objects should be quarantined instead
 *	of freed immediately.
 */
static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
			      unsigned long ip, bool quarantine)
{
	s8 shadow_byte;
	unsigned long rounded_up_size;

	/* Freeing a pointer that is not the start of an object is invalid. */
	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	/*
	 * A negative shadow byte or one >= KASAN_SHADOW_SCALE_SIZE means
	 * the object is not currently in an allocated state — this free
	 * is a double-free or otherwise invalid.
	 */
	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
		kasan_report_invalid_free(object, ip);
		return true;
	}

	/* Poison the whole slot; shadow granularity forces the round-up. */
	rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);

	if (!quarantine || unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	/* Record the free stack and defer the real free via quarantine. */
	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
	quarantine_put(get_free_info(cache, object), cache);
	return true;
}
Example #5
0
/*
 * Zero the KASAN allocation metadata of a freshly created slab object,
 * provided the cache carries KASAN bookkeeping (SLAB_KASAN).
 */
void kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
{
	if (cache->flags & SLAB_KASAN) {
		struct kasan_alloc_meta *meta = get_alloc_info(cache, object);

		__memset(meta, 0, sizeof(*meta));
	}
}
Example #6
0
/*
 * Actually free one quarantined object: mark its metadata as freed and
 * return it to the slab allocator.
 *
 * Interrupts are disabled around the state change and ___cache_free();
 * presumably ___cache_free() must run with irqs off here — TODO confirm
 * against the SLAB internals this path targets.
 */
static void qlink_free(void **qlink, struct kmem_cache *cache)
{
	void *object = qlink_to_object(qlink, cache);
	struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);
	unsigned long flags;

	local_irq_save(flags);
	alloc_info->state = KASAN_STATE_FREE;
	___cache_free(cache, object, _THIS_IP_);
	local_irq_restore(flags);
}
Example #7
0
void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
#ifdef CONFIG_SLAB
	if (cache->flags & SLAB_KASAN) {
		struct kasan_alloc_meta *alloc_info =
			get_alloc_info(cache, object);
		alloc_info->state = KASAN_STATE_INIT;
	}
#endif
}
Example #8
0
File: report.c Project: Lyude/linux
/*
 * Describe a slab object for a KASAN report: print its recorded
 * allocation and free stack traces (when the cache keeps KASAN
 * metadata), then describe where @addr lies relative to the object.
 */
static void describe_object(struct kmem_cache *cache, void *object,
				const void *addr)
{
	struct kasan_alloc_meta *meta = get_alloc_info(cache, object);

	if (cache->flags & SLAB_KASAN) {
		print_track(&meta->alloc_track, "Allocated");
		pr_err("\n");
		print_track(&meta->free_track, "Freed");
		pr_err("\n");
	}

	describe_object_addr(cache, object, addr);
}
Example #9
0
/*
 * Dump diagnostic information for a slab object: the current stack,
 * the object's address, owning cache and size, and — when KASAN
 * metadata exists — its recorded allocation and free stack traces.
 */
static void kasan_object_err(struct kmem_cache *cache, void *object)
{
	struct kasan_alloc_meta *meta = get_alloc_info(cache, object);

	dump_stack();
	pr_err("Object at %p, in cache %s size: %d\n", object, cache->name,
		cache->object_size);

	/* No SLAB_KASAN means no per-object tracks to print. */
	if (!(cache->flags & SLAB_KASAN))
		return;

	pr_err("Allocated:\n");
	print_track(&meta->alloc_track);
	pr_err("Freed:\n");
	print_track(&meta->free_track);
}
Example #10
0
/*
 * KASAN hook invoked when an object is freed back to a slab cache.
 * Returns true when KASAN keeps the object (double-free reported, or
 * object poisoned and placed in quarantine) so the caller must not
 * free it; false when the caller should free it immediately.
 */
bool kasan_slab_free(struct kmem_cache *cache, void *object)
{
	s8 shadow_byte;

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return false;

	/*
	 * A negative shadow byte or one >= KASAN_SHADOW_SCALE_SIZE means
	 * the object is not currently allocated — this is a double free.
	 */
	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
		kasan_report_double_free(cache, object, shadow_byte);
		return true;
	}

	kasan_poison_slab_free(cache, object);

	/* Without metadata there is nowhere to record the free track. */
	if (unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	/* Record the free stack and defer the real free via quarantine. */
	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
	quarantine_put(get_free_info(cache, object), cache);
	return true;
}
Example #11
0
/*
 * KASAN hook for kmalloc()-style allocations from a slab cache.
 *
 * Unpoisons the @size bytes handed to the caller and poisons the rest
 * of the slot (up to cache->object_size) as a KMALLOC redzone, so
 * accesses beyond the requested size are caught.  Redzone bounds are
 * rounded up to KASAN_SHADOW_SCALE_SIZE because shadow memory tracks
 * granules of that size.  A NULL @object is a no-op.
 */
void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
		   gfp_t flags)
{
	unsigned long rz_start;
	unsigned long rz_end;

	/* Blocking allocations are a chance to drain the quarantine. */
	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return;

	rz_start = round_up((unsigned long)(object + size),
			    KASAN_SHADOW_SCALE_SIZE);
	rz_end = round_up((unsigned long)object + cache->object_size,
			  KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(object, size);
	kasan_poison_shadow((void *)rz_start, rz_end - rz_start,
			    KASAN_KMALLOC_REDZONE);

	/* Record the allocation stack when metadata is available. */
	if (cache->flags & SLAB_KASAN)
		set_track(&get_alloc_info(cache, object)->alloc_track, flags);
}