void kasan_cache_create(struct kmem_cache *cache, size_t *size, unsigned long *flags) { int redzone_adjust; /* Make sure the adjusted size is still less than * KMALLOC_MAX_CACHE_SIZE. * TODO: this check is only useful for SLAB, but not SLUB. We'll need * to skip it for SLUB when it starts using kasan_cache_create(). */ if (*size > KMALLOC_MAX_CACHE_SIZE - sizeof(struct kasan_alloc_meta) - sizeof(struct kasan_free_meta)) return; *flags |= SLAB_KASAN; /* Add alloc meta. */ cache->kasan_info.alloc_meta_offset = *size; *size += sizeof(struct kasan_alloc_meta); /* Add free meta. */ if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor || cache->object_size < sizeof(struct kasan_free_meta)) { cache->kasan_info.free_meta_offset = *size; *size += sizeof(struct kasan_free_meta); } redzone_adjust = optimal_redzone(cache->object_size) - (*size - cache->object_size); if (redzone_adjust > 0) *size += redzone_adjust; *size = min(KMALLOC_MAX_CACHE_SIZE, max(*size, cache->object_size + optimal_redzone(cache->object_size))); }
void kasan_cache_create(struct kmem_cache *cache, size_t *size, unsigned long *flags) { int redzone_adjust; int orig_size = *size; /* Add alloc meta. */ cache->kasan_info.alloc_meta_offset = *size; *size += sizeof(struct kasan_alloc_meta); /* Add free meta. */ if (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor || cache->object_size < sizeof(struct kasan_free_meta)) { cache->kasan_info.free_meta_offset = *size; *size += sizeof(struct kasan_free_meta); } redzone_adjust = optimal_redzone(cache->object_size) - (*size - cache->object_size); if (redzone_adjust > 0) *size += redzone_adjust; *size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size + optimal_redzone(cache->object_size))); /* * If the metadata doesn't fit, don't enable KASAN at all. */ if (*size <= cache->kasan_info.alloc_meta_offset || *size <= cache->kasan_info.free_meta_offset) { cache->kasan_info.alloc_meta_offset = 0; cache->kasan_info.free_meta_offset = 0; *size = orig_size; return; } *flags |= SLAB_KASAN; }