void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
	void *b;

	flags &= gfp_allowed_mask;

	lockdep_trace_alloc(flags);

	if (c->size < PAGE_SIZE) {
		/* Small objects are carved out of SLOB's free lists. */
		b = slob_alloc(c->size, flags, c->align, node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
					    SLOB_UNITS(c->size) * SLOB_UNIT,
					    flags, node);
	} else {
		/* Page-sized and larger objects go straight to the page allocator. */
		b = slob_new_pages(flags, get_order(c->size), node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
					    PAGE_SIZE << get_order(c->size),
					    flags, node);
	}

	if (b && c->ctor)
		c->ctor(b);

	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
	return b;
}
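/*
 * For context: a minimal sketch of how slob_alloc_node() is reached.
 * In mm/slob.c of the same kernel era, the exported entry points are
 * thin wrappers; this follows the node-aware one. Treat it as an
 * illustration of the call path, not a verbatim quote.
 */
void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t gfp, int node)
{
	return slob_alloc_node(c, gfp, node);
}
EXPORT_SYMBOL(kmem_cache_alloc_node);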
/* Debug bookkeeping run on every object handed out by the allocator. */
static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					void *object)
{
	flags &= gfp_allowed_mask;
	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
}
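/*
 * For context: a simplified sketch of where slab_post_alloc_hook() sits
 * in an allocation path, loosely modeled on the SLUB slab_alloc() of the
 * same era. The cpu-local fast path is elided, and slab_pre_alloc_hook()
 * is the assumed counterpart that masks flags and handles failure
 * injection; this is an illustration, not a verbatim quote.
 */
static __always_inline void *slab_alloc(struct kmem_cache *s, gfp_t gfpflags,
					int node, unsigned long addr)
{
	void *object;

	if (slab_pre_alloc_hook(s, gfpflags))
		return NULL;

	/* ... cpu freelist fast path / __slab_alloc() slow path ... */
	object = __slab_alloc(s, gfpflags, node, addr);

	slab_post_alloc_hook(s, gfpflags, object);
	return object;
}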