PUBLIC
void *
Slab_cache::alloc()   // request initialized member from cache
{
  void *unused_block = 0;
  void *ret;
  {
    auto guard = lock_guard(lock);

    Slab *s = get_available_locked();

    if (EXPECT_FALSE(!s))
      {
        // no slab with a free entry: drop the lock while allocating a
        // fresh, size-aligned block, so we don't hold it across the
        // (potentially slow) block allocator
        guard.reset();

        char *m = (char*)block_alloc(_slab_size, _slab_size);
        Slab *new_slab = 0;
        if (m)
          // place the Slab header in the last sizeof(Slab) bytes of the block
          new_slab = new (m + _slab_size - sizeof(Slab))
                     Slab(_elem_num, _entry_size, m);

        guard.lock(&lock);

        // retry getting a slab that might have been allocated by a
        // different CPU meanwhile
        s = get_available_locked();

        if (!s)
          {
            // real OOM
            if (!m)
              return 0;

            _partial.add(new_slab);
            s = new_slab;
          }
        else
          unused_block = m;   // lost the race: free our spare block below
      }

    ret = s->alloc();
    assert(ret);

    if (s->is_full())
      {
        cxx::H_list<Slab>::remove(s);
        _full.add(s);
      }
  }

  if (unused_block)
    block_free(unused_block, _slab_size);

  return ret;
}
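// The alloc() path above drops the cache lock around block_alloc() and then
// re-checks get_available_locked() after re-acquiring it, because another CPU
// may have refilled the cache in the meantime; a spare block from a lost race
// is handed back via unused_block once the lock is released. Below is a
// minimal standalone sketch of that drop/allocate/relock/retry pattern using
// std::mutex in place of the kernel's lock_guard; all demo_* names are
// illustrative and not part of this file, and unlike alloc() the sketch
// discards the spare while still holding the lock.
#include <mutex>

static std::mutex demo_lock;
static int *demo_resource;            // stand-in for the partial/empty slab lists

static int *demo_slow_alloc()         // stand-in for block_alloc()
{ return new int(42); }

static int *demo_get()
{
  std::unique_lock<std::mutex> guard(demo_lock);
  if (demo_resource)
    return demo_resource;             // fast path: resource already present

  guard.unlock();                     // don't hold the lock across allocation
  int *fresh = demo_slow_alloc();
  guard.lock();

  if (!demo_resource)                 // retry: still empty, install ours
    demo_resource = fresh;
  else
    delete fresh;                     // lost the race: discard the spare

  return demo_resource;
}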
PUBLIC
void
Slab_cache::free(void *cache_entry)   // return initialized member to cache
{
  Slab *to_free = 0;
  {
    auto guard = lock_guard(lock);

    // slab blocks are allocated size-aligned, so masking the entry address
    // yields the block start; the Slab header sits at the end of the block
    Slab *s = reinterpret_cast<Slab*>
      ((reinterpret_cast<unsigned long>(cache_entry) & ~(_slab_size - 1))
       + _slab_size - sizeof(Slab));

    bool was_full = s->is_full();

    s->free(cache_entry);

    if (was_full)
      {
        cxx::H_list<Slab>::remove(s);
        _partial.add(s);
      }
    else if (s->is_empty())
      {
        cxx::H_list<Slab>::remove(s);
        // keep at most two empty slabs cached; release any further ones
        if (_num_empty < 2)
          {
            _empty.add(s);
            ++_num_empty;
          }
        else
          to_free = s;
      }
    else
      {
        // We were neither full nor empty; we already had free
        // elements. This changes nothing in the queue, and there
        // already must have been a _first_available_slab.
      }
  }

  if (to_free)
    {
      to_free->~Slab();
      // to_free + 1 is the end of the block; step back one slab size
      // to recover the block start for the block allocator
      block_free(reinterpret_cast<char *>(to_free + 1) - _slab_size, _slab_size);
    }
}
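// How free() locates the Slab header: alloc() obtains blocks via
// block_alloc(_slab_size, _slab_size), i.e. size equals alignment, so masking
// an entry address with ~(_slab_size - 1) recovers the block start, and the
// header occupies the last sizeof(Slab) bytes. A standalone compile-time
// illustration with made-up numbers (4 KiB slab, 64-byte header; these are
// assumptions for the demo, not values from this file):
#include <cstdint>

constexpr std::uint64_t demo_slab_size   = 0x1000;         // 4 KiB block, 4 KiB aligned
constexpr std::uint64_t demo_header_size = 64;             // stand-in for sizeof(Slab)
constexpr std::uint64_t demo_entry       = 0x7f3200003a8;  // entry somewhere inside the block

constexpr std::uint64_t demo_block  = demo_entry & ~(demo_slab_size - 1);
constexpr std::uint64_t demo_header = demo_block + demo_slab_size - demo_header_size;

static_assert(demo_block  == 0x7f320000000, "mask recovers the block start");
static_assert(demo_header == 0x7f320000fc0, "header sits at the end of the block");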