static ENGINE_ERROR_CODE item_scrub(struct default_engine *engine,
                                    hash_item *item,
                                    void *cookie) {
    rel_time_t current_time = engine->server.core->get_current_time();
    (void)cookie;
    engine->scrubber.visited++;

    /* The scrubber is used for generic bucket deletion and for scrub_cmd:
     * all expired or orphaned items are unlinked. */
    if (engine->scrubber.force_delete && item->refcount > 0) {
        // warn that someone isn't releasing items before deleting their bucket.
        EXTENSION_LOGGER_DESCRIPTOR *logger;
        logger = (void*)engine->server.extension->get_extension(EXTENSION_LOGGER);
        logger->log(EXTENSION_LOG_WARNING, NULL,
                    "Bucket (%d) deletion is removing an item with refcount %d",
                    engine->bucket_id, item->refcount);
    }

    if (engine->scrubber.force_delete ||
        (item->refcount == 0 &&
         (item->exptime != 0 && item->exptime < current_time))) {
        do_item_unlink(engine, item);
        engine->scrubber.cleaned++;
    }
    return ENGINE_SUCCESS;
}
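/*
 * Illustrative sketch (not part of the engine source; helper name is assumed):
 * the expiry test used by item_scrub, factored out so the unlink condition
 * reads as "forced delete, or unreferenced and already expired".
 */
#if 0
static bool item_is_expired(const hash_item *item, rel_time_t current_time) {
    /* exptime == 0 means "never expires"; otherwise compare against the
     * engine's relative clock. */
    return item->exptime != 0 && item->exptime < current_time;
}
#endif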
/* grows the hashtable to the next power of 2. */
static void assoc_expand(struct default_engine *engine) {
    engine->assoc.old_hashtable = engine->assoc.primary_hashtable;

    engine->assoc.primary_hashtable =
        calloc(hashsize(engine->assoc.hashpower + 1), sizeof(hash_item *));
    if (engine->assoc.primary_hashtable) {
        int ret = 0;
        cb_thread_t tid;

        engine->assoc.hashpower++;
        engine->assoc.expanding = true;
        engine->assoc.expand_bucket = 0;

        /* start a thread to do the expansion */
        if ((ret = cb_create_thread(&tid, assoc_maintenance_thread,
                                    engine, 1)) != 0)
        {
            EXTENSION_LOGGER_DESCRIPTOR *logger;
            logger = (void*)engine->server.extension->get_extension(EXTENSION_LOGGER);
            logger->log(EXTENSION_LOG_WARNING, NULL,
                        "Can't create thread: %s\n", strerror(ret));
            engine->assoc.hashpower--;
            engine->assoc.expanding = false;
            free(engine->assoc.primary_hashtable);
            engine->assoc.primary_hashtable = engine->assoc.old_hashtable;
        }
    } else {
        engine->assoc.primary_hashtable = engine->assoc.old_hashtable;
        /* Bad news, but we can keep running. */
    }
}
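/*
 * For reference: in upstream memcached the table size is derived from
 * hashpower with macros along these lines (shown as a sketch only; the exact
 * definitions live elsewhere in this engine's sources):
 */
#if 0
#define hashsize(n) ((uint32_t)1 << (n))   /* number of buckets in a 2^n table */
#define hashmask(n) (hashsize(n) - 1)      /* mask that maps a hash to a bucket */
#endif
/* so growing to "the next power of 2" is simply hashpower + 1. */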
static void *assoc_maintenance_thread(void *arg) {
    struct default_engine *engine = arg;
    bool done = false;
    struct timespec sleep_time = {0, 1000};
    int i, try_cnt = 9;
    long tot_execs = 0;
    EXTENSION_LOGGER_DESCRIPTOR *logger = engine->server.log->get_logger();

    if (engine->config.verbose) {
        logger->log(EXTENSION_LOG_INFO, NULL,
                    "Hash table expansion start: %d => %d\n",
                    hashsize(engine->assoc.hashpower - 1),
                    hashsize(engine->assoc.hashpower));
    }

    do {
        int ii;
        /* This is a long-running background task, so acquire the cache lock
         * lazily in order to give priority to normal worker threads. */
        for (i = 0; i < try_cnt; i++) {
            if (pthread_mutex_trylock(&engine->cache_lock) == 0) {
                break;
            }
            nanosleep(&sleep_time, NULL);
        }
        if (i == try_cnt) {
            pthread_mutex_lock(&engine->cache_lock);
        }

        for (ii = 0; ii < hash_bulk_move && engine->assoc.expanding; ++ii) {
            hash_item *it, *next;
            int bucket;

            for (it = engine->assoc.old_hashtable[engine->assoc.expand_bucket];
                 NULL != it; it = next) {
                next = it->h_next;
                bucket = engine->server.core->hash(item_get_key(it), it->nkey, 0)
                         & hashmask(engine->assoc.hashpower);
                it->h_next = engine->assoc.primary_hashtable[bucket];
                engine->assoc.primary_hashtable[bucket] = it;
            }

            engine->assoc.old_hashtable[engine->assoc.expand_bucket] = NULL;
            engine->assoc.expand_bucket++;
            if (engine->assoc.expand_bucket == hashsize(engine->assoc.hashpower - 1)) {
                engine->assoc.expanding = false;
                free(engine->assoc.old_hashtable);
            }
        }
        if (!engine->assoc.expanding) {
            done = true;
        }
        pthread_mutex_unlock(&engine->cache_lock);

        if ((++tot_execs % 100) == 0) {
            nanosleep(&sleep_time, NULL);
        }
    } while (!done);

    if (engine->config.verbose) {
        logger->log(EXTENSION_LOG_INFO, NULL, "Hash table expansion done\n");
    }
    return NULL;
}
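/*
 * Sketch of the lock-acquisition pattern used above, in isolation (assumed
 * helper name; not compiled): try the mutex a bounded number of times with a
 * short sleep between attempts, then fall back to a blocking lock so the
 * bucket migration still makes progress under contention.
 */
#if 0
static void lock_with_backoff(pthread_mutex_t *lock) {
    const struct timespec nap = {0, 1000};   /* 1000 ns between attempts */
    int attempt;
    for (attempt = 0; attempt < 9; attempt++) {
        if (pthread_mutex_trylock(lock) == 0) {
            return;                          /* got it without blocking */
        }
        nanosleep(&nap, NULL);
    }
    pthread_mutex_lock(lock);                /* stop being polite and block */
}
#endif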
void slabs_adjust_mem_requested(struct default_engine *engine,
                                unsigned int id, size_t old, size_t ntotal)
{
    slabclass_t *p;
    cb_mutex_enter(&engine->slabs.lock);
    if (id < POWER_SMALLEST || id > engine->slabs.power_largest) {
        EXTENSION_LOGGER_DESCRIPTOR *logger;
        logger = (void*)engine->server.extension->get_extension(EXTENSION_LOGGER);
        logger->log(EXTENSION_LOG_WARNING, NULL,
                    "Internal error! Invalid slab class\n");
        abort();
    }

    p = &engine->slabs.slabclass[id];
    p->requested = p->requested - old + ntotal;
    cb_mutex_exit(&engine->slabs.lock);
}
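/*
 * Usage sketch (hypothetical call site and size helpers; illustration only):
 * when an item's total size changes within the same slab class, the class's
 * "requested" byte counter is adjusted by subtracting the old size and adding
 * the new one, so stats keep reflecting the bytes actually asked for.
 */
#if 0
    size_t old_size = total_item_size(engine, old_item);   /* hypothetical helper */
    size_t new_size = total_item_size(engine, new_item);   /* hypothetical helper */
    slabs_adjust_mem_requested(engine, old_item->slabs_clsid, old_size, new_size);
#endif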
/** wrapper around assoc_find which does the lazy expiration logic */
hash_item *do_item_get(struct default_engine *engine, const hash_key *key) {
    rel_time_t current_time = engine->server.core->get_current_time();
    hash_item *it = assoc_find(engine,
                               crc32c(hash_key_get_key(key),
                                      hash_key_get_key_len(key), 0),
                               key);
    int was_found = 0;

    if (engine->config.verbose > 2) {
        EXTENSION_LOGGER_DESCRIPTOR *logger;
        logger = (void*)engine->server.extension->get_extension(EXTENSION_LOGGER);
        if (it == NULL) {
            logger->log(EXTENSION_LOG_DEBUG, NULL,
                        "> NOT FOUND in bucket %d, %s",
                        hash_key_get_bucket_index(key),
                        hash_key_get_client_key(key));
        } else {
            logger->log(EXTENSION_LOG_DEBUG, NULL,
                        "> FOUND KEY in bucket %d, %s",
                        hash_key_get_bucket_index(item_get_key(it)),
                        hash_key_get_client_key(item_get_key(it)));
            was_found++;
        }
    }

    if (it != NULL && engine->config.oldest_live != 0 &&
        engine->config.oldest_live <= current_time &&
        it->time <= engine->config.oldest_live) {
        do_item_unlink(engine, it);           /* MTSAFE - items.lock held */
        it = NULL;
    }

    if (it == NULL && was_found) {
        EXTENSION_LOGGER_DESCRIPTOR *logger;
        logger = (void*)engine->server.extension->get_extension(EXTENSION_LOGGER);
        logger->log(EXTENSION_LOG_DEBUG, NULL, " -nuked by flush");
        was_found--;
    }

    if (it != NULL && it->exptime != 0 && it->exptime <= current_time) {
        do_item_unlink(engine, it);           /* MTSAFE - items.lock held */
        it = NULL;
    }

    if (it == NULL && was_found) {
        EXTENSION_LOGGER_DESCRIPTOR *logger;
        logger = (void*)engine->server.extension->get_extension(EXTENSION_LOGGER);
        logger->log(EXTENSION_LOG_DEBUG, NULL, " -nuked by expire");
        was_found--;
    }

    if (it != NULL) {
        it->refcount++;
        DEBUG_REFCNT(it, '+');
        do_item_update(engine, it);
    }

    return it;
}
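/*
 * Sketch of the two lazy-invalidation rules applied above (illustration only;
 * helper name assumed): an item found in the hash table is still discarded on
 * read if either (a) it was stored before the most recent flush deadline
 * (config.oldest_live), or (b) its own exptime has passed.
 */
#if 0
static bool item_is_stale(const struct default_engine *engine,
                          const hash_item *it, rel_time_t now) {
    bool flushed = engine->config.oldest_live != 0 &&
                   engine->config.oldest_live <= now &&
                   it->time <= engine->config.oldest_live;
    bool expired = it->exptime != 0 && it->exptime <= now;
    return flushed || expired;
}
#endif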
static void assoc_maintenance_thread(void *arg) {
    struct default_engine *engine = arg;
    bool done = false;
    do {
        int ii;
        cb_mutex_enter(&engine->cache_lock);

        for (ii = 0; ii < hash_bulk_move && engine->assoc.expanding; ++ii) {
            hash_item *it, *next;
            int bucket;

            for (it = engine->assoc.old_hashtable[engine->assoc.expand_bucket];
                 NULL != it; it = next) {
                next = it->h_next;
                bucket = engine->server.core->hash(item_get_key(it), it->nkey, 0)
                         & hashmask(engine->assoc.hashpower);
                it->h_next = engine->assoc.primary_hashtable[bucket];
                engine->assoc.primary_hashtable[bucket] = it;
            }

            engine->assoc.old_hashtable[engine->assoc.expand_bucket] = NULL;
            engine->assoc.expand_bucket++;
            if (engine->assoc.expand_bucket == hashsize(engine->assoc.hashpower - 1)) {
                engine->assoc.expanding = false;
                free(engine->assoc.old_hashtable);
                if (engine->config.verbose > 1) {
                    EXTENSION_LOGGER_DESCRIPTOR *logger;
                    logger = (void*)engine->server.extension->get_extension(EXTENSION_LOGGER);
                    logger->log(EXTENSION_LOG_INFO, NULL,
                                "Hash table expansion done\n");
                }
            }
        }
        if (!engine->assoc.expanding) {
            done = true;
        }
        cb_mutex_exit(&engine->cache_lock);
    } while (!done);
}
static SERVER_HANDLE_V1* invalid_server_api(void) {
    static SERVER_HANDLE_V1 rv = {
        .interface = 9999,
    };
    return &rv;
}

bool should_fail_register = true;
EXTENSION_LOGGER_DESCRIPTOR *descr;

static bool register_extension(extension_type_t type, void *extension) {
    if (should_fail_register) {
        return false;
    }
    assert(extension != NULL);
    assert(type == EXTENSION_LOGGER);
    descr = extension;
    assert(descr->get_name != NULL);
    assert(descr->log != NULL);
    assert(strcmp("syslog", descr->get_name()) == 0);
    return true;
}

static SERVER_HANDLE_V1* get_server_api(void) {
    static SERVER_EXTENSION_API extension_api = {
        .register_extension = register_extension
    };
    static SERVER_HANDLE_V1 rv = {
        .interface = 1,
        .extension = &extension_api,
    };
    return &rv;
}

int main(int argc, char **argv) {
    (void)argc;
    (void)argv;

    /* let's test that it can handle a NULL server api */
    assert(memcached_extensions_initialize(NULL, null_server_api) == EXTENSION_FATAL);

    /* and an invalid (unknown) server api */
    assert(memcached_extensions_initialize(NULL, invalid_server_api) == EXTENSION_FATAL);

    should_fail_register = true;
    assert(memcached_extensions_initialize(NULL, get_server_api) == EXTENSION_FATAL);

    should_fail_register = false;
    assert(memcached_extensions_initialize(NULL, get_server_api) == EXTENSION_SUCCESS);

    expected_msg = "test a string";
    expected_priority = LOG_DEBUG;
    descr->log(EXTENSION_LOG_DETAIL, NULL, "%s", expected_msg);

    /* @todo feel free to add a bunch more tests... */
    return 0;
}
/**
 * Determines the chunk sizes and initializes the slab class descriptors
 * accordingly.
 */
ENGINE_ERROR_CODE slabs_init(struct default_engine *engine,
                             const size_t limit, const double factor,
                             const bool prealloc)
{
    int i = POWER_SMALLEST - 1;
    unsigned int size = sizeof(hash_item) + (unsigned int)engine->config.chunk_size;

    engine->slabs.mem_limit = limit;

    if (prealloc) {
        /* Allocate everything in a big chunk with malloc */
        engine->slabs.mem_base = my_allocate(engine, engine->slabs.mem_limit);
        if (engine->slabs.mem_base != NULL) {
            engine->slabs.mem_current = engine->slabs.mem_base;
            engine->slabs.mem_avail = engine->slabs.mem_limit;
        } else {
            return ENGINE_ENOMEM;
        }
    }

    memset(engine->slabs.slabclass, 0, sizeof(engine->slabs.slabclass));

    while (++i < POWER_LARGEST && size <= engine->config.item_size_max / factor) {
        /* Make sure items are always n-byte aligned */
        if (size % CHUNK_ALIGN_BYTES) {
            size += CHUNK_ALIGN_BYTES - (size % CHUNK_ALIGN_BYTES);
        }

        engine->slabs.slabclass[i].size = size;
        engine->slabs.slabclass[i].perslab =
            (unsigned int)engine->config.item_size_max / engine->slabs.slabclass[i].size;
        size = (unsigned int)(size * factor);
        if (engine->config.verbose > 1) {
            EXTENSION_LOGGER_DESCRIPTOR *logger;
            logger = (void*)engine->server.extension->get_extension(EXTENSION_LOGGER);
            logger->log(EXTENSION_LOG_INFO, NULL,
                        "slab class %3d: chunk size %9u perslab %7u\n",
                        i, engine->slabs.slabclass[i].size,
                        engine->slabs.slabclass[i].perslab);
        }
    }

    engine->slabs.power_largest = i;
    engine->slabs.slabclass[engine->slabs.power_largest].size =
        (unsigned int)engine->config.item_size_max;
    engine->slabs.slabclass[engine->slabs.power_largest].perslab = 1;
    if (engine->config.verbose > 1) {
        EXTENSION_LOGGER_DESCRIPTOR *logger;
        logger = (void*)engine->server.extension->get_extension(EXTENSION_LOGGER);
        logger->log(EXTENSION_LOG_INFO, NULL,
                    "slab class %3d: chunk size %9u perslab %7u\n",
                    i, engine->slabs.slabclass[i].size,
                    engine->slabs.slabclass[i].perslab);
    }

    /* for the test suite: faking of how much we've already malloc'd */
    {
        char *t_initial_malloc = getenv("T_MEMD_INITIAL_MALLOC");
        if (t_initial_malloc) {
            engine->slabs.mem_malloced = (size_t)atol(t_initial_malloc);
        }
    }

#ifndef DONT_PREALLOC_SLABS
    {
        char *pre_alloc = getenv("T_MEMD_SLABS_ALLOC");
        if (pre_alloc == NULL || atoi(pre_alloc) != 0) {
            slabs_preallocate(power_largest);
        }
    }
#endif

    return ENGINE_SUCCESS;
}
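/*
 * Worked example (assumed defaults, illustration only): with a chunk size of
 * 48 bytes, a 1 MB item_size_max, factor = 1.25 and 8-byte alignment, the
 * loop above yields a geometric progression of chunk sizes, each class
 * rounded up to the alignment boundary.  A standalone sketch of the same
 * computation (the initial 96 is a stand-in for sizeof(hash_item) + chunk_size):
 */
#if 0
#include <stdio.h>

int main(void) {
    unsigned int size = 96;                       /* sizeof(hash_item) + chunk_size */
    const unsigned int item_size_max = 1024 * 1024;
    const double factor = 1.25;
    const unsigned int align = 8;                 /* CHUNK_ALIGN_BYTES */
    int cls = 1;

    while (size <= item_size_max / factor) {
        if (size % align) {
            size += align - (size % align);       /* round up to alignment */
        }
        printf("class %3d: chunk %7u bytes, %5u per 1MB slab\n",
               cls, size, item_size_max / size);
        size = (unsigned int)(size * factor);
        cls++;
    }
    /* the final class is always pinned to item_size_max with one item per slab */
    return 0;
}
#endif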
/*
 * Stores an item in the cache according to the semantics of one of the set
 * commands. In threaded mode, this is protected by the cache lock.
 *
 * Returns the state of storage.
 */
static ENGINE_ERROR_CODE do_store_item(struct default_engine *engine,
                                       hash_item *it, uint64_t *cas,
                                       ENGINE_STORE_OPERATION operation,
                                       const void *cookie) {
    const char *key = item_get_key(it);
    hash_item *old_it = do_item_get(engine, key, it->nkey);
    ENGINE_ERROR_CODE stored = ENGINE_NOT_STORED;

    hash_item *new_it = NULL;

    if (old_it != NULL && operation == OPERATION_ADD) {
        /* add only adds a nonexistent item, but promote to head of LRU */
        do_item_update(engine, old_it);
    } else if (!old_it && (operation == OPERATION_REPLACE
                           || operation == OPERATION_APPEND
                           || operation == OPERATION_PREPEND)) {
        /* replace only replaces an existing value; don't store */
    } else if (operation == OPERATION_CAS) {
        /* validate cas operation */
        if (old_it == NULL) {
            // LRU expired
            stored = ENGINE_KEY_ENOENT;
        } else if (item_get_cas(it) == item_get_cas(old_it)) {
            // cas validates
            // it and old_it may belong to different classes.
            // I'm updating the stats for the one that's getting pushed out
            do_item_replace(engine, old_it, it);
            stored = ENGINE_SUCCESS;
        } else {
            if (engine->config.verbose > 1) {
                EXTENSION_LOGGER_DESCRIPTOR *logger;
                logger = (void*)engine->server.extension->get_extension(EXTENSION_LOGGER);
                logger->log(EXTENSION_LOG_INFO, NULL,
                            "CAS: failure: expected %"PRIu64", got %"PRIu64"\n",
                            item_get_cas(old_it),
                            item_get_cas(it));
            }
            stored = ENGINE_KEY_EEXISTS;
        }
    } else {
        /*
         * Append - combine new and old record into single one. Here it's
         * atomic and thread-safe.
         */
        if (operation == OPERATION_APPEND || operation == OPERATION_PREPEND) {
            /*
             * Validate CAS
             */
            if (item_get_cas(it) != 0) {
                // CAS must be equal
                if (item_get_cas(it) != item_get_cas(old_it)) {
                    stored = ENGINE_KEY_EEXISTS;
                }
            }

            if (stored == ENGINE_NOT_STORED) {
                /* we have it and old_it here - alloc memory to hold both */
                new_it = do_item_alloc(engine, key, it->nkey,
                                       old_it->flags,
                                       old_it->exptime,
                                       it->nbytes + old_it->nbytes,
                                       cookie);
                if (new_it == NULL) {
                    /* SERVER_ERROR out of memory */
                    if (old_it != NULL) {
                        do_item_release(engine, old_it);
                    }
                    return ENGINE_NOT_STORED;
                }

                /* copy data from it and old_it to new_it */
                if (operation == OPERATION_APPEND) {
                    memcpy(item_get_data(new_it), item_get_data(old_it), old_it->nbytes);
                    memcpy(item_get_data(new_it) + old_it->nbytes,
                           item_get_data(it), it->nbytes);
                } else {
                    /* OPERATION_PREPEND */
                    memcpy(item_get_data(new_it), item_get_data(it), it->nbytes);
                    memcpy(item_get_data(new_it) + it->nbytes,
                           item_get_data(old_it), old_it->nbytes);
                }

                it = new_it;
            }
        }

        if (stored == ENGINE_NOT_STORED) {
            if (old_it != NULL) {
                do_item_replace(engine, old_it, it);
            } else {
                do_item_link(engine, it);
            }

            *cas = item_get_cas(it);
            stored = ENGINE_SUCCESS;
        }
    }

    if (old_it != NULL) {
        do_item_release(engine, old_it);         /* release our reference */
    }

    if (new_it != NULL) {
        do_item_release(engine, new_it);
    }

    if (stored == ENGINE_SUCCESS) {
        *cas = item_get_cas(it);
    }

    return stored;
}
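/*
 * Reference sketch (illustration only, not compiled; helper name assumed):
 * the CAS decision made above, reduced to its three outcomes.
 */
#if 0
static ENGINE_ERROR_CODE cas_outcome(const hash_item *old_it, const hash_item *it) {
    if (old_it == NULL) {
        return ENGINE_KEY_ENOENT;      /* item vanished (LRU eviction or expiry) */
    }
    if (item_get_cas(it) == item_get_cas(old_it)) {
        return ENGINE_SUCCESS;         /* CAS matches: replace the item */
    }
    return ENGINE_KEY_EEXISTS;         /* CAS mismatch */
}
#endif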