static void test_cache_overflow(session &sess)
{
	ioremap::cache::cache_manager *cache = (ioremap::cache::cache_manager *) global_data->nodes[0].get_native()->cache;
	const size_t cache_size = cache->cache_size();
	const size_t cache_pages_number = cache->cache_pages_number();
	argument_data data("0");

	cache->clear();
	size_t record_size = 0;
	{
		ELLIPTICS_REQUIRE(write_result, sess.write_cache(key(std::string("0")), data, 3000));
		const auto &stats = cache->get_total_cache_stats();
		record_size = stats.size_of_objects;
	}

	size_t records_number = (cache_size / cache_pages_number / record_size) * 10;
	for (size_t id = 1; id < records_number; ++id) {
		ELLIPTICS_REQUIRE(write_result, sess.write_cache(key(boost::lexical_cast<std::string>(id)), data, 3000));

		const auto &stats = cache->get_total_cache_stats();

		size_t total_pages_sizes = 0;
		for (size_t i = 0; i < stats.pages_sizes.size(); ++i) {
			total_pages_sizes += stats.pages_sizes[i];
			// BOOST_REQUIRE_LE(stats.pages_sizes[i], stats.pages_max_sizes[i]);
		}
		// BOOST_REQUIRE_LE(stats.size_of_objects, cache_size);
		// BOOST_REQUIRE_EQUAL(stats.size_of_objects, total_pages_sizes);
	}
}
static void test_cache_overflow(session &sess, const nodes_data *setup)
{
	dnet_node *node = setup->nodes[0].get_native();
	dnet_backend_io *backend_io = dnet_get_backend_io(node->io, 0);
	ioremap::cache::cache_manager *cache = reinterpret_cast<ioremap::cache::cache_manager *>(backend_io->cache);
	const size_t cache_size = cache->cache_size();
	const size_t cache_pages_number = cache->cache_pages_number();
	argument_data data("0");

	cache->clear();
	size_t record_size = 0;
	{
		ELLIPTICS_REQUIRE(write_result, sess.write_cache(key(std::string("0")), data, 3000));
		auto stats = cache->get_total_cache_stats();
		record_size = stats.size_of_objects;
	}

	size_t records_number = (cache_size / cache_pages_number / record_size) * 10;
	for (size_t id = 1; id < records_number; ++id) {
		ELLIPTICS_REQUIRE(write_result, sess.write_cache(key(boost::lexical_cast<std::string>(id)), data, 3000));

		auto stats = cache->get_total_cache_stats();

		size_t total_pages_sizes = 0;
		for (size_t i = 0; i < stats.pages_sizes.size(); ++i) {
			total_pages_sizes += stats.pages_sizes[i];
		}
	}
}
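/*
 * Hypothetical sketch, not part of the original tests: the commented-out
 * BOOST_REQUIRE checks in the first test_cache_overflow() variant verify that
 * each cache page stays within its configured maximum and that the per-page
 * sizes sum to the total size of cached objects. A helper along these lines
 * could centralize those invariants; it is written as a template so it does
 * not assume the exact type returned by get_total_cache_stats(), only the
 * fields already used above (pages_sizes, pages_max_sizes, size_of_objects).
 */
template <typename Stats>
static void check_cache_stats_invariants(const Stats &stats, size_t cache_size)
{
	size_t total_pages_sizes = 0;
	for (size_t i = 0; i < stats.pages_sizes.size(); ++i) {
		// every page must stay within its configured maximum size
		BOOST_REQUIRE_LE(stats.pages_sizes[i], stats.pages_max_sizes[i]);
		total_pages_sizes += stats.pages_sizes[i];
	}
	// the cache as a whole must not exceed its configured size,
	// and the per-page sizes must add up to the total size of objects
	BOOST_REQUIRE_LE(stats.size_of_objects, cache_size);
	BOOST_REQUIRE_EQUAL(stats.size_of_objects, total_pages_sizes);
}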
/*
 * Writes one record to the cache and mirrors the write in the LRU emulator:
 * the new id is added, and as many entries are removed from the emulated list
 * as the real cache evicted to make room for it.
 */
void cache_write_check_lru(session &sess,
		int id, const argument_data &data, long timeout,
		lru_list_emulator_t &lru_list_emulator, ioremap::cache::cache_manager *cache)
{
	key idKey = key(boost::lexical_cast<std::string>(id));

	int objects_number_before = cache->get_total_cache_stats().number_of_objects;
	ELLIPTICS_REQUIRE(write_result, sess.write_cache(idKey, data, timeout));
	lru_list_emulator.add(id);
	int objects_number_after = cache->get_total_cache_stats().number_of_objects;

	int objects_removed = objects_number_before - objects_number_after + 1;
	for (int i = 0; i < objects_removed; ++i) {
		lru_list_emulator.remove_last();
	}
}
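/*
 * lru_list_emulator_t is not defined in this excerpt; the minimal sketch below
 * only shows the interface that cache_write_check_lru() above relies on
 * (add() and remove_last()) and is an assumption about the class's shape, not
 * the project's actual implementation. In a real test file it would have to be
 * defined before its first use, and <list> would need to be included.
 */
struct lru_list_emulator_t {
	std::list<int> lru_list;

	void add(int id) {
		// a freshly written key becomes the most recently used entry
		lru_list.push_back(id);
	}

	void remove_last() {
		// an eviction drops the least recently used entry,
		// i.e. the one at the front of the emulated list
		BOOST_REQUIRE(!lru_list.empty());
		lru_list.pop_front();
	}
};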
/*
 * After a key is written to the cache, its data is synced to disk cache_sync_timeout seconds later.
 * Before syncing a key, dnet_oplock() is taken for that key; after the sync, the key's oplock is released.
 *
 * The following test checks this mechanic by calling write_data(key, data) multiple times with the same data,
 * then writing cache data with write_cache(key, cache_data) and waiting cache_sync_timeout seconds
 * until the cache is synced back to disk (backend), which takes the oplock. Finally, write_data(key, result_data) is called.
 * If the last write_data() operation times out, dnet_opunlock() (called after the cache sync) did not properly release the key's oplock.
 */
static void test_oplock(session &sess)
{
	const key id(std::string("oplock_key"));
	const std::string data = "some_data";
	const std::string cache_data = "cache_data";
	const std::string result_data = "result_data";
	const size_t num_writes = 10;

	std::unique_ptr<async_write_result[]> results(new async_write_result[num_writes]);

	for (size_t i = 0; i < num_writes; ++i) {
		results[i] = std::move(sess.write_data(id, data, 0));
	}
	for (size_t i = 0; i < num_writes; ++i) {
		results[i].wait();
	}

	ELLIPTICS_COMPARE_REQUIRE(read_data_result, sess.read_data(id, 0, 0), data);
	ELLIPTICS_REQUIRE(async_cache_write, sess.write_cache(id, cache_data, 0));

	sleep(cache_sync_timeout + 1);

	ELLIPTICS_COMPARE_REQUIRE(read_cache_result, sess.read_data(id, 0, 0), cache_data);
	ELLIPTICS_REQUIRE(async_write, sess.write_data(id, result_data, 0));
	ELLIPTICS_COMPARE_REQUIRE(read_result, sess.read_data(id, 0, 0), result_data);
}