static void test_direct_backend(session &sess)
{
	const key id = std::string("direct-backend-test");
	sess.set_groups({ 0 });
	const std::string first_str = "first-data";
	const std::string second_str = "second-data";

	server_node &node = global_data->nodes.front();

	session first = sess.clone();
	first.set_direct_id(node.remote(), 0);

	session second = sess.clone();
	second.set_direct_id(node.remote(), 3);

	ELLIPTICS_REQUIRE(async_first_write, first.write_data(id, first_str, 0));
	ELLIPTICS_REQUIRE(async_second_write, second.write_data(id, second_str, 0));

	ELLIPTICS_REQUIRE(async_first_read, first.read_data(id, 0, 0));
	read_result_entry first_read = async_first_read.get_one();
	BOOST_REQUIRE_EQUAL(first_read.file().to_string(), first_str);
	BOOST_REQUIRE_EQUAL(first_read.command()->backend_id, 0);

	ELLIPTICS_REQUIRE(async_second_read, second.read_data(id, 0, 0));
	read_result_entry second_read = async_second_read.get_one();
	BOOST_REQUIRE_EQUAL(second_read.file().to_string(), second_str);
	BOOST_REQUIRE_EQUAL(second_read.command()->backend_id, 3);
}
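/*
 * ELLIPTICS_REQUIRE(name, expr), used throughout these snippets, comes from the test
 * suite's helpers: it issues the asynchronous request, waits for it and fails the
 * Boost.Test case if the request finished with an error. The exact definition is not part
 * of these snippets; the sketch below (under the hypothetical name ELLIPTICS_REQUIRE_SKETCH)
 * only illustrates the idea and assumes async results expose error().code()/message().
 */
#define ELLIPTICS_REQUIRE_SKETCH(name, expr) \
	auto name = (expr); \
	name.wait(); \
	BOOST_REQUIRE_MESSAGE(name.error().code() == 0, name.error().message())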
static void test_set_backend_ids_for_disabled(session &sess)
{
	server_node &node = global_data->nodes.back();

	auto ids = generate_ids(16);

	ELLIPTICS_REQUIRE(async_set_result, sess.set_backend_ids(node.remote(), 4, ids));

	backend_status_result_entry result = async_set_result.get_one();
	BOOST_REQUIRE(result.is_valid());
	BOOST_REQUIRE_EQUAL(result.count(), 1);

	dnet_backend_status *status = result.backend(0);
	BOOST_REQUIRE_EQUAL(status->backend_id, 4);
	BOOST_REQUIRE_EQUAL(status->state, DNET_BACKEND_DISABLED);

	ELLIPTICS_REQUIRE(async_enable_result, sess.enable_backend(node.remote(), 4));

	// Wait 0.1 secs to ensure that route list was changed
	usleep(100 * 1000);

	auto route_ids = backend_ids(sess, node.remote(), 4);
	BOOST_REQUIRE_EQUAL(ids.size(), route_ids.size());
	BOOST_REQUIRE(compare_ids(ids, route_ids));
}
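/*
 * generate_ids(), compare_ids() and backend_ids() are helpers defined elsewhere in the
 * test suite. The sketches below only show the intent behind the calls above -- generating
 * a set of random raw ids and comparing two id sets regardless of order -- and are
 * assumptions, not the helpers' real implementations.
 */
static std::vector<dnet_raw_id> generate_ids(size_t count)
{
	std::vector<dnet_raw_id> ids;
	ids.reserve(count);
	for (size_t i = 0; i < count; ++i) {
		dnet_raw_id id;
		// fill the raw id with random bytes
		for (size_t byte = 0; byte < sizeof(id.id); ++byte)
			id.id[byte] = rand() % 256;
		ids.push_back(id);
	}
	return ids;
}

static bool compare_ids(std::vector<dnet_raw_id> first, std::vector<dnet_raw_id> second)
{
	const auto less = [] (const dnet_raw_id &a, const dnet_raw_id &b) {
		return memcmp(a.id, b.id, sizeof(a.id)) < 0;
	};
	const auto equal = [] (const dnet_raw_id &a, const dnet_raw_id &b) {
		return memcmp(a.id, b.id, sizeof(a.id)) == 0;
	};
	if (first.size() != second.size())
		return false;
	// the route list does not guarantee the order of ids, so compare as sets
	std::sort(first.begin(), first.end(), less);
	std::sort(second.begin(), second.end(), less);
	return std::equal(first.begin(), first.end(), second.begin(), equal);
}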
static void test_cache_overflow(session &sess)
{
	ioremap::cache::cache_manager *cache = reinterpret_cast<ioremap::cache::cache_manager *>(global_data->nodes[0].get_native()->cache);
	const size_t cache_size = cache->cache_size();
	const size_t cache_pages_number = cache->cache_pages_number();
	argument_data data("0");

	cache->clear();
	size_t record_size = 0;
	{
		ELLIPTICS_REQUIRE(write_result, sess.write_cache(key(std::string("0")), data, 3000));
		const auto& stats = cache->get_total_cache_stats();
		record_size = stats.size_of_objects;
	}

	size_t records_number = (cache_size / cache_pages_number / record_size) * 10;
	for (size_t id = 1; id < records_number; ++id) {
		ELLIPTICS_REQUIRE(write_result, sess.write_cache(key(boost::lexical_cast<std::string>(id)), data, 3000));
		const auto& stats = cache->get_total_cache_stats();

		size_t total_pages_sizes = 0;
		for (size_t i = 0; i < stats.pages_sizes.size(); ++i) {
			total_pages_sizes += stats.pages_sizes[i];

//			BOOST_REQUIRE_LE(stats.pages_sizes[i], stats.pages_max_sizes[i]);
		}

//		BOOST_REQUIRE_LE(stats.size_of_objects, cache_size);
//		BOOST_REQUIRE_EQUAL(stats.size_of_objects, total_pages_sizes);
	}
}
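/*
 * The statistics used in the cache tests come from cache_manager::get_total_cache_stats().
 * Judging only by the fields referenced above, the returned structure looks roughly like
 * the sketch below (field names are taken from the usage; the real struct contains more):
 */
struct cache_stats_sketch {
	size_t number_of_objects;              // records currently stored in the cache
	size_t size_of_objects;                // total size of those records in bytes
	std::vector<size_t> pages_sizes;       // current size of every cache page
	std::vector<size_t> pages_max_sizes;   // configured size limit of every cache page
};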
static void test_cache_timestamp(session &sess)
{
	argument_data data("this is a timestamp test");

	key k("this is a timestamp test key");
	sess.transform(k);

	dnet_io_control ctl;
	memset(&ctl, 0, sizeof(ctl));

	ctl.data = data.data();

	dnet_current_time(&ctl.io.timestamp);
	ctl.io.flags = DNET_IO_FLAGS_CACHE;
	ctl.io.start = 5;
	ctl.io.size = data.size();

	memcpy(&ctl.id, &k.id(), sizeof(dnet_id));
	ctl.fd = -1;

	ELLIPTICS_REQUIRE(write_result, sess.write_data(ctl));

	sleep(ctl.io.start + 2);

	ELLIPTICS_REQUIRE(read_result, sess.read_data(k, 0, 0));
	auto io = read_result.get_one().io_attribute();

	BOOST_REQUIRE_EQUAL(io->timestamp.tsec, ctl.io.timestamp.tsec);
	BOOST_REQUIRE_EQUAL(io->timestamp.tnsec, ctl.io.timestamp.tnsec);
}
static void test_cache_records_sizes(session &sess, const nodes_data *setup)
{
	dnet_node *node = setup->nodes[0].get_native();
	dnet_backend_io *backend_io = dnet_get_backend_io(node->io, 0);
	ioremap::cache::cache_manager *cache = reinterpret_cast<ioremap::cache::cache_manager *>(backend_io->cache);
	const size_t cache_size = cache->cache_size();
	const size_t cache_pages_number = cache->cache_pages_number();
	argument_data data("0");

	cache->clear();
	size_t record_size = 0;
	{
		ELLIPTICS_REQUIRE(write_result, sess.write_cache(key(boost::lexical_cast<std::string>(0)), data, 3000));
		auto stats = cache->get_total_cache_stats();
		record_size = stats.size_of_objects;
		BOOST_REQUIRE_EQUAL(stats.number_of_objects, 1);
	}

	size_t records_number = cache_size / cache_pages_number / record_size - 5;
	for (size_t id = 1; id < records_number; ++id) {
		ELLIPTICS_REQUIRE(write_result,
		                  sess.write_cache(key(boost::lexical_cast<std::string>(id)), data, 3000));
		auto stats = cache->get_total_cache_stats();

		size_t total_pages_sizes = 0;
		for (size_t i = 0; i < stats.pages_sizes.size(); ++i) {
			total_pages_sizes += stats.pages_sizes[i];
		}

		BOOST_REQUIRE_EQUAL(stats.number_of_objects * record_size, stats.size_of_objects);
		BOOST_REQUIRE_EQUAL(stats.number_of_objects, id + 1);
		BOOST_REQUIRE_EQUAL(stats.size_of_objects, total_pages_sizes);
	}
}
static void start_application(session &sess, const std::string &app_name)
{
    key key_id = app_name;
    key_id.transform(sess);
    dnet_id id = key_id.id();

    ELLIPTICS_REQUIRE(result, sess.exec(&id, app_name + "@start-task", data_pointer()));
}
static void send_echo(session &sess, const std::string &app_name, const std::string &data)
{
    key key_id = app_name;
    key_id.transform(sess);
    dnet_id id = key_id.id();

    ELLIPTICS_REQUIRE(exec_result, sess.exec(&id, app_name + "@echo", data));

    sync_exec_result result = exec_result;
    BOOST_REQUIRE_EQUAL(result.size(), 1);
    BOOST_REQUIRE_EQUAL(result[0].context().data().to_string(), data);
}
static void test_change_group(session &sess)
{
	server_node &node = global_data->nodes.back();
	const uint32_t backend_id = 4;
	const int old_group_id = 2;
	const int new_group_id = 10;

	std::string host = node.remote().to_string();
	auto old_tuple = std::make_tuple(host, old_group_id, backend_id);
	auto new_tuple = std::make_tuple(host, new_group_id, backend_id);

	auto unique_hosts = get_unique_hosts(sess);

	BOOST_REQUIRE_MESSAGE(unique_hosts.find(old_tuple) != unique_hosts.end(),
		"Host must exist: " + host + ", group: 2, backend: 4");

	BOOST_REQUIRE_MESSAGE(unique_hosts.find(new_tuple) == unique_hosts.end(),
		"Host must not exist: " + host + ", group: 10, backend: 4");

	server_config server = node.config();
	config_data &backend = server.backends[backend_id];
	backend("group", new_group_id);

	server.write(node.config_path());

	ELLIPTICS_REQUIRE(stop_result, sess.disable_backend(node.remote(), backend_id));
	ELLIPTICS_REQUIRE(start_result, sess.enable_backend(node.remote(), backend_id));

	// Wait 0.1 secs to ensure that route list was changed
	usleep(100 * 1000);

	unique_hosts = get_unique_hosts(sess);

	BOOST_REQUIRE_MESSAGE(unique_hosts.find(old_tuple) == unique_hosts.end(),
		"Host must not exist: " + host + ", group: 2, backend: 4");

	BOOST_REQUIRE_MESSAGE(unique_hosts.find(new_tuple) != unique_hosts.end(),
		"Host must exist: " + host + ", group: 10, backend: 4");
}
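/*
 * get_unique_hosts() is not shown in these snippets. Conceptually it walks the session's
 * route list and collects (address, group_id, backend_id) tuples, which is what the
 * route-table checks above and below rely on. The sketch assumes session::get_routes()
 * returns entries with addr, group_id and backend_id fields; treat those names as
 * assumptions about the API rather than the real helper's code.
 */
static std::set<std::tuple<std::string, int, uint32_t>> get_unique_hosts(session &sess)
{
	std::set<std::tuple<std::string, int, uint32_t>> unique_hosts;

	for (const auto &entry : sess.get_routes()) {
		const std::string addr = entry.addr.to_string();
		unique_hosts.insert(std::make_tuple(addr, entry.group_id, entry.backend_id));
	}

	return unique_hosts;
}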
static void test_remove_backend(session &sess) {
	server_node &node = get_setup()->nodes[0];

	server_config &config = node.config();
	config_data &last_backend = config.backends.back();
	const uint32_t backend_id = std::stoi(last_backend.string_value("backend_id"));

	ELLIPTICS_REQUIRE(remove_result, sess.remove_backend(node.remote(), backend_id));

	/* Request the status of all backends and check that the removed backend is missing */
	ELLIPTICS_REQUIRE(async_status_result, sess.request_backends_status(node.remote()));
	sync_backend_status_result result = async_status_result;

	BOOST_REQUIRE_EQUAL(result.size(), 1);

	backend_status_result_entry entry = result.front();

	for (size_t i = 0; i < backends_count; ++i) {
		auto status = entry.backend(i);
		BOOST_REQUIRE_NE(status->backend_id, backend_id);
	}
}
void cache_write_check_lru(session &sess, int id, const argument_data &data, long timeout,
                           lru_list_emulator_t &lru_list_emulator, ioremap::cache::cache_manager *cache) {

	key idKey = key(boost::lexical_cast<std::string>(id));

	int objects_number_before = cache->get_total_cache_stats().number_of_objects;
	ELLIPTICS_REQUIRE(write_result, sess.write_cache(idKey, data, timeout));
	lru_list_emulator.add(id);
	int objects_number_after = cache->get_total_cache_stats().number_of_objects;

	int objects_removed = objects_number_before - objects_number_after + 1;
	for (int i = 0; i < objects_removed; ++i) {
		lru_list_emulator.remove_last();
	}
}
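/*
 * lru_list_emulator_t mirrors the expected LRU order of the cache on the test side, so the
 * cache_*_check_lru() helpers can predict which keys should still be readable. Its real
 * definition is not included here; the sketch below is an assumption that matches the
 * add()/update()/remove_last()/contains() calls used in these helpers.
 */
struct lru_list_emulator_t {
	std::list<int> lru_list;

	void add(int id) {
		// a newly written key becomes the most recently used one
		lru_list.push_back(id);
	}

	void update(int id) {
		// a successful cache read refreshes the key, moving it to the MRU end
		lru_list.remove(id);
		lru_list.push_back(id);
	}

	void remove_last() {
		// eviction drops the least recently used key
		lru_list.pop_front();
	}

	bool contains(int id) const {
		return std::find(lru_list.begin(), lru_list.end(), id) != lru_list.end();
	}
};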
static void test_make_backend_writeable(session &sess)
{
	server_node &node = global_data->nodes.back();
	const key id = std::string("read_only_key");
	const std::string data = "read_only_data";

	ELLIPTICS_REQUIRE(async_readonly_result, sess.make_writable(node.remote(), 4));

	backend_status_result_entry result = async_readonly_result.get_one();
	BOOST_REQUIRE(result.is_valid());
	BOOST_REQUIRE_EQUAL(result.count(), 1);

	dnet_backend_status *status = result.backend(0);
	BOOST_REQUIRE_EQUAL(status->backend_id, 4);
	BOOST_REQUIRE_EQUAL(status->read_only, false);

	session new_sess = sess.clone();
	new_sess.set_direct_id(node.remote(), 4);

	ELLIPTICS_REQUIRE(write_result, new_sess.write_data(id, data, 0));
	ELLIPTICS_REQUIRE(read_result, new_sess.read_data(id, 0, 0));

	ELLIPTICS_REQUIRE_ERROR(second_async_readonly_result, sess.make_writable(node.remote(), 4), -EALREADY);
}
/*
 * After a key is written to the cache, its data will be synced to disk cache_sync_timeout seconds later.
 * Before syncing the key, dnet_oplock() is taken for it; after the key is synced, its oplock is released.
 *
 * The following test checks this mechanic by calling write_data(key, data) multiple times with the same data,
 * then writing cache data via write_cache(key, cache_data) and waiting cache_sync_timeout seconds
 * until the cache is synced back to disk (the backend), which takes the oplock. Then write_data(key, result_data) is called.
 * If that last write_data() operation times out, dnet_opunlock() (called after the cache sync) did not properly release the key's oplock.
 */
static void test_oplock(session &sess)
{
	const key id(std::string("oplock_key"));
	const std::string data = "some_data";
	const std::string cache_data = "cache_data";
	const std::string result_data = "result_data";

	const size_t num_writes = 10;
	std::unique_ptr<async_write_result[]> results(new async_write_result[num_writes]);

	for (size_t i = 0; i < num_writes; ++i) {
		results[i] = sess.write_data(id, data, 0);
	}
	for (size_t i = 0; i < num_writes; ++i) {
		results[i].wait();
	}
	ELLIPTICS_COMPARE_REQUIRE(read_data_result, sess.read_data(id, 0, 0), data);

	ELLIPTICS_REQUIRE(async_cache_write, sess.write_cache(id, cache_data, 0));
	sleep(cache_sync_timeout + 1);
	ELLIPTICS_COMPARE_REQUIRE(read_cache_result, sess.read_data(id, 0, 0), cache_data);
	ELLIPTICS_REQUIRE(async_write, sess.write_data(id, result_data, 0));
	ELLIPTICS_COMPARE_REQUIRE(read_result, sess.read_data(id, 0, 0), result_data);
}
static void init_application(session &sess, const std::string &app_name)
{
    key key_id = app_name;
    key_id.transform(sess);
    dnet_id id = key_id.id();

    node_info info;
    info.groups = { 1 };
    info.path = global_data->directory.path();

    for (auto it = global_data->nodes.begin(); it != global_data->nodes.end(); ++it)
        info.remotes.push_back(it->remote());

    ELLIPTICS_REQUIRE(exec_result, sess.exec(&id, app_name + "@init", info.pack()));

    sync_exec_result result = exec_result;
    BOOST_REQUIRE_EQUAL(result.size(), 1);
    BOOST_REQUIRE_EQUAL(result[0].context().data().to_string(), "inited");
}
void cache_read_check_lru(session &sess, int id, lru_list_emulator_t &lru_list_emulator,
                          ioremap::cache::cache_manager *cache) {

	key idKey = key(boost::lexical_cast<std::string>(id));
	std::unique_ptr<async_read_result> read_result;

	int objects_number_before = cache->get_total_cache_stats().number_of_objects;
	if (!lru_list_emulator.contains(id)) {
		ELLIPTICS_WARN_ERROR(read_result, sess.read_data(idKey, 0, 0), -ENOENT);
	} else {
		ELLIPTICS_REQUIRE(read_result, sess.read_data(idKey, 0, 0));
		lru_list_emulator.update(id);
	}
	int objects_number_after = cache->get_total_cache_stats().number_of_objects;

	int objects_removed = objects_number_before - objects_number_after;
	for (int i = 0; i < objects_removed; ++i) {
		lru_list_emulator.remove_last();
	}
}
static void test_enable_backend_at_empty_node(session &sess)
{
	server_node &node = global_data->nodes.back();

	std::string host = node.remote().to_string();
	auto tuple = std::make_tuple(host, groups_count, 1);

	auto unique_hosts = get_unique_hosts(sess);

	BOOST_REQUIRE_MESSAGE(unique_hosts.find(tuple) == unique_hosts.end(),
		"Host must not exist: " + host + ", group: 2, backend: 1");

	ELLIPTICS_REQUIRE(enable_result, sess.enable_backend(node.remote(), 1));

	// Wait 0.1 secs to ensure that route list was changed
	usleep(100 * 1000);

	unique_hosts = get_unique_hosts(sess);

	BOOST_REQUIRE_MESSAGE(unique_hosts.find(tuple) != unique_hosts.end(),
		"Host must exist: " + host + ", group: 2, backend: 1");
}
static void test_backend_status(session &sess)
{
	server_node &node = global_data->nodes[0];

	ELLIPTICS_REQUIRE(async_status_result, sess.request_backends_status(node.remote()));
	sync_backend_status_result result = async_status_result;

	BOOST_REQUIRE_EQUAL(result.size(), 1);

	backend_status_result_entry entry = result.front();

	BOOST_REQUIRE_EQUAL(entry.count(), backends_count);

	for (size_t i = 0; i < backends_count; ++i) {
		dnet_backend_status *status = entry.backend(i);
		BOOST_REQUIRE_EQUAL(status->backend_id, i);
		if (i < 2 || i == 3) {
			BOOST_REQUIRE_EQUAL(status->state, DNET_BACKEND_ENABLED);
		} else {
			BOOST_REQUIRE_EQUAL(status->state, DNET_BACKEND_DISABLED);
		}
	}
}
static void test_check_initial_config(session &sess) {
	auto &node = get_setup()->nodes.back();
	static const uint32_t backend_id = 4;

	ELLIPTICS_REQUIRE(result, sess.monitor_stat(node.remote(), DNET_MONITOR_BACKEND));
	BOOST_REQUIRE_EQUAL(result.get().size(), 1);

	auto monitor_initial_config = [&] () {
		std::istringstream stream(result.get().front().statistics());
		auto monitor_statistics = kora::dynamic::read_json(stream);
		return monitor_statistics.as_object()["backends"]
			.as_object()[std::to_string(backend_id)]
			.as_object()["backend"]
			.as_object()["initial_config"];
	} ();

	auto config_initial_config = [&] () {
		std::ifstream stream(node.config_path());
		auto config = kora::dynamic::read_json(stream);
		return config.as_object()["backends"].as_array()[backend_id];
	} ();
	BOOST_REQUIRE_EQUAL(monitor_initial_config, config_initial_config);
}
// Writing keys to all groups updates backend weights for every backend they were written to.
// Writes to a slow backend lead to a significant reduction of that backend's weight
// compared to faster ones.
// read_data() uses backend weights to choose the fastest group via dnet_mix_states().
//
// The following test checks this mechanic by reading previously written keys and checking
// the read distribution among backends. The slow backend is simulated by setting an artificial delay.
// The expected outcome is that reads are rarely sent to that slow backend.
//
// We define "rarely" as no more than 1% of total reads. This value was found empirically.
static void test_backend_weights(session &sess)
{
	// set an artificial backend delay to simulate slow i/o behaviour for a particular group
	set_backends_delay_for_group(sess, slow_group_id, backend_delay);

	const int num_keys = 10;
	for (int i = 0; i < num_keys; ++i) {
		const key id = std::string("key_") + std::to_string(static_cast<long long>(i));
		const std::string data = "some_data";
		ELLIPTICS_REQUIRE(async_write, sess.write_data(id, data, 0));
	}

	const int num_reads = 1000;
	int num_slow_group_reads = 0;
	for (int i = 0; i < num_reads; ++i) {
		const key id = std::string("key_") + std::to_string(static_cast<long long>(i % num_keys));
		auto async_result = sess.read_data(id, 0, 0);
		async_result.wait();

		read_result_entry read_result;
		async_result.get(read_result);

		const dnet_cmd *cmd = read_result.command();
		const int group_id = cmd->id.group_id;
		if ( group_id == slow_group_id )
			++num_slow_group_reads;
	}

	const int max_reads_from_slow_group = 10;
	BOOST_REQUIRE_MESSAGE(num_slow_group_reads < max_reads_from_slow_group,
			      "Too much reads from slow group (it means that backend weights are not working or backend hardware is extremely slow): "
			      "num_slow_group_reads: " + std::to_string(static_cast<long long>(num_slow_group_reads)) +
			      ", max_reads_from_slow_group: " + std::to_string(static_cast<long long>(max_reads_from_slow_group)));

	set_backends_delay_for_group(sess, slow_group_id, 0);
}
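/*
 * set_backends_delay_for_group() is another helper that is not part of these snippets. The
 * idea is to find every backend serving the given group in the route list and set an
 * artificial i/o delay on it (passing 0 resets the delay). The sketch assumes route entries
 * with addr/group_id/backend_id fields and a session::set_delay() call; both are assumptions
 * about the API, so treat this as an illustration of the intent only.
 */
static void set_backends_delay_for_group(session &sess, int group_id, int delay)
{
	for (const auto &entry : sess.get_routes()) {
		if (entry.group_id == group_id)
			sess.set_delay(entry.addr, entry.backend_id, delay).wait();
	}
}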
static void test_enable_backend(session &sess, uint32_t backend_id)
{
	server_node &node = get_setup()->nodes[0];

	std::string host = node.remote().to_string();
	auto tuple = std::make_tuple(host, 0, backend_id);

	auto unique_hosts = get_unique_hosts(sess);

	BOOST_REQUIRE_MESSAGE(unique_hosts.find(tuple) == unique_hosts.end(),
	                      "Host must not exist: " + host + ", group: 0, backend: " +
	                          std::to_string(static_cast<long long>(backend_id)));

	ELLIPTICS_REQUIRE(enable_result, sess.enable_backend(node.remote(), backend_id));

	// Wait 0.1 secs to ensure that route list was changed
	usleep(100 * 1000);

	unique_hosts = get_unique_hosts(sess);

	BOOST_REQUIRE_MESSAGE(unique_hosts.find(tuple) != unique_hosts.end(),
	                      "Host must exist: " + host + ", group: 0, backend: " +
	                          std::to_string(static_cast<long long>(backend_id)));
}