Example #1
static void test_cache_timestamp(session &sess)
{
	argument_data data("this is a timestamp test");

	key k("this is a timestamp test key");
	sess.transform(k);

	dnet_io_control ctl;
	memset(&ctl, 0, sizeof(ctl));

	ctl.data = data.data();

	dnet_current_time(&ctl.io.timestamp);
	ctl.io.flags = DNET_IO_FLAGS_CACHE;
	ctl.io.start = 5;
	ctl.io.size = data.size();

	memcpy(&ctl.id, &k.id(), sizeof(dnet_id));
	ctl.fd = -1;

	ELLIPTICS_REQUIRE(write_result, sess.write_data(ctl));

	sleep(ctl.io.start + 2);

	ELLIPTICS_REQUIRE(read_result, sess.read_data(k, 0, 0));
	auto io = read_result.get_one().io_attribute();

	BOOST_REQUIRE_EQUAL(io->timestamp.tsec, ctl.io.timestamp.tsec);
	BOOST_REQUIRE_EQUAL(io->timestamp.tnsec, ctl.io.timestamp.tnsec);
}
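
If the test needed a deterministic timestamp instead of the current time, the same structure could be filled by hand. A minimal sketch, assuming only the dnet_time fields (tsec/tnsec) already used in the assertions above; the concrete value is arbitrary:

	dnet_time ts;
	ts.tsec = 1400000000;	// seconds since the epoch, arbitrary fixed value for reproducibility
	ts.tnsec = 0;		// nanosecond part
	ctl.io.timestamp = ts;	// would replace the dnet_current_time() call above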
Example #2
void key::transform(session &sess)
{
	if (m_by_id)
		return;

	memset(&m_id, 0, sizeof(m_id));
	sess.transform(m_remote, m_id);
	m_id.type = m_type;
}
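
A typical caller resolves a key once before touching raw control structures, exactly as Example #1 does. A minimal usage sketch, assuming the session/key API shown above (key::id() returns a reference to the transformed dnet_id, as implied by the memcpy in Example #1):

	key k("some remote name");
	sess.transform(k);			// no-op if the key was already constructed from a raw id
	dnet_id id;
	memcpy(&id, &k.id(), sizeof(dnet_id));	// the transformed id is now valid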
Example #3
/*
 * Multiple writes to the same key must be processed in the same order as
 * they were initiated by the client.
 *
 * The following test checks this mechanism by calling write_cas() with data containing
 * a counter that is incremented after every write_cas(), and by checking that the previously
 * stored counter is one less than the current counter. The test also writes multiple different
 * keys (with repetitions) in a different order, thereby modelling a realistic workload.
 */
static void test_write_order_execution(session &sess)
{
	const int num_write_repetitions = 5;
	const int num_different_keys = 10;
	std::vector<std::pair<key, int>> keys;
	for (int i = 0; i < num_different_keys; ++i) {
		key id(std::to_string(static_cast<unsigned long long>(i)));
		for (int j = 0; j < num_write_repetitions; ++j) {
			keys.push_back(std::make_pair(id, i));
		}
	}

	std::unique_ptr<async_write_result[]> results(new async_write_result[keys.size()]);
	dnet_id old_csum;

	const int num_iterations = 30;
	for (int i = 0; i < num_iterations; ++i) {
		// every key is associated with a counter, which is initialized to zero
		std::vector<int> write_counter(num_different_keys, 0);

		std::random_shuffle(keys.begin(), keys.end());

		for (size_t j = 0; j < keys.size(); ++j) {
			// increment the counter associated with the key identified by key_id
			const int key_id = keys[j].second;
			const int new_value = write_counter[key_id]++;
			if (new_value > 0) {
				const int prev_value = new_value - 1;
				memset(&old_csum, 0, sizeof(old_csum));
				sess.transform(std::to_string(static_cast<unsigned long long>(prev_value)), old_csum);
				results[j] = std::move(sess.write_cas(keys[j].first, std::to_string(static_cast<unsigned long long>(new_value)), old_csum, 0));
			} else {
				// first write
				results[j] = std::move(sess.write_data(keys[j].first, std::to_string(static_cast<unsigned long long>(new_value)), 0));
			}
		}

		for (size_t j = 0; j < keys.size(); ++j) {
			results[j].wait();
			const int err = results[j].error().code();
			BOOST_REQUIRE_MESSAGE(err == 0,
					      "write_cas() failed (err=" + std::to_string(static_cast<unsigned long long>(err)) + "): "
					      "multiple consecutive writes are executed out-of-order "
					      "or overlapped. Oplock mechanism of backend's request queue is broken.");
		}
	}
}
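
Note that std::random_shuffle was deprecated in C++14 and removed in C++17, so on newer toolchains the shuffle above has to be replaced. A minimal sketch of the drop-in replacement, assuming <algorithm> and <random> are included:

	std::random_device rd;
	std::mt19937 gen(rd());
	std::shuffle(keys.begin(), keys.end(), gen);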