Example #1
/**
 * Enable statistics at a fixed interval and verify that the stats
 * callbacks arrive at reasonably spaced intervals.
 */
static void do_test_stats_timer (void) {
    rd_kafka_t *rk;
    rd_kafka_conf_t *conf;
    const int exp_calls = 10;
    char errstr[512];
    struct state state;
    test_timing_t t_new;

    memset(&state, 0, sizeof(state));

    state.interval = 600*1000;

    test_conf_init(&conf, NULL, 200);

    test_conf_set(conf, "statistics.interval.ms", "600");
    rd_kafka_conf_set_stats_cb(conf, stats_cb);
    rd_kafka_conf_set_opaque(conf, &state);


    TIMING_START(&t_new, "rd_kafka_new()");
    rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
    TIMING_STOP(&t_new);
    if (!rk)
        TEST_FAIL("Failed to create instance: %s\n", errstr);

    TEST_SAY("Starting wait loop for %d expected stats_cb calls "
             "with an interval of %dms\n",
             exp_calls, state.interval/1000);


    while (state.calls < exp_calls) {
        test_timing_t t_poll;
        TIMING_START(&t_poll, "rd_kafka_poll()");
        rd_kafka_poll(rk, 100);
        TIMING_STOP(&t_poll);

        if (TIMING_DURATION(&t_poll) > 150*1000)
            TEST_WARN("rd_kafka_poll(rk,100) "
                      "took more than 50%% extra\n");
    }

    rd_kafka_destroy(rk);

    if (state.calls > exp_calls)
        TEST_SAY("Got more calls than expected: %d > %d\n",
                 state.calls, exp_calls);

    if (state.fails)
        TEST_FAIL("%d/%d intervals failed\n", state.fails, state.calls);
    else
        TEST_SAY("All %d intervals okay\n", state.calls);
}
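The example above references a struct state and a stats_cb that are defined elsewhere in the test file. A minimal sketch of what they could look like, assuming the callback times the gap between invocations against state.interval (only the interval, calls and fails fields appear in the example; ts_last is an assumed helper field):

struct state {
        int interval;    /* expected interval, in microseconds */
        int calls;       /* number of stats callbacks seen */
        int fails;       /* number of intervals outside tolerance */
        int64_t ts_last; /* timestamp of previous callback (assumed) */
};

static int stats_cb (rd_kafka_t *rk, char *json, size_t json_len,
                     void *opaque) {
        struct state *state = opaque;
        const int64_t now = test_clock();

        /* Flag intervals deviating more than 25% from the target. */
        if (state->ts_last &&
            (now - state->ts_last < (int64_t)state->interval * 3 / 4 ||
             now - state->ts_last > (int64_t)state->interval * 5 / 4))
                state->fails++;

        state->ts_last = now;
        state->calls++;

        return 0; /* 0: librdkafka retains ownership of json and frees it */
}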
Example #2
int main_0035_api_version (int argc, char **argv) {
	rd_kafka_t *rk;
	rd_kafka_conf_t *conf;
	const struct rd_kafka_metadata *metadata;
	rd_kafka_resp_err_t err;
	test_timing_t t_meta;

	test_conf_init(&conf, NULL, 30);
	test_conf_set(conf, "socket.timeout.ms", "12000");
	rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

	TEST_SAY("Querying for metadata\n");
	TIMING_START(&t_meta, "metadata()");
	err = rd_kafka_metadata(rk, 0, NULL, &metadata, 10*1000);
	TIMING_STOP(&t_meta);
	if (err)
		TEST_FAIL("metadata() failed: %s",
			  rd_kafka_err2str(err));

	if (TIMING_DURATION(&t_meta) / 1000 > 11*1000)
		TEST_FAIL("metadata() took too long: %.3fms",
			  (float)TIMING_DURATION(&t_meta) / 1000.0f);

	rd_kafka_metadata_destroy(metadata);

	TEST_SAY("Metadata succeeded\n");

	rd_kafka_destroy(rk);

	return 0;
}
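Note the one second of slack in the timing check: rd_kafka_metadata() is given a 10 s timeout, and TIMING_DURATION() returns microseconds (hence the division by 1000), so the test only fails if the call overruns its own timeout by more than a second.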
Example #3
static int run_test0 (struct run_args *run_args) {
	test_timing_t t_run;
	int r;

	test_curr = run_args->testname;
	TEST_SAY("================= Running test %s =================\n",
		 run_args->testname);
	TIMING_START(&t_run, run_args->testname);
	test_start = t_run.ts_start;
	r = run_args->test_main(run_args->argc, run_args->argv);
	TIMING_STOP(&t_run);

	if (r)
		TEST_SAY("\033[31m"
			 "================= Test %s FAILED ================="
			 "\033[0m\n",
			 run_args->testname);
	else
		TEST_SAY("\033[32m"
			 "================= Test %s PASSED ================="
			 "\033[0m\n",
			 run_args->testname);

	return r;
}
Example #4
static void test_producer_no_connection (void) {
	rd_kafka_t *rk;
	rd_kafka_conf_t *conf;
	rd_kafka_topic_t *rkt;
	int i;
	const int partition_cnt = 2;
	int msgcnt = 0;
	test_timing_t t_destroy;

	test_conf_init(&conf, NULL, 20);

	test_conf_set(conf, "bootstrap.servers", NULL);

	rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
	rkt = test_create_topic_object(rk, __FUNCTION__,
				       "message.timeout.ms", "5000", NULL);

	test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA, 0, 100,
				 NULL, 100, 0, &msgcnt);
	for (i = 0 ; i < partition_cnt ; i++)
		test_produce_msgs_nowait(rk, rkt, 0, i,
					 0, 100, NULL, 100, 0, &msgcnt);

	rd_kafka_poll(rk, 1000);

	TEST_SAY("%d messages in queue\n", rd_kafka_outq_len(rk));

	rd_kafka_topic_destroy(rkt);

	TIMING_START(&t_destroy, "rd_kafka_destroy()");
	rd_kafka_destroy(rk);
	TIMING_STOP(&t_destroy);
}
Example #5
int test_consumer_poll (const char *what, rd_kafka_t *rk, uint64_t testid,
                        int exp_eof_cnt, int exp_msg_base, int exp_cnt) {
        int eof_cnt = 0;
        int cnt = 0;
        test_timing_t t_cons;

        TEST_SAY("%s: consume %d messages\n", what, exp_cnt);

        TIMING_START(&t_cons, "CONSUME");

        while ((exp_eof_cnt == -1 || eof_cnt < exp_eof_cnt) &&
               (cnt < exp_cnt)) {
                rd_kafka_message_t *rkmessage;

                rkmessage = rd_kafka_consumer_poll(rk, 10*1000);
                if (!rkmessage) /* Shouldn't take this long to get a msg */
                        TEST_FAIL("%s: consumer_poll() timeout\n", what);


                if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
                        TEST_SAY("%s [%"PRId32"] reached EOF at "
                                 "offset %"PRId64"\n",
                                 rd_kafka_topic_name(rkmessage->rkt),
                                 rkmessage->partition,
                                 rkmessage->offset);
                        eof_cnt++;

                } else if (rkmessage->err) {
                        TEST_SAY("%s [%"PRId32"] error (offset %"PRId64"): %s",
                                 rkmessage->rkt ?
                                 rd_kafka_topic_name(rkmessage->rkt) :
                                 "(no-topic)",
                                 rkmessage->partition,
                                 rkmessage->offset,
                                 rd_kafka_message_errstr(rkmessage));

                } else {
			if (test_level > 2)
				TEST_SAY("%s [%"PRId32"] "
					 "message at offset %"PRId64"\n",
					 rd_kafka_topic_name(rkmessage->rkt),
					 rkmessage->partition,
					 rkmessage->offset);

                        test_verify_rkmessage(rkmessage, testid, -1, -1);
                        cnt++;
                }

                rd_kafka_message_destroy(rkmessage);
        }

        TIMING_STOP(&t_cons);

        TEST_SAY("%s: consumed %d/%d messages (%d/%d EOFs)\n",
                 what, cnt, exp_cnt, eof_cnt, exp_eof_cnt);
        return cnt;
}
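A hypothetical call site for this variant (later examples in this collection use a newer signature with an extra test_msgver_t argument):

        /* Hypothetical usage: expect 100 messages and one EOF per partition
         * of a two-partition topic subscribed to elsewhere. */
        test_consumer_subscribe(rk, topic);
        test_consumer_poll("CONSUME", rk, testid, 2, 0, 100);
        test_consumer_close(rk);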
Example #6
static int run_test0 (struct run_args *run_args) {
        struct test *test = run_args->test;
	test_timing_t t_run;
	int r;
        char stats_file[256];

        rd_snprintf(stats_file, sizeof(stats_file), "stats_%s_%"PRIu64".json",
                    test->name, test_id_generate());
        if (!(test->stats_fp = fopen(stats_file, "w+")))
                TEST_SAY("=== Failed to create stats file %s: %s ===\n",
                         stats_file, strerror(errno));

	test_curr = test;
	TEST_SAY("================= Running test %s =================\n",
		 test->name);
        if (test->stats_fp)
                TEST_SAY("==== Stats written to file %s ====\n", stats_file);
	TIMING_START(&t_run, test->name);
        test->start = t_run.ts_start;
	r = test->mainfunc(run_args->argc, run_args->argv);
	TIMING_STOP(&t_run);

        TEST_LOCK();
        test->duration = TIMING_DURATION(&t_run);
	if (r) {
                test->state = TEST_FAILED;
		TEST_SAY("\033[31m"
			 "================= Test %s FAILED ================="
			 "\033[0m\n",
                         run_args->test->name);
        } else {
                test->state = TEST_PASSED;
		TEST_SAY("\033[32m"
			 "================= Test %s PASSED ================="
			 "\033[0m\n",
                         run_args->test->name);
        }
        TEST_UNLOCK();

        if (test->stats_fp) {
                long pos = ftell(test->stats_fp);
                fclose(test->stats_fp);
                test->stats_fp = NULL;
                /* Delete file if nothing was written */
                if (pos == 0) {
#ifndef _MSC_VER
                        unlink(stats_file);
#else
                        _unlink(stats_file);
#endif
                }
        }

	return r;
}
Example #7
/**
 * Waits for the messages tracked by counter \p msgcounterp to be delivered.
 */
void test_wait_delivery (rd_kafka_t *rk, int *msgcounterp) {
	test_timing_t t_all;

        TIMING_START(&t_all, "PRODUCE.DELIVERY.WAIT");

	/* Wait for messages to be delivered */
	while (*msgcounterp > 0 && rd_kafka_outq_len(rk) > 0)
		rd_kafka_poll(rk, 10);

	TIMING_STOP(&t_all);

}
Example #8
void test_consumer_unassign (const char *what, rd_kafka_t *rk) {
        rd_kafka_resp_err_t err;
        test_timing_t timing;

        TIMING_START(&timing, "UNASSIGN.PARTITIONS");
        err = rd_kafka_assign(rk, NULL);
        TIMING_STOP(&timing);
        if (err)
                TEST_FAIL("%s: failed to unassign current partitions: %s\n",
                          what, rd_kafka_err2str(err));
        else
                TEST_SAY("%s: unassigned current partitions\n", what);
}
Example #9
void test_consumer_close (rd_kafka_t *rk) {
        rd_kafka_resp_err_t err;
        test_timing_t timing;

        TEST_SAY("Closing consumer\n");

        TIMING_START(&timing, "CONSUMER.CLOSE");
        err = rd_kafka_consumer_close(rk);
        TIMING_STOP(&timing);
        if (err)
                TEST_FAIL("Failed to close consumer: %s\n",
                          rd_kafka_err2str(err));
}
Example #10
int main_0001_multiobj (int argc, char **argv) {
	int partition = RD_KAFKA_PARTITION_UA; /* random */
	int i;
	const int NUM_ITER = 10;
        const char *topic = NULL;

	TEST_SAY("Creating and destroying %i kafka instances\n", NUM_ITER);

	/* Create, use and destroy NUM_ITER kafka instances. */
	for (i = 0 ; i < NUM_ITER ; i++) {
		rd_kafka_t *rk;
		rd_kafka_topic_t *rkt;
		rd_kafka_conf_t *conf;
		rd_kafka_topic_conf_t *topic_conf;
		char msg[128];
                test_timing_t t_destroy;

		test_conf_init(&conf, &topic_conf, 30);

                if (!topic)
                        topic = test_mk_topic_name("0001", 0);

		rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

		rkt = rd_kafka_topic_new(rk, topic, topic_conf);
		if (!rkt)
			TEST_FAIL("Failed to create topic for "
				  "rdkafka instance #%i: %s\n",
				  i, rd_kafka_err2str(rd_kafka_errno2err(errno)));

		rd_snprintf(msg, sizeof(msg), "%s test message for iteration #%i",
			 argv[0], i);

		/* Produce a message */
		rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
				 msg, strlen(msg), NULL, 0, NULL);
		
		/* Wait for it to be sent (and possibly acked) */
		rd_kafka_flush(rk, -1);

		/* Destroy topic */
		rd_kafka_topic_destroy(rkt);

		/* Destroy rdkafka instance */
                TIMING_START(&t_destroy, "rd_kafka_destroy()");
		rd_kafka_destroy(rk);
                TIMING_STOP(&t_destroy);
	}

	return 0;
}
Example #11
void test_produce_msgs (rd_kafka_t *rk, rd_kafka_topic_t *rkt,
                        uint64_t testid, int32_t partition,
                        int msg_base, int cnt,
			const char *payload, size_t size) {
	int msg_id;
	test_timing_t t_all;
	int remains = 0;

	TEST_SAY("Produce to %s [%"PRId32"]: messages #%d..%d\n",
		 rd_kafka_topic_name(rkt), partition, msg_base, msg_base+cnt);

	TIMING_START(&t_all, "PRODUCE");

	for (msg_id = msg_base ; msg_id < msg_base + cnt ; msg_id++) {
		char key[128];
		char buf[128];
		const char *use_payload;
		size_t use_size;

		/* Format the key unconditionally so it is always
		 * initialized: it is passed to rd_kafka_produce() below
		 * even when a caller-supplied payload is used. */
		test_msg_fmt(key, sizeof(key), testid, partition, msg_id);

		if (payload) {
			use_payload = payload;
			use_size = size;
		} else {
			rd_snprintf(buf, sizeof(buf), "data: %s", key);
			use_payload = buf;
			use_size = strlen(buf);
		}

		remains++;

		if (rd_kafka_produce(rkt, partition,
				     RD_KAFKA_MSG_F_COPY,
				     (void *)use_payload, use_size,
				     key, strlen(key),
				     &remains) == -1)
			TEST_FAIL("Failed to produce message %i "
				  "to partition %i: %s",
				  msg_id, (int)partition,
				  rd_kafka_err2str(rd_kafka_errno2err(errno)));

        }


	/* Wait for messages to be delivered */
	while (remains > 0 && rd_kafka_outq_len(rk) > 0)
		rd_kafka_poll(rk, 10);

	TIMING_STOP(&t_all);
}
Example #12
void test_consumer_assign (const char *what, rd_kafka_t *rk,
			   rd_kafka_topic_partition_list_t *partitions) {
        rd_kafka_resp_err_t err;
        test_timing_t timing;

        TIMING_START(&timing, "ASSIGN.PARTITIONS");
        err = rd_kafka_assign(rk, partitions);
        TIMING_STOP(&timing);
        if (err)
                TEST_FAIL("%s: failed to assign %d partition(s): %s\n",
			  what, partitions->cnt, rd_kafka_err2str(err));
        else
                TEST_SAY("%s: assigned %d partition(s)\n",
			 what, partitions->cnt);
}
Example #13
static void legacy_consume_many (char **topics, int topic_cnt, uint64_t testid){
	rd_kafka_t *rk;
        test_timing_t t_rkt_create;
        int i;
	rd_kafka_topic_t **rkts;
	int msg_base = 0;

	TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__);

	test_conf_init(NULL, NULL, 60);

	rk = test_create_consumer(NULL, NULL, NULL, NULL);

	TEST_SAY("Creating %d topic objects\n", topic_cnt);
		 
	rkts = malloc(sizeof(*rkts) * topic_cnt);
	TIMING_START(&t_rkt_create, "Topic object create");
	for (i = 0 ; i < topic_cnt ; i++)
		rkts[i] = test_create_topic_object(rk, topics[i], NULL);
	TIMING_STOP(&t_rkt_create);

	TEST_SAY("Start consumer for %d topics\n", topic_cnt);
	for (i = 0 ; i < topic_cnt ; i++)
		test_consumer_start("legacy", rkts[i], 0,
				    RD_KAFKA_OFFSET_BEGINNING);
	
	TEST_SAY("Consuming from %d messages from each %d topics\n",
		 msgs_per_topic, topic_cnt);
	for (i = 0 ; i < topic_cnt ; i++) {
		test_consume_msgs("legacy", rkts[i], testid, 0, TEST_NO_SEEK,
				  msg_base, msgs_per_topic, 1);
		msg_base += msgs_per_topic;
	}

	TEST_SAY("Stopping consumers\n");
	for (i = 0 ; i < topic_cnt ; i++)
		test_consumer_stop("legacy", rkts[i], 0);


	TEST_SAY("Destroying %d topic objects\n", topic_cnt);
	for (i = 0 ; i < topic_cnt ; i++)
		rd_kafka_topic_destroy(rkts[i]);

	free(rkts);

	rd_kafka_destroy(rk);
}
Example #14
/**
 * @brief Verify that an unclean rd_kafka_destroy() does not hang.
 */
static void do_test_unclean_destroy (rd_kafka_type_t cltype, int with_mainq) {
        rd_kafka_t *rk;
        char errstr[512];
        rd_kafka_conf_t *conf;
        rd_kafka_queue_t *q;
        rd_kafka_event_t *rkev;
        rd_kafka_DeleteTopic_t *topic;
        test_timing_t t_destroy;

        test_conf_init(&conf, NULL, 0);
        /* Remove brokers, if any, since this is a local test and we
         * rely on the controller not being found. */
        test_conf_set(conf, "bootstrap.servers", "");
        test_conf_set(conf, "socket.timeout.ms", "60000");

        rk = rd_kafka_new(cltype, conf, errstr, sizeof(errstr));
        TEST_ASSERT(rk, "kafka_new(%d): %s", cltype, errstr);

        TEST_SAY(_C_MAG "[ Test unclean destroy for %s using %s]\n", rd_kafka_name(rk),
                 with_mainq ? "mainq" : "tempq");

        if (with_mainq)
                q = rd_kafka_queue_get_main(rk);
        else
                q = rd_kafka_queue_new(rk);

        topic = rd_kafka_DeleteTopic_new("test");
        rd_kafka_DeleteTopics(rk, &topic, 1, NULL, q);
        rd_kafka_DeleteTopic_destroy(topic);

        /* We're not expecting a result yet since DeleteTopics will attempt
         * to look up the controller for socket.timeout.ms (1 minute). */
        rkev = rd_kafka_queue_poll(q, 100);
        TEST_ASSERT(!rkev, "Did not expect result: %s", rd_kafka_event_name(rkev));

        rd_kafka_queue_destroy(q);

        TEST_SAY("Giving rd_kafka_destroy() 5s to finish, "
                 "despite Admin API request being processed\n");
        test_timeout_set(5);
        TIMING_START(&t_destroy, "rd_kafka_destroy()");
        rd_kafka_destroy(rk);
        TIMING_STOP(&t_destroy);

        /* Restore timeout */
        test_timeout_set(60);
}
Example #15
/**
 * Produces \p cnt messages and returns immediately.
 * Does not wait for delivery.
 * \p msgcounterp is incremented for each produced message and passed
 * as \p msg_opaque which is later used in test_dr_cb to decrement
 * the counter on delivery.
 */
void test_produce_msgs_nowait (rd_kafka_t *rk, rd_kafka_topic_t *rkt,
                               uint64_t testid, int32_t partition,
                               int msg_base, int cnt,
                               const char *payload, size_t size,
                               int *msgcounterp) {
	int msg_id;
	test_timing_t t_all;

	TEST_SAY("Produce to %s [%"PRId32"]: messages #%d..%d\n",
		 rd_kafka_topic_name(rkt), partition, msg_base, msg_base+cnt);

	TIMING_START(&t_all, "PRODUCE");

	for (msg_id = msg_base ; msg_id < msg_base + cnt ; msg_id++) {
		char key[128];
		char buf[128];
		const char *use_payload;
		size_t use_size;

		/* Format the key unconditionally so it is always
		 * initialized: it is passed to rd_kafka_produce() below
		 * even when a caller-supplied payload is used. */
		test_msg_fmt(key, sizeof(key), testid, partition, msg_id);

		if (payload) {
			use_payload = payload;
			use_size = size;
		} else {
			rd_snprintf(buf, sizeof(buf), "%s: data", key);
			use_payload = buf;
			use_size = strlen(buf);
		}

		if (rd_kafka_produce(rkt, partition,
				     RD_KAFKA_MSG_F_COPY,
				     (void *)use_payload, use_size,
				     key, strlen(key),
				     msgcounterp) == -1)
			TEST_FAIL("Failed to produce message %i "
				  "to partition %i: %s",
				  msg_id, (int)partition,
				  rd_kafka_err2str(rd_kafka_errno2err(errno)));

                (*msgcounterp)++;

        }

	TIMING_STOP(&t_all);
}
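The msg_opaque scheme described in the comment requires a delivery-report callback on the producer; Example #22 registers one with rd_kafka_conf_set_dr_cb(conf, test_dr_cb). A minimal sketch of such a callback, assuming it only maintains the counter:

static void test_dr_cb (rd_kafka_t *rk, void *payload, size_t len,
                        rd_kafka_resp_err_t err, void *opaque,
                        void *msg_opaque) {
        int *remainsp = msg_opaque; /* counter passed to rd_kafka_produce() */

        if (err)
                TEST_FAIL("Message delivery failed: %s",
                          rd_kafka_err2str(err));

        (*remainsp)--;
}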
Example #16
/* evict_block should evict exactly one block if it is successful */
static int wb_evict_block(BD_t * object, bool only_dirty)
{
	struct cache_info * info = (struct cache_info *) object;
	
#ifdef __KERNEL__
	revision_tail_process_landing_requests();
#endif
	for(;;)
	{
		int r = FLUSH_EMPTY;
		struct cache_slot * slot;
		for(slot = info->blocks[0].lru; slot != &info->blocks[0]; slot = slot->prev)
		{
			int code = wb_flush_block(object, slot);
			if(code == FLUSH_DONE || (!only_dirty && code == FLUSH_EMPTY))
			{
				wb_pop_block(info, slot->block->cache_number, (uint32_t) (slot - &info->blocks[0]));
				return 0;
			}
			r |= code;
		}
#ifdef __KERNEL__
		/* For both FLUSH_NONE and FLUSH_SOME we must wait to make
		 * progress if there are any flights in progress. For FLUSH_NONE
		 * this is obvious; for FLUSH_SOME you must consider that the
		 * only way more blocks can be written is by waiting for the
		 * blocks that were just written to be completed, assuming that
		 * we do not have stacked caches. */
		if(revision_tail_flights_exist())
		{
			KERNEL_INTERVAL(wait);
			TIMING_START(wait);
			revision_tail_wait_for_landing_requests();
			revision_tail_process_landing_requests();
			TIMING_STOP(wait, wait);
		}
		else
#endif
		if(r == FLUSH_NONE)
			return -EBUSY;
	}
}
Example #17
/**
 * Creates a producer, produces \p msgcnt messages to \p topic \p partition,
 * destroys the producer, and returns the testid used.
 */
uint64_t
test_produce_msgs_easy (const char *topic, uint64_t testid,
                        int32_t partition, int msgcnt) {
        rd_kafka_t *rk;
        rd_kafka_topic_t *rkt;
        test_timing_t t_produce;

        if (!testid)
                testid = test_id_generate();
        rk = test_create_producer();
        rkt = test_create_producer_topic(rk, topic, NULL);

        TIMING_START(&t_produce, "PRODUCE");
        test_produce_msgs(rk, rkt, testid, partition, 0, msgcnt, NULL, 0);
        TIMING_STOP(&t_produce);
        rd_kafka_topic_destroy(rkt);
        rd_kafka_destroy(rk);

        return testid;
}
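Passing testid = 0 lets the helper generate one; Example #27 below uses it exactly that way:

        testid = test_produce_msgs_easy(topic, 0, RD_KAFKA_PARTITION_UA, msgcnt);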
Example #18
static void test_producer_partition_cnt_change (void) {
	rd_kafka_t *rk;
	rd_kafka_conf_t *conf;
	rd_kafka_topic_t *rkt;
	const char *topic = test_mk_topic_name(__FUNCTION__, 1);
	const int partition_cnt = 4;
	int msgcnt = 100000;
	test_timing_t t_destroy;
	int produced = 0;

	test_kafka_topics("--create --topic %s --replication-factor 1 "
			  "--partitions %d",
			  topic, partition_cnt/2);

	test_conf_init(&conf, NULL, 20);

	rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
	rkt = test_create_topic_object(rk, topic,
				       "message.timeout.ms",
                                       tsprintf("%d", tmout_multip(5000)),
                                       NULL);

	test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA, 0, msgcnt/2,
				 NULL, 100, &produced);

	test_kafka_topics("--alter --topic %s --partitions %d",
			  topic, partition_cnt);

	test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA,
				 msgcnt/2, msgcnt/2,
				 NULL, 100, &produced);

	test_wait_delivery(rk, &produced);

	rd_kafka_topic_destroy(rkt);

	TIMING_START(&t_destroy, "rd_kafka_destroy()");
	rd_kafka_destroy(rk);
	TIMING_STOP(&t_destroy);
}
Example #19
/**
 * @brief Verify that an unclean rd_kafka_destroy() does not hang.
 */
static void do_test_unclean_destroy (rd_kafka_type_t cltype, int with_mainq) {
        rd_kafka_t *rk;
        char errstr[512];
        rd_kafka_conf_t *conf;
        rd_kafka_queue_t *q;
        rd_kafka_NewTopic_t *topic;
        test_timing_t t_destroy;

        test_conf_init(&conf, NULL, 0);

        rk = rd_kafka_new(cltype, conf, errstr, sizeof(errstr));
        TEST_ASSERT(rk, "kafka_new(%d): %s", cltype, errstr);

        TEST_SAY(_C_MAG "[ Test unclean destroy for %s using %s]\n", rd_kafka_name(rk),
                 with_mainq ? "mainq" : "tempq");

        if (with_mainq)
                q = rd_kafka_queue_get_main(rk);
        else
                q = rd_kafka_queue_new(rk);

        topic = rd_kafka_NewTopic_new(test_mk_topic_name(__FUNCTION__, 1),
                                      3, 1, NULL, 0);
        rd_kafka_CreateTopics(rk, &topic, 1, NULL, q);
        rd_kafka_NewTopic_destroy(topic);

        rd_kafka_queue_destroy(q);

        TEST_SAY("Giving rd_kafka_destroy() 5s to finish, "
                 "despite Admin API request being processed\n");
        test_timeout_set(5);
        TIMING_START(&t_destroy, "rd_kafka_destroy()");
        rd_kafka_destroy(rk);
        TIMING_STOP(&t_destroy);

        /* Restore timeout */
        test_timeout_set(60);
}
Example #20
static void produce_many (char **topics, int topic_cnt, uint64_t testid) {
	rd_kafka_t *rk;
        test_timing_t t_rkt_create;
        int i;
	rd_kafka_topic_t **rkts;

	TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__);

	rk = test_create_producer();
	
	TEST_SAY("Creating %d topic objects\n", topic_cnt);
		 
	rkts = malloc(sizeof(*rkts) * topic_cnt);
	TIMING_START(&t_rkt_create, "Topic object create");
	for (i = 0 ; i < topic_cnt ; i++) {
		rkts[i] = test_create_topic_object(rk, topics[i], NULL);
	}
	TIMING_STOP(&t_rkt_create);

	TEST_SAY("Producing %d messages to each %d topics\n",
		 msgs_per_topic, topic_cnt);
        /* Produce messages to each topic (so they are created) */
	for (i = 0 ; i < topic_cnt ; i++) {
		test_produce_msgs(rk, rkts[i], testid, 0,
				  i * msgs_per_topic, msgs_per_topic,
				  NULL, 100);
	}

	TEST_SAY("Destroying %d topic objects\n", topic_cnt);
	for (i = 0 ; i < topic_cnt ; i++) {
		rd_kafka_topic_destroy(rkts[i]);
	}
	free(rkts);

	test_flush(rk, 30000);

	rd_kafka_destroy(rk);
}
Example #21
static int do_test_consume_batch (void) {
#define topic_cnt 2
	const char *topics[topic_cnt] = {
                test_mk_topic_name(__FUNCTION__, 1),
                test_mk_topic_name(__FUNCTION__, 1)
        };
        const int partition_cnt = 2;
	rd_kafka_t *rk;
        rd_kafka_queue_t *rkq;
        rd_kafka_topic_t *rkts[topic_cnt];
	rd_kafka_resp_err_t err;
        const int msgcnt = 10000;
        uint64_t testid;
        int i, p;
        int batch_cnt = 0;
        int remains;

        testid = test_id_generate();

        /* Produce messages */
        for (i = 0 ; i < topic_cnt ; i++) {
                for (p = 0 ; p < partition_cnt ; p++)
                        test_produce_msgs_easy(topics[i], testid, p,
                                               msgcnt / topic_cnt /
                                               partition_cnt);
        }


        /* Create simple consumer */
        rk = test_create_consumer(NULL, NULL, NULL, NULL);

        /* Create generic consume queue */
        rkq = rd_kafka_queue_new(rk);

        for (i = 0 ; i < topic_cnt ; i++) {
                /* Create topic object */
                rkts[i] = test_create_topic(rk, topics[i],
                                            "auto.offset.reset", "smallest",
                                            NULL);

                /* Start consuming each partition and redirect
                 * messages to queue */

                TEST_SAY("Start consuming topic %s partitions 0..%d\n",
                         rd_kafka_topic_name(rkts[i]), partition_cnt);

                for (p = 0 ; p < partition_cnt ; p++) {
                        err = rd_kafka_consume_start_queue(
                                rkts[i], p, RD_KAFKA_OFFSET_BEGINNING, rkq);
                        if (err)
                                TEST_FAIL("Failed to start consuming: %s\n",
                                          rd_kafka_err2str(err));
                }
        }

        remains = msgcnt;

        /* Consume messages from common queue using batch interface. */
        TEST_SAY("Consume %d messages from queue\n", remains);
        while (remains > 0) {
                rd_kafka_message_t *rkmessage[1000];
                ssize_t r;
                test_timing_t t_batch;

                TIMING_START(&t_batch, "CONSUME.BATCH");
                r = rd_kafka_consume_batch_queue(rkq, 1000, rkmessage, 1000);
                TIMING_STOP(&t_batch);

                TEST_SAY("Batch consume iteration #%d: Consumed %"PRIdsz
                         "/1000 messages\n", batch_cnt, r);

                if (r == -1)
                        TEST_FAIL("Failed to consume messages: %s\n",
                                  rd_kafka_err2str(rd_kafka_errno2err(errno)));

                remains -= r;

                for (i = 0 ; i < r ; i++)
                        rd_kafka_message_destroy(rkmessage[i]);

                batch_cnt++;
        }


        TEST_SAY("Stopping consumer\n");
        for (i = 0 ; i < topic_cnt ; i++) {
                for (p = 0 ; p < partition_cnt ; p++) {
                        err = rd_kafka_consume_stop(rkts[i], p);
                        if (err)
                                TEST_FAIL("Failed to stop consuming: %s\n",
                                          rd_kafka_err2str(err));
                }

                rd_kafka_topic_destroy(rkts[i]);
        }

        rd_kafka_queue_destroy(rkq);

        rd_kafka_destroy(rk);

        return 0;
}
Example #22
int main_0038_performance (int argc, char **argv) {
	const char *topic = test_mk_topic_name(__FUNCTION__, 1);
	const int partition = 0;
	const int msgsize = 100;
	uint64_t testid;
	rd_kafka_conf_t *conf;
	rd_kafka_t *rk;
	rd_kafka_topic_t *rkt;
	test_timing_t t_create, t_produce, t_consume;
	int totsize = 1024*1024*128;
	int msgcnt;

	if (!strcmp(test_mode, "valgrind") || !strcmp(test_mode, "helgrind") ||
	    !strcmp(test_mode, "drd"))
		totsize = 1024*1024*8; /* 8 meg, valgrind is slow. */

	msgcnt = totsize / msgsize;

	TEST_SAY("Producing %d messages of size %d to %s [%d]\n",
		 msgcnt, (int)msgsize, topic, partition);
	testid = test_id_generate();
	test_conf_init(&conf, NULL, 120);
	rd_kafka_conf_set_dr_cb(conf, test_dr_cb);
	test_conf_set(conf, "queue.buffering.max.messages", "10000000");
	rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
	rkt = test_create_producer_topic(rk, topic, "acks", "1", NULL);

	/* First produce one message to create the topic, etc. This might take
	 * a while and we don't want it to affect the throughput timing. */
	TIMING_START(&t_create, "CREATE TOPIC");
	test_produce_msgs(rk, rkt, testid, partition, 0, 1, NULL, msgsize);
	TIMING_STOP(&t_create);

	TIMING_START(&t_produce, "PRODUCE");
	test_produce_msgs(rk, rkt, testid, partition, 1, msgcnt-1, NULL, msgsize);
	TIMING_STOP(&t_produce);

	TEST_SAY("Destroying producer\n");
	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);

	TEST_SAY("Creating consumer\n");
	test_conf_init(&conf, NULL, 120);
	rk = test_create_consumer(NULL, NULL, conf, NULL, NULL);
	rkt = rd_kafka_topic_new(rk, topic, NULL);

	test_consumer_start("CONSUME", rkt, partition,
			    RD_KAFKA_OFFSET_BEGINNING);
	TIMING_START(&t_consume, "CONSUME");
	test_consume_msgs("CONSUME", rkt, testid, partition, TEST_NO_SEEK,
			  0, msgcnt, 1);
	TIMING_STOP(&t_consume);
	test_consumer_stop("CONSUME", rkt, partition);

	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);

	TEST_REPORT("{ \"producer\": "
		    " { \"mb_per_sec\": %.2f, \"records_per_sec\": %.2f },"
		    " \"consumer\": "
		    "{ \"mb_per_sec\": %.2f, \"records_per_sec\": %.2f } "
		    "}",
		    (double)
		    (totsize/((double)TIMING_DURATION(&t_produce)/1000000.0f)) /
		    1000000.0f,
		    (float)
		    (msgcnt/((double)TIMING_DURATION(&t_produce)/1000000.0f)),
		    (double)
		    (totsize/((double)TIMING_DURATION(&t_consume)/1000000.0f)) /
		    1000000.0f,
		    (float)
		    (msgcnt/((double)TIMING_DURATION(&t_consume)/1000000.0f)));
	return 0;
}
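TIMING_DURATION() returns microseconds, so the report converts totsize / (duration / 1e6) bytes per second into MB/s by dividing by 1e6 again: producing the default 128 MiB in, say, 10 seconds would report roughly 13.4 MB/s and, at 100-byte messages, about 134,000 records/s.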
Example #23
static void do_offset_test (const char *what, int auto_commit, int auto_store,
			    int async) {
	test_timing_t t_all;
	char groupid[64];
	rd_kafka_t *rk;
	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *tconf;
	int cnt = 0;
	const int extra_cnt = 5;
	rd_kafka_resp_err_t err;
	rd_kafka_topic_partition_list_t *parts;
	rd_kafka_topic_partition_t *rktpar;
	int64_t next_offset = -1;

	test_conf_init(&conf, &tconf, 20);
	test_conf_set(conf, "enable.auto.commit", auto_commit ? "true":"false");
	test_conf_set(conf, "enable.auto.offset.store", auto_store ?"true":"false");
	test_conf_set(conf, "auto.commit.interval.ms", "500");
	rd_kafka_conf_set_offset_commit_cb(conf, offset_commit_cb);
	test_topic_conf_set(tconf, "auto.offset.reset", "smallest");
	test_str_id_generate(groupid, sizeof(groupid));
	test_conf_set(conf, "group.id", groupid);
	rd_kafka_conf_set_default_topic_conf(conf, tconf);

	TEST_SAY(_C_MAG "[ do_offset_test: %s with group.id %s ]\n",
		 what, groupid);

	TIMING_START(&t_all, what);

	expected_offset  = 0;
	committed_offset = -1;

	/* MO:
	 *  - Create consumer.
	 *  - Start consuming from beginning
	 *  - Perform store & commits according to settings
	 *  - Stop storing&committing when half of the messages are consumed,
	 *  - but consume 5 more to check against.
	 *  - Query position.
	 *  - Destroy consumer.
	 *  - Create new consumer with same group.id using stored offsets
	 *  - Should consume the expected message.
	 */

	/* Create kafka instance */
	rk = test_create_handle(RD_KAFKA_CONSUMER, rd_kafka_conf_dup(conf));

	rd_kafka_poll_set_consumer(rk);

	test_consumer_subscribe(rk, topic);

	while (cnt - extra_cnt < msgcnt / 2) {
		rd_kafka_message_t *rkm;

		rkm = rd_kafka_consumer_poll(rk, 10*1000);
		if (!rkm)
			continue;

		if (rkm->err == RD_KAFKA_RESP_ERR__TIMED_OUT)
			TEST_FAIL("%s: Timed out waiting for message %d", what,cnt);
		else if (rkm->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
			rd_kafka_message_destroy(rkm);
			continue;
		} else if (rkm->err)
			TEST_FAIL("%s: Consumer error: %s",
				  what, rd_kafka_message_errstr(rkm));

		/* Offset of next message. */
		next_offset = rkm->offset + 1;

		if (cnt < msgcnt / 2) {
			if (!auto_store) {
				err = rd_kafka_offset_store(rkm->rkt,rkm->partition,
							    rkm->offset);
				if (err)
					TEST_FAIL("%s: offset_store failed: %s\n",
						  what, rd_kafka_err2str(err));
			}
			expected_offset = rkm->offset+1;
			if (!auto_commit) {
				test_timing_t t_commit;
				TIMING_START(&t_commit,
					     async?"commit.async":"commit.sync");
				err = rd_kafka_commit_message(rk, rkm, async);
				TIMING_STOP(&t_commit);
				if (err)
					TEST_FAIL("%s: commit failed: %s\n",
						  what, rd_kafka_err2str(err));
			}

		} else if (auto_store && auto_commit)
			expected_offset = rkm->offset+1;

		rd_kafka_message_destroy(rkm);
		cnt++;
	}

	TEST_SAY("%s: done consuming after %d messages, at offset %"PRId64"\n",
		 what, cnt, expected_offset);

	if ((err = rd_kafka_assignment(rk, &parts)))
		TEST_FAIL("%s: failed to get assignment(): %s\n",
			  what, rd_kafka_err2str(err));

	/* Verify position */
	if ((err = rd_kafka_position(rk, parts)))
		TEST_FAIL("%s: failed to get position(): %s\n",
			  what, rd_kafka_err2str(err));
	if (!(rktpar = rd_kafka_topic_partition_list_find(parts,
							  topic, partition)))
		TEST_FAIL("%s: position(): topic lost\n", what);
	if (rktpar->offset != next_offset)
		TEST_FAIL("%s: Expected position() offset %"PRId64", got %"PRId64,
			  what, next_offset, rktpar->offset);
	TEST_SAY("%s: Position is at %"PRId64", good!\n",
		 what, rktpar->offset);

	/* Pause messages while waiting so we can serve callbacks
	 * without having more messages received. */
	if ((err = rd_kafka_pause_partitions(rk, parts)))
		TEST_FAIL("%s: failed to pause partitions: %s\n",
			  what, rd_kafka_err2str(err));
	rd_kafka_topic_partition_list_destroy(parts);

	/* Fire off any enqueued offset_commit_cb */
	test_consumer_poll_no_msgs(what, rk, testid, 0);

	TEST_SAY("%s: committed_offset %"PRId64", expected_offset %"PRId64"\n",
		 what, committed_offset, expected_offset);

	if (!auto_commit && !async) {
		/* Sync commits should be up to date at this point. */
		if (committed_offset != expected_offset)
			TEST_FAIL("%s: Sync commit: committed offset %"PRId64
				  " should be same as expected offset "
				  "%"PRId64,
				  what, committed_offset, expected_offset);
	} else {

		/* Wait for offset commits to catch up */
		while (committed_offset < expected_offset) {
			TEST_SAYL(3, "%s: Wait for committed offset %"PRId64
				  " to reach expected offset %"PRId64"\n",
				  what, committed_offset, expected_offset);
			test_consumer_poll_no_msgs(what, rk, testid, 1000);
		}

	}

	TEST_SAY("%s: phase 1 complete, %d messages consumed, "
		 "next expected offset is %"PRId64"\n",
		 what, cnt, expected_offset);

        /* Issue #827: cause committed() to return prematurely by specifying
         *             low timeout. The bug (use after free) will only
         *             be caught by valgrind. */
        do {
                parts = rd_kafka_topic_partition_list_new(1);
                rd_kafka_topic_partition_list_add(parts, topic, partition);
                err = rd_kafka_committed(rk, parts, 1);
                rd_kafka_topic_partition_list_destroy(parts);
                TEST_SAY("Issue #827: committed() returned %s\n",
                         rd_kafka_err2str(err));
        } while (err != RD_KAFKA_RESP_ERR__TIMED_OUT);

	/* Query position */
	parts = rd_kafka_topic_partition_list_new(1);
	rd_kafka_topic_partition_list_add(parts, topic, partition);

	err = rd_kafka_committed(rk, parts, tmout_multip(5*1000));
	if (err)
		TEST_FAIL("%s: committed() failed: %s", what, rd_kafka_err2str(err));
	if (!(rktpar = rd_kafka_topic_partition_list_find(parts,
							  topic, partition)))
		TEST_FAIL("%s: committed(): topic lost\n", what);
	if (rktpar->offset != expected_offset)
		TEST_FAIL("%s: Expected committed() offset %"PRId64", got %"PRId64,
			  what, expected_offset, rktpar->offset);
	TEST_SAY("%s: Committed offset is at %"PRId64", good!\n",
		 what, rktpar->offset);

	rd_kafka_topic_partition_list_destroy(parts);
	test_consumer_close(rk);
	rd_kafka_destroy(rk);



	/* Fire up a new consumer and continue from where we left off. */
	TEST_SAY("%s: phase 2: starting new consumer to resume consumption\n",what);
	rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
	rd_kafka_poll_set_consumer(rk);

	test_consumer_subscribe(rk, topic);

	while (cnt < msgcnt) {
		rd_kafka_message_t *rkm;

		rkm = rd_kafka_consumer_poll(rk, 10*1000);
		if (!rkm)
			continue;

		if (rkm->err == RD_KAFKA_RESP_ERR__TIMED_OUT)
			TEST_FAIL("%s: Timed out waiting for message %d", what,cnt);
		else if (rkm->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
			rd_kafka_message_destroy(rkm);
			continue;
		} else if (rkm->err)
			TEST_FAIL("%s: Consumer error: %s",
				  what, rd_kafka_message_errstr(rkm));

		if (rkm->offset != expected_offset)
			TEST_FAIL("%s: Received message offset %"PRId64
				  ", expected %"PRId64" at msgcnt %d/%d\n",
				  what, rkm->offset, expected_offset,
				  cnt, msgcnt);

		rd_kafka_message_destroy(rkm);
		expected_offset++;
		cnt++;
	}


	TEST_SAY("%s: phase 2: complete\n", what);
	test_consumer_close(rk);
	rd_kafka_destroy(rk);
	

	TIMING_STOP(&t_all);
}
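The committed_offset global checked throughout the test is maintained by the offset_commit_cb registered on the conf at the top of the function. A minimal sketch of such a callback, assuming file-scope topic and partition variables as used by the rest of the test:

static void offset_commit_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err,
                              rd_kafka_topic_partition_list_t *offsets,
                              void *opaque) {
        rd_kafka_topic_partition_t *rktpar;

        if (err) /* e.g. RD_KAFKA_RESP_ERR__NO_OFFSET on empty commits */
                return;

        if ((rktpar = rd_kafka_topic_partition_list_find(offsets, topic,
                                                         partition)))
                committed_offset = rktpar->offset;
}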
Example #24
int main_0029_assign_offset (int argc, char **argv) {
	const char *topic = test_mk_topic_name(__FUNCTION__, 1);
	rd_kafka_t *rk;
	rd_kafka_topic_t *rkt;
	rd_kafka_topic_conf_t *tconf;
	rd_kafka_topic_partition_list_t *parts;
        uint64_t testid;
	int i;
	test_timing_t t_simple, t_hl;
	test_msgver_t mv;

	test_conf_init(NULL, &tconf, 20 + (test_session_timeout_ms * 3 / 1000));
	test_topic_conf_set(tconf, "auto.offset.reset", "smallest");

	/* Produce X messages to Y partitions so we get a
	 * nice seekable 0..X offset on each partition. */
	testid = test_id_generate();
	rk = test_create_producer();
	rkt = test_create_producer_topic(rk, topic, NULL);

	parts = rd_kafka_topic_partition_list_new(partitions);

	for (i = 0 ; i < partitions ; i++) {
		test_produce_msgs(rk, rkt, testid, i, 0, msgcnt, NULL, 0);
		rd_kafka_topic_partition_list_add(parts, topic, i)->offset =
			msgcnt / 2;
	}

	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);


	/* Simple consumer */
	TIMING_START(&t_simple, "SIMPLE.CONSUMER");
	rk = test_create_consumer(topic, NULL, NULL, NULL);
	test_msgver_init(&mv, testid);
	test_consumer_assign("SIMPLE.ASSIGN", rk, parts);
	test_consumer_poll("SIMPLE.CONSUME", rk, testid, -1, 0,
			   partitions * (msgcnt / 2), &mv);
	for (i = 0 ; i < partitions ; i++)
		test_msgver_verify_part("HL.MSGS", &mv, TEST_MSGVER_ALL_PART,
					topic, i, msgcnt/2, msgcnt/2);
	test_msgver_clear(&mv);
	test_consumer_close(rk);
	rd_kafka_destroy(rk);
	TIMING_STOP(&t_simple);

	rd_kafka_topic_partition_list_destroy(parts);
	

	/* High-level consumer.
	 * Offsets are set in rebalance callback. */
	TIMING_START(&t_hl, "HL.CONSUMER");
	test_msgver_init(&mv, testid);
	rk = test_create_consumer(topic, rebalance_cb, NULL, NULL);
	test_consumer_subscribe(rk, topic);
	test_consumer_poll("HL.CONSUME", rk, testid, -1, 0,
			   partitions * (msgcnt / 2), &mv);
	for (i = 0 ; i < partitions ; i++)
		test_msgver_verify_part("HL.MSGS", &mv, TEST_MSGVER_ALL_PART,
					topic, i, msgcnt/2, msgcnt/2);
	test_msgver_clear(&mv);
	test_consumer_close(rk);
	rd_kafka_destroy(rk);
	TIMING_STOP(&t_hl);

	rd_kafka_topic_conf_destroy(tconf);

        return 0;
}
Example #25
/**
 * Returns offset of the last message consumed
 */
int64_t test_consume_msgs (const char *what, rd_kafka_topic_t *rkt,
                           uint64_t testid, int32_t partition, int64_t offset,
                           int exp_msg_base, int exp_cnt, int parse_fmt) {
	int cnt = 0;
	int msg_next = exp_msg_base;
	int fails = 0;
	int64_t offset_last = -1;
	test_timing_t t_first, t_all;

	TEST_SAY("%s: consume_msgs: %s [%"PRId32"]: expect msg #%d..%d "
		 "at offset %"PRId64"\n",
		 what, rd_kafka_topic_name(rkt), partition,
		 exp_msg_base, exp_cnt, offset);

	if (offset != TEST_NO_SEEK) {
		rd_kafka_resp_err_t err;
		test_timing_t t_seek;

		TIMING_START(&t_seek, "SEEK");
		if ((err = rd_kafka_seek(rkt, partition, offset, 5000)))
			TEST_FAIL("%s: consume_msgs: %s [%"PRId32"]: "
				  "seek to %"PRId64" failed: %s\n",
				  what, rd_kafka_topic_name(rkt), partition,
				  offset, rd_kafka_err2str(err));
		TIMING_STOP(&t_seek);
		TEST_SAY("%s: seeked to offset %"PRId64"\n", what, offset);
	}

	TIMING_START(&t_first, "FIRST MSG");
	TIMING_START(&t_all, "ALL MSGS");

	while (cnt < exp_cnt) {
		rd_kafka_message_t *rkmessage;
		int msg_id;

		rkmessage = rd_kafka_consume(rkt, partition, 5000);
		if (!rkmessage)
			TEST_FAIL("%s: consume_msgs: %s [%"PRId32"]: "
				  "expected msg #%d (%d/%d): timed out\n",
				  what, rd_kafka_topic_name(rkt), partition,
				  msg_next, cnt, exp_cnt);

		if (rkmessage->err)
			TEST_FAIL("%s: consume_msgs: %s [%"PRId32"]: "
				  "expected msg #%d (%d/%d): got error: %s\n",
				  what, rd_kafka_topic_name(rkt), partition,
				  msg_next, cnt, exp_cnt,
				  rd_kafka_err2str(rkmessage->err));

		if (cnt == 0)
			TIMING_STOP(&t_first);

		if (parse_fmt)
			test_msg_parse(testid, rkmessage->key,
				       rkmessage->key_len, partition, &msg_id);
		else
			msg_id = 0;

		if (test_level >= 3)
			TEST_SAY("%s: consume_msgs: %s [%"PRId32"]: "
				 "got msg #%d at offset %"PRId64
				 " (expect #%d at offset %"PRId64")\n",
				 what, rd_kafka_topic_name(rkt), partition,
				 msg_id, rkmessage->offset,
				 msg_next,
				 offset >= 0 ? offset + cnt : -1);

		if (parse_fmt && msg_id != msg_next) {
			TEST_SAY("%s: consume_msgs: %s [%"PRId32"]: "
				 "expected msg #%d (%d/%d): got msg #%d\n",
				 what, rd_kafka_topic_name(rkt), partition,
				 msg_next, cnt, exp_cnt, msg_id);
			fails++;
		}

		cnt++;
		msg_next++;
		offset_last = rkmessage->offset;

		rd_kafka_message_destroy(rkmessage);
	}

	TIMING_STOP(&t_all);

	if (fails)
		TEST_FAIL("%s: consume_msgs: %s [%"PRId32"]: %d failures\n",
			  what, rd_kafka_topic_name(rkt), partition, fails);

	TEST_SAY("%s: consume_msgs: %s [%"PRId32"]: "
		 "%d/%d messages consumed succesfully\n",
		 what, rd_kafka_topic_name(rkt), partition,
		 cnt, exp_cnt);
	return offset_last;
}
Example #26
int main(int argc, char **argv) {
	int r = 0;
        const char *tests_to_run = NULL; /* all */
        int i;
	test_timing_t t_all;

#ifndef _MSC_VER
        tests_to_run = getenv("TESTS");
#endif

        for (i = 1 ; i < argc ; i++) {
			if (!strcmp(argv[i], "-p"))
				tests_run_in_parallel = 1;
			else if (i == 1)
				tests_to_run = argv[i];
                else {
                        printf("Unknown option: %s\n"
                               "\n"
							   "Usage: %s [options] [<test-match-substr>]\n"
                               "Options:\n"
                               "  -p     Run tests in parallel\n"
                               "\n",
                               argv[0], argv[i]);
                        exit(1);
                }
        }

	test_curr = "<MAIN>";
	test_start = test_clock();

	TEST_SAY("Tests to run: %s\n", tests_to_run ? tests_to_run : "all");

#define RUN_TEST(NAME) do { \
	extern int main_ ## NAME (int, char **); \
        if (!tests_to_run || strstr(# NAME, tests_to_run)) {     \
                r |= run_test(# NAME, main_ ## NAME, argc, argv);	\
        } else { \
                TEST_SAY("================= Skipping test %s "	\
			 "================\n", # NAME );	\
        } \
	} while (0)

	TIMING_START(&t_all, "ALL-TESTS");
	RUN_TEST(0001_multiobj);
	RUN_TEST(0002_unkpart);
	RUN_TEST(0003_msgmaxsize);
	RUN_TEST(0004_conf);
	RUN_TEST(0005_order);
	RUN_TEST(0006_symbols);
	RUN_TEST(0007_autotopic);
	RUN_TEST(0008_reqacks);
	RUN_TEST(0011_produce_batch);
	RUN_TEST(0012_produce_consume);
        RUN_TEST(0013_null_msgs);
        RUN_TEST(0014_reconsume_191);
	RUN_TEST(0015_offsets_seek);
	RUN_TEST(0017_compression);
	RUN_TEST(0018_cgrp_term);

        if (tests_run_in_parallel) {
                while (tests_running_cnt > 0)
                        rd_sleep(1);
        }

	TIMING_STOP(&t_all);

        /* Wait for everything to be cleaned up since broker destroys are
	 * handled in their own threads. */
	test_wait_exit(tests_run_in_parallel ? 10 : 5);

	/* If we haven't failed at this point then
	 * there were no threads leaked. */

	if (r == 0)
		TEST_SAY("\n============== ALL TESTS PASSED ==============\n");
	return r;
}
Example #27
static int nonexist_part (void) {
	const char *topic = test_mk_topic_name(__FUNCTION__, 1);
	rd_kafka_t *rk;
	rd_kafka_topic_partition_list_t *parts;
	rd_kafka_resp_err_t err;
        test_timing_t t_pos;
        const int msgcnt = 1000;
        uint64_t testid;
        int i;
	int it, iterations = 5;

        /* Produce messages */
        testid = test_produce_msgs_easy(topic, 0,
                                        RD_KAFKA_PARTITION_UA, msgcnt);

	for (it = 0 ; it < iterations ; it++) {
		char group_id[32];

		test_str_id_generate(group_id, sizeof(group_id));

		TEST_SAY("Iteration %d/%d, using group.id %s\n", it, iterations,
			 group_id);

		/* Consume messages */
		test_consume_msgs_easy(group_id, topic, testid, -1,
				       msgcnt, NULL);

		/*
		 * Now start a new consumer and query stored offsets (positions)
		 */

		rk = test_create_consumer(group_id, NULL, NULL, NULL, NULL);

		/* Fill in partition set */
		parts = rd_kafka_topic_partition_list_new(2);
		/* existing */
		rd_kafka_topic_partition_list_add(parts, topic, 0);
		/* non-existing */
		rd_kafka_topic_partition_list_add(parts, topic, 123);


		TIMING_START(&t_pos, "COMMITTED");
		err = rd_kafka_committed(rk, parts, tmout_multip(5000));
		TIMING_STOP(&t_pos);
		if (err)
			TEST_FAIL("Failed to acquire committed offsets: %s\n",
				  rd_kafka_err2str(err));

		for (i = 0 ; i < parts->cnt ; i++) {
			TEST_SAY("%s [%"PRId32"] returned offset %"PRId64
				 ": %s\n",
				 parts->elems[i].topic,
				 parts->elems[i].partition,
				 parts->elems[i].offset,
				 rd_kafka_err2str(parts->elems[i].err));
			if (parts->elems[i].partition == 0 &&
			    parts->elems[i].offset <= 0)
				TEST_FAIL("Partition %"PRId32" should have a "
					  "proper offset, not %"PRId64"\n",
					  parts->elems[i].partition,
					  parts->elems[i].offset);
			else if (parts->elems[i].partition == 123 &&
				 parts->elems[i].offset !=
				 RD_KAFKA_OFFSET_INVALID)
				TEST_FAIL("Partition %"PRId32
					  " should have failed\n",
					  parts->elems[i].partition);
		}

		rd_kafka_topic_partition_list_destroy(parts);

		test_consumer_close(rk);

		/* Hangs if bug isn't fixed */
		rd_kafka_destroy(rk);
	}

        return 0;
}
Example #28
int main(int argc, char **argv) {
        const char *tests_to_run = NULL; /* all */
        int test_flags = 0;
        int i, r;
	test_timing_t t_all;

	mtx_init(&test_mtx, mtx_plain);

        test_init();

#ifndef _MSC_VER
        tests_to_run = getenv("TESTS");
#endif

        for (i = 1 ; i < argc ; i++) {
                if (!strncmp(argv[i], "-p", 2) && strlen(argv[i]) > 2)
                        test_concurrent_max = strtod(argv[i]+2, NULL);
                else if (!strcmp(argv[i], "-l"))
                        test_flags |= TEST_F_LOCAL;
                else if (!strcmp(argv[i], "-a"))
                        test_assert_on_fail = 1;
                else if (*argv[i] != '-')
                        tests_to_run = argv[i];
                else {
                        printf("Unknown option: %s\n"
                               "\n"
                               "Usage: %s [options] [<test-match-substr>]\n"
                               "Options:\n"
                               "  -p<N>  Run N tests in parallel\n"
                               "  -l     Only run local tests (no broker needed)\n"
                               "  -a     Assert on failures\n"
                               "\n",
                               argv[0], argv[i]);
                        exit(1);
                }
        }

        test_curr = &tests[0];
        test_curr->state = TEST_PASSED;
        test_curr->start = test_clock();

	TEST_SAY("Tests to run: %s\n", tests_to_run ? tests_to_run : "all");
        TEST_SAY("Test filter: %s\n",
                 (test_flags & TEST_F_LOCAL) ?
                 "local tests only" : "no filter");
        TEST_SAY("Action on test failure: %s\n",
                 test_assert_on_fail ? "assert crash" : "continue other tests");

        test_timeout_set(20);

        TIMING_START(&t_all, "ALL-TESTS");

        run_tests(tests_to_run, test_flags, argc, argv);

        TEST_LOCK();
        while (tests_running_cnt > 0) {
                struct test *test;

                TEST_SAY("%d test(s) running:", tests_running_cnt);
                for (test = tests ; test->name ; test++)
                        if (test->state == TEST_RUNNING)
                                TEST_SAY0(" %s", test->name);
                TEST_SAY0("\n");
                TEST_UNLOCK();

                rd_sleep(1);
                TEST_LOCK();
        }

	TIMING_STOP(&t_all);

        test_curr = &tests[0];
        test_curr->duration = test_clock() - test_curr->start;

        TEST_UNLOCK();

        /* Wait for everything to be cleaned up since broker destroys are
	 * handled in their own threads. */
	test_wait_exit(10);

        r = test_summary(1/*lock*/) ? 1 : 0;

	/* If we haven't failed at this point then
	 * there were no threads leaked. */
        if (r == 0)
                TEST_SAY("\n============== ALL TESTS PASSED ==============\n");

	return r;
}
Example #29
void fcs_gridsort_resort_create(fcs_gridsort_resort_t *gridsort_resort, fcs_gridsort_t *gs, MPI_Comm comm)
{
  int comm_size, comm_rank;

  fcs_int i;

  fcs_back_x_elements_t sin, sout;

  fcs_back_x_tproc_t tproc;

  fcs_resort_index_t *resort_indices;
  
#ifdef ALLTOALLV_PACKED
  fcs_int local_packed, global_packed, original_packed;
#endif

#ifdef DO_TIMING
  double t[4] = { 0, 0, 0, 0 };
#endif


  TIMING_SYNC(comm); TIMING_START(t[0]);

  MPI_Comm_size(comm, &comm_size);
  MPI_Comm_rank(comm, &comm_rank);

  fcs_resort_create(gridsort_resort);

  fcs_resort_set_original_particles(*gridsort_resort, gs->noriginal_particles);
  fcs_resort_set_sorted_particles(*gridsort_resort, gs->nresort_particles);

  if (gs->nresort_particles < 0)
  {
    fcs_resort_set_sorted_particles(*gridsort_resort, gs->noriginal_particles);
    return;
  }

  TIMING_SYNC(comm); TIMING_START(t[1]);

  resort_indices = fcs_resort_indices_alloc(gs->nresort_particles);

  fcs_resort_indices_init(gs->nresort_particles, resort_indices, comm_rank);

  TIMING_SYNC(comm); TIMING_STOP(t[1]);

/*  printf("nresort_particles = %" FCS_LMOD_INT "d\n", gs->nresort_particles);
  for (i = 0; i < gs->nresort_particles; ++i)
  {
    printf(" %" FCS_LMOD_INT "d: " GRIDSORT_INDEX_STR "  " FCS_RESORT_INDEX_STR "\n",
      i, GRIDSORT_INDEX_PARAM(gs->sorted_indices[i]), FCS_RESORT_INDEX_PARAM(resort_indices[i]));
  }*/
  
  fcs_back_x_SL_DEFCON(mpi.rank) = comm_rank;

  fcs_back_x_mpi_datatypes_init();

  fcs_back_x_elem_set_size(&sin, gs->nresort_particles);
  fcs_back_x_elem_set_max_size(&sin, gs->nresort_particles);
  fcs_back_x_elem_set_keys(&sin, gs->sorted_indices);
  fcs_back_x_elem_set_data(&sin, resort_indices);

  fcs_back_x_elem_set_size(&sout, 0);
  fcs_back_x_elem_set_max_size(&sout, 0);
  fcs_back_x_elem_set_keys(&sout, NULL);
  fcs_back_x_elem_set_data(&sout, NULL);

  fcs_back_x_tproc_create_tproc(&tproc, gridsort_fcs_back_x_tproc, fcs_back_x_TPROC_RESET_NULL, fcs_back_x_TPROC_EXDEF_NULL);

#ifdef GRIDSORT_RESORT_PROCLIST
  if (gs->procs) fcs_back_x_tproc_set_proclists(&tproc, gs->nprocs, gs->procs, gs->nprocs, gs->procs, comm_size, comm_rank, comm);
#endif

#ifdef ALLTOALLV_PACKED
  local_packed = ALLTOALLV_PACKED(comm_size, sin.size);
  MPI_Allreduce(&local_packed, &global_packed, 1, FCS_MPI_INT, MPI_SUM, comm);
  original_packed = fcs_back_x_SL_DEFCON(meas.packed); fcs_back_x_SL_DEFCON(meas.packed) = (global_packed > 0);
#endif

  TIMING_SYNC(comm); TIMING_START(t[2]);

  fcs_back_x_mpi_elements_alltoall_specific(&sin, &sout, NULL, tproc, NULL, comm_size, comm_rank, comm);

  TIMING_SYNC(comm); TIMING_STOP(t[2]);

#ifdef ALLTOALLV_PACKED
  fcs_back_x_SL_DEFCON(meas.packed) = original_packed;
#endif

  fcs_back_x_tproc_free(&tproc);

  fcs_resort_indices_free(resort_indices);

  if (gs->noriginal_particles != sout.size)
    fprintf(stderr, "%d: error: wanted %" FCS_LMOD_INT "d particles, but got only %" fcs_back_x_slint_fmt "!\n", comm_rank, gs->noriginal_particles, sout.size);

  fcs_back_x_mpi_datatypes_release();

/*  printf("noriginal_particles = %" fcs_back_x_slint_fmt "\n", sout.size);
  for (i = 0; i < sout.size; ++i)
  {
    printf(" %" FCS_LMOD_INT "d: " GRIDSORT_INDEX_STR "  " FCS_RESORT_INDEX_STR "\n",
      i, GRIDSORT_INDEX_PARAM(sout.keys[i]), FCS_RESORT_INDEX_PARAM(sout.data0[i]));
  }*/

  fcs_resort_alloc_indices(*gridsort_resort);
  
  resort_indices = fcs_resort_get_indices(*gridsort_resort);

  TIMING_SYNC(comm); TIMING_START(t[3]);

  for (i = 0; i < gs->noriginal_particles; ++i) resort_indices[GRIDSORT_INDEX_GET_POS(sout.keys[i])] = sout.data0[i];

  TIMING_SYNC(comm); TIMING_STOP(t[3]);

  fcs_back_x_elements_free(&sout);

#ifdef GRIDSORT_RESORT_PROCLIST
  if (gs->procs) fcs_resort_set_proclists(*gridsort_resort, gs->nprocs, gs->procs);
#endif

  TIMING_SYNC(comm); TIMING_STOP(t[0]);

  TIMING_CMD(
    if (comm_rank == 0)
      printf(TIMING_PRINT_PREFIX "fcs_gridsort_resort_create: %f  %f  %f  %f\n", t[0], t[1], t[2], t[3]);
  );
}
Example #30
int main_0029_assign_offset (int argc, char **argv) {
	const char *topic = test_mk_topic_name(__FUNCTION__, 1);
	rd_kafka_t *rk;
	rd_kafka_topic_t *rkt;
	rd_kafka_topic_partition_list_t *parts;
        uint64_t testid;
	int i;
	test_timing_t t_simple, t_hl;
	test_msgver_t mv;

	test_conf_init(NULL, NULL, 20 + (test_session_timeout_ms * 3 / 1000));

	/* Produce X messages to Y partitions so we get a
	 * nice seekable 0..X offset on each partition. */
	testid = test_id_generate();
	rk = test_create_producer();
	rkt = test_create_producer_topic(rk, topic, NULL);

	parts = rd_kafka_topic_partition_list_new(partitions);

	for (i = 0 ; i < partitions ; i++) {
		test_produce_msgs(rk, rkt, testid, i, 0, msgcnt, NULL, 0);
		/* Set start offset */
		rd_kafka_topic_partition_list_add(parts, topic, i)->offset =
			msgcnt / 2;
	}

	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);


	/* Simple consumer */
	TIMING_START(&t_simple, "SIMPLE.CONSUMER");
	rk = test_create_consumer(topic, NULL, NULL, NULL);
	test_msgver_init(&mv, testid);
	test_consumer_assign("SIMPLE.ASSIGN", rk, parts);
	test_consumer_poll("SIMPLE.CONSUME", rk, testid, -1, 0,
			   partitions * (msgcnt / 2), &mv);
	for (i = 0 ; i < partitions ; i++)
		test_msgver_verify_part("HL.MSGS", &mv, TEST_MSGVER_ALL_PART,
					topic, i, msgcnt/2, msgcnt/2);
	test_msgver_clear(&mv);
	test_consumer_close(rk);
	rd_kafka_destroy(rk);
	TIMING_STOP(&t_simple);

	rd_kafka_topic_partition_list_destroy(parts);
	

	/* High-level consumer: method 1
	 * Offsets are set in rebalance callback. */
	if (test_broker_version >= TEST_BRKVER(0,9,0,0)) {
		reb_method = REB_METHOD_1;
		TIMING_START(&t_hl, "HL.CONSUMER");
		test_msgver_init(&mv, testid);
		rk = test_create_consumer(topic, rebalance_cb, NULL, NULL);
		test_consumer_subscribe(rk, topic);
		test_consumer_poll("HL.CONSUME", rk, testid, -1, 0,
				   partitions * (msgcnt / 2), &mv);
		for (i = 0 ; i < partitions ; i++)
			test_msgver_verify_part("HL.MSGS", &mv,
						TEST_MSGVER_ALL_PART,
						topic, i, msgcnt/2, msgcnt/2);
		test_msgver_clear(&mv);
		test_consumer_close(rk);
		rd_kafka_destroy(rk);
		TIMING_STOP(&t_hl);


		/* High-level consumer: method 2:
		 * first two partitions are with fixed absolute offset, rest are
		 * auto offset (stored, which is now at end). 
		 * Offsets are set in rebalance callback. */
		reb_method = REB_METHOD_2;
		TIMING_START(&t_hl, "HL.CONSUMER2");
		test_msgver_init(&mv, testid);
		rk = test_create_consumer(topic, rebalance_cb, NULL, NULL);
		test_consumer_subscribe(rk, topic);
		test_consumer_poll("HL.CONSUME2", rk, testid, partitions, 0,
				   2 * (msgcnt / 2), &mv);
		for (i = 0 ; i < partitions ; i++) {
			if (i < 2)
				test_msgver_verify_part("HL.MSGS2.A", &mv,
							TEST_MSGVER_ALL_PART,
							topic, i, msgcnt/2,
							msgcnt/2);
		}
		test_msgver_clear(&mv);
		test_consumer_close(rk);
		rd_kafka_destroy(rk);
		TIMING_STOP(&t_hl);
	}

        return 0;
}