int main_0028_long_topicnames (int argc, char **argv) {
        const int msgcnt = 1000;
        uint64_t testid;
	char topic[256];
	rd_kafka_t *rk_c;

	memset(topic, 'a', sizeof(topic)-1);
	topic[sizeof(topic)-1] = '\0';

	strncpy(topic, test_mk_topic_name(topic, 1), sizeof(topic)-1);

	TEST_SAY("Using topic name of %d bytes: %s\n",
		 (int)strlen(topic), topic);

	/* Create topic */
	test_create_topic(topic, 1, 1);

	/* First try a non-verifying consumer. The consumer has been known
	 * to crash when the broker bug kicks in. */
	rk_c = test_create_consumer(topic, NULL, NULL, NULL, NULL);
	test_consumer_subscribe(rk_c, topic);
	test_consumer_poll_no_msgs("consume.nomsgs", rk_c, 0, 5000);
	test_consumer_close(rk_c);

        /* Produce messages */
        testid = test_produce_msgs_easy(topic, 0,
                                        RD_KAFKA_PARTITION_UA, msgcnt);

	/* Consume messages */
	test_consume_msgs_easy(NULL, topic, testid, msgcnt);

        return 0;
}
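
The test helpers above wrap the plain librdkafka consumer API. As a point of reference, a minimal sketch of what a subscribe helper of this kind might boil down to (the helper name and the TEST_FAIL reporting are illustrative, not the actual test_consumer_subscribe implementation):

static void my_subscribe_sketch (rd_kafka_t *rk, const char *topic) {
        rd_kafka_topic_partition_list_t *tlist;
        rd_kafka_resp_err_t err;

        /* Build a one-element topic list; the partition is ignored
         * by subscribe(). */
        tlist = rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add(tlist, topic, RD_KAFKA_PARTITION_UA);

        err = rd_kafka_subscribe(rk, tlist);
        if (err)
                TEST_FAIL("subscribe to %s failed: %s",
                          topic, rd_kafka_err2str(err));

        rd_kafka_topic_partition_list_destroy(tlist);
}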
Example #2
static void do_test_non_exist_and_partchange (void) {
	char *topic_a = rd_strdup(test_mk_topic_name("topic_a", 1));
	rd_kafka_t *rk;
	rd_kafka_conf_t *conf;
	rd_kafka_queue_t *queue;

	/**
	 * Test #1:
	 * - Subscribe to non-existing topic.
	 * - Verify empty assignment
	 * - Create topic
	 * - Verify new assignment containing topic
	 */
	TEST_SAY("#1 & #2 testing\n");
	test_conf_init(&conf, NULL, 60);

	/* Decrease metadata interval to speed up topic change discovery. */
	test_conf_set(conf, "metadata.max.age.ms", "5000");

	rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE);
	rk = test_create_consumer(test_str_id_generate_tmp(),
				  NULL, conf, NULL, NULL);
	queue = rd_kafka_queue_get_consumer(rk);

	TEST_SAY("#1: Subscribing to %s\n", topic_a);
	test_consumer_subscribe(rk, topic_a);

	/* Should not see a rebalance since no topics are matched. */
	await_no_rebalance("#1: empty", rk, queue, 10000);

	TEST_SAY("#1: creating topic %s\n", topic_a);
	test_create_topic(topic_a, 2, 1);

	await_assignment("#1: proper", rk, queue, 1,
			 topic_a, 2);


	/**
	 * Test #2 (continue with #1 consumer)
	 * - Increase the partition count
	 * - Verify updated assignment
	 */
	test_kafka_topics("--alter --topic %s --partitions 4",
			  topic_a);
	await_revoke("#2", rk, queue);

	await_assignment("#2: more partitions", rk, queue, 1,
			 topic_a, 4);

	test_consumer_close(rk);
	rd_kafka_queue_destroy(queue);
	rd_kafka_destroy(rk);

	rd_free(topic_a);
}
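
The await_assignment/await_no_rebalance/await_revoke helpers are not part of this snippet. They rely on the consumer's event queue delivering RD_KAFKA_EVENT_REBALANCE events (enabled via rd_kafka_conf_set_events() above). A rough, hedged sketch of that pattern, with the helper name and return convention being assumptions:

/* Hypothetical: wait for the next rebalance event and apply it.
 * Returns the size of the new assignment, or 0 on revoke. */
static int wait_rebalance_event_sketch (rd_kafka_t *rk,
                                        rd_kafka_queue_t *queue,
                                        int timeout_ms) {
        rd_kafka_event_t *rkev;
        int cnt = 0;

        rkev = rd_kafka_queue_poll(queue, timeout_ms);
        if (!rkev)
                TEST_FAIL("Timed out waiting for rebalance event");

        TEST_ASSERT(rd_kafka_event_type(rkev) == RD_KAFKA_EVENT_REBALANCE,
                    "Expected REBALANCE event, got %s",
                    rd_kafka_event_name(rkev));

        if (rd_kafka_event_error(rkev) ==
            RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
                rd_kafka_topic_partition_list_t *parts =
                        rd_kafka_event_topic_partition_list(rkev);
                cnt = parts->cnt;
                rd_kafka_assign(rk, parts);
        } else {
                /* REVOKE_PARTITIONS (or error): drop the assignment. */
                rd_kafka_assign(rk, NULL);
        }

        rd_kafka_event_destroy(rkev);
        return cnt;
}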
Example #3
/**
 * Trigger an empty cgrp commit (issue #803)
 */
static void do_empty_commit (void) {
	rd_kafka_t *rk;
	char group_id[64];
	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *tconf;
	rd_kafka_resp_err_t err, expect;

	test_conf_init(&conf, &tconf, 20);
	test_conf_set(conf, "enable.auto.commit", "false");
	test_topic_conf_set(tconf, "auto.offset.reset", "earliest");
	test_str_id_generate(group_id, sizeof(group_id));

	TEST_SAY(_C_MAG "[ do_empty_commit group.id %s ]\n", group_id);

	rk = test_create_consumer(group_id, NULL, conf, tconf, NULL);

	test_consumer_subscribe(rk, topic);

	test_consumer_poll("consume", rk, testid, -1, -1, 100, NULL);

	TEST_SAY("First commit\n");
	expect = RD_KAFKA_RESP_ERR_NO_ERROR;
	err = rd_kafka_commit_queue(rk, NULL, NULL,
				    empty_offset_commit_cb, &expect);
	if (err != expect)
		TEST_FAIL("commit failed: %s", rd_kafka_err2str(err));
	else
		TEST_SAY("First commit returned %s\n",
			 rd_kafka_err2str(err));

	TEST_SAY("Second commit, should be empty\n");
	expect = RD_KAFKA_RESP_ERR__NO_OFFSET;
	err = rd_kafka_commit_queue(rk, NULL, NULL,
				    empty_offset_commit_cb, &expect);
	if (err != RD_KAFKA_RESP_ERR__NO_OFFSET)
		TEST_FAIL("unexpected commit result, wanted NO_OFFSET, got: %s",
			  rd_kafka_err2str(err));
	else
		TEST_SAY("Second commit returned %s\n",
			 rd_kafka_err2str(err));

	test_consumer_close(rk);

	rd_kafka_destroy(rk);
}
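
The empty_offset_commit_cb passed to rd_kafka_commit_queue() is defined elsewhere in the test. A minimal sketch, assuming it merely compares the commit result against the expected error handed in through the opaque pointer (the real callback may verify more):

static void empty_offset_commit_cb_sketch (rd_kafka_t *rk,
                                           rd_kafka_resp_err_t err,
                                           rd_kafka_topic_partition_list_t *offsets,
                                           void *opaque) {
        rd_kafka_resp_err_t expected = *(rd_kafka_resp_err_t *)opaque;

        TEST_SAY("commit callback: result %s (expected %s), %d partition(s)\n",
                 rd_kafka_err2str(err), rd_kafka_err2str(expected),
                 offsets ? offsets->cnt : 0);

        TEST_ASSERT(err == expected,
                    "expected commit result %s, not %s",
                    rd_kafka_err2str(expected), rd_kafka_err2str(err));
}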
Example #4
static void do_test_consumer (const char *topic) {

        rd_kafka_conf_t *conf;
        int i;
        rd_kafka_t *rk;

        TEST_SAY(_C_MAG "[ %s ]\n" _C_CLR, __FUNCTION__);

        test_conf_init(&conf, NULL, 0);

        rd_kafka_conf_interceptor_add_on_new(conf, "on_new_consumer",
                                             on_new_consumer, NULL);

        test_conf_set(conf, "auto.offset.reset", "earliest");

        /* Create consumer */
        rk = test_create_consumer(topic, NULL, conf, NULL);

        test_consumer_subscribe(rk, topic);

        /* Consume messages (-1 for the one that failed producing) */
        test_consumer_poll("interceptors.consume", rk, 0, -1, -1, msgcnt-1,
                           NULL);

        /* Verify on_consume */
        for (i = 0 ; i < msgcnt-1 ; i++) {
                struct msg_state *msg = &msgs[i];
                msg_verify_ic_cnt(msg, "on_consume", msg->bits[_ON_CONSUME],
                                  consumer_ic_cnt);
        }

        /* Verify that the produce-failed message didn't have
         * interceptors called */
        msg_verify_ic_cnt(&msgs[msgcnt-1], "on_consume",
                          msgs[msgcnt-1].bits[_ON_CONSUME], 0);

        test_consumer_close(rk);

        verify_ic_cnt("on_commit", on_commit_bits, consumer_ic_cnt);

        rd_kafka_destroy(rk);
}
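
The interceptor plumbing (on_new_consumer and the per-message counters checked by msg_verify_ic_cnt) is outside this snippet. A hedged sketch of the general shape such callbacks take; the counter bookkeeping of the real test is replaced with a log line, and the names are illustrative:

/* Hypothetical on_consume interceptor: invoked for each consumed message. */
static rd_kafka_resp_err_t my_on_consume (rd_kafka_t *rk,
                                          rd_kafka_message_t *rkmessage,
                                          void *ic_opaque) {
        TEST_SAYL(4, "on_consume: offset %"PRId64"\n", rkmessage->offset);
        return RD_KAFKA_RESP_ERR_NO_ERROR;
}

/* on_new: attach instance-level interceptors when the consumer is created. */
static rd_kafka_resp_err_t my_on_new_consumer (rd_kafka_t *rk,
                                               const rd_kafka_conf_t *conf,
                                               void *ic_opaque,
                                               char *errstr,
                                               size_t errstr_size) {
        return rd_kafka_interceptor_add_on_consume(rk, "my_on_consume",
                                                   my_on_consume, NULL);
}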
Example #5
int main_0093_holb_consumer (int argc, char **argv) {
        const char *topic = test_mk_topic_name("0093_holb_consumer", 1);
        int64_t testid;
        const int msgcnt = 100;
        struct _consumer c[_CONSUMER_CNT] = RD_ZERO_INIT;
        rd_kafka_conf_t *conf;

        testid = test_id_generate();

        test_conf_init(&conf, NULL, 60);

        test_create_topic(topic, 1, 1);

        test_produce_msgs_easy(topic, testid, 0, msgcnt);

        test_conf_set(conf, "session.timeout.ms", "6000");
        test_conf_set(conf, "max.poll.interval.ms", "20000");
        test_conf_set(conf, "socket.timeout.ms", "3000");
        test_conf_set(conf, "auto.offset.reset", "earliest");
        /* Trigger other requests often */
        test_conf_set(conf, "topic.metadata.refresh.interval.ms", "500");
        rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);

        rd_kafka_conf_set_opaque(conf, &c[0]);
        c[0].rk = test_create_consumer(topic, NULL,
                                       rd_kafka_conf_dup(conf), NULL);

        rd_kafka_conf_set_opaque(conf, &c[1]);
        c[1].rk = test_create_consumer(topic, NULL, conf, NULL);

        test_consumer_subscribe(c[0].rk, topic);

        /* c0: assign */
        c[0].max_rebalance_cnt = 1;

        /* c1: none, hasn't joined yet */
        c[1].max_rebalance_cnt = 0;

        TEST_SAY("Waiting for c[0] assignment\n");
        while (1) {
                rd_kafka_topic_partition_list_t *parts = NULL;

                do_consume(&c[0], 1/*1s*/);

                if (rd_kafka_assignment(c[0].rk, &parts) !=
                    RD_KAFKA_RESP_ERR_NO_ERROR ||
                    !parts || parts->cnt == 0) {
                        if (parts)
                                rd_kafka_topic_partition_list_destroy(parts);
                        continue;
                }

                TEST_SAY("%s got assignment of %d partition(s)\n",
                         rd_kafka_name(c[0].rk), parts->cnt);
                rd_kafka_topic_partition_list_destroy(parts);
                break;
        }

        TEST_SAY("c[0] got assignment, consuming..\n");
        do_consume(&c[0], 5/*5s*/);

        TEST_SAY("Joining second consumer\n");
        test_consumer_subscribe(c[1].rk, topic);

        /* Just poll second consumer for 10s, the rebalance will not
         * finish until the first consumer polls */
        do_consume(&c[1], 10/*10s*/);

        /* c0: the next call to do_consume/poll will trigger
         *     its rebalance callback, first revoke then assign. */
        c[0].max_rebalance_cnt += 2;
        /* c1: first rebalance */
        c[1].max_rebalance_cnt++;

        TEST_SAY("Expected rebalances: c[0]: %d/%d, c[1]: %d/%d\n",
                 c[0].rebalance_cnt, c[0].max_rebalance_cnt,
                 c[1].rebalance_cnt, c[1].max_rebalance_cnt);

        /* Let rebalances kick in, then consume messages. */
        while (c[0].cnt + c[1].cnt < msgcnt) {
                do_consume(&c[0], 0);
                do_consume(&c[1], 0);
        }

        /* Allow the extra revoke rebalance on close() */
        c[0].max_rebalance_cnt++;
        c[1].max_rebalance_cnt++;

        test_consumer_close(c[0].rk);
        test_consumer_close(c[1].rk);

        rd_kafka_destroy(c[0].rk);
        rd_kafka_destroy(c[1].rk);

        return 0;
}
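
do_consume() and struct _consumer are defined elsewhere in 0093; a rough sketch of a poll-for-N-seconds helper along those lines (the struct fields shown are the ones referenced above, everything else is an assumption):

struct _consumer_sketch {
        rd_kafka_t *rk;
        int cnt;                /* messages consumed */
};

/* Hypothetical: poll the consumer for roughly timeout_s seconds,
 * counting messages and warning on (non-fatal) errors. */
static void do_consume_sketch (struct _consumer_sketch *c, int timeout_s) {
        int64_t ts_end = test_clock() + (int64_t)timeout_s * 1000000;

        do {
                rd_kafka_message_t *rkm = rd_kafka_consumer_poll(c->rk, 100);

                if (!rkm)
                        continue;
                if (rkm->err)
                        TEST_WARN("consume error: %s\n",
                                  rd_kafka_message_errstr(rkm));
                else
                        c->cnt++;
                rd_kafka_message_destroy(rkm);
        } while (test_clock() < ts_end);
}
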
int main_0089_max_poll_interval (int argc, char **argv) {
        const char *topic = test_mk_topic_name("0089_max_poll_interval", 1);
        uint64_t testid;
        const int msgcnt = 10;
        rd_kafka_t *c[2];
        rd_kafka_conf_t *conf;
        int64_t ts_next[2] = { 0, 0 };
        int64_t ts_exp_msg[2] = { 0, 0 };
        int cmsgcnt = 0;
        int i;
        int bad = -1;

        testid = test_id_generate();

        test_create_topic(topic, 1, 1);

        test_produce_msgs_easy(topic, testid, -1, msgcnt);

        test_conf_init(&conf, NULL, 60);

        test_conf_set(conf, "session.timeout.ms", "6000");
        test_conf_set(conf, "max.poll.interval.ms", "10000" /*10s*/);
        test_conf_set(conf, "auto.offset.reset", "earliest");

        c[0] = test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL);
        c[1] = test_create_consumer(topic, NULL, conf, NULL);

        test_consumer_subscribe(c[0], topic);
        test_consumer_subscribe(c[1], topic);

        while (1) {
                for (i = 0 ; i < 2 ; i++) {
                        int64_t now;
                        rd_kafka_message_t *rkm;

                        /* Consumer is "processing" */
                        if (ts_next[i] > test_clock())
                                continue;

                        rkm = rd_kafka_consumer_poll(c[i], 100);
                        if (!rkm)
                                continue;

                        if (rkm->err) {
                                TEST_WARN("Consumer %d error: %s: "
                                          "ignoring\n", i,
                                          rd_kafka_message_errstr(rkm));
                                continue;
                        }

                        now = test_clock();

                        cmsgcnt++;

                        TEST_SAY("Consumer %d received message (#%d) "
                                 "at offset %"PRId64"\n",
                                 i, cmsgcnt, rkm->offset);

                        if (ts_exp_msg[i]) {
                                /* This consumer is expecting a message
                                 * after a certain time, namely after the
                                 * rebalance following max.poll.interval.ms being
                                 * exceeded in the other consumer */
                                TEST_ASSERT(now > ts_exp_msg[i],
                                            "Consumer %d: did not expect "
                                            "message for at least %dms",
                                            i,
                                            (int)((ts_exp_msg[i] - now)/1000));
                                TEST_ASSERT(now < ts_exp_msg[i] + 10000*1000,
                                            "Consumer %d: expected message "
                                            "within 10s, not after %dms",
                                            i,
                                            (int)((now - ts_exp_msg[i])/1000));
                                TEST_SAY("Consumer %d: received message "
                                         "at offset %"PRId64
                                         " after rebalance\n",
                                         i, rkm->offset);

                                rd_kafka_message_destroy(rkm);
                                goto done;

                        } else if (cmsgcnt == 1) {
                                /* Process this message for 20s */
                                ts_next[i] = now + (20000 * 1000);

                                /* Exp message on other consumer after
                                 * max.poll.interval.ms */
                                ts_exp_msg[i^1] = now + (10000 * 1000);

                                /* This is the bad consumer */
                                bad = i;

                                TEST_SAY("Consumer %d processing message at "
                                         "offset %"PRId64"\n",
                                         i, rkm->offset);
                                rd_kafka_message_destroy(rkm);
                        } else {
                                rd_kafka_message_destroy(rkm);

                                TEST_FAIL("Consumer %d did not expect "
                                          "a message", i);
                        }
                }
        }

 done:

        TEST_ASSERT(bad != -1, "Bad consumer not set");

        /* Wait for error ERR__MAX_POLL_EXCEEDED on the bad consumer. */
        while (1) {
                rd_kafka_message_t *rkm;

                rkm = rd_kafka_consumer_poll(c[bad], 1000);
                TEST_ASSERT(rkm, "Expected consumer result within 1s");

                TEST_ASSERT(rkm->err, "Did not expect message on bad consumer");

                TEST_SAY("Consumer error: %s: %s\n",
                         rd_kafka_err2name(rkm->err),
                         rd_kafka_message_errstr(rkm));

                if (rkm->err == RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED) {
                        rd_kafka_message_destroy(rkm);
                        break;
                }

                rd_kafka_message_destroy(rkm);
        }


        for (i = 0 ; i < 2 ; i++)
                rd_kafka_destroy_flags(c[i],
                                       RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE);
        return 0;
}
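
For context: when max.poll.interval.ms is exceeded, librdkafka leaves the group and delivers RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED through the consumer queue, as the loop above checks; the instance rejoins the group the next time the application polls. A minimal hedged sketch of resuming after the error:

/* Sketch: once the error has been observed, simply keep polling;
 * the consumer rejoins the group and partitions are re-assigned. */
static void resume_after_max_poll_sketch (rd_kafka_t *rk) {
        rd_kafka_message_t *rkm = rd_kafka_consumer_poll(rk, 1000);

        if (rkm && rkm->err == RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED)
                TEST_SAY("max.poll.interval.ms exceeded, "
                         "rejoining on subsequent polls\n");
        if (rkm)
                rd_kafka_message_destroy(rkm);
}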
Example #7
static void do_test_regex (void) {
	char *base_topic = rd_strdup(test_mk_topic_name("topic", 1));
	char *topic_b = rd_strdup(tsprintf("%s_b", base_topic));
	char *topic_c = rd_strdup(tsprintf("%s_c", base_topic));
	char *topic_d = rd_strdup(tsprintf("%s_d", base_topic));
	char *topic_e = rd_strdup(tsprintf("%s_e", base_topic));
	rd_kafka_t *rk;
	rd_kafka_conf_t *conf;
	rd_kafka_queue_t *queue;

	/**
	 * Regex test:
	 * - Create topic b
	 * - Subscribe to b & d & e
	 * - Verify b assignment
	 * - Create topic c
	 * - Verify no rebalance
	 * - Create topic d
	 * - Verify b & d assignment
	 */
	TEST_SAY("Regex testing\n");
	test_conf_init(&conf, NULL, 60);

	/* Decrease metadata interval to speed up topic change discovery. */
	test_conf_set(conf, "metadata.max.age.ms", "5000");

	rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE);
	rk = test_create_consumer(test_str_id_generate_tmp(),
				  NULL, conf, NULL, NULL);
	queue = rd_kafka_queue_get_consumer(rk);

	TEST_SAY("Regex: creating topic %s (subscribed)\n", topic_b);
	test_create_topic(topic_b, 2, 1);
	rd_sleep(1); // FIXME: do check&wait loop instead

	TEST_SAY("Regex: Subscribing to %s & %s & %s\n",
		 topic_b, topic_d, topic_e);
	test_consumer_subscribe(rk, tsprintf("^%s_[bde]$", base_topic));

	await_assignment("Regex: just one topic exists", rk, queue, 1,
			 topic_b, 2);

	TEST_SAY("Regex: creating topic %s (not subscribed)\n", topic_c);
	test_create_topic(topic_c, 4, 1);

	/* Should not see a rebalance since no topics are matched. */
	await_no_rebalance("Regex: empty", rk, queue, 10000);

	TEST_SAY("Regex: creating topic %s (subscribed)\n", topic_d);
	test_create_topic(topic_d, 1, 1);

	await_revoke("Regex: rebalance after topic creation", rk, queue);

	await_assignment("Regex: two topics exist", rk, queue, 2,
			 topic_b, 2,
			 topic_d, 1);

	test_consumer_close(rk);
	rd_kafka_queue_destroy(queue);
	rd_kafka_destroy(rk);

	rd_free(base_topic);
	rd_free(topic_b);
	rd_free(topic_c);
	rd_free(topic_d);
	rd_free(topic_e);
}
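
The rd_sleep(1) with its FIXME above could be replaced by a check-and-wait loop on topic metadata. A sketch of such a loop, assuming a plain metadata poll is good enough for the test's purposes:

/* Hypothetical: block (up to timeout_ms) until the topic appears in
 * broker metadata. */
static void wait_topic_in_metadata_sketch (rd_kafka_t *rk, const char *topic,
                                           int timeout_ms) {
        int64_t ts_end = test_clock() + (int64_t)timeout_ms * 1000;

        while (test_clock() < ts_end) {
                const struct rd_kafka_metadata *md;
                int i;

                if (rd_kafka_metadata(rk, 1/*all topics*/, NULL, &md, 5000))
                        continue;

                for (i = 0 ; i < md->topic_cnt ; i++) {
                        if (!strcmp(md->topics[i].topic, topic)) {
                                rd_kafka_metadata_destroy(md);
                                return;
                        }
                }

                rd_kafka_metadata_destroy(md);
                rd_sleep(1);
        }

        TEST_FAIL("Topic %s did not appear in metadata", topic);
}
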
int main_0029_assign_offset (int argc, char **argv) {
	const char *topic = test_mk_topic_name(__FUNCTION__, 1);
	rd_kafka_t *rk;
	rd_kafka_topic_t *rkt;
	rd_kafka_topic_conf_t *tconf;
	rd_kafka_topic_partition_list_t *parts;
        uint64_t testid;
	int i;
	test_timing_t t_simple, t_hl;
	test_msgver_t mv;

	test_conf_init(NULL, &tconf, 20 + (test_session_timeout_ms * 3 / 1000));
	test_topic_conf_set(tconf, "auto.offset.reset", "smallest");

	/* Produce X messages to Y partitions so we get a 
	 * nice seekable 0..X offset on each partition. */
        /* Produce messages */
	testid = test_id_generate();
	rk = test_create_producer();
	rkt = test_create_producer_topic(rk, topic, NULL);

	parts = rd_kafka_topic_partition_list_new(partitions);

	for (i = 0 ; i < partitions ; i++) {
		test_produce_msgs(rk, rkt, testid, i, 0, msgcnt, NULL, 0);
		rd_kafka_topic_partition_list_add(parts, topic, i)->offset =
			msgcnt / 2;
	}

	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);


	/* Simple consumer */
	TIMING_START(&t_simple, "SIMPLE.CONSUMER");
	rk = test_create_consumer(topic, NULL, NULL, NULL);
	test_msgver_init(&mv, testid);
	test_consumer_assign("SIMPLE.ASSIGN", rk, parts);
	test_consumer_poll("SIMPLE.CONSUME", rk, testid, -1, 0,
			   partitions * (msgcnt / 2), &mv);
	for (i = 0 ; i < partitions ; i++)
		test_msgver_verify_part("HL.MSGS", &mv, TEST_MSGVER_ALL_PART,
					topic, i, msgcnt/2, msgcnt/2);
	test_msgver_clear(&mv);
	test_consumer_close(rk);
	rd_kafka_destroy(rk);
	TIMING_STOP(&t_simple);

	rd_kafka_topic_partition_list_destroy(parts);
	

	/* High-level consumer.
	 * Offsets are set in rebalance callback. */
	TIMING_START(&t_hl, "HL.CONSUMER");
	test_msgver_init(&mv, testid);
	rk = test_create_consumer(topic, rebalance_cb, NULL, NULL);
	test_consumer_subscribe(rk, topic);
	test_consumer_poll("HL.CONSUME", rk, testid, -1, 0,
			   partitions * (msgcnt / 2), &mv);
	for (i = 0 ; i < partitions ; i++)
		test_msgver_verify_part("HL.MSGS", &mv, TEST_MSGVER_ALL_PART,
					topic, i, msgcnt/2, msgcnt/2);
	test_msgver_clear(&mv);
	test_consumer_close(rk);
	rd_kafka_destroy(rk);
	TIMING_STOP(&t_hl);

	rd_kafka_topic_conf_destroy(tconf);

        return 0;
}
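
The rebalance_cb used by the high-level pass is not included here; per the comment, it sets the start offsets when partitions are assigned. A hedged sketch of a rebalance callback along those lines (starting each partition at msgcnt / 2 mirrors the simple-consumer path above and is an assumption about the real callback):

static void rebalance_cb_sketch (rd_kafka_t *rk, rd_kafka_resp_err_t err,
                                 rd_kafka_topic_partition_list_t *parts,
                                 void *opaque) {
        int i;

        switch (err)
        {
        case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
                /* Start each assigned partition from a fixed offset. */
                for (i = 0 ; i < parts->cnt ; i++)
                        parts->elems[i].offset = msgcnt / 2;
                rd_kafka_assign(rk, parts);
                break;

        case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
        default:
                rd_kafka_assign(rk, NULL);
                break;
        }
}
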
int main_0040_io_event (int argc, char **argv) {
	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *tconf;
	rd_kafka_t *rk_p, *rk_c;
	const char *topic;
	rd_kafka_topic_t *rkt_p;
	rd_kafka_queue_t *queue;
	uint64_t testid;
	int msgcnt = 100;
	int recvd = 0;
	int fds[2];
	int wait_multiplier = 1;
	struct pollfd pfd;
        int r;
	enum {
		_NOPE,
		_YEP,
		_REBALANCE
	} expecting_io = _REBALANCE;

	testid = test_id_generate();
	topic = test_mk_topic_name(__FUNCTION__, 1);

	rk_p = test_create_producer();
	rkt_p = test_create_producer_topic(rk_p, topic, NULL);
	test_auto_create_topic_rkt(rk_p, rkt_p);

	test_conf_init(&conf, &tconf, 0);
	rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE);
	test_conf_set(conf, "session.timeout.ms", "6000");
	test_conf_set(conf, "enable.partition.eof", "false");
	/* Speed up propagation of new topics */
	test_conf_set(conf, "metadata.max.age.ms", "5000");
	test_topic_conf_set(tconf, "auto.offset.reset", "earliest");
	rk_c = test_create_consumer(topic, NULL, conf, tconf);

	queue = rd_kafka_queue_get_consumer(rk_c);

	test_consumer_subscribe(rk_c, topic);

#ifndef _MSC_VER
        r = pipe(fds);
#else
        r = _pipe(fds, 2, _O_BINARY);
#endif
        if (r == -1)
		TEST_FAIL("pipe() failed: %s\n", strerror(errno));
	
	rd_kafka_queue_io_event_enable(queue, fds[1], "1", 1);

	pfd.fd = fds[0];
	pfd.events = POLLIN;
	pfd.revents = 0;

	/**
	 * 1) Wait for rebalance event
	 * 2) Wait 1 interval (1s) expecting no IO (nothing produced).
	 * 3) Produce half the messages
	 * 4) Expect IO
	 * 5) Consume the available messages
	 * 6) Wait 1 interval expecting no IO.
	 * 7) Produce remaining half
	 * 8) Expect IO
	 * 9) Done.
	 */
	while (recvd < msgcnt) {
		int r;

#ifndef _MSC_VER
		r = poll(&pfd, 1, 1000 * wait_multiplier);
#else
                r = WSAPoll(&pfd, 1, 1000 * wait_multiplier);
#endif
		if (r == -1) {
			TEST_FAIL("poll() failed: %s", strerror(errno));
			
		} else if (r == 1) {
			rd_kafka_event_t *rkev;
			char b;
			int eventcnt = 0;

			if (pfd.revents & POLLERR)
				TEST_FAIL("Poll error\n");
			if (!(pfd.revents & POLLIN)) {
				TEST_SAY("Stray event 0x%x\n", (int)pfd.revents);
				continue;
			}

			TEST_SAY("POLLIN\n");
                        /* Read signaling token to purge socket queue and
                         * eventually silence POLLIN */
#ifndef _MSC_VER
			r = read(pfd.fd, &b, 1);
#else
			r = _read((int)pfd.fd, &b, 1);
#endif
			if (r == -1)
				TEST_FAIL("read failed: %s\n", strerror(errno));

			if (!expecting_io)
				TEST_WARN("Got unexpected IO after %d/%d msgs\n",
					  recvd, msgcnt);

			while ((rkev = rd_kafka_queue_poll(queue, 0))) {
				eventcnt++;
				switch (rd_kafka_event_type(rkev))
				{
				case RD_KAFKA_EVENT_REBALANCE:
					TEST_SAY("Got %s: %s\n", rd_kafka_event_name(rkev),
						 rd_kafka_err2str(rd_kafka_event_error(rkev)));
					if (expecting_io != _REBALANCE)
						TEST_FAIL("Got Rebalance when expecting message\n");
					if (rd_kafka_event_error(rkev) == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
						rd_kafka_assign(rk_c, rd_kafka_event_topic_partition_list(rkev));
						expecting_io = _NOPE;
					} else
						rd_kafka_assign(rk_c, NULL);
					break;
					
				case RD_KAFKA_EVENT_FETCH:
					if (expecting_io != _YEP)
						TEST_FAIL("Did not expect more messages at %d/%d\n",
							  recvd, msgcnt);
					recvd++;
					if (recvd == (msgcnt / 2) || recvd == msgcnt)
						expecting_io = _NOPE;
					break;

				case RD_KAFKA_EVENT_ERROR:
					TEST_FAIL("Error: %s\n", rd_kafka_event_error_string(rkev));
					break;

				default:
					TEST_SAY("Ignoring event %s\n", rd_kafka_event_name(rkev));
				}
					
				rd_kafka_event_destroy(rkev);
			}
			TEST_SAY("%d events, Consumed %d/%d messages\n", eventcnt, recvd, msgcnt);

			wait_multiplier = 1;

		} else {
			if (expecting_io == _REBALANCE) {
				continue;
			} else if (expecting_io == _YEP) {
				TEST_FAIL("Did not see expected IO after %d/%d msgs\n",
					  recvd, msgcnt);
			}

			TEST_SAY("IO poll timeout (good)\n");

			TEST_SAY("Got idle period, producing\n");
			test_produce_msgs(rk_p, rkt_p, testid, 0, recvd, msgcnt/2,
					  NULL, 10);

			expecting_io = _YEP;
			/* When running slowly (e.g., valgrind) it might take
			 * some time before the first message is received
			 * after producing. */
			wait_multiplier = 3;
		}
	}
	TEST_SAY("Done\n");

	rd_kafka_topic_destroy(rkt_p);
	rd_kafka_destroy(rk_p);

	rd_kafka_queue_destroy(queue);
	rd_kafka_consumer_close(rk_c);
	rd_kafka_destroy(rk_c);

#ifndef _MSC_VER
	close(fds[0]);
	close(fds[1]);
#else
        _close(fds[0]);
        _close(fds[1]);
#endif

	return 0;
}
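
As an aside, newer librdkafka releases also offer a callback-based queue wakeup as an alternative to wiring a pipe into the application's poll() loop. The sketch below assumes rd_kafka_queue_cb_event_enable() is available and has the signature shown; check rdkafka.h for the version in use:

/* Assumed signature: invoked from an internal librdkafka thread when the
 * queue goes from empty to non-empty. Only signal the application here
 * (e.g. write to an eventfd or post a semaphore); do the actual
 * rd_kafka_queue_poll() from the application thread. */
static void queue_wakeup_cb_sketch (rd_kafka_t *rk, void *qev_opaque) {
        int *wakeups = (int *)qev_opaque;
        (*wakeups)++;
}

/* Usage sketch:
 *   rd_kafka_queue_cb_event_enable(queue, queue_wakeup_cb_sketch, &wakeups);
 */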
Example #10
static void do_offset_test (const char *what, int auto_commit, int auto_store,
			    int async) {
	test_timing_t t_all;
	char groupid[64];
	rd_kafka_t *rk;
	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *tconf;
	int cnt = 0;
	const int extra_cnt = 5;
	rd_kafka_resp_err_t err;
	rd_kafka_topic_partition_list_t *parts;
	rd_kafka_topic_partition_t *rktpar;
	int64_t next_offset = -1;

	test_conf_init(&conf, &tconf, 20);
	test_conf_set(conf, "enable.auto.commit", auto_commit ? "true":"false");
	test_conf_set(conf, "enable.auto.offset.store", auto_store ?"true":"false");
	test_conf_set(conf, "auto.commit.interval.ms", "500");
	rd_kafka_conf_set_offset_commit_cb(conf, offset_commit_cb);
	test_topic_conf_set(tconf, "auto.offset.reset", "smallest");
	test_str_id_generate(groupid, sizeof(groupid));
	test_conf_set(conf, "group.id", groupid);
	rd_kafka_conf_set_default_topic_conf(conf, tconf);

	TEST_SAY(_C_MAG "[ do_offset_test: %s with group.id %s ]\n",
		 what, groupid);

	TIMING_START(&t_all, what);

	expected_offset  = 0;
	committed_offset = -1;

	/* MO:
	 *  - Create consumer.
	 *  - Start consuming from beginning
	 *  - Perform store & commits according to settings
	 *  - Stop storing&committing when half of the messages are consumed,
	 *  - but consume 5 more to check against.
	 *  - Query position.
	 *  - Destroy consumer.
	 *  - Create new consumer with same group.id using stored offsets
	 *  - Should consume the expected message.
	 */

	/* Create kafka instance */
	rk = test_create_handle(RD_KAFKA_CONSUMER, rd_kafka_conf_dup(conf));

	rd_kafka_poll_set_consumer(rk);

	test_consumer_subscribe(rk, topic);

	while (cnt - extra_cnt < msgcnt / 2) {
		rd_kafka_message_t *rkm;

		rkm = rd_kafka_consumer_poll(rk, 10*1000);
		if (!rkm)
			continue;

		if (rkm->err == RD_KAFKA_RESP_ERR__TIMED_OUT)
			TEST_FAIL("%s: Timed out waiting for message %d", what,cnt);
		else if (rkm->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
			rd_kafka_message_destroy(rkm);
			continue;
		} else if (rkm->err)
			TEST_FAIL("%s: Consumer error: %s",
				  what, rd_kafka_message_errstr(rkm));

		/* Offset of next message. */
		next_offset = rkm->offset + 1;

		if (cnt < msgcnt / 2) {
			if (!auto_store) {
				err = rd_kafka_offset_store(rkm->rkt,rkm->partition,
							    rkm->offset);
				if (err)
					TEST_FAIL("%s: offset_store failed: %s\n",
						  what, rd_kafka_err2str(err));
			}
			expected_offset = rkm->offset+1;
			if (!auto_commit) {
				test_timing_t t_commit;
				TIMING_START(&t_commit,
					     async?"commit.async":"commit.sync");
				err = rd_kafka_commit_message(rk, rkm, async);
				TIMING_STOP(&t_commit);
				if (err)
					TEST_FAIL("%s: commit failed: %s\n",
						  what, rd_kafka_err2str(err));
			}

		} else if (auto_store && auto_commit)
			expected_offset = rkm->offset+1;

		rd_kafka_message_destroy(rkm);
		cnt++;
	}

	TEST_SAY("%s: done consuming after %d messages, at offset %"PRId64"\n",
		 what, cnt, expected_offset);

	if ((err = rd_kafka_assignment(rk, &parts)))
		TEST_FAIL("%s: failed to get assignment(): %s\n",
			  what, rd_kafka_err2str(err));

	/* Verify position */
	if ((err = rd_kafka_position(rk, parts)))
		TEST_FAIL("%s: failed to get position(): %s\n",
			  what, rd_kafka_err2str(err));
	if (!(rktpar = rd_kafka_topic_partition_list_find(parts,
							  topic, partition)))
		TEST_FAIL("%s: position(): topic lost\n", what);
	if (rktpar->offset != next_offset)
		TEST_FAIL("%s: Expected position() offset %"PRId64", got %"PRId64,
			  what, next_offset, rktpar->offset);
	TEST_SAY("%s: Position is at %"PRId64", good!\n",
		 what, rktpar->offset);

	/* Pause messages while waiting so we can serve callbacks
	 * without having more messages received. */
	if ((err = rd_kafka_pause_partitions(rk, parts)))
		TEST_FAIL("%s: failed to pause partitions: %s\n",
			  what, rd_kafka_err2str(err));
	rd_kafka_topic_partition_list_destroy(parts);

	/* Fire off any enqueued offset_commit_cb */
	test_consumer_poll_no_msgs(what, rk, testid, 0);

	TEST_SAY("%s: committed_offset %"PRId64", expected_offset %"PRId64"\n",
		 what, committed_offset, expected_offset);

	if (!auto_commit && !async) {
		/* Sync commits should be up to date at this point. */
		if (committed_offset != expected_offset)
			TEST_FAIL("%s: Sync commit: committed offset %"PRId64
				  " should be same as expected offset "
				  "%"PRId64,
				  what, committed_offset, expected_offset);
	} else {

		/* Wait for offset commits to catch up */
		while (committed_offset < expected_offset) {
			TEST_SAYL(3, "%s: Wait for committed offset %"PRId64
				  " to reach expected offset %"PRId64"\n",
				  what, committed_offset, expected_offset);
			test_consumer_poll_no_msgs(what, rk, testid, 1000);
		}

	}

	TEST_SAY("%s: phase 1 complete, %d messages consumed, "
		 "next expected offset is %"PRId64"\n",
		 what, cnt, expected_offset);

        /* Issue #827: cause committed() to return prematurely by specifying
         *             low timeout. The bug (use after free) will only
         *             be caught by valgrind. */
        do {
                parts = rd_kafka_topic_partition_list_new(1);
                rd_kafka_topic_partition_list_add(parts, topic, partition);
                err = rd_kafka_committed(rk, parts, 1);
                rd_kafka_topic_partition_list_destroy(parts);
                TEST_SAY("Issue #827: committed() returned %s\n",
                         rd_kafka_err2str(err));
        } while (err != RD_KAFKA_RESP_ERR__TIMED_OUT);

	/* Query position */
	parts = rd_kafka_topic_partition_list_new(1);
	rd_kafka_topic_partition_list_add(parts, topic, partition);

	err = rd_kafka_committed(rk, parts, tmout_multip(5*1000));
	if (err)
		TEST_FAIL("%s: committed() failed: %s", what, rd_kafka_err2str(err));
	if (!(rktpar = rd_kafka_topic_partition_list_find(parts,
							  topic, partition)))
		TEST_FAIL("%s: committed(): topic lost\n", what);
	if (rktpar->offset != expected_offset)
		TEST_FAIL("%s: Expected committed() offset %"PRId64", got %"PRId64,
			  what, expected_offset, rktpar->offset);
	TEST_SAY("%s: Committed offset is at %"PRId64", good!\n",
		 what, rktpar->offset);

	rd_kafka_topic_partition_list_destroy(parts);
	test_consumer_close(rk);
	rd_kafka_destroy(rk);



	/* Fire up a new consumer and continue from where we left off. */
	TEST_SAY("%s: phase 2: starting new consumer to resume consumption\n",what);
	rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
	rd_kafka_poll_set_consumer(rk);

	test_consumer_subscribe(rk, topic);

	while (cnt < msgcnt) {
		rd_kafka_message_t *rkm;

		rkm = rd_kafka_consumer_poll(rk, 10*1000);
		if (!rkm)
			continue;

		if (rkm->err == RD_KAFKA_RESP_ERR__TIMED_OUT)
			TEST_FAIL("%s: Timed out waiting for message %d", what,cnt);
		else if (rkm->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
			rd_kafka_message_destroy(rkm);
			continue;
		} else if (rkm->err)
			TEST_FAIL("%s: Consumer error: %s",
				  what, rd_kafka_message_errstr(rkm));

		if (rkm->offset != expected_offset)
			TEST_FAIL("%s: Received message offset %"PRId64
				  ", expected %"PRId64" at msgcnt %d/%d\n",
				  what, rkm->offset, expected_offset,
				  cnt, msgcnt);

		rd_kafka_message_destroy(rkm);
		expected_offset++;
		cnt++;
	}


	TEST_SAY("%s: phase 2: complete\n", what);
	test_consumer_close(rk);
	rd_kafka_destroy(rk);
	

	TIMING_STOP(&t_all);
}
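
The expected_offset/committed_offset globals are updated by the offset_commit_cb registered via rd_kafka_conf_set_offset_commit_cb() at the top of the function but not shown here. A minimal sketch, assuming the callback only records the latest committed offset for the tested partition:

static void offset_commit_cb_sketch (rd_kafka_t *rk, rd_kafka_resp_err_t err,
                                     rd_kafka_topic_partition_list_t *offsets,
                                     void *opaque) {
        rd_kafka_topic_partition_t *rktpar;

        if (err == RD_KAFKA_RESP_ERR__NO_OFFSET)
                return;         /* Nothing was stored since the last commit. */
        if (err)
                TEST_FAIL("offset commit failed: %s", rd_kafka_err2str(err));

        rktpar = rd_kafka_topic_partition_list_find(offsets, topic, partition);
        if (rktpar && rktpar->offset > committed_offset)
                committed_offset = rktpar->offset;      /* assumed global */
}
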
int main_0049_consume_conn_close (int argc, char **argv) {
        rd_kafka_t *rk;
        const char *topic = test_mk_topic_name("0049_consume_conn_close", 1);
        uint64_t testid;
        int msgcnt = test_on_ci ? 1000 : 10000;
        test_msgver_t mv;
        rd_kafka_conf_t *conf;
        rd_kafka_topic_conf_t *tconf;
        rd_kafka_topic_partition_list_t *assignment;
        rd_kafka_resp_err_t err;

        if (!test_conf_match(NULL, "sasl.mechanisms", "GSSAPI")) {
                TEST_SKIP("KNOWN ISSUE: ApiVersionRequest+SaslHandshake "
                          "will not play well with sudden disconnects\n");
                return 0;
        }

        test_conf_init(&conf, &tconf, 60);
        /* Want an even number so it is divisible by two without surprises */
        msgcnt = (msgcnt / (int)test_timeout_multiplier) & ~1;

        testid = test_id_generate();
        test_produce_msgs_easy(topic, testid, RD_KAFKA_PARTITION_UA, msgcnt);


        test_socket_enable(conf);
        test_curr->connect_cb = connect_cb;
        test_curr->is_fatal_cb = is_fatal_cb;

        test_topic_conf_set(tconf, "auto.offset.reset", "smallest");

        rk = test_create_consumer(topic, NULL, conf, tconf);

        test_consumer_subscribe(rk, topic);

        test_msgver_init(&mv, testid);

        test_consumer_poll("consume.up", rk, testid, -1, 0, msgcnt/2, &mv);

        err = rd_kafka_assignment(rk, &assignment);
        TEST_ASSERT(!err, "assignment() failed: %s", rd_kafka_err2str(err));
        TEST_ASSERT(assignment->cnt > 0, "empty assignment");

        TEST_SAY("Bringing down the network\n");

        TEST_LOCK();
        simulate_network_down = 1;
        TEST_UNLOCK();
        test_socket_close_all(test_curr, 1/*reinit*/);

        TEST_SAY("Waiting for session timeout to expire (6s), and then some\n");

        /* Commit an offset, which should fail, to trigger the offset commit
         * callback fallback (CONSUMER_ERR) */
        assignment->elems[0].offset = 123456789;
        TEST_SAY("Committing offsets while down, should fail eventually\n");
        err = rd_kafka_commit(rk, assignment, 1/*async*/);
        TEST_ASSERT(!err, "async commit failed: %s", rd_kafka_err2str(err));
        rd_kafka_topic_partition_list_destroy(assignment);

        rd_sleep(10);

        TEST_SAY("Bringing network back up\n");
        TEST_LOCK();
        simulate_network_down = 0;
        TEST_UNLOCK();

        TEST_SAY("Continuing to consume..\n");
        test_consumer_poll("consume.up2", rk, testid, -1, msgcnt/2, msgcnt/2,
                           &mv);

        test_msgver_verify("consume", &mv, TEST_MSGVER_ORDER|TEST_MSGVER_DUP,
                           0, msgcnt);

        test_msgver_clear(&mv);

        test_consumer_close(rk);
        rd_kafka_destroy(rk);

        return 0;
}
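
connect_cb and is_fatal_cb are test-framework hooks defined outside this snippet. A hedged sketch of the error filter, with the hook signature assumed from how it is used in the test suite: while the network is (simulated) down, transport-level errors are expected and must not abort the test.

/* Assumed hook signature: return non-zero if the error should be fatal. */
static int is_fatal_cb_sketch (rd_kafka_t *rk, rd_kafka_resp_err_t err,
                               const char *reason) {
        TEST_SAY("is_fatal? %s: %s\n", rd_kafka_err2str(err), reason);
        return 0;       /* nothing is fatal during the simulated outage */
}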
Example #12
int main_0029_assign_offset (int argc, char **argv) {
	const char *topic = test_mk_topic_name(__FUNCTION__, 1);
	rd_kafka_t *rk;
	rd_kafka_topic_t *rkt;
	rd_kafka_topic_partition_list_t *parts;
        uint64_t testid;
	int i;
	test_timing_t t_simple, t_hl;
	test_msgver_t mv;

	test_conf_init(NULL, NULL, 20 + (test_session_timeout_ms * 3 / 1000));

	/* Produce X messages to Y partitions so we get a 
	 * nice seekable 0..X offset on each partition. */
        /* Produce messages */
	testid = test_id_generate();
	rk = test_create_producer();
	rkt = test_create_producer_topic(rk, topic, NULL);

	parts = rd_kafka_topic_partition_list_new(partitions);

	for (i = 0 ; i < partitions ; i++) {
		test_produce_msgs(rk, rkt, testid, i, 0, msgcnt, NULL, 0);
		/* Set start offset */
		rd_kafka_topic_partition_list_add(parts, topic, i)->offset =
			msgcnt / 2;
	}

	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);


	/* Simple consumer */
	TIMING_START(&t_simple, "SIMPLE.CONSUMER");
	rk = test_create_consumer(topic, NULL, NULL, NULL);
	test_msgver_init(&mv, testid);
	test_consumer_assign("SIMPLE.ASSIGN", rk, parts);
	test_consumer_poll("SIMPLE.CONSUME", rk, testid, -1, 0,
			   partitions * (msgcnt / 2), &mv);
	for (i = 0 ; i < partitions ; i++)
		test_msgver_verify_part("HL.MSGS", &mv, TEST_MSGVER_ALL_PART,
					topic, i, msgcnt/2, msgcnt/2);
	test_msgver_clear(&mv);
	test_consumer_close(rk);
	rd_kafka_destroy(rk);
	TIMING_STOP(&t_simple);

	rd_kafka_topic_partition_list_destroy(parts);
	

	/* High-level consumer: method 1
	 * Offsets are set in rebalance callback. */
	if (test_broker_version >= TEST_BRKVER(0,9,0,0)) {
		reb_method = REB_METHOD_1;
		TIMING_START(&t_hl, "HL.CONSUMER");
		test_msgver_init(&mv, testid);
		rk = test_create_consumer(topic, rebalance_cb, NULL, NULL);
		test_consumer_subscribe(rk, topic);
		test_consumer_poll("HL.CONSUME", rk, testid, -1, 0,
				   partitions * (msgcnt / 2), &mv);
		for (i = 0 ; i < partitions ; i++)
			test_msgver_verify_part("HL.MSGS", &mv,
						TEST_MSGVER_ALL_PART,
						topic, i, msgcnt/2, msgcnt/2);
		test_msgver_clear(&mv);
		test_consumer_close(rk);
		rd_kafka_destroy(rk);
		TIMING_STOP(&t_hl);


		/* High-level consumer: method 2:
		 * first two partitions are with fixed absolute offset, rest are
		 * auto offset (stored, which is now at end). 
		 * Offsets are set in rebalance callback. */
		reb_method = REB_METHOD_2;
		TIMING_START(&t_hl, "HL.CONSUMER2");
		test_msgver_init(&mv, testid);
		rk = test_create_consumer(topic, rebalance_cb, NULL, NULL);
		test_consumer_subscribe(rk, topic);
		test_consumer_poll("HL.CONSUME2", rk, testid, partitions, 0,
				   2 * (msgcnt / 2), &mv);
		for (i = 0 ; i < partitions ; i++) {
			if (i < 2)
				test_msgver_verify_part("HL.MSGS2.A", &mv,
							TEST_MSGVER_ALL_PART,
							topic, i, msgcnt/2,
							msgcnt/2);
		}
		test_msgver_clear(&mv);
		test_consumer_close(rk);
		rd_kafka_destroy(rk);
		TIMING_STOP(&t_hl);
	}

        return 0;
}
int main_0056_balanced_group_mt (int argc, char **argv) {
        const char *topic = test_mk_topic_name(__FUNCTION__, 1);
        rd_kafka_t *rk_p, *rk_c;
        rd_kafka_topic_t *rkt_p;
        int msg_cnt = 1000;
        int msg_base = 0;
        int partition_cnt = 2;
        int partition;
        uint64_t testid;
        rd_kafka_topic_conf_t *default_topic_conf;
        rd_kafka_topic_partition_list_t *sub, *topics;
        rd_kafka_resp_err_t err;
        test_timing_t t_assign, t_close, t_consume;
        int i;

        exp_msg_cnt = msg_cnt * partition_cnt;

        testid = test_id_generate();

        /* Produce messages */
        rk_p = test_create_producer();
        rkt_p = test_create_producer_topic(rk_p, topic, NULL);

        for (partition = 0; partition < partition_cnt; partition++) {
                test_produce_msgs(rk_p, rkt_p, testid, partition,
                                  msg_base + (partition * msg_cnt), msg_cnt,
                                  NULL, 0);
        }

        rd_kafka_topic_destroy(rkt_p);
        rd_kafka_destroy(rk_p);

        if (mtx_init(&lock, mtx_plain) != thrd_success)
                TEST_FAIL("Cannot create mutex.");

        test_conf_init(NULL, &default_topic_conf,
                       (test_session_timeout_ms * 3) / 1000);

        test_topic_conf_set(default_topic_conf, "auto.offset.reset",
                            "smallest");

        /* Fill in topic subscription set */
        topics = rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add(topics, topic, RD_KAFKA_PARTITION_UA);

        /* Create consumers and start subscription */
        rk_c = test_create_consumer(
                topic /*group_id*/, rebalance_cb, NULL,
                default_topic_conf);

        test_consumer_subscribe(rk_c, topic);

        rd_kafka_topic_partition_list_destroy(topics);

        /* Wait for both consumers to get an assignment */
        TIMING_START(&t_assign, "WAIT.ASSIGN");
        get_assignment(rk_c);
        TIMING_STOP(&t_assign);

        TIMING_START(&t_consume, "CONSUME.WAIT");
        for (i = 0; i < MAX_THRD_CNT; ++i) {
                if (tids[i] != 0)
                        thrd_join(tids[i], NULL);
        }
        TIMING_STOP(&t_consume);

        TEST_SAY("Closing remaining consumers\n");
        /* Query subscription */
        err = rd_kafka_subscription(rk_c, &sub);
        TEST_ASSERT(!err, "%s: subscription () failed: %s", rd_kafka_name(rk_c),
                    rd_kafka_err2str(err));
        TEST_SAY("%s: subscription (%d):\n", rd_kafka_name(rk_c), sub->cnt);
        for (i = 0; i < sub->cnt; ++i)
                TEST_SAY(" %s\n", sub->elems[i].topic);
        rd_kafka_topic_partition_list_destroy(sub);

        /* Run an explicit unsubscribe () (async) prior to close ()
         * to trigger race condition issues on termination. */
        TEST_SAY("Unsubscribing instance %s\n", rd_kafka_name(rk_c));
        err = rd_kafka_unsubscribe(rk_c);
        TEST_ASSERT(!err, "%s: unsubscribe failed: %s", rd_kafka_name(rk_c),
                    rd_kafka_err2str(err));

        TEST_SAY("Closing %s\n", rd_kafka_name(rk_c));
        TIMING_START(&t_close, "CONSUMER.CLOSE");
        err = rd_kafka_consumer_close(rk_c);
        TIMING_STOP(&t_close);
        TEST_ASSERT(!err, "consumer_close failed: %s", rd_kafka_err2str(err));

        rd_kafka_destroy(rk_c);
        rk_c = NULL;

        TEST_SAY("%d/%d messages consumed\n", consumed_msg_cnt, exp_msg_cnt);
        TEST_ASSERT(consumed_msg_cnt >= exp_msg_cnt,
                    "Only %d/%d messages were consumed", consumed_msg_cnt,
                    exp_msg_cnt);

        if (consumed_msg_cnt > exp_msg_cnt)
                TEST_SAY("At least %d/%d messages were consumed "
                         "multiple times\n",
                         consumed_msg_cnt - exp_msg_cnt, exp_msg_cnt);

        mtx_destroy(&lock);

        return 0;
}
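
The worker threads joined through tids[] and the get_assignment() helper are not part of this snippet. A rough sketch of the kind of consumer thread this multi-threaded test implies, using the same C11/tinycthread primitives as the mutex above (the thread body and bookkeeping are assumptions):

/* Hypothetical worker: poll the shared consumer handle and count messages. */
static int consume_thread_sketch (void *arg) {
        rd_kafka_t *rk = (rd_kafka_t *)arg;

        for (;;) {
                rd_kafka_message_t *rkm = rd_kafka_consumer_poll(rk, 500);
                int done = 0;

                if (!rkm)
                        continue;

                if (!rkm->err) {
                        mtx_lock(&lock);
                        consumed_msg_cnt++;     /* shared counter from above */
                        done = (consumed_msg_cnt >= exp_msg_cnt);
                        mtx_unlock(&lock);
                }

                rd_kafka_message_destroy(rkm);

                if (done)
                        break;
        }

        return 0;
}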