Example #1
/**
 * Commit offsets for a non-existent topic: the commit call itself should
 * succeed, with per-partition results delivered to the commit callback.
 */
static void do_nonexist_commit (void) {
	rd_kafka_t *rk;
	char group_id[64];
	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *tconf;
	rd_kafka_topic_partition_list_t *offsets;
	const char *unk_topic = test_mk_topic_name(__FUNCTION__, 1);
	rd_kafka_resp_err_t err;

	test_conf_init(&conf, &tconf, 20);
	test_str_id_generate(group_id, sizeof(group_id));

	TEST_SAY(_C_MAG "[ do_nonexist_commit group.id %s ]\n", group_id);

	rk = test_create_consumer(group_id, NULL, conf, tconf, NULL);

	TEST_SAY("Try nonexist commit\n");
	offsets = rd_kafka_topic_partition_list_new(2);
	rd_kafka_topic_partition_list_add(offsets, unk_topic, 0)->offset = 123;
	rd_kafka_topic_partition_list_add(offsets, unk_topic, 1)->offset = 456;

	err = rd_kafka_commit_queue(rk, offsets, NULL,
				    nonexist_offset_commit_cb, NULL);
	TEST_SAY("nonexist commit returned %s\n", rd_kafka_err2str(err));
	if (err != RD_KAFKA_RESP_ERR_NO_ERROR)
		TEST_FAIL("commit() should succeed, not: %s",
			  rd_kafka_err2str(err));

	rd_kafka_topic_partition_list_destroy(offsets);

	test_consumer_close(rk);

	rd_kafka_destroy(rk);
}
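
/*
 * The nonexist_offset_commit_cb referenced above is not part of this
 * excerpt. A minimal sketch follows, assuming the usual test.h environment
 * and that the callback's job is to log the overall result and the
 * per-partition error codes (committing offsets for an unknown topic is
 * expected to surface a per-partition error such as UNKNOWN_TOPIC_OR_PART).
 */
static void
nonexist_offset_commit_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err,
                           rd_kafka_topic_partition_list_t *offsets,
                           void *opaque) {
        int i;

        TEST_SAY("nonexist commit callback: result: %s\n",
                 rd_kafka_err2str(err));

        /* Each partition carries its own result code. */
        for (i = 0 ; i < offsets->cnt ; i++)
                TEST_SAY(" %s [%"PRId32"]: %s\n",
                         offsets->elems[i].topic,
                         offsets->elems[i].partition,
                         rd_kafka_err2str(offsets->elems[i].err));
}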
Example #2
/**
 * Trigger an empty cgrp commit (issue #803)
 */
static void do_empty_commit (void) {
	rd_kafka_t *rk;
	char group_id[64];
	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *tconf;
	rd_kafka_resp_err_t err, expect;

	test_conf_init(&conf, &tconf, 20);
	test_conf_set(conf, "enable.auto.commit", "false");
	test_topic_conf_set(tconf, "auto.offset.reset", "earliest");
	test_str_id_generate(group_id, sizeof(group_id));

	TEST_SAY(_C_MAG "[ do_empty_commit group.id %s ]\n", group_id);

	rk = test_create_consumer(group_id, NULL, conf, tconf, NULL);

	test_consumer_subscribe(rk, topic);

	test_consumer_poll("consume", rk, testid, -1, -1, 100, NULL);

	TEST_SAY("First commit\n");
	expect = RD_KAFKA_RESP_ERR_NO_ERROR;
	err = rd_kafka_commit_queue(rk, NULL, NULL,
				    empty_offset_commit_cb, &expect);
	if (err != expect)
		TEST_FAIL("commit failed: %s", rd_kafka_err2str(err));
	else
		TEST_SAY("First commit returned %s\n",
			 rd_kafka_err2str(err));

	TEST_SAY("Second commit, should be empty\n");
	expect = RD_KAFKA_RESP_ERR__NO_OFFSET;
	err = rd_kafka_commit_queue(rk, NULL, NULL,
				    empty_offset_commit_cb, &expect);
	if (err != RD_KAFKA_RESP_ERR__NO_OFFSET)
		TEST_FAIL("unexpected commit result, wanted NO_OFFSET, got: %s",
			  rd_kafka_err2str(err));
	else
		TEST_SAY("Second commit returned %s\n",
			 rd_kafka_err2str(err));

	test_consumer_close(rk);

	rd_kafka_destroy(rk);
}
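
/*
 * The empty_offset_commit_cb used above is defined elsewhere in the test;
 * this is a sketch under the assumption that it compares the commit result
 * against the expected error passed via the opaque pointer (&expect above):
 * NO_ERROR for the first commit, _NO_OFFSET for the empty second commit.
 */
static void
empty_offset_commit_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err,
                        rd_kafka_topic_partition_list_t *offsets,
                        void *opaque) {
        rd_kafka_resp_err_t expected = *(rd_kafka_resp_err_t *)opaque;

        TEST_SAY("empty commit callback: result: %s (expected %s)\n",
                 rd_kafka_err2str(err), rd_kafka_err2str(expected));

        if (err != expected)
                TEST_FAIL("expected %s, not %s",
                          rd_kafka_err2str(expected), rd_kafka_err2str(err));
}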
Example #3
/**
 * Create high-level consumer subscribing to \p topic from BEGINNING
 * and expect \p exp_msgcnt messages with matching \p testid.
 * Destroys the consumer when done.
 *
 * If \p group_id is NULL a new unique group is generated.
 */
void
test_consume_msgs_easy (const char *group_id, const char *topic,
                        uint64_t testid, int exp_msgcnt) {
        rd_kafka_t *rk;
        rd_kafka_topic_conf_t *tconf;
        rd_kafka_resp_err_t err;
        rd_kafka_topic_partition_list_t *topics;
        char grpid0[64];

        test_conf_init(NULL, &tconf, 0);

        if (!group_id)
                group_id = test_str_id_generate(grpid0, sizeof(grpid0));

        test_topic_conf_set(tconf, "auto.offset.reset", "smallest");
        rk = test_create_consumer(group_id, NULL, tconf, NULL);

        rd_kafka_poll_set_consumer(rk);

        topics = rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add(topics, topic, RD_KAFKA_PARTITION_UA);

        TEST_SAY("Subscribing to topic %s in group %s "
                 "(expecting %d msgs with testid %"PRIu64")\n",
                 topic, group_id, exp_msgcnt, testid);

        err = rd_kafka_subscribe(rk, topics);
        if (err)
                TEST_FAIL("Failed to subscribe to %s: %s\n",
                          topic, rd_kafka_err2str(err));

        rd_kafka_topic_partition_list_destroy(topics);

        /* Consume messages */
        test_consumer_poll("consume.easy", rk, testid, -1, -1, exp_msgcnt);

        test_consumer_close(rk);

        rd_kafka_destroy(rk);
}
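
/*
 * Hypothetical usage sketch (not in the original source): produce a known
 * set of messages, then verify them with test_consume_msgs_easy() using an
 * auto-generated group.id (NULL group). The function and topic names here
 * are illustrative; the helpers are assumed from the test.h environment.
 */
static void example_consume_easy (void) {
        const char *topic = test_mk_topic_name("consume_easy", 1);
        const int msgcnt = 100;
        uint64_t testid;

        /* Produce messages to any (UA) partition. */
        testid = test_produce_msgs_easy(topic, 0, RD_KAFKA_PARTITION_UA,
                                        msgcnt);

        /* NULL group_id: a unique group is generated internally. */
        test_consume_msgs_easy(NULL, topic, testid, msgcnt);
}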
Example #4
/**
 * Consume and commit offsets with the given auto_commit/auto_store/async
 * settings, verify position() and committed(), then restart a consumer
 * with the same group.id and verify it resumes at the expected offset.
 */
static void do_offset_test (const char *what, int auto_commit, int auto_store,
			    int async) {
	test_timing_t t_all;
	char groupid[64];
	rd_kafka_t *rk;
	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *tconf;
	int cnt = 0;
	const int extra_cnt = 5;
	rd_kafka_resp_err_t err;
	rd_kafka_topic_partition_list_t *parts;
	rd_kafka_topic_partition_t *rktpar;
	int64_t next_offset = -1;

	test_conf_init(&conf, &tconf, 20);
	test_conf_set(conf, "enable.auto.commit", auto_commit ? "true":"false");
	test_conf_set(conf, "enable.auto.offset.store", auto_store ?"true":"false");
	test_conf_set(conf, "auto.commit.interval.ms", "500");
	rd_kafka_conf_set_offset_commit_cb(conf, offset_commit_cb);
	test_topic_conf_set(tconf, "auto.offset.reset", "smallest");
	test_str_id_generate(groupid, sizeof(groupid));
	test_conf_set(conf, "group.id", groupid);
	rd_kafka_conf_set_default_topic_conf(conf, tconf);

	TEST_SAY(_C_MAG "[ do_offset_test: %s with group.id %s ]\n",
		 what, groupid);

	TIMING_START(&t_all, what);

	expected_offset  = 0;
	committed_offset = -1;

	/* MO:
	 *  - Create consumer.
	 *  - Start consuming from beginning.
	 *  - Perform stores & commits according to settings.
	 *  - Stop storing & committing when half of the messages are
	 *    consumed, but consume 5 more to check against.
	 *  - Query position.
	 *  - Destroy consumer.
	 *  - Create a new consumer with the same group.id using stored
	 *    offsets.
	 *  - Should consume the expected message.
	 */

	/* Create kafka instance */
	rk = test_create_handle(RD_KAFKA_CONSUMER, rd_kafka_conf_dup(conf));

	rd_kafka_poll_set_consumer(rk);

	test_consumer_subscribe(rk, topic);

	while (cnt - extra_cnt < msgcnt / 2) {
		rd_kafka_message_t *rkm;

		rkm = rd_kafka_consumer_poll(rk, 10*1000);
		if (!rkm)
			continue;

		if (rkm->err == RD_KAFKA_RESP_ERR__TIMED_OUT)
			TEST_FAIL("%s: Timed out waiting for message %d",
				  what, cnt);
		else if (rkm->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
			rd_kafka_message_destroy(rkm);
			continue;
		} else if (rkm->err)
			TEST_FAIL("%s: Consumer error: %s",
				  what, rd_kafka_message_errstr(rkm));

		/* Offset of next message. */
		next_offset = rkm->offset + 1;

		if (cnt < msgcnt / 2) {
			if (!auto_store) {
				err = rd_kafka_offset_store(rkm->rkt,
							    rkm->partition,
							    rkm->offset);
				if (err)
					TEST_FAIL("%s: offset_store failed: %s\n",
						  what, rd_kafka_err2str(err));
			}
			expected_offset = rkm->offset+1;
			if (!auto_commit) {
				test_timing_t t_commit;
				TIMING_START(&t_commit,
					     async ? "commit.async" :
					     "commit.sync");
				err = rd_kafka_commit_message(rk, rkm, async);
				TIMING_STOP(&t_commit);
				if (err)
					TEST_FAIL("%s: commit failed: %s\n",
						  what, rd_kafka_err2str(err));
			}

		} else if (auto_store && auto_commit)
			expected_offset = rkm->offset+1;

		rd_kafka_message_destroy(rkm);
		cnt++;
	}

	TEST_SAY("%s: done consuming after %d messages, at offset %"PRId64"\n",
		 what, cnt, expected_offset);

	if ((err = rd_kafka_assignment(rk, &parts)))
		TEST_FAIL("%s: failed to get assignment(): %s\n",
			  what, rd_kafka_err2str(err));

	/* Verify position */
	if ((err = rd_kafka_position(rk, parts)))
		TEST_FAIL("%s: failed to get position(): %s\n",
			  what, rd_kafka_err2str(err));
	if (!(rktpar = rd_kafka_topic_partition_list_find(parts,
							  topic, partition)))
		TEST_FAIL("%s: position(): topic lost\n", what);
	if (rktpar->offset != next_offset)
		TEST_FAIL("%s: Expected position() offset %"PRId64", got %"PRId64,
			  what, next_offset, rktpar->offset);
	TEST_SAY("%s: Position is at %"PRId64", good!\n",
		 what, rktpar->offset);

	/* Pause messages while waiting so we can serve callbacks
	 * without having more messages received. */
	if ((err = rd_kafka_pause_partitions(rk, parts)))
		TEST_FAIL("%s: failed to pause partitions: %s\n",
			  what, rd_kafka_err2str(err));
	rd_kafka_topic_partition_list_destroy(parts);

	/* Fire off any enqueued offset_commit_cb */
	test_consumer_poll_no_msgs(what, rk, testid, 0);

	TEST_SAY("%s: committed_offset %"PRId64", expected_offset %"PRId64"\n",
		 what, committed_offset, expected_offset);

	if (!auto_commit && !async) {
		/* Sync commits should be up to date at this point. */
		if (committed_offset != expected_offset)
			TEST_FAIL("%s: Sync commit: committed offset %"PRId64
				  " should be same as expected offset "
				  "%"PRId64,
				  what, committed_offset, expected_offset);
	} else {

		/* Wait for offset commits to catch up */
		while (committed_offset < expected_offset) {
			TEST_SAYL(3, "%s: Wait for committed offset %"PRId64
				  " to reach expected offset %"PRId64"\n",
				  what, committed_offset, expected_offset);
			test_consumer_poll_no_msgs(what, rk, testid, 1000);
		}

	}

	TEST_SAY("%s: phase 1 complete, %d messages consumed, "
		 "next expected offset is %"PRId64"\n",
		 what, cnt, expected_offset);

        /* Issue #827: cause committed() to return prematurely by specifying
         *             a low timeout. The bug (use after free) will only
         *             be caught by valgrind. */
        do {
                parts = rd_kafka_topic_partition_list_new(1);
                rd_kafka_topic_partition_list_add(parts, topic, partition);
                err = rd_kafka_committed(rk, parts, 1);
                rd_kafka_topic_partition_list_destroy(parts);
                TEST_SAY("Issue #827: committed() returned %s\n",
                         rd_kafka_err2str(err));
        } while (err != RD_KAFKA_RESP_ERR__TIMED_OUT);

	/* Query position */
	parts = rd_kafka_topic_partition_list_new(1);
	rd_kafka_topic_partition_list_add(parts, topic, partition);

	err = rd_kafka_committed(rk, parts, tmout_multip(5*1000));
	if (err)
		TEST_FAIL("%s: committed() failed: %s", what, rd_kafka_err2str(err));
	if (!(rktpar = rd_kafka_topic_partition_list_find(parts,
							  topic, partition)))
		TEST_FAIL("%s: committed(): topic lost\n", what);
	if (rktpar->offset != expected_offset)
		TEST_FAIL("%s: Expected committed() offset %"PRId64", got %"PRId64,
			  what, expected_offset, rktpar->offset);
	TEST_SAY("%s: Committed offset is at %"PRId64", good!\n",
		 what, rktpar->offset);

	rd_kafka_topic_partition_list_destroy(parts);
	test_consumer_close(rk);
	rd_kafka_destroy(rk);

	/* Fire up a new consumer and continue from where we left off. */
	TEST_SAY("%s: phase 2: starting new consumer to resume consumption\n",what);
	rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
	rd_kafka_poll_set_consumer(rk);

	test_consumer_subscribe(rk, topic);

	while (cnt < msgcnt) {
		rd_kafka_message_t *rkm;

		rkm = rd_kafka_consumer_poll(rk, 10*1000);
		if (!rkm)
			continue;

		if (rkm->err == RD_KAFKA_RESP_ERR__TIMED_OUT)
			TEST_FAIL("%s: Timed out waiting for message %d",
				  what, cnt);
		else if (rkm->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
			rd_kafka_message_destroy(rkm);
			continue;
		} else if (rkm->err)
			TEST_FAIL("%s: Consumer error: %s",
				  what, rd_kafka_message_errstr(rkm));

		if (rkm->offset != expected_offset)
			TEST_FAIL("%s: Received message offset %"PRId64
				  ", expected %"PRId64" at msgcnt %d/%d\n",
				  what, rkm->offset, expected_offset,
				  cnt, msgcnt);

		rd_kafka_message_destroy(rkm);
		expected_offset++;
		cnt++;
	}


	TEST_SAY("%s: phase 2: complete\n", what);
	test_consumer_close(rk);
	rd_kafka_destroy(rk);

	TIMING_STOP(&t_all);
}
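
/*
 * The offset_commit_cb registered in do_offset_test() is defined elsewhere
 * in the test. A sketch follows, assuming its role is to track the latest
 * committed offset in the committed_offset global (using the topic and
 * partition globals from this test) so the test can compare it against
 * expected_offset.
 */
static void offset_commit_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err,
                              rd_kafka_topic_partition_list_t *offsets,
                              void *opaque) {
        rd_kafka_topic_partition_t *rktpar;

        /* _NO_OFFSET simply means there was nothing new to commit. */
        if (err == RD_KAFKA_RESP_ERR__NO_OFFSET)
                return;
        if (err)
                TEST_FAIL("offset_commit_cb: commit error: %s",
                          rd_kafka_err2str(err));

        if (!(rktpar = rd_kafka_topic_partition_list_find(offsets,
                                                          topic, partition)))
                return; /* Our partition was not part of this commit */

        /* Remember the last committed offset for the test to verify. */
        committed_offset = rktpar->offset;
}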
Example #5
/**
 * Verify committed() results for a mix of existing and non-existing
 * partitions.
 */
static int nonexist_part (void) {
	const char *topic = test_mk_topic_name(__FUNCTION__, 1);
	rd_kafka_t *rk;
	rd_kafka_topic_partition_list_t *parts;
	rd_kafka_resp_err_t err;
        test_timing_t t_pos;
        const int msgcnt = 1000;
        uint64_t testid;
        int i;
	int it, iterations = 5;

        /* Produce messages */
        testid = test_produce_msgs_easy(topic, 0,
                                        RD_KAFKA_PARTITION_UA, msgcnt);

	for (it = 0 ; it < iterations ; it++) {
		char group_id[32];

		test_str_id_generate(group_id, sizeof(group_id));

		TEST_SAY("Iteration %d/%d, using group.id %s\n", it, iterations,
			 group_id);

		/* Consume messages */
		test_consume_msgs_easy(group_id, topic, testid, -1,
				       msgcnt, NULL);

		/*
		 * Now start a new consumer and query stored offsets (positions)
		 */

		rk = test_create_consumer(group_id, NULL, NULL, NULL, NULL);

		/* Fill in partition set */
		parts = rd_kafka_topic_partition_list_new(2);
		/* existing */
		rd_kafka_topic_partition_list_add(parts, topic, 0);
		/* non-existing */
		rd_kafka_topic_partition_list_add(parts, topic, 123);


		TIMING_START(&t_pos, "COMMITTED");
		err = rd_kafka_committed(rk, parts, tmout_multip(5000));
		TIMING_STOP(&t_pos);
		if (err)
			TEST_FAIL("Failed to acquire committed offsets: %s\n",
				  rd_kafka_err2str(err));

		for (i = 0 ; i < parts->cnt ; i++) {
			TEST_SAY("%s [%"PRId32"] returned offset %"PRId64
				 ": %s\n",
				 parts->elems[i].topic,
				 parts->elems[i].partition,
				 parts->elems[i].offset,
				 rd_kafka_err2str(parts->elems[i].err));
			if (parts->elems[i].partition == 0 &&
			    parts->elems[i].offset <= 0)
				TEST_FAIL("Partition %"PRId32" should have a "
					  "proper offset, not %"PRId64"\n",
					  parts->elems[i].partition,
					  parts->elems[i].offset);
			else if (parts->elems[i].partition == 123 &&
				 parts->elems[i].offset !=
				 RD_KAFKA_OFFSET_INVALID)
				TEST_FAIL("Partition %"PRId32
					  " should have failed\n",
					  parts->elems[i].partition);
		}

		rd_kafka_topic_partition_list_destroy(parts);

		test_consumer_close(rk);

		/* Hangs if bug isn't fixed */
		rd_kafka_destroy(rk);
	}

        return 0;
}
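
/*
 * Hypothetical driver (not in the original source): the auto_commit,
 * auto_store and async parameters of do_offset_test() suggest it is run
 * over a matrix of combinations, along these lines:
 */
static void run_offset_tests (void) {
        /* what                                 auto_commit, auto_store, async */
        do_offset_test("AUTO.COMMIT & AUTO.STORE",          1, 1, 0);
        do_offset_test("AUTO.COMMIT & MANUAL.STORE",        1, 0, 0);
        do_offset_test("MANUAL.COMMIT.ASYNC & AUTO.STORE",  0, 1, 1);
        do_offset_test("MANUAL.COMMIT.SYNC & MANUAL.STORE", 0, 0, 0);
}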
Example #6
int main_0019_list_groups (int argc, char **argv) {
	const char *topic = test_mk_topic_name(__FUNCTION__, 1);
#define _CONS_CNT 2
        char *groups[_CONS_CNT];
	rd_kafka_t *rk, *rk_c[_CONS_CNT];
	rd_kafka_topic_partition_list_t *topics;
	rd_kafka_resp_err_t err;
        test_timing_t t_grps;
	int i;
        int groups_seen;
	rd_kafka_topic_t *rkt;

        /* Handle for group listings */
        rk = test_create_producer();

	/* Produce messages so that topic is auto created */
	rkt = test_create_topic_object(rk, topic, NULL);
	test_produce_msgs(rk, rkt, 0, 0, 0, 10, NULL, 64);
	rd_kafka_topic_destroy(rkt);

        /* Query groups before creation, should not list our groups. */
        groups_seen = list_groups(rk, NULL, 0, "should be none");
        if (groups_seen != 0)
                TEST_FAIL("Saw %d groups when there wasn't "
                          "supposed to be any\n", groups_seen);

	/* Fill in topic subscription set */
	topics = rd_kafka_topic_partition_list_new(1);
	rd_kafka_topic_partition_list_add(topics, topic, -1);

	/* Create consumers and start subscription */
	for (i = 0 ; i < _CONS_CNT ; i++) {
                groups[i] = malloc(32);
                test_str_id_generate(groups[i], 32);
		rk_c[i] = test_create_consumer(groups[i],
					       NULL, NULL, NULL);

		err = rd_kafka_poll_set_consumer(rk_c[i]);
		if (err)
			TEST_FAIL("poll_set_consumer: %s\n",
				  rd_kafka_err2str(err));

		err = rd_kafka_subscribe(rk_c[i], topics);
		if (err)
			TEST_FAIL("subscribe: %s\n", rd_kafka_err2str(err));
	}

        rd_kafka_topic_partition_list_destroy(topics);


        TIMING_START(&t_grps, "WAIT.GROUPS");
        /* Query groups again until both groups are seen. */
        while (1) {
                groups_seen = list_groups(rk, (char **)groups, _CONS_CNT,
                                          "should see my groups");
                if (groups_seen == _CONS_CNT)
                        break;
                rd_sleep(1);
        }
        TIMING_STOP(&t_grps);


	TEST_SAY("Closing remaining consumers\n");
	for (i = 0 ; i < _CONS_CNT ; i++) {
		test_timing_t t_close;
		if (!rk_c[i])
			continue;

		TEST_SAY("Closing %s\n", rd_kafka_name(rk_c[i]));
		TIMING_START(&t_close, "CONSUMER.CLOSE");
		err = rd_kafka_consumer_close(rk_c[i]);
		TIMING_STOP(&t_close);
		if (err)
			TEST_FAIL("consumer_close failed: %s\n",
				  rd_kafka_err2str(err));

		rd_kafka_destroy(rk_c[i]);
		rk_c[i] = NULL;

                free(groups[i]);
	}

        rd_kafka_destroy(rk);

        return 0;
}
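
/*
 * The list_groups() helper used above is defined elsewhere in the test.
 * A simplified sketch follows, assuming it queries the cluster with
 * rd_kafka_list_groups() and returns how many of the given group ids are
 * present (the real helper likely also logs and retries on errors).
 * Assumes <string.h> via the test.h environment.
 */
static int list_groups (rd_kafka_t *rk, char **groups, int group_cnt,
                        const char *desc) {
        rd_kafka_resp_err_t err;
        const struct rd_kafka_group_list *grplist;
        int i, r;
        int seen = 0;

        /* NULL group: list all groups on the cluster. */
        err = rd_kafka_list_groups(rk, NULL, &grplist, tmout_multip(5000));
        if (err)
                TEST_FAIL("rd_kafka_list_groups(%s) failed: %s\n",
                          desc, rd_kafka_err2str(err));

        /* Count how many of our group ids show up in the listing. */
        for (i = 0 ; i < grplist->group_cnt ; i++)
                for (r = 0 ; r < group_cnt ; r++)
                        if (!strcmp(grplist->groups[i].group, groups[r]))
                                seen++;

        rd_kafka_group_list_destroy(grplist);
        return seen;
}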