Example 1: RdKafka::KafkaConsumerImpl::subscription() (C++ wrapper around rd_kafka_subscription())
RdKafka::ErrorCode
RdKafka::KafkaConsumerImpl::subscription (std::vector<std::string> &topics) {
  rd_kafka_topic_partition_list_t *c_topics;
  rd_kafka_resp_err_t err;

  if ((err = rd_kafka_subscription(rk_, &c_topics)))
    return static_cast<RdKafka::ErrorCode>(err);

  topics.resize(c_topics->cnt);
  for (int i = 0 ; i < c_topics->cnt ; i++)
    topics[i] = std::string(c_topics->elems[i].topic);

  rd_kafka_topic_partition_list_destroy(c_topics);

  return RdKafka::ERR_NO_ERROR;
}
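The wrapper above delegates directly to the C-level rd_kafka_subscription() call and then copies the topic names out of the returned list. For comparison, here is a minimal sketch of the same query done against the C API; the helper name print_subscription and the assumption of an already-created rd_kafka_t *rk consumer handle are illustrative only, not part of the examples.

#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void print_subscription (rd_kafka_t *rk) {
        rd_kafka_topic_partition_list_t *sub;
        rd_kafka_resp_err_t err;
        int i;

        /* Ask the consumer for its current subscription; the returned
         * list is allocated by librdkafka and owned by the caller. */
        err = rd_kafka_subscription(rk, &sub);
        if (err) {
                fprintf(stderr, "subscription() failed: %s\n",
                        rd_kafka_err2str(err));
                return;
        }

        for (i = 0; i < sub->cnt; i++)
                printf(" %s\n", sub->elems[i].topic);

        rd_kafka_topic_partition_list_destroy(sub);
}

As in the wrapper, the returned topic-partition list must be released by the caller with rd_kafka_topic_partition_list_destroy().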
Example 2: main_0056_balanced_group_mt() (multi-threaded balanced consumer group test)
int main_0056_balanced_group_mt (int argc, char **argv) {
        const char *topic = test_mk_topic_name(__FUNCTION__, 1);
        rd_kafka_t *rk_p, *rk_c;
        rd_kafka_topic_t *rkt_p;
        int msg_cnt = 1000;
        int msg_base = 0;
        int partition_cnt = 2;
        int partition;
        uint64_t testid;
        rd_kafka_topic_conf_t *default_topic_conf;
        rd_kafka_topic_partition_list_t *sub, *topics;
        rd_kafka_resp_err_t err;
        test_timing_t t_assign, t_close, t_consume;
        int i;

        exp_msg_cnt = msg_cnt * partition_cnt;

        testid = test_id_generate();

        /* Produce messages */
        rk_p = test_create_producer();
        rkt_p = test_create_producer_topic(rk_p, topic, NULL);

        for (partition = 0; partition < partition_cnt; partition++) {
                test_produce_msgs(rk_p, rkt_p, testid, partition,
                                  msg_base + (partition * msg_cnt), msg_cnt,
                                  NULL, 0);
        }

        rd_kafka_topic_destroy(rkt_p);
        rd_kafka_destroy(rk_p);

        if (mtx_init(&lock, mtx_plain) != thrd_success)
                TEST_FAIL("Cannot create mutex.");

        test_conf_init(NULL, &default_topic_conf,
                       (test_session_timeout_ms * 3) / 1000);

        test_topic_conf_set(default_topic_conf, "auto.offset.reset",
                            "smallest");

        /* Fill in topic subscription set */
        topics = rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add(topics, topic, RD_KAFKA_PARTITION_UA);

        /* Create consumers and start subscription */
        rk_c = test_create_consumer(
                topic /*group_id*/, rebalance_cb, NULL,
                default_topic_conf);

        test_consumer_subscribe(rk_c, topic);

        rd_kafka_topic_partition_list_destroy(topics);

        /* Wait for the consumer to get an assignment */
        TIMING_START(&t_assign, "WAIT.ASSIGN");
        get_assignment(rk_c);
        TIMING_STOP(&t_assign);

        TIMING_START(&t_consume, "CONSUME.WAIT");
        for (i = 0; i < MAX_THRD_CNT; ++i) {
                if (tids[i] != 0)
                        thrd_join(tids[i], NULL);
        }
        TIMING_STOP(&t_consume);

        TEST_SAY("Closing remaining consumers\n");
        /* Query subscription */
        err = rd_kafka_subscription(rk_c, &sub);
        TEST_ASSERT(!err, "%s: subscription () failed: %s", rd_kafka_name(rk_c),
                    rd_kafka_err2str(err));
        TEST_SAY("%s: subscription (%d):\n", rd_kafka_name(rk_c), sub->cnt);
        for (i = 0; i < sub->cnt; ++i)
                TEST_SAY(" %s\n", sub->elems[i].topic);
        rd_kafka_topic_partition_list_destroy(sub);

        /* Run an explicit unsubscribe () (async) prior to close ()
         * to trigger race condition issues on termination. */
        TEST_SAY("Unsubscribing instance %s\n", rd_kafka_name(rk_c));
        err = rd_kafka_unsubscribe(rk_c);
        TEST_ASSERT(!err, "%s: unsubscribe failed: %s", rd_kafka_name(rk_c),
                    rd_kafka_err2str(err));

        TEST_SAY("Closing %s\n", rd_kafka_name(rk_c));
        TIMING_START(&t_close, "CONSUMER.CLOSE");
        err = rd_kafka_consumer_close(rk_c);
        TIMING_STOP(&t_close);
        TEST_ASSERT(!err, "consumer_close failed: %s", rd_kafka_err2str(err));

        rd_kafka_destroy(rk_c);
        rk_c = NULL;

        TEST_SAY("%d/%d messages consumed\n", consumed_msg_cnt, exp_msg_cnt);
        TEST_ASSERT(consumed_msg_cnt >= exp_msg_cnt,
                    "Only %d/%d messages were consumed", consumed_msg_cnt,
                    exp_msg_cnt);

        if (consumed_msg_cnt > exp_msg_cnt)
                TEST_SAY("At least %d/%d messages were consumed "
                         "multiple times\n",
                         consumed_msg_cnt - exp_msg_cnt, exp_msg_cnt);

        mtx_destroy(&lock);

        return 0;
}
Example 3: main_0006_symbols() (symbol/linkage check that references the public C API, including rd_kafka_subscription())
int main_0006_symbols (int argc, char **argv) {

        if (argc < 0 /* always false */) {
                rd_kafka_version();
                rd_kafka_version_str();
		rd_kafka_get_debug_contexts();
		rd_kafka_get_err_descs(NULL, NULL);
                rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR);
		rd_kafka_err2name(RD_KAFKA_RESP_ERR_NO_ERROR);
                rd_kafka_errno2err(EINVAL);
                rd_kafka_errno();
		rd_kafka_last_error();
                rd_kafka_conf_new();
                rd_kafka_conf_destroy(NULL);
                rd_kafka_conf_dup(NULL);
                rd_kafka_conf_set(NULL, NULL, NULL, NULL, 0);
                rd_kafka_conf_set_dr_cb(NULL, NULL);
                rd_kafka_conf_set_dr_msg_cb(NULL, NULL);
                rd_kafka_conf_set_error_cb(NULL, NULL);
                rd_kafka_conf_set_stats_cb(NULL, NULL);
                rd_kafka_conf_set_log_cb(NULL, NULL);
                rd_kafka_conf_set_socket_cb(NULL, NULL);
		rd_kafka_conf_set_rebalance_cb(NULL, NULL);
		rd_kafka_conf_set_offset_commit_cb(NULL, NULL);
		rd_kafka_conf_set_throttle_cb(NULL, NULL);
		rd_kafka_conf_set_default_topic_conf(NULL, NULL);
		rd_kafka_conf_get(NULL, NULL, NULL, NULL);
#ifndef _MSC_VER
		rd_kafka_conf_set_open_cb(NULL, NULL);
#endif
		rd_kafka_conf_set_opaque(NULL, NULL);
                rd_kafka_opaque(NULL);
                rd_kafka_conf_dump(NULL, NULL);
                rd_kafka_topic_conf_dump(NULL, NULL);
                rd_kafka_conf_dump_free(NULL, 0);
                rd_kafka_conf_properties_show(NULL);
                rd_kafka_topic_conf_new();
                rd_kafka_topic_conf_dup(NULL);
                rd_kafka_topic_conf_destroy(NULL);
                rd_kafka_topic_conf_set(NULL, NULL, NULL, NULL, 0);
                rd_kafka_topic_conf_set_opaque(NULL, NULL);
		rd_kafka_topic_conf_get(NULL, NULL, NULL, NULL);
                rd_kafka_topic_conf_set_partitioner_cb(NULL, NULL);
                rd_kafka_topic_partition_available(NULL, 0);
		rd_kafka_topic_opaque(NULL);
                rd_kafka_msg_partitioner_random(NULL, NULL, 0, 0, NULL, NULL);
                rd_kafka_msg_partitioner_consistent(NULL, NULL, 0, 0, NULL, NULL);
                rd_kafka_msg_partitioner_consistent_random(NULL, NULL, 0, 0, NULL, NULL);
                rd_kafka_new(0, NULL, NULL, 0);
                rd_kafka_destroy(NULL);
                rd_kafka_name(NULL);
		rd_kafka_memberid(NULL);
                rd_kafka_topic_new(NULL, NULL, NULL);
                rd_kafka_topic_destroy(NULL);
                rd_kafka_topic_name(NULL);
                rd_kafka_message_destroy(NULL);
                rd_kafka_message_errstr(NULL);
		rd_kafka_message_timestamp(NULL, NULL);
                rd_kafka_consume_start(NULL, 0, 0);
                rd_kafka_consume_stop(NULL, 0);
                rd_kafka_consume(NULL, 0, 0);
                rd_kafka_consume_batch(NULL, 0, 0, NULL, 0);
                rd_kafka_consume_callback(NULL, 0, 0, NULL, NULL);
                rd_kafka_offset_store(NULL, 0, 0);
                rd_kafka_produce(NULL, 0, 0, NULL, 0, NULL, 0, NULL);
                rd_kafka_produce_batch(NULL, 0, 0, NULL, 0);
                rd_kafka_poll(NULL, 0);
                rd_kafka_brokers_add(NULL, NULL);
                /* DEPRECATED: rd_kafka_set_logger(NULL, NULL); */
                rd_kafka_set_log_level(NULL, 0);
                rd_kafka_log_print(NULL, 0, NULL, NULL);
#ifndef _MSC_VER
                rd_kafka_log_syslog(NULL, 0, NULL, NULL);
#endif
                rd_kafka_outq_len(NULL);
                rd_kafka_dump(NULL, NULL);
                rd_kafka_thread_cnt();
                rd_kafka_wait_destroyed(0);
                rd_kafka_metadata(NULL, 0, NULL, NULL, 0);
                rd_kafka_metadata_destroy(NULL);
                rd_kafka_queue_destroy(NULL);
                rd_kafka_consume_start_queue(NULL, 0, 0, NULL);
                rd_kafka_consume_queue(NULL, 0);
                rd_kafka_consume_batch_queue(NULL, 0, NULL, 0);
                rd_kafka_consume_callback_queue(NULL, 0, NULL, NULL);
                rd_kafka_seek(NULL, 0, 0, 0);
                rd_kafka_yield(NULL);
                rd_kafka_mem_free(NULL, NULL);
                rd_kafka_list_groups(NULL, NULL, NULL, 0);
                rd_kafka_group_list_destroy(NULL);

		/* KafkaConsumer API */
		rd_kafka_subscribe(NULL, NULL);
		rd_kafka_unsubscribe(NULL);
		rd_kafka_subscription(NULL, NULL);
		rd_kafka_consumer_poll(NULL, 0);
		rd_kafka_consumer_close(NULL);
		rd_kafka_assign(NULL, NULL);
		rd_kafka_assignment(NULL, NULL);
		rd_kafka_commit(NULL, NULL, 0);
		rd_kafka_commit_message(NULL, NULL, 0);
                rd_kafka_committed(NULL, NULL, 0);
		rd_kafka_position(NULL, NULL);

		/* TopicPartition */
		rd_kafka_topic_partition_list_new(0);
		rd_kafka_topic_partition_list_destroy(NULL);
		rd_kafka_topic_partition_list_add(NULL, NULL, 0);
		rd_kafka_topic_partition_list_add_range(NULL, NULL, 0, 0);
		rd_kafka_topic_partition_list_del(NULL, NULL, 0);
		rd_kafka_topic_partition_list_del_by_idx(NULL, 0);
		rd_kafka_topic_partition_list_copy(NULL);
		rd_kafka_topic_partition_list_set_offset(NULL, NULL, 0, 0);
		rd_kafka_topic_partition_list_find(NULL, NULL, 0);
		rd_kafka_query_watermark_offsets(NULL, NULL, 0, NULL, NULL, 0);
		rd_kafka_get_watermark_offsets(NULL, NULL, 0, NULL, NULL);
        }


	return 0;
}
Example 4: main_0018_cgrp_term() (consumer group termination test with two consumers and an explicit rebalance)
int main_0018_cgrp_term (int argc, char **argv) {
	const char *topic = test_mk_topic_name(__FUNCTION__, 1);
#define _CONS_CNT 2
	rd_kafka_t *rk_p, *rk_c[_CONS_CNT];
        rd_kafka_topic_t *rkt_p;
	int msg_cnt = 1000;
	int msg_base = 0;
        int partition_cnt = 2;
        int partition;
	uint64_t testid;
        rd_kafka_topic_conf_t *default_topic_conf;
	rd_kafka_topic_partition_list_t *topics;
	rd_kafka_resp_err_t err;
	test_timing_t t_assign, t_consume;
	char errstr[512];
	int i;

	testid = test_id_generate();

	/* Produce messages */
	rk_p = test_create_producer();
	rkt_p = test_create_producer_topic(rk_p, topic, NULL);

        for (partition = 0 ; partition < partition_cnt ; partition++) {
                test_produce_msgs(rk_p, rkt_p, testid, partition,
                                  msg_base+(partition*msg_cnt), msg_cnt,
				  NULL, 0);
        }

	rd_kafka_topic_destroy(rkt_p);
	rd_kafka_destroy(rk_p);


        test_conf_init(NULL, &default_topic_conf,
		       (test_session_timeout_ms * 3) / 1000);
        if (rd_kafka_topic_conf_set(default_topic_conf, "auto.offset.reset",
				    "smallest", errstr, sizeof(errstr)) !=
	    RD_KAFKA_CONF_OK)
		TEST_FAIL("%s\n", errstr);

	/* Fill in topic subscription set */
	topics = rd_kafka_topic_partition_list_new(1);
	rd_kafka_topic_partition_list_add(topics, topic, -1);

	/* Create consumers and start subscription */
	for (i = 0 ; i < _CONS_CNT ; i++) {
		rk_c[i] = test_create_consumer(topic/*group_id*/,
					       rebalance_cb, NULL,
					       rd_kafka_topic_conf_dup(
						       default_topic_conf),
					       NULL);

		err = rd_kafka_poll_set_consumer(rk_c[i]);
		if (err)
			TEST_FAIL("poll_set_consumer: %s\n",
				  rd_kafka_err2str(err));

		err = rd_kafka_subscribe(rk_c[i], topics);
		if (err)
			TEST_FAIL("subscribe: %s\n", rd_kafka_err2str(err));
	}

        rd_kafka_topic_conf_destroy(default_topic_conf);

        rd_kafka_topic_partition_list_destroy(topics);


	/* Wait for both consumers to get an assignment */
	TIMING_START(&t_assign, "WAIT.ASSIGN");
	while (assign_cnt < _CONS_CNT)
		consume_all(rk_c, _CONS_CNT, msg_cnt,
			    test_session_timeout_ms + 3000);
	TIMING_STOP(&t_assign);

	/* Now close one of the consumers, this will cause a rebalance. */
	TEST_SAY("Closing down 1/%d consumer(s): %s\n", _CONS_CNT,
		 rd_kafka_name(rk_c[0]));
	err = rd_kafka_consumer_close(rk_c[0]);
	if (err)
		TEST_FAIL("consumer_close failed: %s\n", rd_kafka_err2str(err));
	rd_kafka_destroy(rk_c[0]);
	rk_c[0] = NULL;

	/* Let remaining consumers run for a while to take over the now
	 * lost partitions. */

	if (assign_cnt != _CONS_CNT-1)
		TEST_FAIL("assign_cnt %d, should be %d\n",
			  assign_cnt, _CONS_CNT-1);

	TIMING_START(&t_consume, "CONSUME.WAIT");
	consume_all(rk_c, _CONS_CNT, msg_cnt, test_session_timeout_ms + 3000);
	TIMING_STOP(&t_consume);

	TEST_SAY("Closing remaining consumers\n");
	for (i = 0 ; i < _CONS_CNT ; i++) {
		test_timing_t t_close;
                rd_kafka_topic_partition_list_t *sub;
                int j;

		if (!rk_c[i])
			continue;

                /* Query subscription */
                err = rd_kafka_subscription(rk_c[i], &sub);
                if (err)
                        TEST_FAIL("%s: subscription() failed: %s\n",
                                  rd_kafka_name(rk_c[i]),
                                  rd_kafka_err2str(err));
                TEST_SAY("%s: subscription (%d):\n",
                         rd_kafka_name(rk_c[i]), sub->cnt);
                for (j = 0 ; j < sub->cnt ; j++)
                        TEST_SAY(" %s\n", sub->elems[j].topic);
                rd_kafka_topic_partition_list_destroy(sub);

                /* Run an explicit unsubscribe() (async) prior to close()
                 * to trigger race condition issues on termination. */
                TEST_SAY("Unsubscribing instance %s\n", rd_kafka_name(rk_c[i]));
                err = rd_kafka_unsubscribe(rk_c[i]);
                if (err)
                        TEST_FAIL("%s: unsubscribe failed: %s\n",
                                  rd_kafka_name(rk_c[i]),
                                  rd_kafka_err2str(err));

		TEST_SAY("Closing %s\n", rd_kafka_name(rk_c[i]));
		TIMING_START(&t_close, "CONSUMER.CLOSE");
		err = rd_kafka_consumer_close(rk_c[i]);
		TIMING_STOP(&t_close);
		if (err)
			TEST_FAIL("consumer_close failed: %s\n",
				  rd_kafka_err2str(err));

		rd_kafka_destroy(rk_c[i]);
		rk_c[i] = NULL;
	}

	TEST_SAY("%d/%d messages consumed\n", consumed_msg_cnt, msg_cnt);
	if (consumed_msg_cnt < msg_cnt)
		TEST_FAIL("Only %d/%d messages were consumed\n",
			  consumed_msg_cnt, msg_cnt);
	else if (consumed_msg_cnt > msg_cnt)
		TEST_SAY("At least %d/%d messages were consumed "
			 "multiple times\n",
			 consumed_msg_cnt - msg_cnt, msg_cnt);
	
	return 0;
}
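Examples 2 and 4 both drive the same subscribe, query-subscription, unsubscribe and close sequence through librdkafka's test harness. Below is a condensed sketch of that sequence against the plain C API, outside the harness; the broker address "localhost:9092", topic "mytopic" and group id "example-group" are placeholders, and error handling is kept minimal.

#include <stdio.h>
#include <librdkafka/rdkafka.h>

int main (void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        rd_kafka_topic_partition_list_t *topics, *sub;
        rd_kafka_t *rk;
        int i;

        /* Placeholder broker and group id; adjust for a real cluster. */
        rd_kafka_conf_set(conf, "bootstrap.servers", "localhost:9092",
                          errstr, sizeof(errstr));
        rd_kafka_conf_set(conf, "group.id", "example-group",
                          errstr, sizeof(errstr));

        rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
        if (!rk) {
                fprintf(stderr, "rd_kafka_new() failed: %s\n", errstr);
                return 1;
        }
        /* Route all consumer events to the consumer poll queue. */
        rd_kafka_poll_set_consumer(rk);

        /* Subscribe to a single topic. */
        topics = rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add(topics, "mytopic",
                                          RD_KAFKA_PARTITION_UA);
        rd_kafka_subscribe(rk, topics);
        rd_kafka_topic_partition_list_destroy(topics);

        /* Query and print the current subscription, then free the list. */
        if (!rd_kafka_subscription(rk, &sub)) {
                for (i = 0; i < sub->cnt; i++)
                        printf(" %s\n", sub->elems[i].topic);
                rd_kafka_topic_partition_list_destroy(sub);
        }

        /* Explicit unsubscribe, then close and destroy the handle. */
        rd_kafka_unsubscribe(rk);
        rd_kafka_consumer_close(rk);
        rd_kafka_destroy(rk);
        return 0;
}

Freeing the list returned by rd_kafka_subscription() and calling rd_kafka_consumer_close() before rd_kafka_destroy() mirrors the cleanup order used in the tests above.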