Example #1
RdKafka::Consumer *RdKafka::Consumer::create (RdKafka::Conf *conf,
                                              std::string &errstr) {
  char errbuf[512];
  RdKafka::ConfImpl *confimpl = dynamic_cast<RdKafka::ConfImpl *>(conf);
  RdKafka::ConsumerImpl *rkc = new RdKafka::ConsumerImpl();
  rd_kafka_conf_t *rk_conf = NULL;

  if (confimpl) {
    if (!confimpl->rk_conf_) {
      errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
      delete rkc;
      return NULL;
    }

    rkc->set_common_config(confimpl);

    rk_conf = rd_kafka_conf_dup(confimpl->rk_conf_);
  }

  rd_kafka_t *rk;
  if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, rk_conf,
                          errbuf, sizeof(errbuf)))) {
    errstr = errbuf;
    delete rkc;
    return NULL;
  }

  rkc->rk_ = rk;


  return rkc;
}
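
Worth noting in the example above: rd_kafka_new() takes ownership of the configuration it is handed (librdkafka frees the conf on success), which is why the wrapper passes a rd_kafka_conf_dup() of the underlying C conf and leaves the caller's RdKafka::Conf reusable. A minimal C sketch of that ownership rule, illustrative only, with error handling trimmed:

#include <stdio.h>
#include <librdkafka/rdkafka.h>

int main (void) {
        char errstr[512];
        rd_kafka_conf_t *master = rd_kafka_conf_new();

        /* rd_kafka_new() frees the conf on success, so hand it a
         * duplicate when the original must remain reusable. */
        rd_kafka_conf_t *dup = rd_kafka_conf_dup(master);
        rd_kafka_t *rk = rd_kafka_new(RD_KAFKA_CONSUMER, dup,
                                      errstr, sizeof(errstr));
        if (!rk) {
                fprintf(stderr, "rd_kafka_new failed: %s\n", errstr);
                rd_kafka_conf_destroy(dup); /* ownership only moves on success */
        }

        /* 'master' is still valid and can seed further instances. */
        rd_kafka_conf_destroy(master);
        if (rk)
                rd_kafka_destroy(rk);
        return 0;
}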
Example #2
int main (int argc, char **argv) {

        if (argc < 0 /* always false */) {
                rd_kafka_version();
                rd_kafka_version_str();
                rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR);
                rd_kafka_errno2err(EINVAL);
                rd_kafka_conf_new();
                rd_kafka_conf_destroy(NULL);
                rd_kafka_conf_dup(NULL);
                rd_kafka_conf_set(NULL, NULL, NULL, NULL, 0);
                rd_kafka_conf_set_dr_cb(NULL, NULL);
                rd_kafka_conf_set_error_cb(NULL, NULL);
                rd_kafka_conf_set_stats_cb(NULL, NULL);
                rd_kafka_conf_set_opaque(NULL, NULL);
                rd_kafka_conf_dump(NULL, NULL);
                rd_kafka_topic_conf_dump(NULL, NULL);
                rd_kafka_conf_dump_free(NULL, 0);
                rd_kafka_conf_properties_show(NULL);
                rd_kafka_topic_conf_new();
                rd_kafka_topic_conf_dup(NULL);
                rd_kafka_topic_conf_destroy(NULL);
                rd_kafka_topic_conf_set(NULL, NULL, NULL, NULL, 0);
                rd_kafka_topic_conf_set_opaque(NULL, NULL);
                rd_kafka_topic_conf_set_partitioner_cb(NULL, NULL);
                rd_kafka_topic_partition_available(NULL, 0);
                rd_kafka_msg_partitioner_random(NULL, NULL, 0, 0, NULL, NULL);
                rd_kafka_new(0, NULL, NULL, 0);
                rd_kafka_destroy(NULL);
                rd_kafka_name(NULL);
                rd_kafka_topic_new(NULL, NULL, NULL);
                rd_kafka_topic_destroy(NULL);
                rd_kafka_topic_name(NULL);
                rd_kafka_message_destroy(NULL);
                rd_kafka_message_errstr(NULL);
                rd_kafka_consume_start(NULL, 0, 0);
                rd_kafka_consume_stop(NULL, 0);
                rd_kafka_consume(NULL, 0, 0);
                rd_kafka_consume_batch(NULL, 0, 0, NULL, 0);
                rd_kafka_consume_callback(NULL, 0, 0, NULL, NULL);
                rd_kafka_offset_store(NULL, 0, 0);
                rd_kafka_produce(NULL, 0, 0, NULL, 0, NULL, 0, NULL);
                rd_kafka_poll(NULL, 0);
                rd_kafka_brokers_add(NULL, NULL);
                rd_kafka_set_logger(NULL, NULL);
                rd_kafka_set_log_level(NULL, 0);
                rd_kafka_log_print(NULL, 0, NULL, NULL);
                rd_kafka_log_syslog(NULL, 0, NULL, NULL);
                rd_kafka_outq_len(NULL);
                rd_kafka_dump(NULL, NULL);
                rd_kafka_thread_cnt();
                rd_kafka_wait_destroyed(0);
        }


        return 0;
}
Example #3
static int kafka_handle(struct kafka_topic_context *ctx) /* {{{ */
{
    char                         errbuf[1024];
    rd_kafka_conf_t             *conf;
    rd_kafka_topic_conf_t       *topic_conf;

    if (ctx->kafka != NULL && ctx->topic != NULL)
        return(0);

    if (ctx->kafka == NULL) {
        if ((conf = rd_kafka_conf_dup(ctx->kafka_conf)) == NULL) {
            ERROR("write_kafka plugin: cannot duplicate kafka config");
            return(1);
        }

        if ((ctx->kafka = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                                    errbuf, sizeof(errbuf))) == NULL) {
            ERROR("write_kafka plugin: cannot create kafka handle.");
            return 1;
        }

        rd_kafka_conf_destroy(ctx->kafka_conf);
        ctx->kafka_conf = NULL;

        INFO ("write_kafka plugin: created KAFKA handle : %s", rd_kafka_name(ctx->kafka));

#if defined(HAVE_LIBRDKAFKA_LOGGER) && !defined(HAVE_LIBRDKAFKA_LOG_CB)
        rd_kafka_set_logger(ctx->kafka, kafka_log);
#endif
    }

    if (ctx->topic == NULL ) {
        if ((topic_conf = rd_kafka_topic_conf_dup(ctx->conf)) == NULL) {
            ERROR("write_kafka plugin: cannot duplicate kafka topic config");
            return 1;
        }

        if ((ctx->topic = rd_kafka_topic_new(ctx->kafka, ctx->topic_name,
                                            topic_conf)) == NULL) {
            ERROR("write_kafka plugin: cannot create topic : %s\n",
            rd_kafka_err2str(rd_kafka_errno2err(errno)));
            return errno;
        }

        rd_kafka_topic_conf_destroy(ctx->conf);
        ctx->conf = NULL;

        INFO ("write_kafka plugin: handle created for topic : %s", rd_kafka_topic_name(ctx->topic));
    }

    return(0);

} /* }}} int kafka_handle */
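
The handle and topic created here are later used by the plugin's write callback (registered as kafka_write in the topic-configuration code further below). For orientation, a free-standing sketch of the same produce path; the broker address and topic name are placeholders, and rd_kafka_flush() stands in for the plugin's own polling:

#include <stdio.h>
#include <errno.h>
#include <librdkafka/rdkafka.h>

int main (void) {
        char errbuf[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        rd_kafka_t *rk;
        rd_kafka_topic_t *rkt;

        /* Placeholder broker list; adjust for a real deployment. */
        rd_kafka_conf_set(conf, "bootstrap.servers", "localhost:9092",
                          errbuf, sizeof(errbuf));

        if ((rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                               errbuf, sizeof(errbuf))) == NULL) {
                fprintf(stderr, "cannot create kafka handle: %s\n", errbuf);
                return 1;
        }

        if ((rkt = rd_kafka_topic_new(rk, "example-topic", NULL)) == NULL) {
                fprintf(stderr, "cannot create topic: %s\n",
                        rd_kafka_err2str(rd_kafka_errno2err(errno)));
                rd_kafka_destroy(rk);
                return 1;
        }

        rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY,
                         (void *)"hello", 5, NULL, 0, NULL);
        rd_kafka_poll(rk, 0);          /* serve callbacks */
        rd_kafka_flush(rk, 10*1000);   /* wait for delivery */

        rd_kafka_topic_destroy(rkt);
        rd_kafka_destroy(rk);
        return 0;
}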
Example #4
RdKafka::KafkaConsumer *RdKafka::KafkaConsumer::create (RdKafka::Conf *conf,
                                                        std::string &errstr) {
  char errbuf[512];
  RdKafka::ConfImpl *confimpl = dynamic_cast<RdKafka::ConfImpl *>(conf);
  RdKafka::KafkaConsumerImpl *rkc = new RdKafka::KafkaConsumerImpl();
  rd_kafka_conf_t *rk_conf = NULL;
  size_t grlen;

  if (!confimpl || !confimpl->rk_conf_) {
    errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
    delete rkc;
    return NULL;
  }

  if (rd_kafka_conf_get(confimpl->rk_conf_, "group.id",
                        NULL, &grlen) != RD_KAFKA_CONF_OK ||
      grlen <= 1 /* terminating null only */) {
    errstr = "\"group.id\" must be configured";
    delete rkc;
    return NULL;
  }

  rkc->set_common_config(confimpl);

  rk_conf = rd_kafka_conf_dup(confimpl->rk_conf_);

  rd_kafka_t *rk;
  if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, rk_conf,
                          errbuf, sizeof(errbuf)))) {
    errstr = errbuf;
    delete rkc;
    return NULL;
  }

  rkc->rk_ = rk;

  /* Redirect handle queue to cgrp's queue to provide a single queue point */
  rd_kafka_poll_set_consumer(rk);

  return rkc;
}
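
The same construction sequence at the C level, for comparison: group.id must be set before rd_kafka_new(RD_KAFKA_CONSUMER, ...), and rd_kafka_poll_set_consumer() performs the queue redirection noted in the comment above. A sketch with a placeholder group name:

#include <stdio.h>
#include <librdkafka/rdkafka.h>

int main (void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        rd_kafka_t *rk;

        /* The high-level consumer requires group.id, mirroring the
         * check in KafkaConsumer::create() above. */
        if (rd_kafka_conf_set(conf, "group.id", "example-group",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "%s\n", errstr);
                rd_kafka_conf_destroy(conf);
                return 1;
        }

        if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf,
                                errstr, sizeof(errstr)))) {
                fprintf(stderr, "%s\n", errstr);
                rd_kafka_conf_destroy(conf); /* still ours on failure */
                return 1;
        }

        /* Redirect the main queue to the consumer group's queue,
         * giving a single poll point. */
        rd_kafka_poll_set_consumer(rk);

        rd_kafka_consumer_close(rk);
        rd_kafka_destroy(rk);
        return 0;
}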
Example #5
/**
 * @brief Interceptors must not be copied automatically by conf_dup()
 *        unless the interceptors have added on_conf_dup().
 *        This behaviour makes sure an interceptor's instance
 *        is not duplicated without the interceptor's knowledge or
 *        assistance.
 */
static void do_test_conf_copy (const char *topic) {
        rd_kafka_conf_t *conf, *conf2;
        int i;
        rd_kafka_t *rk;

        TEST_SAY(_C_MAG "[ %s ]\n" _C_CLR, __FUNCTION__);

        memset(&msgs[0], 0, sizeof(msgs));

        test_conf_init(&conf, NULL, 0);

        rd_kafka_conf_interceptor_add_on_new(conf, "on_new_conf_copy",
                                             on_new_producer, NULL);

        /* Now copy the configuration to verify that interceptors are
         * NOT copied. */
        conf2 = conf;
        conf = rd_kafka_conf_dup(conf2);
        rd_kafka_conf_destroy(conf2);

        /* Create producer */
        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

        for (i = 0 ; i < msgcnt-1 ; i++)
                do_test_produce(rk, topic, RD_KAFKA_PARTITION_UA, i, 0, 0);

        /* Wait for messages to be delivered */
        test_flush(rk, -1);

        /* Verify acks */
        for (i = 0 ; i < msgcnt ; i++) {
                struct msg_state *msg = &msgs[i];
                msg_verify_ic_cnt(msg, "on_ack", msg->bits[_ON_ACK], 0);
        }

        rd_kafka_destroy(rk);
}
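
Conversely, an interceptor that should survive rd_kafka_conf_dup() registers an on_conf_dup method and re-adds itself to the duplicated conf. A sketch against the public interceptor API; the interceptor name "my_ic" and the empty on_new body are placeholders:

#include <librdkafka/rdkafka.h>

static rd_kafka_resp_err_t my_on_new (rd_kafka_t *rk,
                                      const rd_kafka_conf_t *conf,
                                      void *ic_opaque,
                                      char *errstr, size_t errstr_size) {
        return RD_KAFKA_RESP_ERR_NO_ERROR; /* set up instance state here */
}

static rd_kafka_resp_err_t my_on_conf_dup (rd_kafka_conf_t *new_conf,
                                           const rd_kafka_conf_t *old_conf,
                                           size_t filter_cnt,
                                           const char **filter,
                                           void *ic_opaque) {
        /* Re-register on the copy so the interceptor propagates. */
        rd_kafka_conf_interceptor_add_on_new(new_conf, "my_ic",
                                             my_on_new, ic_opaque);
        return rd_kafka_conf_interceptor_add_on_conf_dup(new_conf, "my_ic",
                                                         my_on_conf_dup,
                                                         ic_opaque);
}

int main (void) {
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        rd_kafka_conf_interceptor_add_on_new(conf, "my_ic", my_on_new, NULL);
        rd_kafka_conf_interceptor_add_on_conf_dup(conf, "my_ic",
                                                  my_on_conf_dup, NULL);

        /* Unlike the plain on_new interceptor in the test above,
         * this copy keeps the interceptor. */
        rd_kafka_conf_t *conf2 = rd_kafka_conf_dup(conf);
        rd_kafka_conf_destroy(conf);
        rd_kafka_conf_destroy(conf2);
        return 0;
}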
Example #6
int main_0004_conf (int argc, char **argv) {
	rd_kafka_t *rk;
	rd_kafka_topic_t *rkt;
	rd_kafka_conf_t *ignore_conf, *conf, *conf2;
	rd_kafka_topic_conf_t *ignore_topic_conf, *tconf, *tconf2;
	char errstr[512];
	const char **arr_orig, **arr_dup;
	size_t cnt_orig, cnt_dup;
	int i;
        const char *topic;
	static const char *gconfs[] = {
		"message.max.bytes", "12345", /* int property */
		"client.id", "my id", /* string property */
		"debug", "topic,metadata", /* S2F property */
		"topic.blacklist", "__.*", /* #778 */
                "auto.offset.reset", "earliest", /* Global->Topic fallthru */
#if WITH_ZLIB
		"compression.codec", "gzip", /* S2I property */
#endif
		NULL
	};
	static const char *tconfs[] = {
		"request.required.acks", "-1", /* int */
		"auto.commit.enable", "false", /* bool */
		"auto.offset.reset", "error",  /* S2I */
		"offset.store.path", "my/path", /* string */
		NULL
	};

	test_conf_init(&ignore_conf, &ignore_topic_conf, 10);
	rd_kafka_conf_destroy(ignore_conf);
	rd_kafka_topic_conf_destroy(ignore_topic_conf);

        topic = test_mk_topic_name("0004", 0);

	/* Set up a global config object */
	conf = rd_kafka_conf_new();

	rd_kafka_conf_set_dr_cb(conf, dr_cb);
	rd_kafka_conf_set_error_cb(conf, error_cb);

	for (i = 0 ; gconfs[i] ; i += 2) {
		if (rd_kafka_conf_set(conf, gconfs[i], gconfs[i+1],
				      errstr, sizeof(errstr)) !=
		    RD_KAFKA_CONF_OK)
			TEST_FAIL("%s\n", errstr);
	}

	/* Set up a topic config object */
	tconf = rd_kafka_topic_conf_new();

	rd_kafka_topic_conf_set_partitioner_cb(tconf, partitioner);
	rd_kafka_topic_conf_set_opaque(tconf, (void *)0xbeef);

	for (i = 0 ; tconfs[i] ; i += 2) {
		if (rd_kafka_topic_conf_set(tconf, tconfs[i], tconfs[i+1],
				      errstr, sizeof(errstr)) !=
		    RD_KAFKA_CONF_OK)
			TEST_FAIL("%s\n", errstr);
	}


	/* Verify global config */
	arr_orig = rd_kafka_conf_dump(conf, &cnt_orig);
	conf_verify(__LINE__, arr_orig, cnt_orig, gconfs);

	/* Verify copied global config */
	conf2 = rd_kafka_conf_dup(conf);
	arr_dup = rd_kafka_conf_dump(conf2, &cnt_dup);
	conf_verify(__LINE__, arr_dup, cnt_dup, gconfs);
	conf_cmp("global", arr_orig, cnt_orig, arr_dup, cnt_dup);
	rd_kafka_conf_dump_free(arr_orig, cnt_orig);
	rd_kafka_conf_dump_free(arr_dup, cnt_dup);

	/* Verify topic config */
	arr_orig = rd_kafka_topic_conf_dump(tconf, &cnt_orig);
	conf_verify(__LINE__, arr_orig, cnt_orig, tconfs);

	/* Verify copied topic config */
	tconf2 = rd_kafka_topic_conf_dup(tconf);
	arr_dup = rd_kafka_topic_conf_dump(tconf2, &cnt_dup);
	conf_verify(__LINE__, arr_dup, cnt_dup, tconfs);
	conf_cmp("topic", arr_orig, cnt_orig, arr_dup, cnt_dup);
	rd_kafka_conf_dump_free(arr_orig, cnt_orig);
	rd_kafka_conf_dump_free(arr_dup, cnt_dup);


	/*
	 * Create kafka instances using original and copied confs
	 */

	/* original */
	rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

	rkt = rd_kafka_topic_new(rk, topic, tconf);
	if (!rkt)
		TEST_FAIL("Failed to create topic: %s\n",
			  rd_strerror(errno));

	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);

	/* copied */
	rk = test_create_handle(RD_KAFKA_PRODUCER, conf2);

	rkt = rd_kafka_topic_new(rk, topic, tconf2);
	if (!rkt)
		TEST_FAIL("Failed to create topic: %s\n",
			  rd_strerror(errno));

	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);


	/* Incremental S2F property.
	 * NOTE: The order of fields returned in get() is hardcoded here;
	 * a standalone sketch of this behaviour follows after this function. */
	{
		static const char *s2fs[] = {
			"generic,broker,queue,cgrp",
			"generic,broker,queue,cgrp",

			"-broker,+queue,topic",
			"generic,topic,queue,cgrp",

			"-all,security,-fetch,+metadata",
			"metadata,security",

			NULL
		};

		TEST_SAY("Incremental S2F tests\n");
		conf = rd_kafka_conf_new();

		for (i = 0 ; s2fs[i] ; i += 2) {
			const char *val;

			TEST_SAY("  Set: %s\n", s2fs[i]);
			test_conf_set(conf, "debug", s2fs[i]);
			val = test_conf_get(conf, "debug");
			TEST_SAY("  Now: %s\n", val);

			if (strcmp(val, s2fs[i+1]))
				TEST_FAIL_LATER("\n"
						"Expected: %s\n"
						"     Got: %s",
						s2fs[i+1], val);
		}
		rd_kafka_conf_destroy(conf);
	}

	/* Canonical int values, aliases, s2i-verified strings */
	{
		static const struct {
			const char *prop;
			const char *val;
			const char *exp;
			int is_global;
		} props[] = {
			{ "request.required.acks", "0", "0" },
			{ "request.required.acks", "-1", "-1" },
			{ "request.required.acks", "1", "1" },
			{ "acks", "3", "3" }, /* alias test */
			{ "request.required.acks", "393", "393" },
			{ "request.required.acks", "bad", NULL },
			{ "request.required.acks", "all", "-1" },
                        { "request.required.acks", "all", "-1", 1/*fallthru*/ },
			{ "acks", "0", "0" }, /* alias test */
#if WITH_SASL
			{ "sasl.mechanisms", "GSSAPI", "GSSAPI", 1 },
			{ "sasl.mechanisms", "PLAIN", "PLAIN", 1  },
			{ "sasl.mechanisms", "GSSAPI,PLAIN", NULL, 1  },
			{ "sasl.mechanisms", "", NULL, 1  },
#endif
			{ NULL }
		};

		TEST_SAY("Canonical tests\n");
		tconf = rd_kafka_topic_conf_new();
		conf = rd_kafka_conf_new();

		for (i = 0 ; props[i].prop ; i++) {
			char dest[64];
			size_t destsz;
			rd_kafka_conf_res_t res;

			TEST_SAY("  Set: %s=%s expect %s (%s)\n",
				 props[i].prop, props[i].val, props[i].exp,
                                 props[i].is_global ? "global":"topic");


			/* Set value */
			if (props[i].is_global)
				res = rd_kafka_conf_set(conf,
						      props[i].prop,
						      props[i].val,
						      errstr, sizeof(errstr));
			else
				res = rd_kafka_topic_conf_set(tconf,
							      props[i].prop,
							      props[i].val,
							      errstr,
							      sizeof(errstr));
			if ((res == RD_KAFKA_CONF_OK ? 1:0) !=
			    (props[i].exp ? 1:0))
				TEST_FAIL("Expected %s, got %s",
					  props[i].exp ? "success" : "failure",
					  (res == RD_KAFKA_CONF_OK ? "OK" :
					   (res == RD_KAFKA_CONF_INVALID ? "INVALID" :
					    "UNKNOWN")));

			if (!props[i].exp)
				continue;

			/* Get value and compare to expected result */
			destsz = sizeof(dest);
			if (props[i].is_global)
				res = rd_kafka_conf_get(conf,
							props[i].prop,
							dest, &destsz);
			else
				res = rd_kafka_topic_conf_get(tconf,
							      props[i].prop,
							      dest, &destsz);
			TEST_ASSERT(res == RD_KAFKA_CONF_OK,
				    ".._conf_get(%s) returned %d",
                                    props[i].prop, res);

			TEST_ASSERT(!strcmp(props[i].exp, dest),
				    "Expected \"%s\", got \"%s\"",
				    props[i].exp, dest);
		}
		rd_kafka_topic_conf_destroy(tconf);
		rd_kafka_conf_destroy(conf);
	}

	return 0;
}
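
A standalone version of the incremental S2F behaviour exercised above: a "-" prefix clears a flag, "+" (or no prefix within an incremental string) sets one, and "-all" resets everything. Sketch only; the printed order follows librdkafka's internal flag order, as the test's NOTE says:

#include <stdio.h>
#include <librdkafka/rdkafka.h>

int main (void) {
        char errstr[512], val[256];
        size_t valsz = sizeof(val);
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        /* Absolute assignment first... */
        rd_kafka_conf_set(conf, "debug", "broker,topic",
                          errstr, sizeof(errstr));
        /* ...then an incremental update: drop broker, add metadata. */
        rd_kafka_conf_set(conf, "debug", "-broker,+metadata",
                          errstr, sizeof(errstr));

        if (rd_kafka_conf_get(conf, "debug", val, &valsz) == RD_KAFKA_CONF_OK)
                printf("debug = %s\n", val); /* expected: "topic,metadata" */

        rd_kafka_conf_destroy(conf);
        return 0;
}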
Example #7
int main_0093_holb_consumer (int argc, char **argv) {
        const char *topic = test_mk_topic_name("0093_holb_consumer", 1);
        int64_t testid;
        const int msgcnt = 100;
        struct _consumer c[_CONSUMER_CNT] = RD_ZERO_INIT;
        rd_kafka_conf_t *conf;

        testid = test_id_generate();

        test_conf_init(&conf, NULL, 60);

        test_create_topic(topic, 1, 1);

        test_produce_msgs_easy(topic, testid, 0, msgcnt);

        test_conf_set(conf, "session.timeout.ms", "6000");
        test_conf_set(conf, "max.poll.interval.ms", "20000");
        test_conf_set(conf, "socket.timeout.ms", "3000");
        test_conf_set(conf, "auto.offset.reset", "earliest");
        /* Trigger other requests often */
        test_conf_set(conf, "topic.metadata.refresh.interval.ms", "500");
        rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);

        rd_kafka_conf_set_opaque(conf, &c[0]);
        c[0].rk = test_create_consumer(topic, NULL,
                                       rd_kafka_conf_dup(conf), NULL);

        rd_kafka_conf_set_opaque(conf, &c[1]);
        c[1].rk = test_create_consumer(topic, NULL, conf, NULL);

        test_consumer_subscribe(c[0].rk, topic);

        /* c0: assign */
        c[0].max_rebalance_cnt = 1;

        /* c1: none, hasn't joined yet */
        c[1].max_rebalance_cnt = 0;

        TEST_SAY("Waiting for c[0] assignment\n");
        while (1) {
                rd_kafka_topic_partition_list_t *parts = NULL;

                do_consume(&c[0], 1/*1s*/);

                if (rd_kafka_assignment(c[0].rk, &parts) !=
                    RD_KAFKA_RESP_ERR_NO_ERROR ||
                    !parts || parts->cnt == 0) {
                        if (parts)
                                rd_kafka_topic_partition_list_destroy(parts);
                        continue;
                }

                TEST_SAY("%s got assignment of %d partition(s)\n",
                         rd_kafka_name(c[0].rk), parts->cnt);
                rd_kafka_topic_partition_list_destroy(parts);
                break;
        }

        TEST_SAY("c[0] got assignment, consuming..\n");
        do_consume(&c[0], 5/*5s*/);

        TEST_SAY("Joining second consumer\n");
        test_consumer_subscribe(c[1].rk, topic);

        /* Just poll second consumer for 10s, the rebalance will not
         * finish until the first consumer polls */
        do_consume(&c[1], 10/*10s*/);

        /* c0: the next call to do_consume/poll will trigger
         *     its rebalance callback, first revoke then assign. */
        c[0].max_rebalance_cnt += 2;
        /* c1: first rebalance */
        c[1].max_rebalance_cnt++;

        TEST_SAY("Expected rebalances: c[0]: %d/%d, c[1]: %d/%d\n",
                 c[0].rebalance_cnt, c[0].max_rebalance_cnt,
                 c[1].rebalance_cnt, c[1].max_rebalance_cnt);

        /* Let rebalances kick in, then consume messages. */
        while (c[0].cnt + c[1].cnt < msgcnt) {
                do_consume(&c[0], 0);
                do_consume(&c[1], 0);
        }

        /* Allow the extra revoke rebalance on close() */
        c[0].max_rebalance_cnt++;
        c[1].max_rebalance_cnt++;

        test_consumer_close(c[0].rk);
        test_consumer_close(c[1].rk);

        rd_kafka_destroy(c[0].rk);
        rd_kafka_destroy(c[1].rk);

        return 0;
}
Example #8
int main (int argc, char **argv) {
    rd_kafka_t *rk;
    rd_kafka_topic_t *rkt;
    rd_kafka_conf_t *ignore_conf, *conf, *conf2;
    rd_kafka_topic_conf_t *ignore_topic_conf, *tconf, *tconf2;
    char errstr[512];
    const char **arr_orig, **arr_dup;
    size_t cnt_orig, cnt_dup;
    int i;
    const char *topic;
    static const char *gconfs[] = {
        "message.max.bytes", "12345", /* int property */
        "client.id", "my id", /* string property */
        "debug", "topic,metadata", /* S2F property */
        "compression.codec", "gzip", /* S2I property */
        NULL
    };
    static const char *tconfs[] = {
        "request.required.acks", "-1", /* int */
        "auto.commit.enable", "false", /* bool */
        "auto.offset.reset", "error",  /* S2I */
        "offset.store.path", "my/path", /* string */
        NULL
    };

    test_conf_init(&ignore_conf, &ignore_topic_conf, 10);
    rd_kafka_conf_destroy(ignore_conf);
    rd_kafka_topic_conf_destroy(ignore_topic_conf);

    topic = test_mk_topic_name("generic", 0);

    /* Set up a global config object */
    conf = rd_kafka_conf_new();

    rd_kafka_conf_set_dr_cb(conf, dr_cb);
    rd_kafka_conf_set_error_cb(conf, error_cb);

    for (i = 0 ; gconfs[i] ; i += 2) {
        if (rd_kafka_conf_set(conf, gconfs[i], gconfs[i+1],
                              errstr, sizeof(errstr)) !=
                RD_KAFKA_CONF_OK)
            TEST_FAIL("%s\n", errstr);
    }

    /* Set up a topic config object */
    tconf = rd_kafka_topic_conf_new();

    rd_kafka_topic_conf_set_partitioner_cb(tconf, partitioner);
    rd_kafka_topic_conf_set_opaque(tconf, (void *)0xbeef);

    for (i = 0 ; tconfs[i] ; i += 2) {
        if (rd_kafka_topic_conf_set(tconf, tconfs[i], tconfs[i+1],
                                    errstr, sizeof(errstr)) !=
                RD_KAFKA_CONF_OK)
            TEST_FAIL("%s\n", errstr);
    }


    /* Verify global config */
    arr_orig = rd_kafka_conf_dump(conf, &cnt_orig);
    conf_verify(__LINE__, arr_orig, cnt_orig, gconfs);

    /* Verify copied global config */
    conf2 = rd_kafka_conf_dup(conf);
    arr_dup = rd_kafka_conf_dump(conf2, &cnt_dup);
    conf_verify(__LINE__, arr_dup, cnt_dup, gconfs);
    conf_cmp("global", arr_orig, cnt_orig, arr_dup, cnt_dup);
    rd_kafka_conf_dump_free(arr_orig, cnt_orig);
    rd_kafka_conf_dump_free(arr_dup, cnt_dup);

    /* Verify topic config */
    arr_orig = rd_kafka_topic_conf_dump(tconf, &cnt_orig);
    conf_verify(__LINE__, arr_orig, cnt_orig, tconfs);

    /* Verify copied topic config */
    tconf2 = rd_kafka_topic_conf_dup(tconf);
    arr_dup = rd_kafka_topic_conf_dump(tconf2, &cnt_dup);
    conf_verify(__LINE__, arr_dup, cnt_dup, tconfs);
    conf_cmp("topic", arr_orig, cnt_orig, arr_dup, cnt_dup);
    rd_kafka_conf_dump_free(arr_orig, cnt_orig);
    rd_kafka_conf_dump_free(arr_dup, cnt_dup);


    /*
     * Create kafka instances using original and copied confs
     */

    /* original */
    rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                      errstr, sizeof(errstr));
    if (!rk)
        TEST_FAIL("Failed to create rdkafka instance: %s\n", errstr);

    rkt = rd_kafka_topic_new(rk, topic, tconf);
    if (!rkt)
        TEST_FAIL("Failed to create topic: %s\n",
                  strerror(errno));

    rd_kafka_topic_destroy(rkt);
    rd_kafka_destroy(rk);

    /* copied */
    rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf2,
                      errstr, sizeof(errstr));
    if (!rk)
        TEST_FAIL("Failed to create rdkafka instance: %s\n", errstr);

    rkt = rd_kafka_topic_new(rk, topic, tconf2);
    if (!rkt)
        TEST_FAIL("Failed to create topic: %s\n",
                  strerror(errno));

    rd_kafka_topic_destroy(rkt);
    rd_kafka_destroy(rk);


    /* Wait for everything to be cleaned up since broker destroys are
     * handled in its own thread. */
    test_wait_exit(2);

    /* If we haven't failed at this point then
     * no threads were leaked */
    return 0;
}
Example #9
static int kafka_config(oconfig_item_t *ci) /* {{{ */
{
    int                          i;
    oconfig_item_t              *child;
    rd_kafka_conf_t             *conf;
    rd_kafka_conf_t             *cloned;
    rd_kafka_conf_res_t          ret;
    char                         errbuf[1024];

    if ((conf = rd_kafka_conf_new()) == NULL) {
        WARNING("cannot allocate kafka configuration.");
        return -1;
    }

    for (i = 0; i < ci->children_num; i++) {
        child = &ci->children[i];

        if (strcasecmp("Topic", child->key) == 0) {
            if ((cloned = rd_kafka_conf_dup(conf)) == NULL) {
                WARNING("write_kafka plugin: cannot allocate memory for kafka config");
                goto errout;
            }
            kafka_config_topic(cloned, child);
            /* kafka_config_topic() duplicates the conf again (see below),
             * so release the clone here to avoid leaking it. */
            rd_kafka_conf_destroy(cloned);
        } else if (strcasecmp(child->key, "Property") == 0) {
            char *key = NULL;
            char *val = NULL;

            if (child->values_num != 2) {
                WARNING("kafka properties need both a key and a value.");
                goto errout;
            }
            if (child->values[0].type != OCONFIG_TYPE_STRING ||
                child->values[1].type != OCONFIG_TYPE_STRING) {
                WARNING("kafka properties need string arguments.");
                goto errout;
            }
            if ((key = strdup(child->values[0].value.string)) == NULL) {
                WARNING("cannot allocate memory for attribute key.");
                goto errout;
            }
            if ((val = strdup(child->values[1].value.string)) == NULL) {
                WARNING("cannot allocate memory for attribute value.");
                sfree(key);
                goto errout;
            }
            ret = rd_kafka_conf_set(conf, key, val, errbuf, sizeof(errbuf));
            sfree(key);
            sfree(val);
            if (ret != RD_KAFKA_CONF_OK) {
                WARNING("cannot set kafka property %s to %s: %s",
                        child->values[0].value.string,
                        child->values[1].value.string, errbuf);
                goto errout;
            }
        } else {
            WARNING("write_kafka plugin: Ignoring unknown "
                    "configuration option \"%s\" at top level.",
                    child->key);
        }
    }
    rd_kafka_conf_destroy(conf);
    return (0);
 errout:
    rd_kafka_conf_destroy(conf);
    return -1;
} /* }}} int kafka_config */
Example #10
int main_0089_max_poll_interval (int argc, char **argv) {
        const char *topic = test_mk_topic_name("0089_max_poll_interval", 1);
        uint64_t testid;
        const int msgcnt = 10;
        rd_kafka_t *c[2];
        rd_kafka_conf_t *conf;
        int64_t ts_next[2] = { 0, 0 };
        int64_t ts_exp_msg[2] = { 0, 0 };
        int cmsgcnt = 0;
        int i;
        int bad = -1;

        testid = test_id_generate();

        test_create_topic(topic, 1, 1);

        test_produce_msgs_easy(topic, testid, -1, msgcnt);

        test_conf_init(&conf, NULL, 60);

        test_conf_set(conf, "session.timeout.ms", "6000");
        test_conf_set(conf, "max.poll.interval.ms", "10000" /*10s*/);
        test_conf_set(conf, "auto.offset.reset", "earliest");

        c[0] = test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL);
        c[1] = test_create_consumer(topic, NULL, conf, NULL);

        test_consumer_subscribe(c[0], topic);
        test_consumer_subscribe(c[1], topic);

        while (1) {
                for (i = 0 ; i < 2 ; i++) {
                        int64_t now;
                        rd_kafka_message_t *rkm;

                        /* Consumer is "processing" */
                        if (ts_next[i] > test_clock())
                                continue;

                        rkm = rd_kafka_consumer_poll(c[i], 100);
                        if (!rkm)
                                continue;

                        if (rkm->err) {
                                TEST_WARN("Consumer %d error: %s: "
                                          "ignoring\n", i,
                                          rd_kafka_message_errstr(rkm));
                                continue;
                        }

                        now = test_clock();

                        cmsgcnt++;

                        TEST_SAY("Consumer %d received message (#%d) "
                                 "at offset %"PRId64"\n",
                                 i, cmsgcnt, rkm->offset);

                        if (ts_exp_msg[i]) {
                                /* This consumer is expecting a message
                                 * after a certain time, namely after the
                                 * rebalance following max.poll.. being
                                 * exceeded in the other consumer */
                                TEST_ASSERT(now > ts_exp_msg[i],
                                            "Consumer %d: did not expect "
                                            "message for at least %dms",
                                            i,
                                            (int)((ts_exp_msg[i] - now)/1000));
                                TEST_ASSERT(now < ts_exp_msg[i] + 10000*1000,
                                            "Consumer %d: expected message "
                                            "within 10s, not after %dms",
                                            i,
                                            (int)((now - ts_exp_msg[i])/1000));
                                TEST_SAY("Consumer %d: received message "
                                         "at offset %"PRId64
                                         " after rebalance\n",
                                         i, rkm->offset);

                                rd_kafka_message_destroy(rkm);
                                goto done;

                        } else if (cmsgcnt == 1) {
                                /* Process this message for 20s */
                                ts_next[i] = now + (20000 * 1000);

                                /* Exp message on other consumer after
                                 * max.poll.interval.ms */
                                ts_exp_msg[i^1] = now + (10000 * 1000);

                                /* This is the bad consumer */
                                bad = i;

                                TEST_SAY("Consumer %d processing message at "
                                         "offset %"PRId64"\n",
                                         i, rkm->offset);
                                rd_kafka_message_destroy(rkm);
                        } else {
                                rd_kafka_message_destroy(rkm);

                                TEST_FAIL("Consumer %d did not expect "
                                          "a message", i);
                        }
                }
        }

 done:

        TEST_ASSERT(bad != -1, "Bad consumer not set");

        /* Wait for error ERR__MAX_POLL_EXCEEDED on the bad consumer. */
        while (1) {
                rd_kafka_message_t *rkm;

                rkm = rd_kafka_consumer_poll(c[bad], 1000);
                TEST_ASSERT(rkm, "Expected consumer result within 1s");

                TEST_ASSERT(rkm->err, "Did not expect message on bad consumer");

                TEST_SAY("Consumer error: %s: %s\n",
                         rd_kafka_err2name(rkm->err),
                         rd_kafka_message_errstr(rkm));

                if (rkm->err == RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED) {
                        rd_kafka_message_destroy(rkm);
                        break;
                }

                rd_kafka_message_destroy(rkm);
        }


        for (i = 0 ; i < 2 ; i++)
                rd_kafka_destroy_flags(c[i],
                                       RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE);
        return 0;
}
Example #11
static void kafka_config_topic(rd_kafka_conf_t *conf, oconfig_item_t *ci) /* {{{ */
{
    int                          status;
    int                          i;
    struct kafka_topic_context  *tctx;
    char                        *key = NULL;
    char                        *val;
    char                         callback_name[DATA_MAX_NAME_LEN];
    char                         errbuf[1024];
    user_data_t                  ud;
    oconfig_item_t              *child;
    rd_kafka_conf_res_t          ret;

    if ((tctx = calloc(1, sizeof (*tctx))) == NULL) {
        ERROR ("write_kafka plugin: calloc failed.");
        return;
    }

    tctx->escape_char = '.';
    tctx->store_rates = 1;
    tctx->format = KAFKA_FORMAT_JSON;
    tctx->key = NULL;

    if ((tctx->kafka_conf = rd_kafka_conf_dup(conf)) == NULL) {
        sfree(tctx);
        ERROR("write_kafka plugin: cannot allocate memory for kafka config");
        return;
    }

#ifdef HAVE_LIBRDKAFKA_LOG_CB
    rd_kafka_conf_set_log_cb(tctx->kafka_conf, kafka_log);
#endif

    if ((tctx->conf = rd_kafka_topic_conf_new()) == NULL) {
        rd_kafka_conf_destroy(tctx->kafka_conf);
        sfree(tctx);
        ERROR ("write_kafka plugin: cannot create topic configuration.");
        return;
    }

    if (ci->values_num != 1) {
        WARNING("kafka topic name needed.");
        goto errout;
    }

    if (ci->values[0].type != OCONFIG_TYPE_STRING) {
        WARNING("kafka topic needs a string argument.");
        goto errout;
    }

    if ((tctx->topic_name = strdup(ci->values[0].value.string)) == NULL) {
        ERROR("write_kafka plugin: cannot copy topic name.");
        goto errout;
    }

    for (i = 0; i < ci->children_num; i++) {
        /*
         * The code here could be simplified but makes room
         * for easy adding of new options later on.
         */
        child = &ci->children[i];
        status = 0;

        if (strcasecmp ("Property", child->key) == 0) {
            if (child->values_num != 2) {
                WARNING("kafka properties need both a key and a value.");
                goto errout;
            }
            if (child->values[0].type != OCONFIG_TYPE_STRING ||
                child->values[1].type != OCONFIG_TYPE_STRING) {
                WARNING("kafka properties needs string arguments.");
                goto errout;
            }
            key = child->values[0].value.string;
            val = child->values[1].value.string;
            ret = rd_kafka_topic_conf_set(tctx->conf, key, val,
                                          errbuf, sizeof(errbuf));
            if (ret != RD_KAFKA_CONF_OK) {
                WARNING("cannot set kafka topic property %s to %s: %s.",
                        key, val, errbuf);
                goto errout;
            }

        } else if (strcasecmp ("Key", child->key) == 0)  {
            cf_util_get_string (child, &tctx->key);
            assert (tctx->key != NULL);
        } else if (strcasecmp ("Format", child->key) == 0) {
            status = cf_util_get_string(child, &key);
            if (status != 0)
                goto errout;

            assert(key != NULL);

            if (strcasecmp(key, "Command") == 0) {
                tctx->format = KAFKA_FORMAT_COMMAND;

            } else if (strcasecmp(key, "Graphite") == 0) {
                tctx->format = KAFKA_FORMAT_GRAPHITE;

            } else if (strcasecmp(key, "Json") == 0) {
                tctx->format = KAFKA_FORMAT_JSON;

            } else {
                WARNING ("write_kafka plugin: Invalid format string: %s",
                         key);
            }

            sfree(key);

        } else if (strcasecmp ("StoreRates", child->key) == 0) {
            status = cf_util_get_boolean (child, &tctx->store_rates);
            (void) cf_util_get_flag (child, &tctx->graphite_flags,
                                     GRAPHITE_STORE_RATES);

        } else if (strcasecmp ("GraphiteSeparateInstances", child->key) == 0) {
            status = cf_util_get_flag (child, &tctx->graphite_flags,
                                       GRAPHITE_SEPARATE_INSTANCES);

        } else if (strcasecmp ("GraphiteAlwaysAppendDS", child->key) == 0) {
            status = cf_util_get_flag (child, &tctx->graphite_flags,
                                       GRAPHITE_ALWAYS_APPEND_DS);

        } else if (strcasecmp ("GraphitePrefix", child->key) == 0) {
            status = cf_util_get_string (child, &tctx->prefix);
        } else if (strcasecmp ("GraphitePostfix", child->key) == 0) {
            status = cf_util_get_string (child, &tctx->postfix);
        } else if (strcasecmp ("GraphiteEscapeChar", child->key) == 0) {
            char *tmp_buff = NULL;
            status = cf_util_get_string (child, &tmp_buff);
            if (status != 0)
                goto errout; /* tmp_buff is still NULL here; nothing to free */
            if (strlen (tmp_buff) > 1)
                WARNING ("write_kafka plugin: The option \"GraphiteEscapeChar\" handles "
                         "only one character. Others will be ignored.");
            tctx->escape_char = tmp_buff[0];
            sfree (tmp_buff);
        } else {
            WARNING ("write_kafka plugin: Invalid directive: %s.", child->key);
        }

        if (status != 0)
            break;
    }

    rd_kafka_topic_conf_set_partitioner_cb(tctx->conf, kafka_partition);
    rd_kafka_topic_conf_set_opaque(tctx->conf, tctx);

    ssnprintf(callback_name, sizeof(callback_name),
              "write_kafka/%s", tctx->topic_name);

    ud.data = tctx;
    ud.free_func = kafka_topic_context_free;

    status = plugin_register_write (callback_name, kafka_write, &ud);
    if (status != 0) {
        WARNING ("write_kafka plugin: plugin_register_write (\"%s\") "
                "failed with status %i.",
                callback_name, status);
        goto errout;
    }

    pthread_mutex_init (&tctx->lock, /* attr = */ NULL);

    return;
 errout:
    if (tctx->topic_name != NULL)
        free(tctx->topic_name);
    if (tctx->conf != NULL)
        rd_kafka_topic_conf_destroy(tctx->conf);
    if (tctx->kafka_conf != NULL)
        rd_kafka_conf_destroy(tctx->kafka_conf);
    sfree(tctx);
} /* }}} int kafka_config_topic */
Example #12
static void do_offset_test (const char *what, int auto_commit, int auto_store,
			    int async) {
	test_timing_t t_all;
	char groupid[64];
	rd_kafka_t *rk;
	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *tconf;
	int cnt = 0;
	const int extra_cnt = 5;
	rd_kafka_resp_err_t err;
	rd_kafka_topic_partition_list_t *parts;
	rd_kafka_topic_partition_t *rktpar;
	int64_t next_offset = -1;

	test_conf_init(&conf, &tconf, 20);
	test_conf_set(conf, "enable.auto.commit", auto_commit ? "true":"false");
	test_conf_set(conf, "enable.auto.offset.store", auto_store ?"true":"false");
	test_conf_set(conf, "auto.commit.interval.ms", "500");
	rd_kafka_conf_set_offset_commit_cb(conf, offset_commit_cb);
	test_topic_conf_set(tconf, "auto.offset.reset", "smallest");
	test_str_id_generate(groupid, sizeof(groupid));
	test_conf_set(conf, "group.id", groupid);
	rd_kafka_conf_set_default_topic_conf(conf, tconf);

	TEST_SAY(_C_MAG "[ do_offset_test: %s with group.id %s ]\n",
		 what, groupid);

	TIMING_START(&t_all, what);

	expected_offset  = 0;
	committed_offset = -1;

	/* MO:
	 *  - Create consumer.
	 *  - Start consuming from beginning
	 *  - Perform store & commits according to settings
	 *  - Stop storing&committing when half of the messages are consumed,
	 *  - but consume 5 more to check against.
	 *  - Query position.
	 *  - Destroy consumer.
	 *  - Create new consumer with same group.id using stored offsets
	 *  - Should consume the expected message.
	 */

	/* Create kafka instance */
	rk = test_create_handle(RD_KAFKA_CONSUMER, rd_kafka_conf_dup(conf));

	rd_kafka_poll_set_consumer(rk);

	test_consumer_subscribe(rk, topic);

	while (cnt - extra_cnt < msgcnt / 2) {
		rd_kafka_message_t *rkm;

		rkm = rd_kafka_consumer_poll(rk, 10*1000);
		if (!rkm)
			continue;

		if (rkm->err == RD_KAFKA_RESP_ERR__TIMED_OUT)
			TEST_FAIL("%s: Timed out waiting for message %d", what,cnt);
		else if (rkm->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
			rd_kafka_message_destroy(rkm);
			continue;
		} else if (rkm->err)
			TEST_FAIL("%s: Consumer error: %s",
				  what, rd_kafka_message_errstr(rkm));

		/* Offset of next message. */
		next_offset = rkm->offset + 1;

		if (cnt < msgcnt / 2) {
			if (!auto_store) {
				err = rd_kafka_offset_store(rkm->rkt,rkm->partition,
							    rkm->offset);
				if (err)
					TEST_FAIL("%s: offset_store failed: %s\n",
						  what, rd_kafka_err2str(err));
			}
			expected_offset = rkm->offset+1;
			if (!auto_commit) {
				test_timing_t t_commit;
				TIMING_START(&t_commit,
					     async?"commit.async":"commit.sync");
				err = rd_kafka_commit_message(rk, rkm, async);
				TIMING_STOP(&t_commit);
				if (err)
					TEST_FAIL("%s: commit failed: %s\n",
						  what, rd_kafka_err2str(err));
			}

		} else if (auto_store && auto_commit)
			expected_offset = rkm->offset+1;

		rd_kafka_message_destroy(rkm);
		cnt++;
	}

	TEST_SAY("%s: done consuming after %d messages, at offset %"PRId64"\n",
		 what, cnt, expected_offset);

	if ((err = rd_kafka_assignment(rk, &parts)))
		TEST_FAIL("%s: failed to get assignment(): %s\n",
			  what, rd_kafka_err2str(err));

	/* Verify position */
	if ((err = rd_kafka_position(rk, parts)))
		TEST_FAIL("%s: failed to get position(): %s\n",
			  what, rd_kafka_err2str(err));
	if (!(rktpar = rd_kafka_topic_partition_list_find(parts,
							  topic, partition)))
		TEST_FAIL("%s: position(): topic lost\n", what);
	if (rktpar->offset != next_offset)
		TEST_FAIL("%s: Expected position() offset %"PRId64", got %"PRId64,
			  what, next_offset, rktpar->offset);
	TEST_SAY("%s: Position is at %"PRId64", good!\n",
		 what, rktpar->offset);

	/* Pause messages while waiting so we can serve callbacks
	 * without having more messages received. */
	if ((err = rd_kafka_pause_partitions(rk, parts)))
		TEST_FAIL("%s: failed to pause partitions: %s\n",
			  what, rd_kafka_err2str(err));
	rd_kafka_topic_partition_list_destroy(parts);

	/* Fire off any enqueued offset_commit_cb */
	test_consumer_poll_no_msgs(what, rk, testid, 0);

	TEST_SAY("%s: committed_offset %"PRId64", expected_offset %"PRId64"\n",
		 what, committed_offset, expected_offset);

	if (!auto_commit && !async) {
		/* Sync commits should be up to date at this point. */
		if (committed_offset != expected_offset)
			TEST_FAIL("%s: Sync commit: committed offset %"PRId64
				  " should be same as expected offset "
				  "%"PRId64,
				  what, committed_offset, expected_offset);
	} else {

		/* Wait for offset commits to catch up */
		while (committed_offset < expected_offset) {
			TEST_SAYL(3, "%s: Wait for committed offset %"PRId64
				  " to reach expected offset %"PRId64"\n",
				  what, committed_offset, expected_offset);
			test_consumer_poll_no_msgs(what, rk, testid, 1000);
		}

	}

	TEST_SAY("%s: phase 1 complete, %d messages consumed, "
		 "next expected offset is %"PRId64"\n",
		 what, cnt, expected_offset);

        /* Issue #827: cause committed() to return prematurely by specifying
         *             a low timeout. The bug (use after free) will only
         *             be caught by valgrind. */
        do {
                parts = rd_kafka_topic_partition_list_new(1);
                rd_kafka_topic_partition_list_add(parts, topic, partition);
                err = rd_kafka_committed(rk, parts, 1);
                rd_kafka_topic_partition_list_destroy(parts);
                TEST_SAY("Issue #827: committed() returned %s\n",
                         rd_kafka_err2str(err));
        } while (err != RD_KAFKA_RESP_ERR__TIMED_OUT);

	/* Query position */
	parts = rd_kafka_topic_partition_list_new(1);
	rd_kafka_topic_partition_list_add(parts, topic, partition);

	err = rd_kafka_committed(rk, parts, tmout_multip(5*1000));
	if (err)
		TEST_FAIL("%s: committed() failed: %s", what, rd_kafka_err2str(err));
	if (!(rktpar = rd_kafka_topic_partition_list_find(parts,
							  topic, partition)))
		TEST_FAIL("%s: committed(): topic lost\n", what);
	if (rktpar->offset != expected_offset)
		TEST_FAIL("%s: Expected committed() offset %"PRId64", got %"PRId64,
			  what, expected_offset, rktpar->offset);
	TEST_SAY("%s: Committed offset is at %"PRId64", good!\n",
		 what, rktpar->offset);

	rd_kafka_topic_partition_list_destroy(parts);
	test_consumer_close(rk);
	rd_kafka_destroy(rk);



	/* Fire up a new consumer and continue from where we left off. */
	TEST_SAY("%s: phase 2: starting new consumer to resume consumption\n",what);
	rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
	rd_kafka_poll_set_consumer(rk);

	test_consumer_subscribe(rk, topic);

	while (cnt < msgcnt) {
		rd_kafka_message_t *rkm;

		rkm = rd_kafka_consumer_poll(rk, 10*1000);
		if (!rkm)
			continue;

		if (rkm->err == RD_KAFKA_RESP_ERR__TIMED_OUT)
			TEST_FAIL("%s: Timed out waiting for message %d", what,cnt);
		else if (rkm->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
			rd_kafka_message_destroy(rkm);
			continue;
		} else if (rkm->err)
			TEST_FAIL("%s: Consumer error: %s",
				  what, rd_kafka_message_errstr(rkm));

		if (rkm->offset != expected_offset)
			TEST_FAIL("%s: Received message offset %"PRId64
				  ", expected %"PRId64" at msgcnt %d/%d\n",
				  what, rkm->offset, expected_offset,
				  cnt, msgcnt);

		rd_kafka_message_destroy(rkm);
		expected_offset++;
		cnt++;
	}


	TEST_SAY("%s: phase 2: complete\n", what);
	test_consumer_close(rk);
	rd_kafka_destroy(rk);

	TIMING_STOP(&t_all);
}
Example #13
int main_0006_symbols (int argc, char **argv) {

        if (argc < 0 /* always false */) {
                rd_kafka_version();
                rd_kafka_version_str();
		rd_kafka_get_debug_contexts();
		rd_kafka_get_err_descs(NULL, NULL);
                rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR);
		rd_kafka_err2name(RD_KAFKA_RESP_ERR_NO_ERROR);
                rd_kafka_errno2err(EINVAL);
                rd_kafka_errno();
		rd_kafka_last_error();
                rd_kafka_conf_new();
                rd_kafka_conf_destroy(NULL);
                rd_kafka_conf_dup(NULL);
                rd_kafka_conf_set(NULL, NULL, NULL, NULL, 0);
                rd_kafka_conf_set_dr_cb(NULL, NULL);
                rd_kafka_conf_set_dr_msg_cb(NULL, NULL);
                rd_kafka_conf_set_error_cb(NULL, NULL);
                rd_kafka_conf_set_stats_cb(NULL, NULL);
                rd_kafka_conf_set_log_cb(NULL, NULL);
                rd_kafka_conf_set_socket_cb(NULL, NULL);
		rd_kafka_conf_set_rebalance_cb(NULL, NULL);
		rd_kafka_conf_set_offset_commit_cb(NULL, NULL);
		rd_kafka_conf_set_throttle_cb(NULL, NULL);
		rd_kafka_conf_set_default_topic_conf(NULL, NULL);
		rd_kafka_conf_get(NULL, NULL, NULL, NULL);
#ifndef _MSC_VER
		rd_kafka_conf_set_open_cb(NULL, NULL);
#endif
		rd_kafka_conf_set_opaque(NULL, NULL);
                rd_kafka_opaque(NULL);
                rd_kafka_conf_dump(NULL, NULL);
                rd_kafka_topic_conf_dump(NULL, NULL);
                rd_kafka_conf_dump_free(NULL, 0);
                rd_kafka_conf_properties_show(NULL);
                rd_kafka_topic_conf_new();
                rd_kafka_topic_conf_dup(NULL);
                rd_kafka_topic_conf_destroy(NULL);
                rd_kafka_topic_conf_set(NULL, NULL, NULL, NULL, 0);
                rd_kafka_topic_conf_set_opaque(NULL, NULL);
		rd_kafka_topic_conf_get(NULL, NULL, NULL, NULL);
                rd_kafka_topic_conf_set_partitioner_cb(NULL, NULL);
                rd_kafka_topic_partition_available(NULL, 0);
		rd_kafka_topic_opaque(NULL);
                rd_kafka_msg_partitioner_random(NULL, NULL, 0, 0, NULL, NULL);
                rd_kafka_msg_partitioner_consistent(NULL, NULL, 0, 0, NULL, NULL);
                rd_kafka_msg_partitioner_consistent_random(NULL, NULL, 0, 0, NULL, NULL);
                rd_kafka_new(0, NULL, NULL, 0);
                rd_kafka_destroy(NULL);
                rd_kafka_name(NULL);
		rd_kafka_memberid(NULL);
                rd_kafka_topic_new(NULL, NULL, NULL);
                rd_kafka_topic_destroy(NULL);
                rd_kafka_topic_name(NULL);
                rd_kafka_message_destroy(NULL);
                rd_kafka_message_errstr(NULL);
		rd_kafka_message_timestamp(NULL, NULL);
                rd_kafka_consume_start(NULL, 0, 0);
                rd_kafka_consume_stop(NULL, 0);
                rd_kafka_consume(NULL, 0, 0);
                rd_kafka_consume_batch(NULL, 0, 0, NULL, 0);
                rd_kafka_consume_callback(NULL, 0, 0, NULL, NULL);
                rd_kafka_offset_store(NULL, 0, 0);
                rd_kafka_produce(NULL, 0, 0, NULL, 0, NULL, 0, NULL);
                rd_kafka_produce_batch(NULL, 0, 0, NULL, 0);
                rd_kafka_poll(NULL, 0);
                rd_kafka_brokers_add(NULL, NULL);
                /* DEPRECATED: rd_kafka_set_logger(NULL, NULL); */
                rd_kafka_set_log_level(NULL, 0);
                rd_kafka_log_print(NULL, 0, NULL, NULL);
#ifndef _MSC_VER
                rd_kafka_log_syslog(NULL, 0, NULL, NULL);
#endif
                rd_kafka_outq_len(NULL);
                rd_kafka_dump(NULL, NULL);
                rd_kafka_thread_cnt();
                rd_kafka_wait_destroyed(0);
                rd_kafka_metadata(NULL, 0, NULL, NULL, 0);
                rd_kafka_metadata_destroy(NULL);
                rd_kafka_queue_destroy(NULL);
                rd_kafka_consume_start_queue(NULL, 0, 0, NULL);
                rd_kafka_consume_queue(NULL, 0);
                rd_kafka_consume_batch_queue(NULL, 0, NULL, 0);
                rd_kafka_consume_callback_queue(NULL, 0, NULL, NULL);
                rd_kafka_seek(NULL, 0, 0, 0);
                rd_kafka_yield(NULL);
                rd_kafka_mem_free(NULL, NULL);
                rd_kafka_list_groups(NULL, NULL, NULL, 0);
                rd_kafka_group_list_destroy(NULL);

		/* KafkaConsumer API */
		rd_kafka_subscribe(NULL, NULL);
		rd_kafka_unsubscribe(NULL);
		rd_kafka_subscription(NULL, NULL);
		rd_kafka_consumer_poll(NULL, 0);
		rd_kafka_consumer_close(NULL);
		rd_kafka_assign(NULL, NULL);
		rd_kafka_assignment(NULL, NULL);
		rd_kafka_commit(NULL, NULL, 0);
		rd_kafka_commit_message(NULL, NULL, 0);
                rd_kafka_committed(NULL, NULL, 0);
		rd_kafka_position(NULL, NULL);

		/* TopicPartition */
		rd_kafka_topic_partition_list_new(0);
		rd_kafka_topic_partition_list_destroy(NULL);
		rd_kafka_topic_partition_list_add(NULL, NULL, 0);
		rd_kafka_topic_partition_list_add_range(NULL, NULL, 0, 0);
		rd_kafka_topic_partition_list_del(NULL, NULL, 0);
		rd_kafka_topic_partition_list_del_by_idx(NULL, 0);
		rd_kafka_topic_partition_list_copy(NULL);
		rd_kafka_topic_partition_list_set_offset(NULL, NULL, 0, 0);
		rd_kafka_topic_partition_list_find(NULL, NULL, 0);
		rd_kafka_query_watermark_offsets(NULL, NULL, 0, NULL, NULL, 0);
		rd_kafka_get_watermark_offsets(NULL, NULL, 0, NULL, NULL);
        }


	return 0;
}