Code Example #1
File: kafka_common.c  Project: jrossi/pmacct-1
/* Functions */
void p_kafka_init_host(struct p_kafka_host *kafka_host, char *config_file)
{
  if (kafka_host) {
    memset(kafka_host, 0, sizeof(struct p_kafka_host));
    P_broker_timers_set_retry_interval(&kafka_host->btimers, PM_KAFKA_DEFAULT_RETRY);
    p_kafka_set_config_file(kafka_host, config_file);

    kafka_host->cfg = rd_kafka_conf_new();
    if (kafka_host->cfg) {
      rd_kafka_conf_set_log_cb(kafka_host->cfg, p_kafka_logger);
      rd_kafka_conf_set_error_cb(kafka_host->cfg, p_kafka_msg_error);
      rd_kafka_conf_set_dr_cb(kafka_host->cfg, p_kafka_msg_delivered);
      rd_kafka_conf_set_opaque(kafka_host->cfg, kafka_host);
      p_kafka_apply_global_config(kafka_host);

      if (config.debug) {
	const char **res;
	size_t res_len, idx;

	res = rd_kafka_conf_dump(kafka_host->cfg, &res_len);
	for (idx = 0; idx < res_len; idx += 2)
	  Log(LOG_DEBUG, "DEBUG ( %s/%s ): librdkafka global config: %s = %s\n", config.name, config.type, res[idx], res[idx + 1]);

	rd_kafka_conf_dump_free(res, res_len);
      }
    }
  }
}
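
A note on how the opaque registered above is consumed: the delivery-report callback receives it back as its "opaque" argument. The sketch below is illustrative only (it is not pmacct's actual p_kafka_msg_delivered), assuming the standard rd_kafka_conf_set_dr_cb() callback signature.

/* Illustrative sketch: the dr_cb gets the pointer passed to
 * rd_kafka_conf_set_opaque() back as 'opaque'. */
static void p_kafka_msg_delivered(rd_kafka_t *rk, void *payload, size_t len,
                                  rd_kafka_resp_err_t err, void *opaque,
                                  void *msg_opaque)
{
  struct p_kafka_host *kafka_host = (struct p_kafka_host *) opaque;

  if (err)
    Log(LOG_ERR, "ERROR ( %s/%s ): message delivery failed: %s\n",
        config.name, config.type, rd_kafka_err2str(err));

  /* kafka_host is now available for per-host bookkeeping */
  (void) kafka_host;
}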
Code Example #2
static PyObject *
Producer_start(RdkHandle *self, PyObject *args, PyObject *kwds)
{
    if (RdkHandle_excl_lock(self)) return NULL;

    char *keywords[] = {"brokers", "topic_name", "delivery_put", NULL};
    PyObject *brokers = NULL;
    PyObject *topic_name = NULL;
    PyObject *delivery_put = NULL;
    if (! PyArg_ParseTupleAndKeywords(
        args, kwds, "SSO", keywords, &brokers, &topic_name, &delivery_put)) {
        goto failed;
    }

    /* Configure delivery-reporting */
    if (! self->rdk_conf) {
        set_pykafka_error("RdKafkaException",
                          "Please run configure() before starting.");
        goto failed;
    }
    rd_kafka_conf_set_dr_msg_cb(self->rdk_conf,
                                Producer_delivery_report_callback);
    Py_INCREF(delivery_put);
    rd_kafka_conf_set_opaque(self->rdk_conf, delivery_put);

    if (RdkHandle_unlock(self)) return NULL;
    return RdkHandle_start(
            self,
            RD_KAFKA_PRODUCER,
            PyBytes_AS_STRING(brokers),
            PyBytes_AS_STRING(topic_name));
failed:
    RdkHandle_unlock(self);
    return NULL;
}
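
The Python callable delivery_put is INCREF'd and stored as the conf opaque, so the delivery-report callback can hand results back to Python. A rough sketch of what Producer_delivery_report_callback might look like under that assumption (this is not pykafka's actual implementation, and the report it builds is simplified):

/* Rough sketch: 'opaque' is the PyObject * that Producer_start() stored
 * with rd_kafka_conf_set_opaque(). */
static void
Producer_delivery_report_callback(rd_kafka_t *rk,
                                  const rd_kafka_message_t *msg,
                                  void *opaque)
{
    PyObject *delivery_put = (PyObject *)opaque;
    PyGILState_STATE gstate = PyGILState_Ensure();

    /* The real callback would build a richer report from msg->err and
     * msg->_private; here we only forward the error code. */
    PyObject *res = PyObject_CallFunction(delivery_put, "l", (long)msg->err);
    Py_XDECREF(res);
    if (PyErr_Occurred()) PyErr_Clear();

    PyGILState_Release(gstate);
}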
Code Example #3
File: 0006-symbols.c  Project: blblack/librdkafka
int main (int argc, char **argv) {

        if (argc < 0 /* always false */) {
                rd_kafka_version();
                rd_kafka_version_str();
                rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR);
                rd_kafka_errno2err(EINVAL);
                rd_kafka_conf_new();
                rd_kafka_conf_destroy(NULL);
                rd_kafka_conf_dup(NULL);
                rd_kafka_conf_set(NULL, NULL, NULL, NULL, 0);
                rd_kafka_conf_set_dr_cb(NULL, NULL);
                rd_kafka_conf_set_error_cb(NULL, NULL);
                rd_kafka_conf_set_stats_cb(NULL, NULL);
                rd_kafka_conf_set_opaque(NULL, NULL);
                rd_kafka_conf_dump(NULL, NULL);
                rd_kafka_topic_conf_dump(NULL, NULL);
                rd_kafka_conf_dump_free(NULL, 0);
                rd_kafka_conf_properties_show(NULL);
                rd_kafka_topic_conf_new();
                rd_kafka_topic_conf_dup(NULL);
                rd_kafka_topic_conf_destroy(NULL);
                rd_kafka_topic_conf_set(NULL, NULL, NULL, NULL, 0);
                rd_kafka_topic_conf_set_opaque(NULL, NULL);
                rd_kafka_topic_conf_set_partitioner_cb(NULL, NULL);
                rd_kafka_topic_partition_available(NULL, 0);
                rd_kafka_msg_partitioner_random(NULL, NULL, 0, 0, NULL, NULL);
                rd_kafka_new(0, NULL, NULL, 0);
                rd_kafka_destroy(NULL);
                rd_kafka_name(NULL);
                rd_kafka_topic_new(NULL, NULL, NULL);
                rd_kafka_topic_destroy(NULL);
                rd_kafka_topic_name(NULL);
                rd_kafka_message_destroy(NULL);
                rd_kafka_message_errstr(NULL);
                rd_kafka_consume_start(NULL, 0, 0);
                rd_kafka_consume_stop(NULL, 0);
                rd_kafka_consume(NULL, 0, 0);
                rd_kafka_consume_batch(NULL, 0, 0, NULL, 0);
                rd_kafka_consume_callback(NULL, 0, 0, NULL, NULL);
                rd_kafka_offset_store(NULL, 0, 0);
                rd_kafka_produce(NULL, 0, 0, NULL, 0, NULL, 0, NULL);
                rd_kafka_poll(NULL, 0);
                rd_kafka_brokers_add(NULL, NULL);
                rd_kafka_set_logger(NULL, NULL);
                rd_kafka_set_log_level(NULL, 0);
                rd_kafka_log_print(NULL, 0, NULL, NULL);
                rd_kafka_log_syslog(NULL, 0, NULL, NULL);
                rd_kafka_outq_len(NULL);
                rd_kafka_dump(NULL, NULL);
                rd_kafka_thread_cnt();
                rd_kafka_wait_destroyed(0);
        }


	return 0;
}
Code Example #4
File: hermann_lib.c  Project: pocman/hermann
/**
 *  producer_init_kafka
 *
 *  Initialize the producer instance, setting up the Kafka topic and context.
 *
 *  @param  self    VALUE Instance of the Producer Ruby object
 *  @param  config  HermannInstanceConfig*  the instance configuration associated with this producer.
 */
void producer_init_kafka(VALUE self, HermannInstanceConfig* config) {

	TRACER("initing (%p)\n", config);

	config->quiet = !isatty(STDIN_FILENO);

	/* Kafka configuration */
	config->conf = rd_kafka_conf_new();


	/* Register the instance config as the opaque pointer for the error
	 * and logging callbacks */
	rd_kafka_conf_set_opaque(config->conf, (void*)config);
	rd_kafka_conf_set_error_cb(config->conf, producer_error_callback);

	/* Topic configuration */
	config->topic_conf = rd_kafka_topic_conf_new();

	/* Set up a message delivery report callback.
	 * It will be called once for each message, either on successful
	 * delivery to broker, or upon failure to deliver to broker. */
	rd_kafka_conf_set_dr_msg_cb(config->conf, msg_delivered);

	/* Create Kafka handle */
	if (!(config->rk = rd_kafka_new(RD_KAFKA_PRODUCER,
									config->conf,
									config->errstr,
									sizeof(config->errstr)))) {
		/* TODO: Use proper logger */
		fprintf(stderr,
		"%% Failed to create new producer: %s\n", config->errstr);
		rb_raise(rb_eRuntimeError, "%% Failed to create new producer: %s\n", config->errstr);
	}

	/* Set logger */
	rd_kafka_set_logger(config->rk, logger);
	rd_kafka_set_log_level(config->rk, LOG_DEBUG);

	if (rd_kafka_brokers_add(config->rk, config->brokers) == 0) {
		/* TODO: Use proper logger */
		fprintf(stderr, "%% No valid brokers specified\n");
		rb_raise(rb_eRuntimeError, "No valid brokers specified");
		return;
	}

	/* Set the partitioner callback before creating the topic, since
	 * rd_kafka_topic_new() takes ownership of topic_conf */
	rd_kafka_topic_conf_set_partitioner_cb(config->topic_conf, producer_partitioner_callback);

	/* Create topic */
	config->rkt = rd_kafka_topic_new(config->rk, config->topic, config->topic_conf);

	/* We're now initialized */
	config->isInitialized = 1;

	TRACER("completed kafka init\n");
}
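
Because the HermannInstanceConfig is registered as the opaque, the error callback can reach the owning instance. A hypothetical sketch of producer_error_callback consistent with that setup (not hermann's actual code; the isErrored-style field is assumed):

/* Hypothetical sketch: 'opaque' is the HermannInstanceConfig registered
 * via rd_kafka_conf_set_opaque() in producer_init_kafka(). */
static void producer_error_callback(rd_kafka_t *rk, int err,
                                    const char *reason, void *opaque) {
	HermannInstanceConfig *config = (HermannInstanceConfig *)opaque;

	TRACER("producer error %d (%s) on %s\n", err, reason, rd_kafka_name(rk));

	/* Assumed field for illustration: flag the instance so Ruby-side
	 * calls can surface the failure. */
	config->isErrored = 1;
}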
Code Example #5
File: 0025-timers.c  Project: sfdazsdf/librdkafka
/**
 * Enable statistics with a set interval, make sure the stats callbacks are
 * called within reasonable intervals.
 */
static void do_test_stats_timer (void) {
    rd_kafka_t *rk;
    rd_kafka_conf_t *conf;
    const int exp_calls = 10;
    char errstr[512];
    struct state state;
    test_timing_t t_new;

    memset(&state, 0, sizeof(state));

    state.interval = 600*1000;

    test_conf_init(&conf, NULL, 200);

    test_conf_set(conf, "statistics.interval.ms", "600");
    rd_kafka_conf_set_stats_cb(conf, stats_cb);
    rd_kafka_conf_set_opaque(conf, &state);


    TIMING_START(&t_new, "rd_kafka_new()");
    rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
    TIMING_STOP(&t_new);
    if (!rk)
        TEST_FAIL("Failed to create instance: %s\n", errstr);

    TEST_SAY("Starting wait loop for %d expected stats_cb calls "
             "with an interval of %dms\n",
             exp_calls, state.interval/1000);


    while (state.calls < exp_calls) {
        test_timing_t t_poll;
        TIMING_START(&t_poll, "rd_kafka_poll()");
        rd_kafka_poll(rk, 100);
        TIMING_STOP(&t_poll);

        if (TIMING_DURATION(&t_poll) > 150*1000)
            TEST_WARN("rd_kafka_poll(rk,100) "
                      "took more than 50%% extra\n");
    }

    rd_kafka_destroy(rk);

    if (state.calls > exp_calls)
        TEST_SAY("Got more calls than expected: %d > %d\n",
                 state.calls, exp_calls);

    if (state.fails)
        TEST_FAIL("%d/%d intervals failed\n", state.fails, state.calls);
    else
        TEST_SAY("All %d intervals okay\n", state.calls);
}
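
The test attaches its local struct state as the conf opaque, which is how stats_cb (defined elsewhere in the test file) counts invocations without globals. A condensed sketch of such a callback (the real one also validates the elapsed interval):

/* Condensed sketch: 'opaque' is the &state passed to
 * rd_kafka_conf_set_opaque(); returning 0 lets librdkafka free 'json'. */
static int stats_cb (rd_kafka_t *rk, char *json, size_t json_len,
                     void *opaque) {
        struct state *state = opaque;

        state->calls++;
        /* The real callback compares the time since the previous call
         * against state->interval and increments state->fails on drift. */
        return 0;
}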
Code Example #6
File: kafka_common.c  Project: Rosiak/pmacct
/* Functions */
void p_kafka_init_host(struct p_kafka_host *kafka_host)
{
  if (kafka_host) {
    memset(kafka_host, 0, sizeof(struct p_kafka_host));
    P_broker_timers_set_retry_interval(&kafka_host->btimers, PM_KAFKA_DEFAULT_RETRY);

    kafka_host->cfg = rd_kafka_conf_new();
    if (kafka_host->cfg) {
      rd_kafka_conf_set_log_cb(kafka_host->cfg, p_kafka_logger);
      rd_kafka_conf_set_error_cb(kafka_host->cfg, p_kafka_msg_error);
      rd_kafka_conf_set_dr_cb(kafka_host->cfg, p_kafka_msg_delivered);
      rd_kafka_conf_set_opaque(kafka_host->cfg, kafka_host);
    }
  }
}
Code Example #7
File: HandleImpl.cpp  Project: edenhill/librdkafka
void RdKafka::HandleImpl::set_common_config (RdKafka::ConfImpl *confimpl) {

    rd_kafka_conf_set_opaque(confimpl->rk_conf_, this);

    if (confimpl->event_cb_) {
        rd_kafka_conf_set_log_cb(confimpl->rk_conf_,
                                 RdKafka::log_cb_trampoline);
        rd_kafka_conf_set_error_cb(confimpl->rk_conf_,
                                   RdKafka::error_cb_trampoline);
        rd_kafka_conf_set_throttle_cb(confimpl->rk_conf_,
                                      RdKafka::throttle_cb_trampoline);
        rd_kafka_conf_set_stats_cb(confimpl->rk_conf_,
                                   RdKafka::stats_cb_trampoline);
        event_cb_ = confimpl->event_cb_;
    }

    if (confimpl->socket_cb_) {
        rd_kafka_conf_set_socket_cb(confimpl->rk_conf_,
                                    RdKafka::socket_cb_trampoline);
        socket_cb_ = confimpl->socket_cb_;
    }

    if (confimpl->open_cb_) {
#ifndef _MSC_VER
        rd_kafka_conf_set_open_cb(confimpl->rk_conf_, RdKafka::open_cb_trampoline);
        open_cb_ = confimpl->open_cb_;
#endif
    }

    if (confimpl->rebalance_cb_) {
        rd_kafka_conf_set_rebalance_cb(confimpl->rk_conf_,
                                       RdKafka::rebalance_cb_trampoline);
        rebalance_cb_ = confimpl->rebalance_cb_;
    }

    if (confimpl->offset_commit_cb_) {
        rd_kafka_conf_set_offset_commit_cb(confimpl->rk_conf_,
                                           RdKafka::offset_commit_cb_trampoline);
        offset_commit_cb_ = confimpl->offset_commit_cb_;
    }

    if (confimpl->consume_cb_) {
        rd_kafka_conf_set_consume_cb(confimpl->rk_conf_,
                                     RdKafka::consume_cb_trampoline);
        consume_cb_ = confimpl->consume_cb_;
    }

}
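
Each trampoline above recovers the HandleImpl through the configured opaque and dispatches to the user's C++ callback. For callbacks that carry no opaque argument of their own, such as the log callback, the opaque can instead be fetched from the handle with rd_kafka_opaque(). A small C sketch of that pattern, using a hypothetical application context:

/* Illustrative sketch: fetching the opaque set with rd_kafka_conf_set_opaque()
 * from inside a log callback, which has no opaque parameter of its own. */
struct my_ctx { int log_lines; };   /* hypothetical application context */

static void my_log_cb (const rd_kafka_t *rk, int level,
                       const char *fac, const char *buf) {
        struct my_ctx *ctx = rd_kafka_opaque(rk);

        if (ctx)
                ctx->log_lines++;

        fprintf(stderr, "%%%d %s %s: %s\n", level, rd_kafka_name(rk), fac, buf);
}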
Code Example #8
File: test-1.c  Project: antoniocorreia/cprojects
rd_kafka_t *test_create_consumer (const char *group_id,
				  void (*rebalance_cb) (
					  rd_kafka_t *rk,
					  rd_kafka_resp_err_t err,
					  rd_kafka_topic_partition_list_t
					  *partitions,
					  void *opaque),
                                  rd_kafka_topic_conf_t *default_topic_conf,
				  void *opaque) {
	rd_kafka_t *rk;
	rd_kafka_conf_t *conf;
	char errstr[512];
	char tmp[64];

	test_conf_init(&conf, NULL, 20);

        if (group_id) {
                if (rd_kafka_conf_set(conf, "group.id", group_id,
                                      errstr, sizeof(errstr)) !=
                    RD_KAFKA_CONF_OK)
                        TEST_FAIL("Conf failed: %s\n", errstr);
        }

	rd_snprintf(tmp, sizeof(tmp), "%d", test_session_timeout_ms);
	if (rd_kafka_conf_set(conf, "session.timeout.ms", tmp,
			      errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
		TEST_FAIL("Conf failed: %s\n", errstr);

	rd_kafka_conf_set_opaque(conf, opaque);

	if (rebalance_cb)
		rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);

        if (default_topic_conf)
                rd_kafka_conf_set_default_topic_conf(conf, default_topic_conf);

	/* Create kafka instance */
	rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
	if (!rk)
		TEST_FAIL("Failed to create rdkafka instance: %s\n", errstr);

	TEST_SAY("Created    kafka instance %s\n", rd_kafka_name(rk));

	return rk;
}
Code Example #9
File: 0093-holb.c  Project: Whissi/librdkafka
int main_0093_holb_consumer (int argc, char **argv) {
        const char *topic = test_mk_topic_name("0093_holb_consumer", 1);
        int64_t testid;
        const int msgcnt = 100;
        struct _consumer c[_CONSUMER_CNT] = RD_ZERO_INIT;
        rd_kafka_conf_t *conf;

        testid = test_id_generate();

        test_conf_init(&conf, NULL, 60);

        test_create_topic(topic, 1, 1);

        test_produce_msgs_easy(topic, testid, 0, msgcnt);

        test_conf_set(conf, "session.timeout.ms", "6000");
        test_conf_set(conf, "max.poll.interval.ms", "20000");
        test_conf_set(conf, "socket.timeout.ms", "3000");
        test_conf_set(conf, "auto.offset.reset", "earliest");
        /* Trigger other requests often */
        test_conf_set(conf, "topic.metadata.refresh.interval.ms", "500");
        rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);

        rd_kafka_conf_set_opaque(conf, &c[0]);
        c[0].rk = test_create_consumer(topic, NULL,
                                       rd_kafka_conf_dup(conf), NULL);

        rd_kafka_conf_set_opaque(conf, &c[1]);
        c[1].rk = test_create_consumer(topic, NULL, conf, NULL);

        test_consumer_subscribe(c[0].rk, topic);

        /* c0: assign */
        c[0].max_rebalance_cnt = 1;

        /* c1: none, hasn't joined yet */
        c[1].max_rebalance_cnt = 0;

        TEST_SAY("Waiting for c[0] assignment\n");
        while (1) {
                rd_kafka_topic_partition_list_t *parts = NULL;

                do_consume(&c[0], 1/*1s*/);

                if (rd_kafka_assignment(c[0].rk, &parts) !=
                    RD_KAFKA_RESP_ERR_NO_ERROR ||
                    !parts || parts->cnt == 0) {
                        if (parts)
                                rd_kafka_topic_partition_list_destroy(parts);
                        continue;
                }

                TEST_SAY("%s got assignment of %d partition(s)\n",
                         rd_kafka_name(c[0].rk), parts->cnt);
                rd_kafka_topic_partition_list_destroy(parts);
                break;
        }

        TEST_SAY("c[0] got assignment, consuming..\n");
        do_consume(&c[0], 5/*5s*/);

        TEST_SAY("Joining second consumer\n");
        test_consumer_subscribe(c[1].rk, topic);

        /* Just poll second consumer for 10s, the rebalance will not
         * finish until the first consumer polls */
        do_consume(&c[1], 10/*10s*/);

        /* c0: the next call to do_consume/poll will trigger
         *     its rebalance callback, first revoke then assign. */
        c[0].max_rebalance_cnt += 2;
        /* c1: first rebalance */
        c[1].max_rebalance_cnt++;

        TEST_SAY("Expected rebalances: c[0]: %d/%d, c[1]: %d/%d\n",
                 c[0].rebalance_cnt, c[0].max_rebalance_cnt,
                 c[1].rebalance_cnt, c[1].max_rebalance_cnt);

        /* Let rebalances kick in, then consume messages. */
        while (c[0].cnt + c[1].cnt < msgcnt) {
                do_consume(&c[0], 0);
                do_consume(&c[1], 0);
        }

        /* Allow the extra revoke rebalance on close() */
        c[0].max_rebalance_cnt++;
        c[1].max_rebalance_cnt++;

        test_consumer_close(c[0].rk);
        test_consumer_close(c[1].rk);

        rd_kafka_destroy(c[0].rk);
        rd_kafka_destroy(c[1].rk);

        return 0;
}
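
Both consumers share the same rebalance_cb, and each gets its own struct _consumer attached as the conf opaque before the handle is created, so the callback can track per-consumer rebalance counts. A rough sketch of that pattern (the test's actual callback also enforces max_rebalance_cnt and drives assign/unassign more carefully):

/* Rough sketch: 'opaque' is the per-consumer state attached with
 * rd_kafka_conf_set_opaque() before test_create_consumer(). */
static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err,
                          rd_kafka_topic_partition_list_t *parts,
                          void *opaque) {
        struct _consumer *cons = opaque;

        cons->rebalance_cnt++;
        TEST_SAY("%s: rebalance #%d (max %d): %s\n",
                 rd_kafka_name(rk), cons->rebalance_cnt,
                 cons->max_rebalance_cnt, rd_kafka_err2str(err));

        if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
                rd_kafka_assign(rk, parts);
        else
                rd_kafka_assign(rk, NULL);
}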
Code Example #10
/* Produce a batch of messages to a single partition. */
static void test_single_partition (void) {
	int partition = 0;
	int r;
	rd_kafka_t *rk;
	rd_kafka_topic_t *rkt;
	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *topic_conf;
	char msg[128];
	int msgcnt = 100000;
	int failcnt = 0;
	int i;
        rd_kafka_message_t *rkmessages;
        int msgcounter = 0;

        msgid_next = 0;

	test_conf_init(&conf, &topic_conf, 20);

	/* Set delivery report callback */
	rd_kafka_conf_set_dr_cb(conf, dr_single_partition_cb);
        rd_kafka_conf_set_opaque(conf, &msgcounter);

	/* Create kafka instance */
	rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

	TEST_SAY("test_single_partition: Created kafka instance %s\n",
		 rd_kafka_name(rk));

	rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0011", 0),
                                 topic_conf);
	if (!rkt)
		TEST_FAIL("Failed to create topic: %s\n",
			  rd_strerror(errno));

        /* Create messages */
        rkmessages = calloc(sizeof(*rkmessages), msgcnt);
	for (i = 0 ; i < msgcnt ; i++) {
		int *msgidp = malloc(sizeof(*msgidp));
		*msgidp = i;
		rd_snprintf(msg, sizeof(msg), "%s:%s test message #%i",
                         __FILE__, __FUNCTION__, i);

                rkmessages[i].payload  = rd_strdup(msg);
                rkmessages[i].len      = strlen(msg);
                rkmessages[i]._private = msgidp;
        }

        r = rd_kafka_produce_batch(rkt, partition, RD_KAFKA_MSG_F_FREE,
                                   rkmessages, msgcnt);

        /* Scan through messages to check for errors. */
        for (i = 0 ; i < msgcnt ; i++) {
                if (rkmessages[i].err) {
                        failcnt++;
                        if (failcnt < 100)
                                TEST_SAY("Message #%i failed: %s\n",
                                         i,
                                         rd_kafka_err2str(rkmessages[i].err));
                }
        }

        /* All messages should've been produced. */
        if (r < msgcnt) {
                TEST_SAY("Not all messages were accepted "
                         "by produce_batch(): %i < %i\n", r, msgcnt);
                if (msgcnt - r != failcnt)
                        TEST_SAY("Discrepency between failed messages (%i) "
                                 "and return value %i (%i - %i)\n",
                                 failcnt, msgcnt - r, msgcnt, r);
                TEST_FAIL("%i/%i messages failed\n", msgcnt - r, msgcnt);
        }

        free(rkmessages);
	TEST_SAY("Single partition: "
                 "Produced %i messages, waiting for deliveries\n", r);

        msgcounter = msgcnt;

	/* Wait for messages to be delivered */
        test_wait_delivery(rk, &msgcounter);

	if (fails)
		TEST_FAIL("%i failures, see previous errors", fails);

	if (msgid_next != msgcnt)
		TEST_FAIL("Still waiting for messages: next %i != end %i\n",
			  msgid_next, msgcnt);

	/* Destroy topic */
	rd_kafka_topic_destroy(rkt);

	/* Destroy rdkafka instance */
	TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
	rd_kafka_destroy(rk);

	return;
}
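
In this test the conf opaque is the outstanding-message counter (&msgcounter), while each message carries its own id through the _private (msg_opaque) field. A minimal sketch of a delivery callback consistent with that setup (dr_single_partition_cb in the real test additionally checks delivery ordering via msgid_next):

/* Minimal sketch: the conf opaque is the remaining-message counter, and
 * msg_opaque is the per-message id allocated in the produce loop above. */
static void dr_single_partition_cb (rd_kafka_t *rk, void *payload, size_t len,
                                    rd_kafka_resp_err_t err,
                                    void *opaque, void *msg_opaque) {
        int *msgcounterp = opaque;
        int *msgidp = msg_opaque;

        if (err)
                TEST_FAIL("Message #%d delivery failed: %s\n",
                          *msgidp, rd_kafka_err2str(err));

        free(msgidp);
        (*msgcounterp)--;   /* one fewer message outstanding */
}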
Code Example #11
File: 0006-symbols.c  Project: BDeus/librdkafka
int main_0006_symbols (int argc, char **argv) {

        if (argc < 0 /* always false */) {
                rd_kafka_version();
                rd_kafka_version_str();
		rd_kafka_get_debug_contexts();
		rd_kafka_get_err_descs(NULL, NULL);
                rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR);
		rd_kafka_err2name(RD_KAFKA_RESP_ERR_NO_ERROR);
                rd_kafka_errno2err(EINVAL);
                rd_kafka_errno();
		rd_kafka_last_error();
                rd_kafka_conf_new();
                rd_kafka_conf_destroy(NULL);
                rd_kafka_conf_dup(NULL);
                rd_kafka_conf_set(NULL, NULL, NULL, NULL, 0);
                rd_kafka_conf_set_dr_cb(NULL, NULL);
                rd_kafka_conf_set_dr_msg_cb(NULL, NULL);
                rd_kafka_conf_set_error_cb(NULL, NULL);
                rd_kafka_conf_set_stats_cb(NULL, NULL);
                rd_kafka_conf_set_log_cb(NULL, NULL);
                rd_kafka_conf_set_socket_cb(NULL, NULL);
		rd_kafka_conf_set_rebalance_cb(NULL, NULL);
		rd_kafka_conf_set_offset_commit_cb(NULL, NULL);
		rd_kafka_conf_set_throttle_cb(NULL, NULL);
		rd_kafka_conf_set_default_topic_conf(NULL, NULL);
		rd_kafka_conf_get(NULL, NULL, NULL, NULL);
#ifndef _MSC_VER
		rd_kafka_conf_set_open_cb(NULL, NULL);
#endif
		rd_kafka_conf_set_opaque(NULL, NULL);
                rd_kafka_opaque(NULL);
                rd_kafka_conf_dump(NULL, NULL);
                rd_kafka_topic_conf_dump(NULL, NULL);
                rd_kafka_conf_dump_free(NULL, 0);
                rd_kafka_conf_properties_show(NULL);
                rd_kafka_topic_conf_new();
                rd_kafka_topic_conf_dup(NULL);
                rd_kafka_topic_conf_destroy(NULL);
                rd_kafka_topic_conf_set(NULL, NULL, NULL, NULL, 0);
                rd_kafka_topic_conf_set_opaque(NULL, NULL);
		rd_kafka_topic_conf_get(NULL, NULL, NULL, NULL);
                rd_kafka_topic_conf_set_partitioner_cb(NULL, NULL);
                rd_kafka_topic_partition_available(NULL, 0);
		rd_kafka_topic_opaque(NULL);
                rd_kafka_msg_partitioner_random(NULL, NULL, 0, 0, NULL, NULL);
                rd_kafka_msg_partitioner_consistent(NULL, NULL, 0, 0, NULL, NULL);
                rd_kafka_msg_partitioner_consistent_random(NULL, NULL, 0, 0, NULL, NULL);
                rd_kafka_new(0, NULL, NULL, 0);
                rd_kafka_destroy(NULL);
                rd_kafka_name(NULL);
		rd_kafka_memberid(NULL);
                rd_kafka_topic_new(NULL, NULL, NULL);
                rd_kafka_topic_destroy(NULL);
                rd_kafka_topic_name(NULL);
                rd_kafka_message_destroy(NULL);
                rd_kafka_message_errstr(NULL);
		rd_kafka_message_timestamp(NULL, NULL);
                rd_kafka_consume_start(NULL, 0, 0);
                rd_kafka_consume_stop(NULL, 0);
                rd_kafka_consume(NULL, 0, 0);
                rd_kafka_consume_batch(NULL, 0, 0, NULL, 0);
                rd_kafka_consume_callback(NULL, 0, 0, NULL, NULL);
                rd_kafka_offset_store(NULL, 0, 0);
                rd_kafka_produce(NULL, 0, 0, NULL, 0, NULL, 0, NULL);
                rd_kafka_produce_batch(NULL, 0, 0, NULL, 0);
                rd_kafka_poll(NULL, 0);
                rd_kafka_brokers_add(NULL, NULL);
                /* DEPRECATED: rd_kafka_set_logger(NULL, NULL); */
                rd_kafka_set_log_level(NULL, 0);
                rd_kafka_log_print(NULL, 0, NULL, NULL);
#ifndef _MSC_VER
                rd_kafka_log_syslog(NULL, 0, NULL, NULL);
#endif
                rd_kafka_outq_len(NULL);
                rd_kafka_dump(NULL, NULL);
                rd_kafka_thread_cnt();
                rd_kafka_wait_destroyed(0);
                rd_kafka_metadata(NULL, 0, NULL, NULL, 0);
                rd_kafka_metadata_destroy(NULL);
                rd_kafka_queue_destroy(NULL);
                rd_kafka_consume_start_queue(NULL, 0, 0, NULL);
                rd_kafka_consume_queue(NULL, 0);
                rd_kafka_consume_batch_queue(NULL, 0, NULL, 0);
                rd_kafka_consume_callback_queue(NULL, 0, NULL, NULL);
                rd_kafka_seek(NULL, 0, 0, 0);
                rd_kafka_yield(NULL);
                rd_kafka_mem_free(NULL, NULL);
                rd_kafka_list_groups(NULL, NULL, NULL, 0);
                rd_kafka_group_list_destroy(NULL);

		/* KafkaConsumer API */
		rd_kafka_subscribe(NULL, NULL);
		rd_kafka_unsubscribe(NULL);
		rd_kafka_subscription(NULL, NULL);
		rd_kafka_consumer_poll(NULL, 0);
		rd_kafka_consumer_close(NULL);
		rd_kafka_assign(NULL, NULL);
		rd_kafka_assignment(NULL, NULL);
		rd_kafka_commit(NULL, NULL, 0);
		rd_kafka_commit_message(NULL, NULL, 0);
                rd_kafka_committed(NULL, NULL, 0);
		rd_kafka_position(NULL, NULL);

		/* TopicPartition */
		rd_kafka_topic_partition_list_new(0);
		rd_kafka_topic_partition_list_destroy(NULL);
		rd_kafka_topic_partition_list_add(NULL, NULL, 0);
		rd_kafka_topic_partition_list_add_range(NULL, NULL, 0, 0);
		rd_kafka_topic_partition_list_del(NULL, NULL, 0);
		rd_kafka_topic_partition_list_del_by_idx(NULL, 0);
		rd_kafka_topic_partition_list_copy(NULL);
		rd_kafka_topic_partition_list_set_offset(NULL, NULL, 0, 0);
		rd_kafka_topic_partition_list_find(NULL, NULL, 0);
		rd_kafka_query_watermark_offsets(NULL, NULL, 0, NULL, NULL, 0);
		rd_kafka_get_watermark_offsets(NULL, NULL, 0, NULL, NULL);
        }


	return 0;
}