static void rebalance_cb (rd_kafka_t *rk,
                          rd_kafka_resp_err_t err,
			  rd_kafka_topic_partition_list_t *partitions,
                          void *opaque) {

	fprintf(stderr, "%% Consumer group rebalanced: ");

	switch (err)
	{
	case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
		fprintf(stderr, "assigned:\n");
		print_partition_list(stderr, 1, partitions);
		rd_kafka_assign(rk, partitions);
		break;

	case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
		fprintf(stderr, "revoked:\n");
		print_partition_list(stderr, 0, partitions);
		rd_kafka_assign(rk, NULL);
		break;

	default:
		fprintf(stderr, "failed: %s\n",
                        rd_kafka_err2str(err));
		break;
	}
}
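The print_partition_list() helper used above is not part of this listing. A minimal sketch of what it could look like, assuming the second argument is an assigned/revoked flag (the signature is inferred from the call sites above, not taken from librdkafka itself):

#include <stdio.h>
#include <inttypes.h>
#include <librdkafka/rdkafka.h>

/* Illustrative helper: dump each topic/partition/offset in the list to 'fp'.
 * 'is_assigned' only selects the label, matching the 1/0 passed above. */
static void print_partition_list (FILE *fp, int is_assigned,
                                  const rd_kafka_topic_partition_list_t
                                  *partitions) {
        int i;

        for (i = 0 ; i < partitions->cnt ; i++)
                fprintf(fp, " %s %s [%"PRId32"] offset %"PRId64"\n",
                        is_assigned ? "assigned" : "revoked",
                        partitions->elems[i].topic,
                        partitions->elems[i].partition,
                        partitions->elems[i].offset);
}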
Example #2
static void rebalance_cb (rd_kafka_t *rk,
			  rd_kafka_resp_err_t err,
			  rd_kafka_topic_partition_list_t *partitions,
			  void *opaque) {
        char *memberid = rd_kafka_memberid(rk);

	TEST_SAY("%s: MemberId \"%s\": Consumer group rebalanced: %s\n",
		 rd_kafka_name(rk), memberid, rd_kafka_err2str(err));

        if (memberid)
                free(memberid);

	test_print_partition_list(partitions);

	switch (err)
	{
	case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
		assign_cnt++;
		rd_kafka_assign(rk, partitions);
		break;

	case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
		if (assign_cnt == 0)
			TEST_FAIL("asymetric rebalance_cb\n");
		assign_cnt--;
		rd_kafka_assign(rk, NULL);
		break;

	default:
		TEST_FAIL("rebalance failed: %s\n",
			  rd_kafka_err2str(err));
		break;
	}
}
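Example #3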
static void Consumer_rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err,
				   rd_kafka_topic_partition_list_t *c_parts,
				   void *opaque) {
	Handle *self = opaque;
	CallState *cs;

	cs = CallState_get(self);

	self->u.Consumer.rebalance_assigned = 0;

	if ((err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS &&
	     self->u.Consumer.on_assign) ||
	    (err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS &&
	     self->u.Consumer.on_revoke)) {
		PyObject *parts;
		PyObject *args, *result;

		/* Construct list of TopicPartition based on 'c_parts' */
		parts = c_parts_to_py(c_parts);

		args = Py_BuildValue("(OO)", self, parts);

		Py_DECREF(parts);

		if (!args) {
			cfl_PyErr_Format(RD_KAFKA_RESP_ERR__FAIL,
					 "Unable to build callback args");
			CallState_crash(cs);
			CallState_resume(cs);
			return;
		}

		result = PyObject_CallObject(
			err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS ?
			self->u.Consumer.on_assign :
			self->u.Consumer.on_revoke, args);

		Py_DECREF(args);

		if (result)
			Py_DECREF(result);
		else {
			CallState_crash(cs);
			rd_kafka_yield(rk);
		}
	}

	/* Fallback: librdkafka needs the rebalance_cb to call assign()
	 * to synchronize state, if the user did not do this from callback,
	 * or there was no callback, or the callback failed, then we perform
	 * that assign() call here instead. */
	if (!self->u.Consumer.rebalance_assigned) {
		if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
			rd_kafka_assign(rk, c_parts);
		else
			rd_kafka_assign(rk, NULL);
	}

	CallState_resume(cs);
}
Example #4
static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err,
                         rd_kafka_topic_partition_list_t *partitions,
                         void *opaque) {
        int i;
        char *memberid = rd_kafka_memberid(rk);

        TEST_SAY("%s: MemberId \"%s\": Consumer group rebalanced: %s\n",
                 rd_kafka_name(rk), memberid, rd_kafka_err2str(err));

        if (memberid)
                free(memberid);

        test_print_partition_list(partitions);

        switch (err) {
        case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
                assign_cnt++;

                rd_kafka_assign(rk, partitions);
                mtx_lock(&lock);
                consumers_running = 1;
                mtx_unlock(&lock);

                for (i = 0; i < partitions->cnt && i < MAX_THRD_CNT; ++i) {
                        rd_kafka_topic_partition_t part = partitions->elems[i];
                        rd_kafka_queue_t *rkqu;
                        /* This queue is released by the partition consumer thread. */
                        rkqu = rd_kafka_queue_get_partition(rk, part.topic,
                                                            part.partition);

                        rd_kafka_queue_forward(rkqu, NULL);
                        tids[part.partition] = spawn_thread(rkqu,
                                                            part.partition);
                }

                rebalanced = 1;

                break;

        case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
                if (assign_cnt == 0)
                        TEST_FAIL("asymetric rebalance_cb");
                assign_cnt--;
                rd_kafka_assign(rk, NULL);
                mtx_lock(&lock);
                consumers_running = 0;
                mtx_unlock(&lock);

                break;

        default:
                TEST_FAIL("rebalance failed: %s", rd_kafka_err2str(err));
                break;
        }
}
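Example #5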
static PyObject *Consumer_assign (Handle *self, PyObject *tlist) {

	rd_kafka_topic_partition_list_t *c_parts;
	rd_kafka_resp_err_t err;

        if (!self->rk) {
                PyErr_SetString(PyExc_RuntimeError,
                                "Consumer closed");
                return NULL;
        }

	if (!(c_parts = py_to_c_parts(tlist)))
		return NULL;

	self->u.Consumer.rebalance_assigned++;

	err = rd_kafka_assign(self->rk, c_parts);

	rd_kafka_topic_partition_list_destroy(c_parts);

	if (err) {
		cfl_PyErr_Format(err,
				 "Failed to set assignment: %s",
				 rd_kafka_err2str(err));
		return NULL;
	}

	Py_RETURN_NONE;
}
Example #6
RdKafka::ErrorCode
RdKafka::KafkaConsumerImpl::assign (const std::vector<TopicPartition*> &partitions) {
  rd_kafka_topic_partition_list_t *c_parts;
  rd_kafka_resp_err_t err;

  c_parts = partitions_to_c_parts(partitions);

  err = rd_kafka_assign(rk_, c_parts);

  rd_kafka_topic_partition_list_destroy(c_parts);
  return static_cast<RdKafka::ErrorCode>(err);
}
Example #7
/**
 * Wait for REBALANCE ASSIGN event and perform assignment
 *
 * Va-args are \p topic_cnt tuples of the expected assignment:
 *   { const char *topic, int partition_cnt }
 */
static void await_assignment (const char *pfx, rd_kafka_t *rk,
			      rd_kafka_queue_t *queue,
			      int topic_cnt, ...) {
	rd_kafka_event_t *rkev;
	rd_kafka_topic_partition_list_t *tps;
	int i;
	va_list ap;
	int fails = 0;
	int exp_part_cnt = 0;

	TEST_SAY("%s: waiting for assignment\n", pfx);
	rkev = test_wait_event(queue, RD_KAFKA_EVENT_REBALANCE, 30000);
	if (!rkev)
		TEST_FAIL("timed out waiting for assignment");
	TEST_ASSERT(rd_kafka_event_error(rkev) ==
		    RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
		    "expected ASSIGN, got %s",
		    rd_kafka_err2str(rd_kafka_event_error(rkev)));
	tps = rd_kafka_event_topic_partition_list(rkev);

	TEST_SAY("%s: assignment:\n", pfx);
	test_print_partition_list(tps);

	va_start(ap, topic_cnt);
	for (i = 0 ; i < topic_cnt ; i++) {
		const char *topic = va_arg(ap, const char *);
		int partition_cnt = va_arg(ap, int);
		int p;
		TEST_SAY("%s: expecting %s with %d partitions\n",
			 pfx, topic, partition_cnt);
		for (p = 0 ; p < partition_cnt ; p++) {
			if (!rd_kafka_topic_partition_list_find(tps, topic, p)) {
				TEST_FAIL_LATER("%s: expected partition %s [%d] "
						"not found in assginment",
						pfx, topic, p);
				fails++;
			}
		}
		exp_part_cnt += partition_cnt;
	}
	va_end(ap);

	TEST_ASSERT(exp_part_cnt == tps->cnt,
		    "expected assignment of %d partitions, got %d",
		    exp_part_cnt, tps->cnt);

	if (fails > 0)
		TEST_FAIL("%s: assignment mismatch: see above", pfx);

	rd_kafka_assign(rk, tps);
	rd_kafka_event_destroy(rkev);
}
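A hypothetical call, following the { topic, partition_cnt } va-arg format documented above; the prefix, topic names and partition counts are placeholders:

/* Expect an assignment covering two topics with four partitions each. */
await_assignment("consumer-1", rk, queue, 2,
                 "test_topic_a", 4,
                 "test_topic_b", 4);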
Example #8
void test_consumer_unassign (const char *what, rd_kafka_t *rk) {
        rd_kafka_resp_err_t err;
        test_timing_t timing;

        TIMING_START(&timing, "UNASSIGN.PARTITIONS");
        err = rd_kafka_assign(rk, NULL);
        TIMING_STOP(&timing);
        if (err)
                TEST_FAIL("%s: failed to unassign current partitions: %s\n",
                          what, rd_kafka_err2str(err));
        else
                TEST_SAY("%s: unassigned current partitions\n", what);
}
Example #9
static void rebalance_cb (rd_kafka_t *rk,
                          rd_kafka_resp_err_t err,
                          rd_kafka_topic_partition_list_t *parts,
                          void *opaque) {
        struct _consumer *cons = opaque;

        cons->rebalance_cnt++;

        TEST_SAY(_C_BLU "%s rebalance #%d/%d: %s: %d partition(s)\n",
                 rd_kafka_name(cons->rk),
                 cons->rebalance_cnt, cons->max_rebalance_cnt,
                 rd_kafka_err2name(err),
                 parts->cnt);

        TEST_ASSERT(cons->rebalance_cnt <= cons->max_rebalance_cnt,
                    "%s rebalanced %d times, max was %d",
                    rd_kafka_name(cons->rk),
                    cons->rebalance_cnt, cons->max_rebalance_cnt);

        if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
                rd_kafka_assign(rk, parts);
        else
                rd_kafka_assign(rk, NULL);
}
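The opaque pointer received by this callback is whatever was registered on the configuration object before the handle was created. A minimal wiring sketch, assuming a struct _consumer with the fields used above (error handling abbreviated):

struct _consumer cons = { 0 };
rd_kafka_conf_t *conf = rd_kafka_conf_new();
char errstr[512];

cons.max_rebalance_cnt = 2;     /* illustrative expectation */

/* Hand the per-consumer state to the callback via the conf opaque,
 * then register the rebalance callback itself. */
rd_kafka_conf_set_opaque(conf, &cons);
rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);

/* rd_kafka_new() takes ownership of 'conf' on success. */
cons.rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));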
Example #10
void test_consumer_assign (const char *what, rd_kafka_t *rk,
			   rd_kafka_topic_partition_list_t *partitions) {
        rd_kafka_resp_err_t err;
        test_timing_t timing;

        TIMING_START(&timing, "ASSIGN.PARTITIONS");
        err = rd_kafka_assign(rk, partitions);
        TIMING_STOP(&timing);
        if (err)
                TEST_FAIL("%s: failed to assign %d partition(s): %s\n",
			  what, partitions->cnt, rd_kafka_err2str(err));
        else
                TEST_SAY("%s: assigned %d partition(s)\n",
			 what, partitions->cnt);
}
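A usage sketch with an explicitly built partition list, given an existing consumer handle rk; the topic name and partition range are placeholders:

rd_kafka_topic_partition_list_t *parts;

/* Assign partitions 0..2 of one topic directly, bypassing the
 * subscription/rebalance machinery. rd_kafka_assign() copies the
 * list, so it can be destroyed right after the call. */
parts = rd_kafka_topic_partition_list_new(3);
rd_kafka_topic_partition_list_add_range(parts, "test_topic", 0, 2);

test_consumer_assign("explicit assign", rk, parts);

rd_kafka_topic_partition_list_destroy(parts);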
Example #11
/**
 * Wait for REBALANCE REVOKE event and perform unassignment.
 */
static void await_revoke (const char *pfx, rd_kafka_t *rk,
			  rd_kafka_queue_t *queue) {
	rd_kafka_event_t *rkev;

	TEST_SAY("%s: waiting for revoke\n", pfx);
	rkev = test_wait_event(queue, RD_KAFKA_EVENT_REBALANCE, 30000);
	if (!rkev)
		TEST_FAIL("timed out waiting for revoke");
	TEST_ASSERT(rd_kafka_event_error(rkev) ==
		    RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS,
		    "expected REVOKE, got %s",
		    rd_kafka_err2str(rd_kafka_event_error(rkev)));
	rd_kafka_assign(rk, NULL);
	rd_kafka_event_destroy(rkev);
}
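And the matching call on the revoke side; the prefix is again a placeholder:

/* Block (up to the 30s timeout above) until the assignment is revoked. */
await_revoke("consumer-1", rk, queue);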
Example #12
static PyObject *Consumer_unassign (Handle *self, PyObject *ignore) {

	rd_kafka_resp_err_t err;

        if (!self->rk) {
                PyErr_SetString(PyExc_RuntimeError,
                                "Consumer closed");
                return NULL;
        }

	self->u.Consumer.rebalance_assigned++;

	err = rd_kafka_assign(self->rk, NULL);
	if (err) {
		cfl_PyErr_Format(err,
				 "Failed to remove assignment: %s",
				 rd_kafka_err2str(err));
		return NULL;
	}

	Py_RETURN_NONE;
}
Example #13
int main_0040_io_event (int argc, char **argv) {
	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *tconf;
	rd_kafka_t *rk_p, *rk_c;
	const char *topic;
	rd_kafka_topic_t *rkt_p;
	rd_kafka_queue_t *queue;
	uint64_t testid;
	int msgcnt = 100;
	int recvd = 0;
	int fds[2];
	int wait_multiplier = 1;
	struct pollfd pfd;
        int r;
	enum {
		_NOPE,
		_YEP,
		_REBALANCE
	} expecting_io = _REBALANCE;

	testid = test_id_generate();
	topic = test_mk_topic_name(__FUNCTION__, 1);

	rk_p = test_create_producer();
	rkt_p = test_create_producer_topic(rk_p, topic, NULL);
	test_auto_create_topic_rkt(rk_p, rkt_p);

	test_conf_init(&conf, &tconf, 0);
	rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE);
	test_conf_set(conf, "session.timeout.ms", "6000");
	test_conf_set(conf, "enable.partition.eof", "false");
	/* Speed up propagation of new topics */
	test_conf_set(conf, "metadata.max.age.ms", "5000");
	test_topic_conf_set(tconf, "auto.offset.reset", "earliest");
	rk_c = test_create_consumer(topic, NULL, conf, tconf);

	queue = rd_kafka_queue_get_consumer(rk_c);

	test_consumer_subscribe(rk_c, topic);

#ifndef _MSC_VER
        r = pipe(fds);
#else
        r = _pipe(fds, 2, _O_BINARY);
#endif
        if (r == -1)
		TEST_FAIL("pipe() failed: %s\n", strerror(errno));
	
	rd_kafka_queue_io_event_enable(queue, fds[1], "1", 1);

	pfd.fd = fds[0];
	pfd.events = POLLIN;
	pfd.revents = 0;

	/**
	 * 1) Wait for rebalance event
	 * 2) Wait 1 interval (1s) expecting no IO (nothing produced).
	 * 3) Produce half the messages
	 * 4) Expect IO
	 * 5) Consume the available messages
	 * 6) Wait 1 interval expecting no IO.
	 * 7) Produce remaining half
	 * 8) Expect IO
	 * 9) Done.
	 */
	while (recvd < msgcnt) {
		int r;

#ifndef _MSC_VER
		r = poll(&pfd, 1, 1000 * wait_multiplier);
#else
                r = WSAPoll(&pfd, 1, 1000 * wait_multiplier);
#endif
		if (r == -1) {
			TEST_FAIL("poll() failed: %s", strerror(errno));
			
		} else if (r == 1) {
			rd_kafka_event_t *rkev;
			char b;
			int eventcnt = 0;

			if (pfd.revents & POLLERR)
				TEST_FAIL("Poll error\n");
			if (!(pfd.revents & POLLIN)) {
				TEST_SAY("Stray event 0x%x\n", (int)pfd.revents);
				continue;
			}

			TEST_SAY("POLLIN\n");
                        /* Read signaling token to purge socket queue and
                         * eventually silence POLLIN */
#ifndef _MSC_VER
			r = read(pfd.fd, &b, 1);
#else
			r = _read((int)pfd.fd, &b, 1);
#endif
			if (r == -1)
				TEST_FAIL("read failed: %s\n", strerror(errno));

			if (!expecting_io)
				TEST_WARN("Got unexpected IO after %d/%d msgs\n",
					  recvd, msgcnt);

			while ((rkev = rd_kafka_queue_poll(queue, 0))) {
				eventcnt++;
				switch (rd_kafka_event_type(rkev))
				{
				case RD_KAFKA_EVENT_REBALANCE:
					TEST_SAY("Got %s: %s\n", rd_kafka_event_name(rkev),
						 rd_kafka_err2str(rd_kafka_event_error(rkev)));
					if (expecting_io != _REBALANCE)
						TEST_FAIL("Got Rebalance when expecting message\n");
					if (rd_kafka_event_error(rkev) == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
						rd_kafka_assign(rk_c, rd_kafka_event_topic_partition_list(rkev));
						expecting_io = _NOPE;
					} else
						rd_kafka_assign(rk_c, NULL);
					break;
					
				case RD_KAFKA_EVENT_FETCH:
					if (expecting_io != _YEP)
						TEST_FAIL("Did not expect more messages at %d/%d\n",
							  recvd, msgcnt);
					recvd++;
					if (recvd == (msgcnt / 2) || recvd == msgcnt)
						expecting_io = _NOPE;
					break;

				case RD_KAFKA_EVENT_ERROR:
					TEST_FAIL("Error: %s\n", rd_kafka_event_error_string(rkev));
					break;

				default:
					TEST_SAY("Ignoring event %s\n", rd_kafka_event_name(rkev));
				}
					
				rd_kafka_event_destroy(rkev);
			}
			TEST_SAY("%d events, Consumed %d/%d messages\n", eventcnt, recvd, msgcnt);

			wait_multiplier = 1;

		} else {
			if (expecting_io == _REBALANCE) {
				continue;
			} else if (expecting_io == _YEP) {
				TEST_FAIL("Did not see expected IO after %d/%d msgs\n",
					  recvd, msgcnt);
			}

			TEST_SAY("IO poll timeout (good)\n");

			TEST_SAY("Got idle period, producing\n");
			test_produce_msgs(rk_p, rkt_p, testid, 0, recvd, msgcnt/2,
					  NULL, 10);

			expecting_io = _YEP;
			/* When running slowly (e.g., valgrind) it might take
			 * some time before the first message is received
			 * after producing. */
			wait_multiplier = 3;
		}
	}
	TEST_SAY("Done\n");

	rd_kafka_topic_destroy(rkt_p);
	rd_kafka_destroy(rk_p);

	rd_kafka_queue_destroy(queue);
	rd_kafka_consumer_close(rk_c);
	rd_kafka_destroy(rk_c);

#ifndef _MSC_VER
	close(fds[0]);
	close(fds[1]);
#else
        _close(fds[0]);
        _close(fds[1]);
#endif

	return 0;
}
Example #14
RdKafka::ErrorCode
RdKafka::KafkaConsumerImpl::unassign () {
  return static_cast<RdKafka::ErrorCode>(rd_kafka_assign(rk_, NULL));
}
Example #15
int main_0006_symbols (int argc, char **argv) {

        if (argc < 0 /* always false */) {
                rd_kafka_version();
                rd_kafka_version_str();
		rd_kafka_get_debug_contexts();
		rd_kafka_get_err_descs(NULL, NULL);
                rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR);
		rd_kafka_err2name(RD_KAFKA_RESP_ERR_NO_ERROR);
                rd_kafka_errno2err(EINVAL);
                rd_kafka_errno();
		rd_kafka_last_error();
                rd_kafka_conf_new();
                rd_kafka_conf_destroy(NULL);
                rd_kafka_conf_dup(NULL);
                rd_kafka_conf_set(NULL, NULL, NULL, NULL, 0);
                rd_kafka_conf_set_dr_cb(NULL, NULL);
                rd_kafka_conf_set_dr_msg_cb(NULL, NULL);
                rd_kafka_conf_set_error_cb(NULL, NULL);
                rd_kafka_conf_set_stats_cb(NULL, NULL);
                rd_kafka_conf_set_log_cb(NULL, NULL);
                rd_kafka_conf_set_socket_cb(NULL, NULL);
		rd_kafka_conf_set_rebalance_cb(NULL, NULL);
		rd_kafka_conf_set_offset_commit_cb(NULL, NULL);
		rd_kafka_conf_set_throttle_cb(NULL, NULL);
		rd_kafka_conf_set_default_topic_conf(NULL, NULL);
		rd_kafka_conf_get(NULL, NULL, NULL, NULL);
#ifndef _MSC_VER
		rd_kafka_conf_set_open_cb(NULL, NULL);
#endif
		rd_kafka_conf_set_opaque(NULL, NULL);
                rd_kafka_opaque(NULL);
                rd_kafka_conf_dump(NULL, NULL);
                rd_kafka_topic_conf_dump(NULL, NULL);
                rd_kafka_conf_dump_free(NULL, 0);
                rd_kafka_conf_properties_show(NULL);
                rd_kafka_topic_conf_new();
                rd_kafka_topic_conf_dup(NULL);
                rd_kafka_topic_conf_destroy(NULL);
                rd_kafka_topic_conf_set(NULL, NULL, NULL, NULL, 0);
                rd_kafka_topic_conf_set_opaque(NULL, NULL);
		rd_kafka_topic_conf_get(NULL, NULL, NULL, NULL);
                rd_kafka_topic_conf_set_partitioner_cb(NULL, NULL);
                rd_kafka_topic_partition_available(NULL, 0);
		rd_kafka_topic_opaque(NULL);
                rd_kafka_msg_partitioner_random(NULL, NULL, 0, 0, NULL, NULL);
                rd_kafka_msg_partitioner_consistent(NULL, NULL, 0, 0, NULL, NULL);
                rd_kafka_msg_partitioner_consistent_random(NULL, NULL, 0, 0, NULL, NULL);
                rd_kafka_new(0, NULL, NULL, 0);
                rd_kafka_destroy(NULL);
                rd_kafka_name(NULL);
		rd_kafka_memberid(NULL);
                rd_kafka_topic_new(NULL, NULL, NULL);
                rd_kafka_topic_destroy(NULL);
                rd_kafka_topic_name(NULL);
                rd_kafka_message_destroy(NULL);
                rd_kafka_message_errstr(NULL);
		rd_kafka_message_timestamp(NULL, NULL);
                rd_kafka_consume_start(NULL, 0, 0);
                rd_kafka_consume_stop(NULL, 0);
                rd_kafka_consume(NULL, 0, 0);
                rd_kafka_consume_batch(NULL, 0, 0, NULL, 0);
                rd_kafka_consume_callback(NULL, 0, 0, NULL, NULL);
                rd_kafka_offset_store(NULL, 0, 0);
                rd_kafka_produce(NULL, 0, 0, NULL, 0, NULL, 0, NULL);
                rd_kafka_produce_batch(NULL, 0, 0, NULL, 0);
                rd_kafka_poll(NULL, 0);
                rd_kafka_brokers_add(NULL, NULL);
                /* DEPRECATED: rd_kafka_set_logger(NULL, NULL); */
                rd_kafka_set_log_level(NULL, 0);
                rd_kafka_log_print(NULL, 0, NULL, NULL);
#ifndef _MSC_VER
                rd_kafka_log_syslog(NULL, 0, NULL, NULL);
#endif
                rd_kafka_outq_len(NULL);
                rd_kafka_dump(NULL, NULL);
                rd_kafka_thread_cnt();
                rd_kafka_wait_destroyed(0);
                rd_kafka_metadata(NULL, 0, NULL, NULL, 0);
                rd_kafka_metadata_destroy(NULL);
                rd_kafka_queue_destroy(NULL);
                rd_kafka_consume_start_queue(NULL, 0, 0, NULL);
                rd_kafka_consume_queue(NULL, 0);
                rd_kafka_consume_batch_queue(NULL, 0, NULL, 0);
                rd_kafka_consume_callback_queue(NULL, 0, NULL, NULL);
                rd_kafka_seek(NULL, 0, 0, 0);
                rd_kafka_yield(NULL);
                rd_kafka_mem_free(NULL, NULL);
                rd_kafka_list_groups(NULL, NULL, NULL, 0);
                rd_kafka_group_list_destroy(NULL);

		/* KafkaConsumer API */
		rd_kafka_subscribe(NULL, NULL);
		rd_kafka_unsubscribe(NULL);
		rd_kafka_subscription(NULL, NULL);
		rd_kafka_consumer_poll(NULL, 0);
		rd_kafka_consumer_close(NULL);
		rd_kafka_assign(NULL, NULL);
		rd_kafka_assignment(NULL, NULL);
		rd_kafka_commit(NULL, NULL, 0);
		rd_kafka_commit_message(NULL, NULL, 0);
                rd_kafka_committed(NULL, NULL, 0);
		rd_kafka_position(NULL, NULL);

		/* TopicPartition */
		rd_kafka_topic_partition_list_new(0);
		rd_kafka_topic_partition_list_destroy(NULL);
		rd_kafka_topic_partition_list_add(NULL, NULL, 0);
		rd_kafka_topic_partition_list_add_range(NULL, NULL, 0, 0);
		rd_kafka_topic_partition_list_del(NULL, NULL, 0);
		rd_kafka_topic_partition_list_del_by_idx(NULL, 0);
		rd_kafka_topic_partition_list_copy(NULL);
		rd_kafka_topic_partition_list_set_offset(NULL, NULL, 0, 0);
		rd_kafka_topic_partition_list_find(NULL, NULL, 0);
		rd_kafka_query_watermark_offsets(NULL, NULL, 0, NULL, NULL, 0);
		rd_kafka_get_watermark_offsets(NULL, NULL, 0, NULL, NULL);
        }


	return 0;
}
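Example #16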
int main (int argc, char **argv) {
        char mode = 'C';
	char *brokers = "localhost:9092";
	int opt;
	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *topic_conf;
	char errstr[512];
	const char *debug = NULL;
	int do_conf_dump = 0;
	char tmp[16];
        rd_kafka_resp_err_t err;
        char *group = NULL;
        rd_kafka_topic_partition_list_t *topics;
        int is_subscription;
        int i;

	quiet = !isatty(STDIN_FILENO);

	/* Kafka configuration */
	conf = rd_kafka_conf_new();

        /* Set logger */
        rd_kafka_conf_set_log_cb(conf, logger);

	/* Quick termination */
	snprintf(tmp, sizeof(tmp), "%i", SIGIO);
	rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0);

	/* Topic configuration */
	topic_conf = rd_kafka_topic_conf_new();

	while ((opt = getopt(argc, argv, "g:b:qd:eX:As:DO")) != -1) {
		switch (opt) {
		case 'b':
			brokers = optarg;
			break;
                case 'g':
                        group = optarg;
                        break;
		case 'e':
			exit_eof = 1;
			break;
		case 'd':
			debug = optarg;
			break;
		case 'q':
			quiet = 1;
			break;
		case 'A':
			output = OUTPUT_RAW;
			break;
		case 'X':
		{
			char *name, *val;
			rd_kafka_conf_res_t res;

			if (!strcmp(optarg, "list") ||
			    !strcmp(optarg, "help")) {
				rd_kafka_conf_properties_show(stdout);
				exit(0);
			}

			if (!strcmp(optarg, "dump")) {
				do_conf_dump = 1;
				continue;
			}

			name = optarg;
			if (!(val = strchr(name, '='))) {
				fprintf(stderr, "%% Expected "
					"-X property=value, not %s\n", name);
				exit(1);
			}

			*val = '\0';
			val++;

			res = RD_KAFKA_CONF_UNKNOWN;
			/* Try "topic." prefixed properties on topic
			 * conf first, and then fall through to global if
			 * it didn't match a topic configuration property. */
			if (!strncmp(name, "topic.", strlen("topic.")))
				res = rd_kafka_topic_conf_set(topic_conf,
							      name+
							      strlen("topic."),
							      val,
							      errstr,
							      sizeof(errstr));

			if (res == RD_KAFKA_CONF_UNKNOWN)
				res = rd_kafka_conf_set(conf, name, val,
							errstr, sizeof(errstr));

			if (res != RD_KAFKA_CONF_OK) {
				fprintf(stderr, "%% %s\n", errstr);
				exit(1);
			}
		}
		break;

                case 'D':
                case 'O':
                        mode = opt;
                        break;

		default:
			goto usage;
		}
	}


	if (do_conf_dump) {
		const char **arr;
		size_t cnt;
		int pass;

		for (pass = 0 ; pass < 2 ; pass++) {
			if (pass == 0) {
				arr = rd_kafka_conf_dump(conf, &cnt);
				printf("# Global config\n");
			} else {
				printf("# Topic config\n");
				arr = rd_kafka_topic_conf_dump(topic_conf,
							       &cnt);
			}

			for (i = 0 ; i < (int)cnt ; i += 2)
				printf("%s = %s\n",
				       arr[i], arr[i+1]);

			printf("\n");

			rd_kafka_conf_dump_free(arr, cnt);
		}

		exit(0);
	}


	if (strchr("OC", mode) && optind == argc) {
	usage:
		fprintf(stderr,
			"Usage: %s [options] <topic[:part]> <topic[:part]>..\n"
			"\n"
			"librdkafka version %s (0x%08x)\n"
			"\n"
			" Options:\n"
                        "  -g <group>      Consumer group (%s)\n"
			"  -b <brokers>    Broker address (%s)\n"
			"  -e              Exit consumer when last message\n"
			"                  in partition has been received.\n"
                        "  -D              Describe group.\n"
                        "  -O              Get commmitted offset(s)\n"
			"  -d [facs..]     Enable debugging contexts:\n"
			"                  %s\n"
			"  -q              Be quiet\n"
			"  -A              Raw payload output (consumer)\n"
			"  -X <prop=name> Set arbitrary librdkafka "
			"configuration property\n"
			"               Properties prefixed with \"topic.\" "
			"will be set on topic object.\n"
			"               Use '-X list' to see the full list\n"
			"               of supported properties.\n"
			"\n"
                        "For balanced consumer groups use the 'topic1 topic2..'"
                        " format\n"
                        "and for static assignment use "
                        "'topic1:part1 topic1:part2 topic2:part1..'\n"
			"\n",
			argv[0],
			rd_kafka_version_str(), rd_kafka_version(),
                        group, brokers,
			RD_KAFKA_DEBUG_CONTEXTS);
		exit(1);
	}


	signal(SIGINT, stop);
	signal(SIGUSR1, sig_usr1);

	if (debug &&
	    rd_kafka_conf_set(conf, "debug", debug, errstr, sizeof(errstr)) !=
	    RD_KAFKA_CONF_OK) {
		fprintf(stderr, "%% Debug configuration failed: %s: %s\n",
			errstr, debug);
		exit(1);
	}

        /*
         * Client/Consumer group
         */

        if (strchr("CO", mode)) {
                /* Consumer groups require a group id */
                if (!group)
                        group = "rdkafka_consumer_example";
                if (rd_kafka_conf_set(conf, "group.id", group,
                                      errstr, sizeof(errstr)) !=
                    RD_KAFKA_CONF_OK) {
                        fprintf(stderr, "%% %s\n", errstr);
                        exit(1);
                }

                /* Consumer groups always use broker based offset storage */
                if (rd_kafka_topic_conf_set(topic_conf, "offset.store.method",
                                            "broker",
                                            errstr, sizeof(errstr)) !=
                    RD_KAFKA_CONF_OK) {
                        fprintf(stderr, "%% %s\n", errstr);
                        exit(1);
                }

                /* Set default topic config for pattern-matched topics. */
                rd_kafka_conf_set_default_topic_conf(conf, topic_conf);

                /* Callback called on partition assignment changes */
                rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);
        }

        /* Create Kafka handle */
        if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf,
                                errstr, sizeof(errstr)))) {
                fprintf(stderr,
                        "%% Failed to create new consumer: %s\n",
                        errstr);
                exit(1);
        }

        rd_kafka_set_log_level(rk, LOG_DEBUG);

        /* Add brokers */
        if (rd_kafka_brokers_add(rk, brokers) == 0) {
                fprintf(stderr, "%% No valid brokers specified\n");
                exit(1);
        }


        if (mode == 'D') {
                int r;
                /* Describe groups */
                r = describe_groups(rk, group);

                rd_kafka_destroy(rk);
                exit(r == -1 ? 1 : 0);
        }

        /* Redirect rd_kafka_poll() to consumer_poll() */
        rd_kafka_poll_set_consumer(rk);

        topics = rd_kafka_topic_partition_list_new(argc - optind);
        is_subscription = 1;
        for (i = optind ; i < argc ; i++) {
                /* Parse "topic[:part] */
                char *topic = argv[i];
                char *t;
                int32_t partition = -1;

                if ((t = strstr(topic, ":"))) {
                        *t = '\0';
                        partition = atoi(t+1);
                        is_subscription = 0; /* is assignment */
                        wait_eof++;
                }

                rd_kafka_topic_partition_list_add(topics, topic, partition);
        }

        if (mode == 'O') {
                /* Offset query */

                err = rd_kafka_position(rk, topics, 5000);
                if (err) {
                        fprintf(stderr, "%% Failed to fetch offsets: %s\n",
                                rd_kafka_err2str(err));
                        exit(1);
                }

                for (i = 0 ; i < topics->cnt ; i++) {
                        rd_kafka_topic_partition_t *p = &topics->elems[i];
                        printf("Topic \"%s\" partition %"PRId32,
                               p->topic, p->partition);
                        if (p->err)
                                printf(" error %s",
                                       rd_kafka_err2str(p->err));
                        else {
                                printf(" offset %"PRId64"",
                                       p->offset);

                                if (p->metadata_size)
                                        printf(" (%d bytes of metadata)",
                                               (int)p->metadata_size);
                        }
                        printf("\n");
                }

                goto done;
        }


        if (is_subscription) {
                fprintf(stderr, "%% Subscribing to %d topics\n", topics->cnt);

                if ((err = rd_kafka_subscribe(rk, topics))) {
                        fprintf(stderr,
                                "%% Failed to start consuming topics: %s\n",
                                rd_kafka_err2str(err));
                        exit(1);
                }
        } else {
                fprintf(stderr, "%% Assigning %d partitions\n", topics->cnt);

                if ((err = rd_kafka_assign(rk, topics))) {
                        fprintf(stderr,
                                "%% Failed to assign partitions: %s\n",
                                rd_kafka_err2str(err));
                }
        }

        while (run) {
                rd_kafka_message_t *rkmessage;

                rkmessage = rd_kafka_consumer_poll(rk, 1000);
                if (rkmessage) {
                        msg_consume(rkmessage, NULL);
                        rd_kafka_message_destroy(rkmessage);
                }
        }

done:
        err = rd_kafka_consumer_close(rk);
        if (err)
                fprintf(stderr, "%% Failed to close consumer: %s\n",
                        rd_kafka_err2str(err));
        else
                fprintf(stderr, "%% Consumer closed\n");

        rd_kafka_topic_partition_list_destroy(topics);

        /* Destroy handle */
        rd_kafka_destroy(rk);

	/* Let background threads clean up and terminate cleanly. */
	run = 5;
	while (run-- > 0 && rd_kafka_wait_destroyed(1000) == -1)
		printf("Waiting for librdkafka to decommission\n");
	if (run <= 0)
		rd_kafka_dump(stdout, rk);

	return 0;
}