static PyObject *Consumer_offsets_for_times (Handle *self, PyObject *args,
                                                 PyObject *kwargs) {
#if RD_KAFKA_VERSION < 0x000b0000
	PyErr_Format(PyExc_NotImplementedError,
		     "Consumer offsets_for_times require "
		     "confluent-kafka-python built for librdkafka "
		     "version >=v0.11.0 (librdkafka runtime 0x%x, "
		     "buildtime 0x%x)",
		     rd_kafka_version(), RD_KAFKA_VERSION);
	return NULL;
#else

	PyObject *plist;
	double tmout = -1.0f;
	rd_kafka_topic_partition_list_t *c_parts;
	rd_kafka_resp_err_t err;
	static char *kws[] = { "partitions", "timeout", NULL };

        if (!self->rk) {
                PyErr_SetString(PyExc_RuntimeError,
                                "Consumer closed");
                return NULL;
        }

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|d", kws,
					 &plist, &tmout))
		return NULL;

        if (!(c_parts = py_to_c_parts(plist)))
                return NULL;

        Py_BEGIN_ALLOW_THREADS;
        err = rd_kafka_offsets_for_times(self->rk,
                                         c_parts,
                                         tmout >= 0 ? (int)(tmout * 1000.0f) : -1);
        Py_END_ALLOW_THREADS;

        if (err) {
                rd_kafka_topic_partition_list_destroy(c_parts);
                cfl_PyErr_Format(err,
                                 "Failed to get offsets: %s",
                                 rd_kafka_err2str(err));
                return NULL;
        }

        plist = c_parts_to_py(c_parts);
        rd_kafka_topic_partition_list_destroy(c_parts);

        return plist;
#endif
}
static PyObject *Consumer_assignment (Handle *self, PyObject *args,
                                      PyObject *kwargs) {

        PyObject *plist;
        rd_kafka_topic_partition_list_t *c_parts;
        rd_kafka_resp_err_t err;

        if (!self->rk) {
                PyErr_SetString(PyExc_RuntimeError,
                                "Consumer closed");
                return NULL;
        }

        err = rd_kafka_assignment(self->rk, &c_parts);
        if (err) {
                cfl_PyErr_Format(err,
                                 "Failed to get assignment: %s",
                                 rd_kafka_err2str(err));
                return NULL;
        }


        plist = c_parts_to_py(c_parts);
        rd_kafka_topic_partition_list_destroy(c_parts);

        return plist;
}
Example #3
static void do_nonexist_commit (void) {
	rd_kafka_t *rk;
	char group_id[64];
	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *tconf;
	rd_kafka_topic_partition_list_t *offsets;
	const char *unk_topic = test_mk_topic_name(__FUNCTION__, 1);
	rd_kafka_resp_err_t err;

	test_conf_init(&conf, &tconf, 20);
	test_str_id_generate(group_id, sizeof(group_id));

	TEST_SAY(_C_MAG "[ do_nonexist_commit group.id %s ]\n", group_id);

	rk = test_create_consumer(group_id, NULL, conf, tconf, NULL);

	TEST_SAY("Try nonexist commit\n");
	offsets = rd_kafka_topic_partition_list_new(2);
	rd_kafka_topic_partition_list_add(offsets, unk_topic, 0)->offset = 123;
	rd_kafka_topic_partition_list_add(offsets, unk_topic, 1)->offset = 456;

	err = rd_kafka_commit_queue(rk, offsets, NULL,
				    nonexist_offset_commit_cb, NULL);
	TEST_SAY("nonexist commit returned %s\n", rd_kafka_err2str(err));
	if (err != RD_KAFKA_RESP_ERR_NO_ERROR)
		TEST_FAIL("commit() should succeed, not: %s",
			  rd_kafka_err2str(err));

	rd_kafka_topic_partition_list_destroy(offsets);

	test_consumer_close(rk);

	rd_kafka_destroy(rk);
}
static rd_kafka_resp_err_t
rd_kafka_offset_broker_commit (rd_kafka_toppar_t *rktp) {
        rd_kafka_topic_partition_list_t *offsets;
        rd_kafka_topic_partition_t *rktpar;

        rd_kafka_assert(rktp->rktp_rkt->rkt_rk, rktp->rktp_cgrp != NULL);
        rd_kafka_assert(rktp->rktp_rkt->rkt_rk,
                        rktp->rktp_flags & RD_KAFKA_TOPPAR_F_OFFSET_STORE);

        rktp->rktp_committing_offset = rktp->rktp_stored_offset;

        offsets = rd_kafka_topic_partition_list_new(1);
        rktpar = rd_kafka_topic_partition_list_add(
                offsets, rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition);
        rktpar->offset = rktp->rktp_committing_offset;

        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSETCMT",
                     "%.*s [%"PRId32"]: committing offset %"PRId64,
                     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
                     rktp->rktp_partition, rktp->rktp_committing_offset);

        rd_kafka_commit0(rktp->rktp_rkt->rkt_rk, offsets, rktp,
			 RD_KAFKA_REPLYQ(rktp->rktp_ops, 0),
			 rd_kafka_offset_broker_commit_cb, NULL);

        rd_kafka_topic_partition_list_destroy(offsets);

        return RD_KAFKA_RESP_ERR__IN_PROGRESS;
}
static void assign_consume_many (char **topics, int topic_cnt, uint64_t testid){
	rd_kafka_t *rk;
	rd_kafka_topic_partition_list_t *parts;
	int i;
	test_msgver_t mv;

	TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__);

	test_conf_init(NULL, NULL, 60);
	rk = test_create_consumer(__FUNCTION__, NULL, NULL, NULL);

	parts = rd_kafka_topic_partition_list_new(topic_cnt);
	for (i = 0 ; i < topic_cnt ; i++)
		rd_kafka_topic_partition_list_add(parts, topics[i], 0)->
			offset = RD_KAFKA_OFFSET_TAIL(msgs_per_topic);

	test_consumer_assign("consume.assign", rk, parts);
	rd_kafka_topic_partition_list_destroy(parts);

	test_msgver_init(&mv, testid);
	test_consumer_poll("consume.assign", rk, testid,
			   -1, 0, msgs_per_topic * topic_cnt, &mv);

	for (i = 0 ; i < topic_cnt ; i++)
		test_msgver_verify_part("assign", &mv, TEST_MSGVER_ALL_PART,
					topics[i], 0, i * msgs_per_topic,
					msgs_per_topic);
	test_msgver_clear(&mv);

	test_consumer_close(rk);

	rd_kafka_destroy(rk);
}
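
The RD_KAFKA_OFFSET_TAIL(cnt) logical offset used above starts consumption cnt messages before the current end of the partition. Below is a hedged sketch of the same assignment pattern outside the test framework; the function name is illustrative and an existing consumer handle is assumed.

#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Illustrative sketch: assign partition 0 of `topic` so that only the
 * last `cnt` messages are consumed. `rk` is an existing consumer handle. */
static void assign_tail (rd_kafka_t *rk, const char *topic, int cnt) {
        rd_kafka_topic_partition_list_t *parts;
        rd_kafka_resp_err_t err;

        parts = rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add(parts, topic, 0)->offset =
                RD_KAFKA_OFFSET_TAIL(cnt);

        err = rd_kafka_assign(rk, parts);
        if (err)
                fprintf(stderr, "assign failed: %s\n", rd_kafka_err2str(err));

        rd_kafka_topic_partition_list_destroy(parts);
}
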
static PyObject *Consumer_assign (Handle *self, PyObject *tlist) {

	rd_kafka_topic_partition_list_t *c_parts;
	rd_kafka_resp_err_t err;

        if (!self->rk) {
                PyErr_SetString(PyExc_RuntimeError,
                                "Consumer closed");
                return NULL;
        }

	if (!(c_parts = py_to_c_parts(tlist)))
		return NULL;

	self->u.Consumer.rebalance_assigned++;

	err = rd_kafka_assign(self->rk, c_parts);

	rd_kafka_topic_partition_list_destroy(c_parts);

	if (err) {
		cfl_PyErr_Format(err,
				 "Failed to set assignment: %s",
				 rd_kafka_err2str(err));
		return NULL;
	}

	Py_RETURN_NONE;
}
static PyObject *Consumer_committed (Handle *self, PyObject *args,
					 PyObject *kwargs) {

	PyObject *plist;
	rd_kafka_topic_partition_list_t *c_parts;
	rd_kafka_resp_err_t err;
	double tmout = -1.0f;
	static char *kws[] = { "partitions", "timeout", NULL };

        if (!self->rk) {
                PyErr_SetString(PyExc_RuntimeError,
                                "Consumer closed");
                return NULL;
        }

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|d", kws,
					 &plist, &tmout))
		return NULL;


	if (!(c_parts = py_to_c_parts(plist)))
		return NULL;

        Py_BEGIN_ALLOW_THREADS;
        err = rd_kafka_committed(self->rk, c_parts,
                                 tmout >= 0 ? (int)(tmout * 1000.0f) : -1);
        Py_END_ALLOW_THREADS;

	if (err) {
		rd_kafka_topic_partition_list_destroy(c_parts);
		cfl_PyErr_Format(err,
				 "Failed to get committed offsets: %s",
				 rd_kafka_err2str(err));
		return NULL;
	}


	plist = c_parts_to_py(c_parts);
	rd_kafka_topic_partition_list_destroy(c_parts);

	return plist;
}
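
The binding above is a thin wrapper around rd_kafka_committed(). Here is a hedged C sketch of querying a group's committed offset for a single partition directly; the helper name is illustrative, an existing group consumer handle is assumed, and RD_KAFKA_OFFSET_INVALID indicates that nothing has been committed.

#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Illustrative sketch: print the committed offset for one partition. */
static void print_committed (rd_kafka_t *rk, const char *topic,
                             int32_t partition) {
        rd_kafka_topic_partition_list_t *parts;
        rd_kafka_resp_err_t err;

        parts = rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add(parts, topic, partition);

        err = rd_kafka_committed(rk, parts, 5000 /* timeout ms */);
        if (err)
                fprintf(stderr, "committed() failed: %s\n",
                        rd_kafka_err2str(err));
        else if (parts->elems[0].offset == RD_KAFKA_OFFSET_INVALID)
                printf("%s [%d]: no committed offset\n", topic, (int)partition);
        else
                printf("%s [%d]: committed offset %lld\n",
                       topic, (int)partition,
                       (long long)parts->elems[0].offset);

        rd_kafka_topic_partition_list_destroy(parts);
}
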
Example #8
RdKafka::ErrorCode
RdKafka::KafkaConsumerImpl::assign (const std::vector<TopicPartition*> &partitions) {
  rd_kafka_topic_partition_list_t *c_parts;
  rd_kafka_resp_err_t err;

  c_parts = partitions_to_c_parts(partitions);

  err = rd_kafka_assign(rk_, c_parts);

  rd_kafka_topic_partition_list_destroy(c_parts);
  return static_cast<RdKafka::ErrorCode>(err);
}
static PyObject *Consumer_position (Handle *self, PyObject *args,
					PyObject *kwargs) {

	PyObject *plist;
	rd_kafka_topic_partition_list_t *c_parts;
	rd_kafka_resp_err_t err;
	static char *kws[] = { "partitions", NULL };

        if (!self->rk) {
                PyErr_SetString(PyExc_RuntimeError,
                                "Consumer closed");
                return NULL;
        }

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O", kws,
					 &plist))
		return NULL;


	if (!(c_parts = py_to_c_parts(plist)))
		return NULL;

	err = rd_kafka_position(self->rk, c_parts);

	if (err) {
		rd_kafka_topic_partition_list_destroy(c_parts);
		cfl_PyErr_Format(err,
				 "Failed to get position: %s",
				 rd_kafka_err2str(err));
		return NULL;
	}


	plist = c_parts_to_py(c_parts);
	rd_kafka_topic_partition_list_destroy(c_parts);

	return plist;
}
Example #10
RdKafka::ErrorCode
RdKafka::KafkaConsumerImpl::subscription (std::vector<std::string> &topics) {
  rd_kafka_topic_partition_list_t *c_topics;
  rd_kafka_resp_err_t err;

  if ((err = rd_kafka_subscription(rk_, &c_topics)))
    return static_cast<RdKafka::ErrorCode>(err);

  topics.resize(c_topics->cnt);
  for (int i = 0 ; i < c_topics->cnt ; i++)
    topics[i] = std::string(c_topics->elems[i].topic);

  rd_kafka_topic_partition_list_destroy(c_topics);

  return RdKafka::ERR_NO_ERROR;
}
Example #11
RdKafka::ErrorCode
RdKafka::HandleImpl::resume (std::vector<RdKafka::TopicPartition*> &partitions) {
    rd_kafka_topic_partition_list_t *c_parts;
    rd_kafka_resp_err_t err;

    c_parts = partitions_to_c_parts(partitions);

    err = rd_kafka_resume_partitions(rk_, c_parts);

    if (!err)
        update_partitions_from_c_parts(partitions, c_parts);

    rd_kafka_topic_partition_list_destroy(c_parts);

    return static_cast<RdKafka::ErrorCode>(err);
}
Example #12
/**
 * Trigger offset_commit_cb op, if configured.
 * Takes ownership of `offsets`
 */
void rd_kafka_offset_commit_cb_op (rd_kafka_t *rk,
				   rd_kafka_resp_err_t err,
				   rd_kafka_topic_partition_list_t *offsets) {
	rd_kafka_op_t *rko;

        if (!rk->rk_conf.offset_commit_cb) {
		rd_kafka_topic_partition_list_destroy(offsets);
		return;
	}

	rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_COMMIT|RD_KAFKA_OP_REPLY);
	rko->rko_err = err;
	rd_kafka_assert(NULL, offsets->cnt > 0);
        rd_kafka_op_payload_set(rko, offsets,
                                (void *)rd_kafka_topic_partition_list_destroy);
	rd_kafka_q_enq(&rk->rk_rep, rko);
}
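
For context, here is a hedged sketch of the application side that the op above ultimately serves: registering an offset_commit_cb on the configuration object before creating the handle. The callback body is illustrative; only the rd_kafka_conf_set_offset_commit_cb() registration and the callback signature come from librdkafka.

#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Illustrative offset commit callback: log the outcome of each commit. */
static void my_offset_commit_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err,
                                 rd_kafka_topic_partition_list_t *offsets,
                                 void *opaque) {
        int i;

        if (err)
                fprintf(stderr, "offset commit failed: %s\n",
                        rd_kafka_err2str(err));

        for (i = 0 ; i < offsets->cnt ; i++)
                printf("committed %s [%d] @ %lld: %s\n",
                       offsets->elems[i].topic,
                       (int)offsets->elems[i].partition,
                       (long long)offsets->elems[i].offset,
                       rd_kafka_err2str(offsets->elems[i].err));
}

/* Registered on the conf object prior to rd_kafka_new():
 *   rd_kafka_conf_set_offset_commit_cb(conf, my_offset_commit_cb);
 */
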
Example #13
RdKafka::ErrorCode
RdKafka::KafkaConsumerImpl::subscribe (const std::vector<std::string> &topics) {
  rd_kafka_topic_partition_list_t *c_topics;
  rd_kafka_resp_err_t err;

  c_topics = rd_kafka_topic_partition_list_new((int)topics.size());

  for (unsigned int i = 0 ; i < topics.size() ; i++)
    rd_kafka_topic_partition_list_add(c_topics, topics[i].c_str(),
                                      RD_KAFKA_PARTITION_UA);

  err = rd_kafka_subscribe(rk_, c_topics);

  rd_kafka_topic_partition_list_destroy(c_topics);

  return static_cast<RdKafka::ErrorCode>(err);
}
Example #14
RdKafka::ErrorCode
RdKafka::KafkaConsumerImpl::assignment (std::vector<RdKafka::TopicPartition*> &partitions) {
  rd_kafka_topic_partition_list_t *c_parts;
  rd_kafka_resp_err_t err;

  if ((err = rd_kafka_assignment(rk_, &c_parts)))
    return static_cast<RdKafka::ErrorCode>(err);

  partitions.resize(c_parts->cnt);

  for (int i = 0 ; i < c_parts->cnt ; i++)
    partitions[i] = new RdKafka::TopicPartitionImpl(&c_parts->elems[i]);

  rd_kafka_topic_partition_list_destroy(c_parts);

  return RdKafka::ERR_NO_ERROR;
}
Example #15
RdKafka::ErrorCode
RdKafka::KafkaConsumerImpl::committed (std::vector<RdKafka::TopicPartition*> &partitions, int timeout_ms) {
  rd_kafka_topic_partition_list_t *c_parts;
  rd_kafka_resp_err_t err;

  c_parts = partitions_to_c_parts(partitions);

  err = rd_kafka_committed(rk_, c_parts, timeout_ms);

  if (!err) {
    update_partitions_from_c_parts(partitions, c_parts);
  }

  rd_kafka_topic_partition_list_destroy(c_parts);

  return static_cast<RdKafka::ErrorCode>(err);
}
Example #16
static void subscribe_consume_many (char **topics, int topic_cnt,
				    uint64_t testid) {
	rd_kafka_t *rk;
        int i;
	rd_kafka_topic_conf_t *tconf;
	rd_kafka_topic_partition_list_t *parts;
	rd_kafka_resp_err_t err;
	test_msgver_t mv;

	TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__);

	test_conf_init(NULL, &tconf, 60);
	test_topic_conf_set(tconf, "auto.offset.reset", "earliest");
	rk = test_create_consumer(__FUNCTION__, NULL, NULL, tconf);

	parts = rd_kafka_topic_partition_list_new(topic_cnt);
	for (i = 0 ; i < topic_cnt ; i++)
		rd_kafka_topic_partition_list_add(parts, topics[i],
						  RD_KAFKA_PARTITION_UA);

	TEST_SAY("Subscribing to %d topics\n", topic_cnt);
	err = rd_kafka_subscribe(rk, parts);
	if (err)
		TEST_FAIL("subscribe() failed: %s\n", rd_kafka_err2str(err));

	rd_kafka_topic_partition_list_destroy(parts);

	test_msgver_init(&mv, testid);
	test_consumer_poll("consume.subscribe", rk, testid,
			   -1, 0, msgs_per_topic * topic_cnt, &mv);

	for (i = 0 ; i < topic_cnt ; i++)
		test_msgver_verify_part("subscribe", &mv, TEST_MSGVER_ALL_PART,
					topics[i], 0, i * msgs_per_topic,
					msgs_per_topic);
	test_msgver_clear(&mv);

	test_consumer_close(rk);

	rd_kafka_destroy(rk);
}
Example #17
/**
 * Create high-level consumer subscribing to \p topic from BEGINNING
 * and expects \p exp_msgcnt messages with matching \p testid
 * Destroys consumer when done.
 *
 * If \p group_id is NULL a new unique group is generated
 */
void
test_consume_msgs_easy (const char *group_id, const char *topic,
                        uint64_t testid, int exp_msgcnt) {
        rd_kafka_t *rk;
        rd_kafka_topic_conf_t *tconf;
        rd_kafka_resp_err_t err;
        rd_kafka_topic_partition_list_t *topics;
	char grpid0[64];

        test_conf_init(NULL, &tconf, 0);

	if (!group_id)
		group_id = test_str_id_generate(grpid0, sizeof(grpid0));

        test_topic_conf_set(tconf, "auto.offset.reset", "smallest");
        rk = test_create_consumer(group_id, NULL, tconf, NULL);

        rd_kafka_poll_set_consumer(rk);

        topics = rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add(topics, topic, RD_KAFKA_PARTITION_UA);

        TEST_SAY("Subscribing to topic %s in group %s "
                 "(expecting %d msgs with testid %"PRIu64")\n",
                 topic, group_id, exp_msgcnt, testid);

        err = rd_kafka_subscribe(rk, topics);
        if (err)
                TEST_FAIL("Failed to subscribe to %s: %s\n",
                          topic, rd_kafka_err2str(err));

        rd_kafka_topic_partition_list_destroy(topics);

        /* Consume messages */
        test_consumer_poll("consume.easy", rk, testid, -1, -1, exp_msgcnt);

        test_consumer_close(rk);

        rd_kafka_destroy(rk);
}
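
A hedged sketch of how the 4-argument test_consume_msgs_easy() variant defined above is typically driven: produce a known message set under a fresh testid, then let the helper subscribe, consume and verify it. The wrapper function name is illustrative and the librdkafka test framework declarations (test.h) are assumed to be available.

#include "test.h"  /* librdkafka test framework helpers (assumed) */

/* Illustrative sketch: produce, then consume and verify msgcnt messages. */
static void produce_then_consume (const char *topic, int msgcnt) {
        uint64_t testid;

        testid = test_id_generate();
        test_produce_msgs_easy(topic, testid, RD_KAFKA_PARTITION_UA, msgcnt);

        /* NULL group_id: the helper generates a new unique group. */
        test_consume_msgs_easy(NULL, topic, testid, msgcnt);
}
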
rd_kafka_resp_err_t
rd_kafka_commit_message (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage,
                         int async) {
        rd_kafka_topic_partition_list_t *offsets;
        rd_kafka_topic_partition_t *rktpar;
        rd_kafka_resp_err_t err;

        if (rkmessage->err)
                return RD_KAFKA_RESP_ERR__INVALID_ARG;

        offsets = rd_kafka_topic_partition_list_new(1);
        rktpar = rd_kafka_topic_partition_list_add(
                offsets, rd_kafka_topic_name(rkmessage->rkt),
                rkmessage->partition);
        rktpar->offset = rkmessage->offset+1;

        err = rd_kafka_commit(rk, offsets, async);

        rd_kafka_topic_partition_list_destroy(offsets);

        return err;
}
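
A hedged sketch of the per-message synchronous commit loop that rd_kafka_commit_message() enables; note that the helper above commits rkmessage->offset + 1, i.e. the next offset to consume. The function name is illustrative, `rk` is assumed to be an already-subscribed consumer handle, and a real application would add a shutdown condition.

#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Illustrative sketch: poll, process, then commit each message synchronously. */
static void consume_and_commit (rd_kafka_t *rk) {
        for (;;) {
                rd_kafka_message_t *rkm;
                rd_kafka_resp_err_t err;

                rkm = rd_kafka_consumer_poll(rk, 1000);
                if (!rkm)
                        continue;

                if (rkm->err) {
                        fprintf(stderr, "consume error: %s\n",
                                rd_kafka_message_errstr(rkm));
                        rd_kafka_message_destroy(rkm);
                        continue;
                }

                /* ... process the message payload here ... */

                err = rd_kafka_commit_message(rk, rkm, 0 /* sync */);
                if (err)
                        fprintf(stderr, "commit failed: %s\n",
                                rd_kafka_err2str(err));

                rd_kafka_message_destroy(rkm);
        }
}
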
Example #19
static void do_consume (const char *topic, int msgcnt) {
        rd_kafka_t *rk;
        rd_kafka_topic_partition_list_t *parts;

        rk = test_create_consumer(topic, NULL, NULL, NULL);

        parts = rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add(parts, topic, 0)->offset =
                RD_KAFKA_OFFSET_BEGINNING;

        test_consumer_assign("assign", rk, parts);

        rd_kafka_topic_partition_list_destroy(parts);

        exp_msgid = 0;

        while (exp_msgid < msgcnt) {
                rd_kafka_message_t *rkm;

                rkm = rd_kafka_consumer_poll(rk, 1000);
                if (!rkm)
                        continue;

                if (rkm->err)
                        TEST_FAIL("consume error while expecting msgid %d/%d: "
                                  "%s",
                                  exp_msgid, msgcnt,
                                  rd_kafka_message_errstr(rkm));

                handle_consumed_msg(rkm);

                rd_kafka_message_destroy(rkm);
        }

        test_consumer_close(rk);
        rd_kafka_destroy(rk);
}
static PyObject *Consumer_resume (Handle *self, PyObject *args,
                                  PyObject *kwargs) {

        PyObject *plist;
        rd_kafka_topic_partition_list_t *c_parts;
        rd_kafka_resp_err_t err;
        static char *kws[] = { "partitions", NULL };

        if (!self->rk) {
                PyErr_SetString(PyExc_RuntimeError,
                                "Consumer closed");
                return NULL;
        }

        if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O", kws, &plist))
                return NULL;

        if (!(c_parts = py_to_c_parts(plist)))
                return NULL;

        err = rd_kafka_resume_partitions(self->rk, c_parts);
        rd_kafka_topic_partition_list_destroy(c_parts);
        if (err) {
                cfl_PyErr_Format(err,
                                 "Failed to resume partitions: %s",
                                 rd_kafka_err2str(err));
                return NULL;
        }

        Py_RETURN_NONE;
}
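
Below is a hedged C sketch of the pause/resume pair that the binding above wraps: fetch the current assignment, pause fetching, and resume it later. The function name is illustrative and an existing consumer handle with an active assignment is assumed.

#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Illustrative sketch: pause and later resume the current assignment. */
static void pause_then_resume (rd_kafka_t *rk) {
        rd_kafka_topic_partition_list_t *parts;
        rd_kafka_resp_err_t err;

        if ((err = rd_kafka_assignment(rk, &parts))) {
                fprintf(stderr, "assignment() failed: %s\n",
                        rd_kafka_err2str(err));
                return;
        }

        if ((err = rd_kafka_pause_partitions(rk, parts)))
                fprintf(stderr, "pause failed: %s\n", rd_kafka_err2str(err));

        /* ... do other work while fetching is paused ... */

        if ((err = rd_kafka_resume_partitions(rk, parts)))
                fprintf(stderr, "resume failed: %s\n", rd_kafka_err2str(err));

        rd_kafka_topic_partition_list_destroy(parts);
}
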
Example #21
int main_0093_holb_consumer (int argc, char **argv) {
        const char *topic = test_mk_topic_name("0093_holb_consumer", 1);
        int64_t testid;
        const int msgcnt = 100;
        struct _consumer c[_CONSUMER_CNT] = RD_ZERO_INIT;
        rd_kafka_conf_t *conf;

        testid = test_id_generate();

        test_conf_init(&conf, NULL, 60);

        test_create_topic(topic, 1, 1);

        test_produce_msgs_easy(topic, testid, 0, msgcnt);

        test_conf_set(conf, "session.timeout.ms", "6000");
        test_conf_set(conf, "max.poll.interval.ms", "20000");
        test_conf_set(conf, "socket.timeout.ms", "3000");
        test_conf_set(conf, "auto.offset.reset", "earliest");
        /* Trigger other requests often */
        test_conf_set(conf, "topic.metadata.refresh.interval.ms", "500");
        rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);

        rd_kafka_conf_set_opaque(conf, &c[0]);
        c[0].rk = test_create_consumer(topic, NULL,
                                       rd_kafka_conf_dup(conf), NULL);

        rd_kafka_conf_set_opaque(conf, &c[1]);
        c[1].rk = test_create_consumer(topic, NULL, conf, NULL);

        test_consumer_subscribe(c[0].rk, topic);

        /* c0: assign */
        c[0].max_rebalance_cnt = 1;

        /* c1: none, hasn't joined yet */
        c[1].max_rebalance_cnt = 0;

        TEST_SAY("Waiting for c[0] assignment\n");
        while (1) {
                rd_kafka_topic_partition_list_t *parts = NULL;

                do_consume(&c[0], 1/*1s*/);

                if (rd_kafka_assignment(c[0].rk, &parts) !=
                    RD_KAFKA_RESP_ERR_NO_ERROR ||
                    !parts || parts->cnt == 0) {
                        if (parts)
                                rd_kafka_topic_partition_list_destroy(parts);
                        continue;
                }

                TEST_SAY("%s got assignment of %d partition(s)\n",
                         rd_kafka_name(c[0].rk), parts->cnt);
                rd_kafka_topic_partition_list_destroy(parts);
                break;
        }

        TEST_SAY("c[0] got assignment, consuming..\n");
        do_consume(&c[0], 5/*5s*/);

        TEST_SAY("Joining second consumer\n");
        test_consumer_subscribe(c[1].rk, topic);

        /* Just poll second consumer for 10s, the rebalance will not
         * finish until the first consumer polls */
        do_consume(&c[1], 10/*10s*/);

        /* c0: the next call to do_consume/poll will trigger
         *     its rebalance callback, first revoke then assign. */
        c[0].max_rebalance_cnt += 2;
        /* c1: first rebalance */
        c[1].max_rebalance_cnt++;

        TEST_SAY("Expected rebalances: c[0]: %d/%d, c[1]: %d/%d\n",
                 c[0].rebalance_cnt, c[0].max_rebalance_cnt,
                 c[1].rebalance_cnt, c[1].max_rebalance_cnt);

        /* Let rebalances kick in, then consume messages. */
        while (c[0].cnt + c[1].cnt < msgcnt) {
                do_consume(&c[0], 0);
                do_consume(&c[1], 0);
        }

        /* Allow the extra revoke rebalance on close() */
        c[0].max_rebalance_cnt++;
        c[1].max_rebalance_cnt++;

        test_consumer_close(c[0].rk);
        test_consumer_close(c[1].rk);

        rd_kafka_destroy(c[0].rk);
        rd_kafka_destroy(c[1].rk);

        return 0;
}
void rd_kafka_op_destroy (rd_kafka_op_t *rko) {

	switch (rko->rko_type & ~RD_KAFKA_OP_FLAGMASK)
	{
	case RD_KAFKA_OP_FETCH:
		rd_kafka_msg_destroy(NULL, &rko->rko_u.fetch.rkm);
		/* Decrease refcount on rkbuf to eventually rd_free shared buf*/
		if (rko->rko_u.fetch.rkbuf)
			rd_kafka_buf_handle_op(rko, RD_KAFKA_RESP_ERR__DESTROY);

		break;

	case RD_KAFKA_OP_OFFSET_FETCH:
		if (rko->rko_u.offset_fetch.partitions &&
		    rko->rko_u.offset_fetch.do_free)
			rd_kafka_topic_partition_list_destroy(
				rko->rko_u.offset_fetch.partitions);
		break;

	case RD_KAFKA_OP_OFFSET_COMMIT:
		RD_IF_FREE(rko->rko_u.offset_commit.partitions,
			   rd_kafka_topic_partition_list_destroy);
                RD_IF_FREE(rko->rko_u.offset_commit.reason, rd_free);
		break;

	case RD_KAFKA_OP_SUBSCRIBE:
	case RD_KAFKA_OP_GET_SUBSCRIPTION:
		RD_IF_FREE(rko->rko_u.subscribe.topics,
			   rd_kafka_topic_partition_list_destroy);
		break;

	case RD_KAFKA_OP_ASSIGN:
	case RD_KAFKA_OP_GET_ASSIGNMENT:
		RD_IF_FREE(rko->rko_u.assign.partitions,
			   rd_kafka_topic_partition_list_destroy);
		break;

	case RD_KAFKA_OP_REBALANCE:
		RD_IF_FREE(rko->rko_u.rebalance.partitions,
			   rd_kafka_topic_partition_list_destroy);
		break;

	case RD_KAFKA_OP_NAME:
		RD_IF_FREE(rko->rko_u.name.str, rd_free);
		break;

	case RD_KAFKA_OP_ERR:
	case RD_KAFKA_OP_CONSUMER_ERR:
		RD_IF_FREE(rko->rko_u.err.errstr, rd_free);
		rd_kafka_msg_destroy(NULL, &rko->rko_u.err.rkm);
		break;

	case RD_KAFKA_OP_THROTTLE:
		RD_IF_FREE(rko->rko_u.throttle.nodename, rd_free);
		break;

	case RD_KAFKA_OP_STATS:
		RD_IF_FREE(rko->rko_u.stats.json, rd_free);
		break;

	case RD_KAFKA_OP_XMIT_RETRY:
	case RD_KAFKA_OP_XMIT_BUF:
	case RD_KAFKA_OP_RECV_BUF:
		if (rko->rko_u.xbuf.rkbuf)
			rd_kafka_buf_handle_op(rko, RD_KAFKA_RESP_ERR__DESTROY);

		RD_IF_FREE(rko->rko_u.xbuf.rkbuf, rd_kafka_buf_destroy);
		break;

	case RD_KAFKA_OP_DR:
		rd_kafka_msgq_purge(rko->rko_rk, &rko->rko_u.dr.msgq);
		if (rko->rko_u.dr.do_purge2)
			rd_kafka_msgq_purge(rko->rko_rk, &rko->rko_u.dr.msgq2);

		if (rko->rko_u.dr.s_rkt)
			rd_kafka_topic_destroy0(rko->rko_u.dr.s_rkt);
		break;

	case RD_KAFKA_OP_OFFSET_RESET:
		RD_IF_FREE(rko->rko_u.offset_reset.reason, rd_free);
		break;

        case RD_KAFKA_OP_METADATA:
                RD_IF_FREE(rko->rko_u.metadata.md, rd_kafka_metadata_destroy);
                break;

        case RD_KAFKA_OP_LOG:
                rd_free(rko->rko_u.log.str);
                break;

	default:
		break;
	}

        if (rko->rko_type & RD_KAFKA_OP_CB && rko->rko_op_cb) {
                rd_kafka_op_res_t res;
                /* Let callback clean up */
                rko->rko_err = RD_KAFKA_RESP_ERR__DESTROY;
                res = rko->rko_op_cb(rko->rko_rk, NULL, rko);
                assert(res != RD_KAFKA_OP_RES_YIELD);
        }

	RD_IF_FREE(rko->rko_rktp, rd_kafka_toppar_destroy);

	rd_kafka_replyq_destroy(&rko->rko_replyq);

#if ENABLE_DEVEL
        if (rd_atomic32_sub(&rd_kafka_op_cnt, 1) < 0)
                rd_kafka_assert(NULL, !*"rd_kafka_op_cnt < 0");
#endif

	rd_free(rko);
}
Example #23
/* @remark This test will fail if auto topic creation is enabled on the broker
 * since the client will issue a topic-creating metadata request to find
 * a new leader when the topic is removed.
 *
 * To run with trivup, do:
 * ./interactive_broker_version.py .. -conf '{"auto_create_topics":"false"}' ..
 * TESTS=0045 ./run-test.sh -k ./merged
 */
static void do_test_topic_remove (void) {
	char *topic_f = rd_strdup(test_mk_topic_name("topic_f", 1));
	char *topic_g = rd_strdup(test_mk_topic_name("topic_g", 1));
	int parts_f = 5;
	int parts_g = 9;
	rd_kafka_t *rk;
	rd_kafka_conf_t *conf;
	rd_kafka_queue_t *queue;
	rd_kafka_topic_partition_list_t *topics;
	rd_kafka_resp_err_t err;

	/**
	 * Topic removal test:
	 * - Create topic f & g
	 * - Subscribe to f & g
	 * - Verify f & g assignment
	 * - Remove topic f
	 * - Verify g assignment
	 * - Remove topic g
	 * - Verify empty assignment
	 */
	TEST_SAY("Topic removal testing\n");
	test_conf_init(&conf, NULL, 60);

	/* Decrease metadata interval to speed up topic change discovery. */
	test_conf_set(conf, "metadata.max.age.ms", "5000");

	rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE);
	rk = test_create_consumer(test_str_id_generate_tmp(),
				  NULL, conf, NULL, NULL);
	queue = rd_kafka_queue_get_consumer(rk);

	TEST_SAY("Topic removal: creating topic %s (subscribed)\n", topic_f);
	test_create_topic(topic_f, parts_f, 1);

	TEST_SAY("Topic removal: creating topic %s (subscribed)\n", topic_g);
	test_create_topic(topic_g, parts_g, 1);

	rd_sleep(1); // FIXME: do check&wait loop instead

	TEST_SAY("Topic removal: Subscribing to %s & %s\n", topic_f, topic_g);
	topics = rd_kafka_topic_partition_list_new(2);
	rd_kafka_topic_partition_list_add(topics, topic_f, RD_KAFKA_PARTITION_UA);
	rd_kafka_topic_partition_list_add(topics, topic_g, RD_KAFKA_PARTITION_UA);
	err = rd_kafka_subscribe(rk, topics);
	TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NO_ERROR,
		    "%s", rd_kafka_err2str(err));
	rd_kafka_topic_partition_list_destroy(topics);

	await_assignment("Topic removal: both topics exist", rk, queue, 2,
			 topic_f, parts_f,
			 topic_g, parts_g);

	TEST_SAY("Topic removal: removing %s\n", topic_f);
	test_kafka_topics("--delete --topic %s", topic_f);

	await_revoke("Topic removal: rebalance after topic removal", rk, queue);

	await_assignment("Topic removal: one topic exists", rk, queue, 1,
			 topic_g, parts_g);
	
	TEST_SAY("Topic removal: removing %s\n", topic_g);
	test_kafka_topics("--delete --topic %s", topic_g);

	await_revoke("Topic removal: rebalance after 2nd topic removal",
		     rk, queue);

	/* Should not see another rebalance since all topics now removed */
	await_no_rebalance("Topic removal: empty", rk, queue, 10000);

	test_consumer_close(rk);
	rd_kafka_queue_destroy(queue);
	rd_kafka_destroy(rk);

	rd_free(topic_f);
	rd_free(topic_g);
}
int main_0029_assign_offset (int argc, char **argv) {
	const char *topic = test_mk_topic_name(__FUNCTION__, 1);
	rd_kafka_t *rk;
	rd_kafka_topic_t *rkt;
	rd_kafka_topic_conf_t *tconf;
	rd_kafka_topic_partition_list_t *parts;
        uint64_t testid;
	int i;
	test_timing_t t_simple, t_hl;
	test_msgver_t mv;

	test_conf_init(NULL, &tconf, 20 + (test_session_timeout_ms * 3 / 1000));
	test_topic_conf_set(tconf, "auto.offset.reset", "smallest");

	/* Produce X messages to Y partitions so we get a
	 * nice seekable 0..X offset on each partition. */
        /* Produce messages */
	testid = test_id_generate();
	rk = test_create_producer();
	rkt = test_create_producer_topic(rk, topic, NULL);

	parts = rd_kafka_topic_partition_list_new(partitions);

	for (i = 0 ; i < partitions ; i++) {
		test_produce_msgs(rk, rkt, testid, i, 0, msgcnt, NULL, 0);
		rd_kafka_topic_partition_list_add(parts, topic, i)->offset =
			msgcnt / 2;
	}

	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);


	/* Simple consumer */
	TIMING_START(&t_simple, "SIMPLE.CONSUMER");
	rk = test_create_consumer(topic, NULL, NULL, NULL);
	test_msgver_init(&mv, testid);
	test_consumer_assign("SIMPLE.ASSIGN", rk, parts);
	test_consumer_poll("SIMPLE.CONSUME", rk, testid, -1, 0,
			   partitions * (msgcnt / 2), &mv);
	for (i = 0 ; i < partitions ; i++)
		test_msgver_verify_part("HL.MSGS", &mv, TEST_MSGVER_ALL_PART,
					topic, i, msgcnt/2, msgcnt/2);
	test_msgver_clear(&mv);
	test_consumer_close(rk);
	rd_kafka_destroy(rk);
	TIMING_STOP(&t_simple);

	rd_kafka_topic_partition_list_destroy(parts);
	

	/* High-level consumer.
	 * Offsets are set in rebalance callback. */
	TIMING_START(&t_hl, "HL.CONSUMER");
	test_msgver_init(&mv, testid);
	rk = test_create_consumer(topic, rebalance_cb, NULL, NULL);
	test_consumer_subscribe(rk, topic);
	test_consumer_poll("HL.CONSUME", rk, testid, -1, 0,
			   partitions * (msgcnt / 2), &mv);
	for (i = 0 ; i < partitions ; i++)
		test_msgver_verify_part("HL.MSGS", &mv, TEST_MSGVER_ALL_PART,
					topic, i, msgcnt/2, msgcnt/2);
	test_msgver_clear(&mv);
	test_consumer_close(rk);
	rd_kafka_destroy(rk);
	TIMING_STOP(&t_hl);

	rd_kafka_topic_conf_destroy(tconf);

        return 0;
}
Example #25
int main_0029_assign_offset (int argc, char **argv) {
	const char *topic = test_mk_topic_name(__FUNCTION__, 1);
	rd_kafka_t *rk;
	rd_kafka_topic_t *rkt;
	rd_kafka_topic_partition_list_t *parts;
        uint64_t testid;
	int i;
	test_timing_t t_simple, t_hl;
	test_msgver_t mv;

	test_conf_init(NULL, NULL, 20 + (test_session_timeout_ms * 3 / 1000));

	/* Produce X messages to Y partitions so we get a
	 * nice seekable 0..X offset on each partition. */
        /* Produce messages */
	testid = test_id_generate();
	rk = test_create_producer();
	rkt = test_create_producer_topic(rk, topic, NULL);

	parts = rd_kafka_topic_partition_list_new(partitions);

	for (i = 0 ; i < partitions ; i++) {
		test_produce_msgs(rk, rkt, testid, i, 0, msgcnt, NULL, 0);
		/* Set start offset */
		rd_kafka_topic_partition_list_add(parts, topic, i)->offset =
			msgcnt / 2;
	}

	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);


	/* Simple consumer */
	TIMING_START(&t_simple, "SIMPLE.CONSUMER");
	rk = test_create_consumer(topic, NULL, NULL, NULL);
	test_msgver_init(&mv, testid);
	test_consumer_assign("SIMPLE.ASSIGN", rk, parts);
	test_consumer_poll("SIMPLE.CONSUME", rk, testid, -1, 0,
			   partitions * (msgcnt / 2), &mv);
	for (i = 0 ; i < partitions ; i++)
		test_msgver_verify_part("HL.MSGS", &mv, TEST_MSGVER_ALL_PART,
					topic, i, msgcnt/2, msgcnt/2);
	test_msgver_clear(&mv);
	test_consumer_close(rk);
	rd_kafka_destroy(rk);
	TIMING_STOP(&t_simple);

	rd_kafka_topic_partition_list_destroy(parts);
	

	/* High-level consumer: method 1
	 * Offsets are set in rebalance callback. */
	if (test_broker_version >= TEST_BRKVER(0,9,0,0)) {
		reb_method = REB_METHOD_1;
		TIMING_START(&t_hl, "HL.CONSUMER");
		test_msgver_init(&mv, testid);
		rk = test_create_consumer(topic, rebalance_cb, NULL, NULL);
		test_consumer_subscribe(rk, topic);
		test_consumer_poll("HL.CONSUME", rk, testid, -1, 0,
				   partitions * (msgcnt / 2), &mv);
		for (i = 0 ; i < partitions ; i++)
			test_msgver_verify_part("HL.MSGS", &mv,
						TEST_MSGVER_ALL_PART,
						topic, i, msgcnt/2, msgcnt/2);
		test_msgver_clear(&mv);
		test_consumer_close(rk);
		rd_kafka_destroy(rk);
		TIMING_STOP(&t_hl);


		/* High-level consumer: method 2:
		 * first two partitions are with fixed absolute offset, rest are
		 * auto offset (stored, which is now at end). 
		 * Offsets are set in rebalance callback. */
		reb_method = REB_METHOD_2;
		TIMING_START(&t_hl, "HL.CONSUMER2");
		test_msgver_init(&mv, testid);
		rk = test_create_consumer(topic, rebalance_cb, NULL, NULL);
		test_consumer_subscribe(rk, topic);
		test_consumer_poll("HL.CONSUME2", rk, testid, partitions, 0,
				   2 * (msgcnt / 2), &mv);
		for (i = 0 ; i < partitions ; i++) {
			if (i < 2)
				test_msgver_verify_part("HL.MSGS2.A", &mv,
							TEST_MSGVER_ALL_PART,
							topic, i, msgcnt/2,
							msgcnt/2);
		}
		test_msgver_clear(&mv);
		test_consumer_close(rk);
		rd_kafka_destroy(rk);
		TIMING_STOP(&t_hl);
	}

        return 0;
}
Example #26
int main_0019_list_groups (int argc, char **argv) {
	const char *topic = test_mk_topic_name(__FUNCTION__, 1);
#define _CONS_CNT 2
        char *groups[_CONS_CNT];
	rd_kafka_t *rk, *rk_c[_CONS_CNT];
	rd_kafka_topic_partition_list_t *topics;
	rd_kafka_resp_err_t err;
        test_timing_t t_grps;
	int i;
        int groups_seen;
	rd_kafka_topic_t *rkt;

        /* Handle for group listings */
        rk = test_create_producer();

	/* Produce messages so that topic is auto created */
	rkt = test_create_topic_object(rk, topic, NULL);
	test_produce_msgs(rk, rkt, 0, 0, 0, 10, NULL, 64);
	rd_kafka_topic_destroy(rkt);

        /* Query groups before creation, should not list our groups. */
        groups_seen = list_groups(rk, NULL, 0, "should be none");
        if (groups_seen != 0)
                TEST_FAIL("Saw %d groups when there wasn't "
                          "supposed to be any\n", groups_seen);

	/* Fill in topic subscription set */
	topics = rd_kafka_topic_partition_list_new(1);
	rd_kafka_topic_partition_list_add(topics, topic, -1);

	/* Create consumers and start subscription */
	for (i = 0 ; i < _CONS_CNT ; i++) {
                groups[i] = malloc(32);
                test_str_id_generate(groups[i], 32);
		rk_c[i] = test_create_consumer(groups[i],
					       NULL, NULL, NULL);

		err = rd_kafka_poll_set_consumer(rk_c[i]);
		if (err)
			TEST_FAIL("poll_set_consumer: %s\n",
				  rd_kafka_err2str(err));

		err = rd_kafka_subscribe(rk_c[i], topics);
		if (err)
			TEST_FAIL("subscribe: %s\n", rd_kafka_err2str(err));
	}

        rd_kafka_topic_partition_list_destroy(topics);


        TIMING_START(&t_grps, "WAIT.GROUPS");
        /* Query groups again until both groups are seen. */
        while (1) {
                int groups_seen = list_groups(rk, (char **)groups, _CONS_CNT,
                                              "should see my groups");
                if (groups_seen == _CONS_CNT)
                        break;
                rd_sleep(1);
        }
        TIMING_STOP(&t_grps);


	TEST_SAY("Closing remaining consumers\n");
	for (i = 0 ; i < _CONS_CNT ; i++) {
		test_timing_t t_close;
		if (!rk_c[i])
			continue;

		TEST_SAY("Closing %s\n", rd_kafka_name(rk_c[i]));
		TIMING_START(&t_close, "CONSUMER.CLOSE");
		err = rd_kafka_consumer_close(rk_c[i]);
		TIMING_STOP(&t_close);
		if (err)
			TEST_FAIL("consumer_close failed: %s\n",
				  rd_kafka_err2str(err));

		rd_kafka_destroy(rk_c[i]);
		rk_c[i] = NULL;

                free(groups[i]);
	}

        rd_kafka_destroy(rk);

        return 0;
}
int main (int argc, char **argv) {
        char mode = 'C';
	char *brokers = "localhost:9092";
	int opt;
	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *topic_conf;
	char errstr[512];
	const char *debug = NULL;
	int do_conf_dump = 0;
	char tmp[16];
        rd_kafka_resp_err_t err;
        char *group = NULL;
        rd_kafka_topic_partition_list_t *topics;
        int i;

	quiet = !isatty(STDIN_FILENO);

	/* Kafka configuration */
	conf = rd_kafka_conf_new();

        /* Set logger */
        rd_kafka_conf_set_log_cb(conf, logger);

	/* Quick termination */
	snprintf(tmp, sizeof(tmp), "%i", SIGIO);
	rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0);

	/* Topic configuration */
	topic_conf = rd_kafka_topic_conf_new();

	while ((opt = getopt(argc, argv, "g:b:qd:eX:As:DO")) != -1) {
		switch (opt) {
		case 'b':
			brokers = optarg;
			break;
                case 'g':
                        group = optarg;
                        break;
		case 'e':
			exit_eof = 1;
			break;
		case 'd':
			debug = optarg;
			break;
		case 'q':
			quiet = 1;
			break;
		case 'A':
			output = OUTPUT_RAW;
			break;
		case 'X':
		{
			char *name, *val;
			rd_kafka_conf_res_t res;

			if (!strcmp(optarg, "list") ||
			    !strcmp(optarg, "help")) {
				rd_kafka_conf_properties_show(stdout);
				exit(0);
			}

			if (!strcmp(optarg, "dump")) {
				do_conf_dump = 1;
				continue;
			}

			name = optarg;
			if (!(val = strchr(name, '='))) {
				fprintf(stderr, "%% Expected "
					"-X property=value, not %s\n", name);
				exit(1);
			}

			*val = '\0';
			val++;

			res = RD_KAFKA_CONF_UNKNOWN;
			/* Try "topic." prefixed properties on topic
			 * conf first, and then fall through to global if
			 * it didn't match a topic configuration property. */
			if (!strncmp(name, "topic.", strlen("topic.")))
				res = rd_kafka_topic_conf_set(topic_conf,
							      name+
							      strlen("topic."),
							      val,
							      errstr,
							      sizeof(errstr));

			if (res == RD_KAFKA_CONF_UNKNOWN)
				res = rd_kafka_conf_set(conf, name, val,
							errstr, sizeof(errstr));

			if (res != RD_KAFKA_CONF_OK) {
				fprintf(stderr, "%% %s\n", errstr);
				exit(1);
			}
		}
		break;

                case 'D':
                case 'O':
                        mode = opt;
                        break;

		default:
			goto usage;
		}
	}


	if (do_conf_dump) {
		const char **arr;
		size_t cnt;
		int pass;

		for (pass = 0 ; pass < 2 ; pass++) {
			if (pass == 0) {
				arr = rd_kafka_conf_dump(conf, &cnt);
				printf("# Global config\n");
			} else {
				printf("# Topic config\n");
				arr = rd_kafka_topic_conf_dump(topic_conf,
							       &cnt);
			}

			for (i = 0 ; i < (int)cnt ; i += 2)
				printf("%s = %s\n",
				       arr[i], arr[i+1]);

			printf("\n");

			rd_kafka_conf_dump_free(arr, cnt);
		}

		exit(0);
	}


	if (strchr("OC", mode) && optind == argc) {
	usage:
		fprintf(stderr,
			"Usage: %s [options] <topic[:part]> <topic[:part]>..\n"
			"\n"
			"librdkafka version %s (0x%08x)\n"
			"\n"
			" Options:\n"
                        "  -g <group>      Consumer group (%s)\n"
			"  -b <brokers>    Broker address (%s)\n"
			"  -e              Exit consumer when last message\n"
			"                  in partition has been received.\n"
                        "  -D              Describe group.\n"
                        "  -O              Get commmitted offset(s)\n"
			"  -d [facs..]     Enable debugging contexts:\n"
			"                  %s\n"
			"  -q              Be quiet\n"
			"  -A              Raw payload output (consumer)\n"
			"  -X <prop=name> Set arbitrary librdkafka "
			"configuration property\n"
			"               Properties prefixed with \"topic.\" "
			"will be set on topic object.\n"
			"               Use '-X list' to see the full list\n"
			"               of supported properties.\n"
			"\n"
			"\n",
			argv[0],
			rd_kafka_version_str(), rd_kafka_version(),
                        group, brokers,
			RD_KAFKA_DEBUG_CONTEXTS);
		exit(1);
	}


	signal(SIGINT, stop);
	signal(SIGUSR1, sig_usr1);

	if (debug &&
	    rd_kafka_conf_set(conf, "debug", debug, errstr, sizeof(errstr)) !=
	    RD_KAFKA_CONF_OK) {
		fprintf(stderr, "%% Debug configuration failed: %s: %s\n",
			errstr, debug);
		exit(1);
	}

        /*
         * Client/Consumer group
         */

        if (strchr("CO", mode)) {
                /* Consumer groups require a group id */
                if (!group)
                        group = "rdkafka_consumer_example";
                if (rd_kafka_conf_set(conf, "group.id", group,
                                      errstr, sizeof(errstr)) !=
                    RD_KAFKA_CONF_OK) {
                        fprintf(stderr, "%% %s\n", errstr);
                        exit(1);
                }

                /* Consumer groups always use broker based offset storage */
                if (rd_kafka_topic_conf_set(topic_conf, "offset.store.method",
                                            "broker",
                                            errstr, sizeof(errstr)) !=
                    RD_KAFKA_CONF_OK) {
                        fprintf(stderr, "%% %s\n", errstr);
                        exit(1);
                }

                /* Set default topic config for pattern-matched topics. */
                rd_kafka_conf_set_default_topic_conf(conf, topic_conf);

                /* Callback called on partition assignment changes */
                rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);
        }

        /* Create Kafka handle */
        if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf,
                                errstr, sizeof(errstr)))) {
                fprintf(stderr,
                        "%% Failed to create new consumer: %s\n",
                        errstr);
                exit(1);
        }

        rd_kafka_set_log_level(rk, LOG_DEBUG);

        /* Add brokers */
        if (rd_kafka_brokers_add(rk, brokers) == 0) {
                fprintf(stderr, "%% No valid brokers specified\n");
                exit(1);
        }


        if (mode == 'D') {
                int r;
                /* Describe groups */
                r = describe_groups(rk, group);

                rd_kafka_destroy(rk);
                exit(r == -1 ? 1 : 0);
        }

        /* Redirect rd_kafka_poll() to consumer_poll() */
        rd_kafka_poll_set_consumer(rk);

        topics = rd_kafka_topic_partition_list_new(argc - optind);
        for (i = optind ; i < argc ; i++) {
                /* Parse "topic[:part] */
                char *topic = argv[i];
                char *t;
                int32_t partition = -1;

                if ((t = strstr(topic, ":"))) {
                        *t = '\0';
                        partition = atoi(t+1);
                }

                rd_kafka_topic_partition_list_add(topics, topic, partition);
        }

        if (mode == 'O') {
                /* Offset query */

                err = rd_kafka_position(rk, topics, 5000);
                if (err) {
                        fprintf(stderr, "%% Failed to fetch offsets: %s\n",
                                rd_kafka_err2str(err));
                        exit(1);
                }

                for (i = 0 ; i < topics->cnt ; i++) {
                        rd_kafka_topic_partition_t *p = &topics->elems[i];
                        printf("Topic \"%s\" partition %"PRId32,
                               p->topic, p->partition);
                        if (p->err)
                                printf(" error %s",
                                       rd_kafka_err2str(p->err));
                        else {
                                printf(" offset %"PRId64"",
                                       p->offset);

                                if (p->metadata_size)
                                        printf(" (%d bytes of metadata)",
                                               (int)p->metadata_size);
                        }
                        printf("\n");
                }

                goto done;
        }


        if ((err = rd_kafka_subscribe(rk, topics))) {
                fprintf(stderr, "%% Failed to start consuming topics: %s\n",
                        rd_kafka_err2str(err));
                exit(1);
        }

        while (run) {
                rd_kafka_message_t *rkmessage;

                rkmessage = rd_kafka_consumer_poll(rk, 1000);
                if (rkmessage) {
                        msg_consume(rkmessage, NULL);
                        rd_kafka_message_destroy(rkmessage);
                }
        }

done:
        err = rd_kafka_consumer_close(rk);
        if (err)
                fprintf(stderr, "%% Failed to close consumer: %s\n",
                        rd_kafka_err2str(err));
        else
                fprintf(stderr, "%% Consumer closed\n");

        rd_kafka_topic_partition_list_destroy(topics);

        /* Destroy handle */
        rd_kafka_destroy(rk);

	/* Let background threads clean up and terminate cleanly. */
	run = 5;
	while (run-- > 0 && rd_kafka_wait_destroyed(1000) == -1)
		printf("Waiting for librdkafka to decommission\n");

	return 0;
}
Example #28
static void do_offset_test (const char *what, int auto_commit, int auto_store,
			    int async) {
	test_timing_t t_all;
	char groupid[64];
	rd_kafka_t *rk;
	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *tconf;
	int cnt = 0;
	const int extra_cnt = 5;
	rd_kafka_resp_err_t err;
	rd_kafka_topic_partition_list_t *parts;
	rd_kafka_topic_partition_t *rktpar;
	int64_t next_offset = -1;

	test_conf_init(&conf, &tconf, 20);
	test_conf_set(conf, "enable.auto.commit", auto_commit ? "true":"false");
	test_conf_set(conf, "enable.auto.offset.store", auto_store ?"true":"false");
	test_conf_set(conf, "auto.commit.interval.ms", "500");
	rd_kafka_conf_set_offset_commit_cb(conf, offset_commit_cb);
	test_topic_conf_set(tconf, "auto.offset.reset", "smallest");
	test_str_id_generate(groupid, sizeof(groupid));
	test_conf_set(conf, "group.id", groupid);
	rd_kafka_conf_set_default_topic_conf(conf, tconf);

	TEST_SAY(_C_MAG "[ do_offset_test: %s with group.id %s ]\n",
		 what, groupid);

	TIMING_START(&t_all, what);

	expected_offset  = 0;
	committed_offset = -1;

	/* MO:
	 *  - Create consumer.
	 *  - Start consuming from beginning
	 *  - Perform store & commits according to settings
	 *  - Stop storing&committing when half of the messages are consumed,
	 *  - but consume 5 more to check against.
	 *  - Query position.
	 *  - Destroy consumer.
	 *  - Create new consumer with same group.id using stored offsets
	 *  - Should consume the expected message.
	 */

	/* Create kafka instance */
	rk = test_create_handle(RD_KAFKA_CONSUMER, rd_kafka_conf_dup(conf));

	rd_kafka_poll_set_consumer(rk);

	test_consumer_subscribe(rk, topic);

	while (cnt - extra_cnt < msgcnt / 2) {
		rd_kafka_message_t *rkm;

		rkm = rd_kafka_consumer_poll(rk, 10*1000);
		if (!rkm)
			continue;

		if (rkm->err == RD_KAFKA_RESP_ERR__TIMED_OUT)
			TEST_FAIL("%s: Timed out waiting for message %d", what,cnt);
		else if (rkm->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
			rd_kafka_message_destroy(rkm);
			continue;
		} else if (rkm->err)
			TEST_FAIL("%s: Consumer error: %s",
				  what, rd_kafka_message_errstr(rkm));

		/* Offset of next message. */
		next_offset = rkm->offset + 1;

		if (cnt < msgcnt / 2) {
			if (!auto_store) {
				err = rd_kafka_offset_store(rkm->rkt,rkm->partition,
							    rkm->offset);
				if (err)
					TEST_FAIL("%s: offset_store failed: %s\n",
						  what, rd_kafka_err2str(err));
			}
			expected_offset = rkm->offset+1;
			if (!auto_commit) {
				test_timing_t t_commit;
				TIMING_START(&t_commit,
					     async?"commit.async":"commit.sync");
				err = rd_kafka_commit_message(rk, rkm, async);
				TIMING_STOP(&t_commit);
				if (err)
					TEST_FAIL("%s: commit failed: %s\n",
						  what, rd_kafka_err2str(err));
			}

		} else if (auto_store && auto_commit)
			expected_offset = rkm->offset+1;

		rd_kafka_message_destroy(rkm);
		cnt++;
	}

	TEST_SAY("%s: done consuming after %d messages, at offset %"PRId64"\n",
		 what, cnt, expected_offset);

	if ((err = rd_kafka_assignment(rk, &parts)))
		TEST_FAIL("%s: failed to get assignment(): %s\n",
			  what, rd_kafka_err2str(err));

	/* Verify position */
	if ((err = rd_kafka_position(rk, parts)))
		TEST_FAIL("%s: failed to get position(): %s\n",
			  what, rd_kafka_err2str(err));
	if (!(rktpar = rd_kafka_topic_partition_list_find(parts,
							  topic, partition)))
		TEST_FAIL("%s: position(): topic lost\n", what);
	if (rktpar->offset != next_offset)
		TEST_FAIL("%s: Expected position() offset %"PRId64", got %"PRId64,
			  what, next_offset, rktpar->offset);
	TEST_SAY("%s: Position is at %"PRId64", good!\n",
		 what, rktpar->offset);

	/* Pause messages while waiting so we can serve callbacks
	 * without having more messages received. */
	if ((err = rd_kafka_pause_partitions(rk, parts)))
		TEST_FAIL("%s: failed to pause partitions: %s\n",
			  what, rd_kafka_err2str(err));
	rd_kafka_topic_partition_list_destroy(parts);

	/* Fire off any enqueued offset_commit_cb */
	test_consumer_poll_no_msgs(what, rk, testid, 0);

	TEST_SAY("%s: committed_offset %"PRId64", expected_offset %"PRId64"\n",
		 what, committed_offset, expected_offset);

	if (!auto_commit && !async) {
		/* Sync commits should be up to date at this point. */
		if (committed_offset != expected_offset)
			TEST_FAIL("%s: Sync commit: committed offset %"PRId64
				  " should be same as expected offset "
				  "%"PRId64,
				  what, committed_offset, expected_offset);
	} else {

		/* Wait for offset commits to catch up */
		while (committed_offset < expected_offset) {
			TEST_SAYL(3, "%s: Wait for committed offset %"PRId64
				  " to reach expected offset %"PRId64"\n",
				  what, committed_offset, expected_offset);
			test_consumer_poll_no_msgs(what, rk, testid, 1000);
		}

	}

	TEST_SAY("%s: phase 1 complete, %d messages consumed, "
		 "next expected offset is %"PRId64"\n",
		 what, cnt, expected_offset);

        /* Issue #827: cause committed() to return prematurely by specifying
         *             low timeout. The bug (use after free) will only
         *             be caught by valgrind. */
        do {
                parts = rd_kafka_topic_partition_list_new(1);
                rd_kafka_topic_partition_list_add(parts, topic, partition);
                err = rd_kafka_committed(rk, parts, 1);
                rd_kafka_topic_partition_list_destroy(parts);
                TEST_SAY("Issue #827: committed() returned %s\n",
                         rd_kafka_err2str(err));
        } while (err != RD_KAFKA_RESP_ERR__TIMED_OUT);

	/* Query position */
	parts = rd_kafka_topic_partition_list_new(1);
	rd_kafka_topic_partition_list_add(parts, topic, partition);

	err = rd_kafka_committed(rk, parts, tmout_multip(5*1000));
	if (err)
		TEST_FAIL("%s: committed() failed: %s", what, rd_kafka_err2str(err));
	if (!(rktpar = rd_kafka_topic_partition_list_find(parts,
							  topic, partition)))
		TEST_FAIL("%s: committed(): topic lost\n", what);
	if (rktpar->offset != expected_offset)
		TEST_FAIL("%s: Expected committed() offset %"PRId64", got %"PRId64,
			  what, expected_offset, rktpar->offset);
	TEST_SAY("%s: Committed offset is at %"PRId64", good!\n",
		 what, rktpar->offset);

	rd_kafka_topic_partition_list_destroy(parts);
	test_consumer_close(rk);
	rd_kafka_destroy(rk);



	/* Fire up a new consumer and continue from where we left off. */
	TEST_SAY("%s: phase 2: starting new consumer to resume consumption\n",what);
	rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
	rd_kafka_poll_set_consumer(rk);

	test_consumer_subscribe(rk, topic);

	while (cnt < msgcnt) {
		rd_kafka_message_t *rkm;

		rkm = rd_kafka_consumer_poll(rk, 10*1000);
		if (!rkm)
			continue;

		if (rkm->err == RD_KAFKA_RESP_ERR__TIMED_OUT)
			TEST_FAIL("%s: Timed out waiting for message %d", what,cnt);
		else if (rkm->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
			rd_kafka_message_destroy(rkm);
			continue;
		} else if (rkm->err)
			TEST_FAIL("%s: Consumer error: %s",
				  what, rd_kafka_message_errstr(rkm));

		if (rkm->offset != expected_offset)
			TEST_FAIL("%s: Received message offset %"PRId64
				  ", expected %"PRId64" at msgcnt %d/%d\n",
				  what, rkm->offset, expected_offset,
				  cnt, msgcnt);

		rd_kafka_message_destroy(rkm);
		expected_offset++;
		cnt++;
	}


	TEST_SAY("%s: phase 2: complete\n", what);
	test_consumer_close(rk);
	rd_kafka_destroy(rk);
	

	TIMING_STOP(&t_all);
}
static int nonexist_part (void) {
	const char *topic = test_mk_topic_name(__FUNCTION__, 1);
	rd_kafka_t *rk;
	rd_kafka_topic_partition_list_t *parts;
	rd_kafka_resp_err_t err;
        test_timing_t t_pos;
        const int msgcnt = 1000;
        uint64_t testid;
        int i;
	int it, iterations = 5;

        /* Produce messages */
        testid = test_produce_msgs_easy(topic, 0,
                                        RD_KAFKA_PARTITION_UA, msgcnt);

	for (it = 0 ; it < iterations ; it++) {
		char group_id[32];

		test_str_id_generate(group_id, sizeof(group_id));

		TEST_SAY("Iteration %d/%d, using group.id %s\n", it, iterations,
			 group_id);

		/* Consume messages */
		test_consume_msgs_easy(group_id, topic, testid, -1,
				       msgcnt, NULL);

		/*
		 * Now start a new consumer and query stored offsets (positions)
		 */

		rk = test_create_consumer(group_id, NULL, NULL, NULL, NULL);

		/* Fill in partition set */
		parts = rd_kafka_topic_partition_list_new(2);
		/* existing */
		rd_kafka_topic_partition_list_add(parts, topic, 0);
		/* non-existing */
		rd_kafka_topic_partition_list_add(parts, topic, 123);


		TIMING_START(&t_pos, "COMMITTED");
		err = rd_kafka_committed(rk, parts, tmout_multip(5000));
		TIMING_STOP(&t_pos);
		if (err)
			TEST_FAIL("Failed to acquire committed offsets: %s\n",
				  rd_kafka_err2str(err));

		for (i = 0 ; i < parts->cnt ; i++) {
			TEST_SAY("%s [%"PRId32"] returned offset %"PRId64
				 ": %s\n",
				 parts->elems[i].topic,
				 parts->elems[i].partition,
				 parts->elems[i].offset,
				 rd_kafka_err2str(parts->elems[i].err));
			if (parts->elems[i].partition == 0 &&
			    parts->elems[i].offset <= 0)
				TEST_FAIL("Partition %"PRId32" should have a "
					  "proper offset, not %"PRId64"\n",
					  parts->elems[i].partition,
					  parts->elems[i].offset);
			else if (parts->elems[i].partition == 123 &&
				 parts->elems[i].offset !=
				 RD_KAFKA_OFFSET_INVALID)
				TEST_FAIL("Partition %"PRId32
					  " should have failed\n",
					  parts->elems[i].partition);
		}

		rd_kafka_topic_partition_list_destroy(parts);

		test_consumer_close(rk);

		/* Hangs if bug isn't fixed */
		rd_kafka_destroy(rk);
	}

        return 0;
}
int main_0049_consume_conn_close (int argc, char **argv) {
        rd_kafka_t *rk;
        const char *topic = test_mk_topic_name("0049_consume_conn_close", 1);
        uint64_t testid;
        int msgcnt = test_on_ci ? 1000 : 10000;
        test_msgver_t mv;
        rd_kafka_conf_t *conf;
        rd_kafka_topic_conf_t *tconf;
        rd_kafka_topic_partition_list_t *assignment;
        rd_kafka_resp_err_t err;

        if (!test_conf_match(NULL, "sasl.mechanisms", "GSSAPI")) {
                TEST_SKIP("KNOWN ISSUE: ApiVersionRequest+SaslHandshake "
                          "will not play well with sudden disconnects\n");
                return 0;
        }

        test_conf_init(&conf, &tconf, 60);
        /* Want an even number so it is divisible by two without surprises */
        msgcnt = (msgcnt / (int)test_timeout_multiplier) & ~1;

        testid = test_id_generate();
        test_produce_msgs_easy(topic, testid, RD_KAFKA_PARTITION_UA, msgcnt);


        test_socket_enable(conf);
        test_curr->connect_cb = connect_cb;
        test_curr->is_fatal_cb = is_fatal_cb;

        test_topic_conf_set(tconf, "auto.offset.reset", "smallest");

        rk = test_create_consumer(topic, NULL, conf, tconf);

        test_consumer_subscribe(rk, topic);

        test_msgver_init(&mv, testid);

        test_consumer_poll("consume.up", rk, testid, -1, 0, msgcnt/2, &mv);

        err = rd_kafka_assignment(rk, &assignment);
        TEST_ASSERT(!err, "assignment() failed: %s", rd_kafka_err2str(err));
        TEST_ASSERT(assignment->cnt > 0, "empty assignment");

        TEST_SAY("Bringing down the network\n");

        TEST_LOCK();
        simulate_network_down = 1;
        TEST_UNLOCK();
        test_socket_close_all(test_curr, 1/*reinit*/);

        TEST_SAY("Waiting for session timeout to expire (6s), and then some\n");

        /* Commit an offset, which should fail, to trigger the offset commit
         * callback fallback (CONSUMER_ERR) */
        assignment->elems[0].offset = 123456789;
        TEST_SAY("Committing offsets while down, should fail eventually\n");
        err = rd_kafka_commit(rk, assignment, 1/*async*/);
        TEST_ASSERT(!err, "async commit failed: %s", rd_kafka_err2str(err));
        rd_kafka_topic_partition_list_destroy(assignment);

        rd_sleep(10);

        TEST_SAY("Bringing network back up\n");
        TEST_LOCK();
        simulate_network_down = 0;
        TEST_UNLOCK();

        TEST_SAY("Continuing to consume..\n");
        test_consumer_poll("consume.up2", rk, testid, -1, msgcnt/2, msgcnt/2,
                           &mv);

        test_msgver_verify("consume", &mv, TEST_MSGVER_ORDER|TEST_MSGVER_DUP,
                           0, msgcnt);

        test_msgver_clear(&mv);

        test_consumer_close(rk);
        rd_kafka_destroy(rk);

        return 0;
}