Code Example #1
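/* Exercise a maximum-length (255-char) topic name end-to-end: create the
 * topic, subscribe with a plain consumer (which has been known to crash on
 * a broker bug with long names), then produce and consume messages. */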
int main_0028_long_topicnames (int argc, char **argv) {
        const int msgcnt = 1000;
        uint64_t testid;
        char topic[256];
        rd_kafka_t *rk_c;

        memset(topic, 'a', sizeof(topic)-1);
        topic[sizeof(topic)-1] = '\0';

        strncpy(topic, test_mk_topic_name(topic, 1), sizeof(topic)-1);

        TEST_SAY("Using topic name of %d bytes: %s\n",
                 (int)strlen(topic), topic);

        /* Create topic */
        test_create_topic(topic, 1, 1);

        /* First try a non-verifying consumer. The consumer has been known
         * to crash when the broker bug kicks in. */
        rk_c = test_create_consumer(topic, NULL, NULL, NULL, NULL);
        test_consumer_subscribe(rk_c, topic);
        test_consumer_poll_no_msgs("consume.nomsgs", rk_c, 0, 5000);
        test_consumer_close(rk_c);
        rd_kafka_destroy(rk_c); /* close() does not free the handle */

        /* Produce messages */
        testid = test_produce_msgs_easy(topic, 0,
                                        RD_KAFKA_PARTITION_UA, msgcnt);

        /* Consume messages */
        test_consume_msgs_easy(NULL, topic, testid, msgcnt);

        return 0;
}
Code Example #2
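/* Exercise the offset commit/store matrix: every combination of
 * enable.auto.commit, enable.auto.offset.store and sync vs. async manual
 * commits, plus the empty-commit and nonexistent-commit edge cases. */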
/* These are file-scope statics shared with the helper functions in the
 * original test file; shown here (initial values assumed) so the excerpt
 * is self-contained. */
static const char *topic;
static uint64_t testid;
static const int partition = 0;  /* assumed */
static const int msgcnt = 100;   /* assumed */

int main_0030_offset_commit (int argc, char **argv) {

	topic = test_mk_topic_name(__FUNCTION__, 1);
	testid = test_produce_msgs_easy(topic, 0, partition, msgcnt);

	do_offset_test("AUTO.COMMIT & AUTO.STORE",
		       1 /* enable.auto.commit */,
		       1 /* enable.auto.offset.store */,
		       0 /* not used. */);

	do_offset_test("AUTO.COMMIT & MANUAL.STORE",
		       1 /* enable.auto.commit */,
		       0 /* enable.auto.offset.store */,
		       0 /* not used */);

	do_offset_test("MANUAL.COMMIT.ASYNC & AUTO.STORE",
		       0 /* enable.auto.commit */,
		       1 /* enable.auto.offset.store */,
		       1 /* async */);

	do_offset_test("MANUAL.COMMIT.SYNC & AUTO.STORE",
		       0 /* enable.auto.commit */,
		       1 /* enable.auto.offset.store */,
		       0 /* async */);

	do_offset_test("MANUAL.COMMIT.ASYNC & MANUAL.STORE",
		       0 /* enable.auto.commit */,
		       0 /* enable.auto.offset.store */,
		       1 /* async */);

	do_offset_test("MANUAL.COMMIT.SYNC & MANUAL.STORE",
		       0 /* enable.auto.commit */,
		       0 /* enable.auto.offset.store */,
		       0 /* async */);

	do_empty_commit();

	do_nonexist_commit();

	return 0;
}
Code Example #3
File: 0093-holb.c Project: Whissi/librdkafka
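/* "holb" = head-of-line blocking: two consumers in the same group, one of
 * which stalls between polls; the test counts rebalance callbacks and
 * verifies that rebalancing and consumption still complete. */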
int main_0093_holb_consumer (int argc, char **argv) {
        const char *topic = test_mk_topic_name("0093_holb_consumer", 1);
        uint64_t testid;
        const int msgcnt = 100;
        struct _consumer c[_CONSUMER_CNT] = RD_ZERO_INIT;
        rd_kafka_conf_t *conf;

        testid = test_id_generate();

        test_conf_init(&conf, NULL, 60);

        test_create_topic(topic, 1, 1);

        test_produce_msgs_easy(topic, testid, 0, msgcnt);

        test_conf_set(conf, "session.timeout.ms", "6000");
        test_conf_set(conf, "max.poll.interval.ms", "20000");
        test_conf_set(conf, "socket.timeout.ms", "3000");
        test_conf_set(conf, "auto.offset.reset", "earliest");
        /* Trigger other requests often */
        test_conf_set(conf, "topic.metadata.refresh.interval.ms", "500");
        rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);

        rd_kafka_conf_set_opaque(conf, &c[0]);
        c[0].rk = test_create_consumer(topic, NULL,
                                       rd_kafka_conf_dup(conf), NULL);

        rd_kafka_conf_set_opaque(conf, &c[1]);
        c[1].rk = test_create_consumer(topic, NULL, conf, NULL);

        test_consumer_subscribe(c[0].rk, topic);

        /* c0: assign */
        c[0].max_rebalance_cnt = 1;

        /* c1: none, hasn't joined yet */
        c[1].max_rebalance_cnt = 0;

        TEST_SAY("Waiting for c[0] assignment\n");
        while (1) {
                rd_kafka_topic_partition_list_t *parts = NULL;

                do_consume(&c[0], 1/*1s*/);

                if (rd_kafka_assignment(c[0].rk, &parts) !=
                    RD_KAFKA_RESP_ERR_NO_ERROR ||
                    !parts || parts->cnt == 0) {
                        if (parts)
                                rd_kafka_topic_partition_list_destroy(parts);
                        continue;
                }

                TEST_SAY("%s got assignment of %d partition(s)\n",
                         rd_kafka_name(c[0].rk), parts->cnt);
                rd_kafka_topic_partition_list_destroy(parts);
                break;
        }

        TEST_SAY("c[0] got assignment, consuming..\n");
        do_consume(&c[0], 5/*5s*/);

        TEST_SAY("Joining second consumer\n");
        test_consumer_subscribe(c[1].rk, topic);

        /* Just poll second consumer for 10s, the rebalance will not
         * finish until the first consumer polls */
        do_consume(&c[1], 10/*10s*/);

        /* c0: the next call to do_consume/poll will trigger
         *     its rebalance callback, first revoke then assign. */
        c[0].max_rebalance_cnt += 2;
        /* c1: first rebalance */
        c[1].max_rebalance_cnt++;

        TEST_SAY("Expected rebalances: c[0]: %d/%d, c[1]: %d/%d\n",
                 c[0].rebalance_cnt, c[0].max_rebalance_cnt,
                 c[1].rebalance_cnt, c[1].max_rebalance_cnt);

        /* Let rebalances kick in, then consume messages. */
        while (c[0].cnt + c[1].cnt < msgcnt) {
                do_consume(&c[0], 0);
                do_consume(&c[1], 0);
        }

        /* Allow the extra revoke rebalance on close() */
        c[0].max_rebalance_cnt++;
        c[1].max_rebalance_cnt++;

        test_consumer_close(c[0].rk);
        test_consumer_close(c[1].rk);

        rd_kafka_destroy(c[0].rk);
        rd_kafka_destroy(c[1].rk);

        return 0;
}
Code Example #4
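/* max.poll.interval.ms enforcement: one consumer "processes" a message for
 * 20s, exceeding the 10s max.poll.interval.ms, so its partitions should be
 * rebalanced away to the other consumer and the stalled consumer should
 * eventually report RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED. */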
int main_0089_max_poll_interval (int argc, char **argv) {
        const char *topic = test_mk_topic_name("0089_max_poll_interval", 1);
        uint64_t testid;
        const int msgcnt = 10;
        rd_kafka_t *c[2];
        rd_kafka_conf_t *conf;
        int64_t ts_next[2] = { 0, 0 };
        int64_t ts_exp_msg[2] = { 0, 0 };
        int cmsgcnt = 0;
        int i;
        int bad = -1;

        testid = test_id_generate();

        test_create_topic(topic, 1, 1);

        test_produce_msgs_easy(topic, testid, -1, msgcnt);

        test_conf_init(&conf, NULL, 60);

        test_conf_set(conf, "session.timeout.ms", "6000");
        test_conf_set(conf, "max.poll.interval.ms", "10000" /*10s*/);
        test_conf_set(conf, "auto.offset.reset", "earliest");

        c[0] = test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL);
        c[1] = test_create_consumer(topic, NULL, conf, NULL);

        test_consumer_subscribe(c[0], topic);
        test_consumer_subscribe(c[1], topic);

        while (1) {
                for (i = 0 ; i < 2 ; i++) {
                        int64_t now;
                        rd_kafka_message_t *rkm;

                        /* Consumer is "processing" */
                        if (ts_next[i] > test_clock())
                                continue;

                        rkm = rd_kafka_consumer_poll(c[i], 100);
                        if (!rkm)
                                continue;

                        if (rkm->err) {
                                TEST_WARN("Consumer %d error: %s: "
                                          "ignoring\n", i,
                                          rd_kafka_message_errstr(rkm));
                                continue;
                        }

                        now = test_clock();

                        cmsgcnt++;

                        TEST_SAY("Consumer %d received message (#%d) "
                                 "at offset %"PRId64"\n",
                                 i, cmsgcnt, rkm->offset);

                        if (ts_exp_msg[i]) {
                                /* This consumer is expecting a message
                                 * after a certain time, namely after the
                                 * rebalance following max.poll.. being
                                 * exceeded in the other consumer */
                                TEST_ASSERT(now > ts_exp_msg[i],
                                            "Consumer %d: did not expect "
                                            "message for at least %dms",
                                            i,
                                            (int)((ts_exp_msg[i] - now)/1000));
                                TEST_ASSERT(now < ts_exp_msg[i] + 10000*1000,
                                            "Consumer %d: expected message "
                                            "within 10s, not after %dms",
                                            i,
                                            (int)((now - ts_exp_msg[i])/1000));
                                TEST_SAY("Consumer %d: received message "
                                         "at offset %"PRId64
                                         " after rebalance\n",
                                         i, rkm->offset);

                                rd_kafka_message_destroy(rkm);
                                goto done;

                        } else if (cmsgcnt == 1) {
                                /* Process this message for 20s */
                                ts_next[i] = now + (20000 * 1000);

                                /* Exp message on other consumer after
                                 * max.poll.interval.ms */
                                ts_exp_msg[i^1] = now + (10000 * 1000);

                                /* This is the bad consumer */
                                bad = i;

                                TEST_SAY("Consumer %d processing message at "
                                         "offset %"PRId64"\n",
                                         i, rkm->offset);
                                rd_kafka_message_destroy(rkm);
                        } else {
                                rd_kafka_message_destroy(rkm);

                                TEST_FAIL("Consumer %d did not expect "
                                          "a message", i);
                        }
                }
        }

 done:

        TEST_ASSERT(bad != -1, "Bad consumer not set");

        /* Wait for error ERR__MAX_POLL_EXCEEDED on the bad consumer. */
        while (1) {
                rd_kafka_message_t *rkm;

                rkm = rd_kafka_consumer_poll(c[bad], 1000);
                TEST_ASSERT(rkm, "Expected consumer result within 1s");

                TEST_ASSERT(rkm->err, "Did not expect message on bad consumer");

                TEST_SAY("Consumer error: %s: %s\n",
                         rd_kafka_err2name(rkm->err),
                         rd_kafka_message_errstr(rkm));

                if (rkm->err == RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED) {
                        rd_kafka_message_destroy(rkm);
                        break;
                }

                rd_kafka_message_destroy(rkm);
        }


        for (i = 0 ; i < 2 ; i++)
                rd_kafka_destroy_flags(c[i],
                                       RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE);
        return 0;
}
Code Example #5
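/* Consume half of the messages, simulate a network outage that outlives the
 * session timeout (attempting an offset commit while down), then restore
 * the network and verify the remaining messages arrive in order and
 * without duplicates. */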
int main_0049_consume_conn_close (int argc, char **argv) {
        rd_kafka_t *rk;
        const char *topic = test_mk_topic_name("0049_consume_conn_close", 1);
        uint64_t testid;
        int msgcnt = test_on_ci ? 1000 : 10000;
        test_msgver_t mv;
        rd_kafka_conf_t *conf;
        rd_kafka_topic_conf_t *tconf;
        rd_kafka_topic_partition_list_t *assignment;
        rd_kafka_resp_err_t err;

        if (!test_conf_match(NULL, "sasl.mechanisms", "GSSAPI")) {
                TEST_SKIP("KNOWN ISSUE: ApiVersionRequest+SaslHandshake "
                          "will not play well with sudden disconnects\n");
                return 0;
        }

        test_conf_init(&conf, &tconf, 60);
        /* Want an even number so it is divisible by two without surprises */
        msgcnt = (msgcnt / (int)test_timeout_multiplier) & ~1;

        testid = test_id_generate();
        test_produce_msgs_easy(topic, testid, RD_KAFKA_PARTITION_UA, msgcnt);


        test_socket_enable(conf);
        test_curr->connect_cb = connect_cb;
        test_curr->is_fatal_cb = is_fatal_cb;

        test_topic_conf_set(tconf, "auto.offset.reset", "smallest");

        rk = test_create_consumer(topic, NULL, conf, tconf);

        test_consumer_subscribe(rk, topic);

        test_msgver_init(&mv, testid);

        test_consumer_poll("consume.up", rk, testid, -1, 0, msgcnt/2, &mv);

        err = rd_kafka_assignment(rk, &assignment);
        TEST_ASSERT(!err, "assignment() failed: %s", rd_kafka_err2str(err));
        TEST_ASSERT(assignment->cnt > 0, "empty assignment");

        TEST_SAY("Bringing down the network\n");

        TEST_LOCK();
        simulate_network_down = 1;
        TEST_UNLOCK();
        test_socket_close_all(test_curr, 1/*reinit*/);

        TEST_SAY("Waiting for session timeout to expire (6s), and then some\n");

        /* Commit an offset, which should fail, to trigger the offset commit
         * callback fallback (CONSUMER_ERR) */
        assignment->elems[0].offset = 123456789;
        TEST_SAY("Committing offsets while down, should fail eventually\n");
        err = rd_kafka_commit(rk, assignment, 1/*async*/);
        TEST_ASSERT(!err, "async commit failed: %s", rd_kafka_err2str(err));
        rd_kafka_topic_partition_list_destroy(assignment);

        rd_sleep(10);

        TEST_SAY("Bringing network back up\n");
        TEST_LOCK();
        simulate_network_down = 0;
        TEST_UNLOCK();

        TEST_SAY("Continuing to consume..\n");
        test_consumer_poll("consume.up2", rk, testid, -1, msgcnt/2, msgcnt/2,
                           &mv);

        test_msgver_verify("consume", &mv, TEST_MSGVER_ORDER|TEST_MSGVER_DUP,
                           0, msgcnt);

        test_msgver_clear(&mv);

        test_consumer_close(rk);
        rd_kafka_destroy(rk);

        return 0;
}
Code Example #6
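/* Batch consumption with the legacy simple consumer: two topics x two
 * partitions are redirected into a single queue, which is drained in
 * batches of up to 1000 messages via rd_kafka_consume_batch_queue(). */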
static int do_test_consume_batch (void) {
#define topic_cnt 2
        const char *topics[topic_cnt] = {
                test_mk_topic_name(__FUNCTION__, 1),
                test_mk_topic_name(__FUNCTION__, 1)
        };
        const int partition_cnt = 2;
        rd_kafka_t *rk;
        rd_kafka_queue_t *rkq;
        rd_kafka_topic_t *rkts[topic_cnt];
        rd_kafka_resp_err_t err;
        const int msgcnt = 10000;
        uint64_t testid;
        int i, p;
        int batch_cnt = 0;
        int remains;

        testid = test_id_generate();

        /* Produce messages */
        for (i = 0 ; i < topic_cnt ; i++) {
                for (p = 0 ; p < partition_cnt ; p++)
                        test_produce_msgs_easy(topics[i], testid, p,
                                               msgcnt / topic_cnt /
                                               partition_cnt);
        }


        /* Create simple consumer */
        rk = test_create_consumer(NULL, NULL, NULL, NULL);

        /* Create generic consume queue */
        rkq = rd_kafka_queue_new(rk);

        for (i = 0 ; i < topic_cnt ; i++) {
                /* Create topic object */
                rkts[i] = test_create_topic(rk, topics[i],
                                            "auto.offset.reset", "smallest",
                                            NULL);

                /* Start consuming each partition and redirect
                 * messages to queue */

                TEST_SAY("Start consuming topic %s partitions 0..%d\n",
                         rd_kafka_topic_name(rkts[i]), partition_cnt);

                for (p = 0 ; p < partition_cnt ; p++) {
                        err = rd_kafka_consume_start_queue(
                                rkts[i], p, RD_KAFKA_OFFSET_BEGINNING, rkq);
                        if (err)
                                TEST_FAIL("Failed to start consuming: %s\n",
                                          rd_kafka_err2str(err));
                }
        }

        remains = msgcnt;

        /* Consume messages from common queue using batch interface. */
        TEST_SAY("Consume %d messages from queue\n", remains);
        while (remains > 0) {
                rd_kafka_message_t *rkmessage[1000];
                ssize_t r;
                test_timing_t t_batch;

                TIMING_START(&t_batch, "CONSUME.BATCH");
                r = rd_kafka_consume_batch_queue(rkq, 1000, rkmessage, 1000);
                TIMING_STOP(&t_batch);

                TEST_SAY("Batch consume iteration #%d: Consumed %"PRIdsz
                         "/1000 messages\n", batch_cnt, r);

                if (r == -1)
                        TEST_FAIL("Failed to consume messages: %s\n",
                                  rd_kafka_err2str(rd_kafka_errno2err(errno)));

                remains -= r;

                for (i = 0 ; i < r ; i++)
                        rd_kafka_message_destroy(rkmessage[i]);

                batch_cnt++;
        }


        TEST_SAY("Stopping consumer\n");
        for (i = 0 ; i < topic_cnt ; i++) {
                for (p = 0 ; p < partition_cnt ; p++) {
                        err = rd_kafka_consume_stop(rkts[i], p);
                        if (err)
                                TEST_FAIL("Failed to stop consuming: %s\n",
                                          rd_kafka_err2str(err));
                }

                rd_kafka_topic_destroy(rkts[i]);
        }

        rd_kafka_queue_destroy(rkq);

        rd_kafka_destroy(rk);

        return 0;
}
Code Example #7
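/* Committed offsets for a non-existent partition: after consuming and
 * committing, query committed offsets for partition 0 (exists, must return
 * a real offset) and partition 123 (does not exist, must return
 * RD_KAFKA_OFFSET_INVALID). */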
static int nonexist_part (void) {
	const char *topic = test_mk_topic_name(__FUNCTION__, 1);
	rd_kafka_t *rk;
	rd_kafka_topic_partition_list_t *parts;
	rd_kafka_resp_err_t err;
	test_timing_t t_pos;
	const int msgcnt = 1000;
	uint64_t testid;
	int i;
	int it, iterations = 5;

	/* Produce messages */
	testid = test_produce_msgs_easy(topic, 0,
					RD_KAFKA_PARTITION_UA, msgcnt);

	for (it = 0 ; it < iterations ; it++) {
		char group_id[32];

		test_str_id_generate(group_id, sizeof(group_id));

		TEST_SAY("Iteration %d/%d, using group.id %s\n", it, iterations,
			 group_id);

		/* Consume messages */
		test_consume_msgs_easy(group_id, topic, testid, -1,
				       msgcnt, NULL);

		/*
		 * Now start a new consumer and query stored offsets (positions)
		 */

		rk = test_create_consumer(group_id, NULL, NULL, NULL, NULL);

		/* Fill in partition set */
		parts = rd_kafka_topic_partition_list_new(2);
		/* existing */
		rd_kafka_topic_partition_list_add(parts, topic, 0);
		/* non-existing */
		rd_kafka_topic_partition_list_add(parts, topic, 123);


		TIMING_START(&t_pos, "COMMITTED");
		err = rd_kafka_committed(rk, parts, tmout_multip(5000));
		TIMING_STOP(&t_pos);
		if (err)
			TEST_FAIL("Failed to acquire committed offsets: %s\n",
				  rd_kafka_err2str(err));

		for (i = 0 ; i < parts->cnt ; i++) {
			TEST_SAY("%s [%"PRId32"] returned offset %"PRId64
				 ": %s\n",
				 parts->elems[i].topic,
				 parts->elems[i].partition,
				 parts->elems[i].offset,
				 rd_kafka_err2str(parts->elems[i].err));
			if (parts->elems[i].partition == 0 &&
			    parts->elems[i].offset <= 0)
				TEST_FAIL("Partition %"PRId32" should have a "
					  "proper offset, not %"PRId64"\n",
					  parts->elems[i].partition,
					  parts->elems[i].offset);
			else if (parts->elems[i].partition == 123 &&
				 parts->elems[i].offset !=
				 RD_KAFKA_OFFSET_INVALID)
				TEST_FAIL("Partition %"PRId32
					  " should have failed\n",
					  parts->elems[i].partition);
		}

		rd_kafka_topic_partition_list_destroy(parts);

		test_consumer_close(rk);

		/* Hangs if bug isn't fixed */
		rd_kafka_destroy(rk);
	}

	return 0;
}
Code Example #8
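/* Watermark offsets: compare rd_kafka_query_watermark_offsets() (a broker
 * round-trip) with rd_kafka_get_watermark_offsets() (a local cache that is
 * only populated once consumption has started). */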
int main_0031_get_offsets (int argc, char **argv) {
	const char *topic = test_mk_topic_name(__FUNCTION__, 1);
	const int msgcnt = 100;
	rd_kafka_t *rk;
	rd_kafka_topic_t *rkt;
	int64_t qry_low = -1234, qry_high = -1235;
	int64_t get_low = -1234, get_high = -1235;
	rd_kafka_resp_err_t err;
	test_timing_t t_qry, t_get;
	uint64_t testid;

        /* Produce messages */
        testid = test_produce_msgs_easy(topic, 0, 0, msgcnt);

	/* Get offsets */
	rk = test_create_consumer(NULL, NULL, NULL, NULL);

	TIMING_START(&t_qry, "query_watermark_offsets");
	err = rd_kafka_query_watermark_offsets(rk, topic, 0,
					       &qry_low, &qry_high, 10*1000);
	TIMING_STOP(&t_qry);
	if (err)
		TEST_FAIL("query_watermark_offsets failed: %s\n",
			  rd_kafka_err2str(err));

	if (qry_low != 0 && qry_high != msgcnt)
		TEST_FAIL("Expected low,high %d,%d, but got "
			  "%"PRId64",%"PRId64,
			  0, msgcnt, qry_low, qry_high);

	TEST_SAY("query_watermark_offsets: "
		 "offsets %"PRId64", %"PRId64"\n", qry_low, qry_high);

	/* Now start consuming to update the offset cache, then query it
	 * with the get_ API. */
	rkt = test_create_topic_object(rk, topic, NULL);

	test_consumer_start("get", rkt, 0, RD_KAFKA_OFFSET_BEGINNING);
	test_consume_msgs("get", rkt, testid, 0, TEST_NO_SEEK,
			  0, msgcnt, 0);
	/* After at least one message has been consumed the
	 * watermarks are cached. */

	TIMING_START(&t_get, "get_watermark_offsets");
	err = rd_kafka_get_watermark_offsets(rk, topic, 0,
					     &get_low, &get_high);
	TIMING_STOP(&t_get);
	if (err)
		TEST_FAIL("get_watermark_offsets failed: %s\n",
			  rd_kafka_err2str(err));

	TEST_SAY("get_watermark_offsets: "
		 "offsets %"PRId64", %"PRId64"\n", get_low, get_high);

	if (get_high != qry_high)
		TEST_FAIL("query/get discrepancies: "
			  "low: %"PRId64"/%"PRId64", high: %"PRId64"/%"PRId64,
			  qry_low, get_low, qry_high, get_high);
	if (get_low >= get_high)
		TEST_FAIL("get_watermark_offsets: "
			  "low %"PRId64" >= high %"PRId64,
			  get_low, get_high);

	/* FIXME: We currently don't bother checking the get_low offset
	 *        since it requires stats to be enabled. */

	test_consumer_stop("get", rkt, 0);

	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);

	return 0;
}
Code Example #9
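/* Verify the consumer_lag statistic delivered through the statistics
 * callback against a lag computed from each consumed message's offset. */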
static void do_test_consumer_lag (void) {
  const int msgcnt = 10;
  std::string errstr;
  RdKafka::ErrorCode err;

  /* 'topic' is a file-scope std::string in the original test file; declared
   * locally here so this excerpt is self-contained. */
  std::string topic = Test::mk_topic_name("0061-consumer_lag", 1);

  test_produce_msgs_easy(topic.c_str(), 0, 0, msgcnt);

  /* Create consumer */
  RdKafka::Conf *conf;
  Test::conf_init(&conf, NULL, 10);
  StatsCb stats;
  if (conf->set("event_cb", &stats, errstr) != RdKafka::Conf::CONF_OK)
    Test::Fail("set event_cb failed: " + errstr);
  Test::conf_set(conf, "group.id", topic);
  Test::conf_set(conf, "enable.auto.commit", "false");
  Test::conf_set(conf, "enable.partition.eof", "false");
  Test::conf_set(conf, "auto.offset.reset", "earliest");
  Test::conf_set(conf, "statistics.interval.ms", "100");

  RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
  if (!c)
    Test::Fail("Failed to create KafkaConsumer: " + errstr);
  delete conf;

  /* Assign partitions (fixed assignment rather than subscription) */
  std::vector<RdKafka::TopicPartition*> parts;
  parts.push_back(RdKafka::TopicPartition::create(topic, 0));
  if ((err = c->assign(parts)))
    Test::Fail("assign failed: " + RdKafka::err2str(err));
  RdKafka::TopicPartition::destroy(parts);

  /* Start consuming */
  Test::Say("Consuming topic " + topic + "\n");
  int cnt = 0;
  while (cnt < msgcnt) {
    RdKafka::Message *msg = c->consume(tmout_multip(1000));
    switch (msg->err())
      {
      case RdKafka::ERR__TIMED_OUT:
        break;
      case RdKafka::ERR__PARTITION_EOF:
        Test::Fail(tostr() << "Consume error after " << cnt << "/" << msgcnt << " messages: " << msg->errstr());
        break;

      case RdKafka::ERR_NO_ERROR:
        /* Proper message. Update calculated lag for later
         * checking in stats callback */
        stats.calc_lag = msgcnt - (msg->offset()+1);
        cnt++;
        Test::Say(2, tostr() << "Received message #" << cnt << "/" << msgcnt <<
                  " at offset " << msg->offset() << " (calc lag " << stats.calc_lag << ")\n");
        /* Slow down message "processing" to make sure we get
         * at least one stats callback per message. */
        if (cnt < msgcnt)
          rd_sleep(1);
        break;

      default:
        Test::Fail("Consume error: " + msg->errstr());
        break;
      }

    delete msg;
  }
  Test::Say(tostr() << "Done, lag was valid " <<
            stats.lag_valid << " times\n");
  if (stats.lag_valid == 0)
    Test::Fail("No valid consumer_lag in statistics seen");

  c->close();
  delete c;
}