static void legacy_consume_many (char **topics, int topic_cnt, uint64_t testid) {
	rd_kafka_t *rk;
	test_timing_t t_rkt_create;
	int i;
	rd_kafka_topic_t **rkts;
	int msg_base = 0;

	TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__);

	test_conf_init(NULL, NULL, 60);

	rk = test_create_consumer(NULL, NULL, NULL, NULL);

	TEST_SAY("Creating %d topic objects\n", topic_cnt);
		 
	rkts = malloc(sizeof(*rkts) * topic_cnt);
	TIMING_START(&t_rkt_create, "Topic object create");
	for (i = 0 ; i < topic_cnt ; i++)
		rkts[i] = test_create_topic_object(rk, topics[i], NULL);
	TIMING_STOP(&t_rkt_create);

	TEST_SAY("Start consumer for %d topics\n", topic_cnt);
	for (i = 0 ; i < topic_cnt ; i++)
		test_consumer_start("legacy", rkts[i], 0,
				    RD_KAFKA_OFFSET_BEGINNING);
	
	TEST_SAY("Consuming from %d messages from each %d topics\n",
		 msgs_per_topic, topic_cnt);
	for (i = 0 ; i < topic_cnt ; i++) {
		test_consume_msgs("legacy", rkts[i], testid, 0, TEST_NO_SEEK,
				  msg_base, msgs_per_topic, 1);
		msg_base += msgs_per_topic;
	}

	TEST_SAY("Stopping consumers\n");
	for (i = 0 ; i < topic_cnt ; i++)
		test_consumer_stop("legacy", rkts[i], 0);


	TEST_SAY("Destroying %d topic objects\n", topic_cnt);
	for (i = 0 ; i < topic_cnt ; i++)
		rd_kafka_topic_destroy(rkts[i]);

	free(rkts);

	rd_kafka_destroy(rk);
}
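
/**
 * Verify that a consumer configured with a small fetch.message.max.bytes
 * still receives messages that exceed that limit: half of the messages
 * produced below are MAX_BYTES/10, the other half MAX_BYTES*5.
 */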
int main_0041_fetch_max_bytes (int argc, char **argv) {
	const char *topic = test_mk_topic_name(__FUNCTION__, 1);
	const int partition = 0;
	const int msgcnt = 2*1000;
	const int MAX_BYTES = 100000;
	uint64_t testid;
	rd_kafka_conf_t *conf;
	rd_kafka_t *rk;
	rd_kafka_topic_t *rkt;

	test_conf_init(NULL, NULL, 60);
	
	testid = test_id_generate();
	rk = test_create_producer();
	rkt = test_create_producer_topic(rk, topic, NULL);

	test_produce_msgs(rk, rkt, testid, partition, 0, msgcnt/2,
			  NULL, MAX_BYTES/10);
	test_produce_msgs(rk, rkt, testid, partition, msgcnt/2, msgcnt/2,
			  NULL, MAX_BYTES*5);

	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);

	TEST_SAY("Creating consumer\n");
	test_conf_init(&conf, NULL, 0);

	test_conf_set(conf, "fetch.message.max.bytes", tsprintf("%d", MAX_BYTES));
	
	rk = test_create_consumer(NULL, NULL, conf, NULL);
	rkt = rd_kafka_topic_new(rk, topic, NULL);

	test_consumer_start("CONSUME", rkt, partition,
			    RD_KAFKA_OFFSET_BEGINNING);
	test_consume_msgs("CONSUME", rkt, testid, partition, TEST_NO_SEEK,
			  0, msgcnt, 1);
	test_consumer_stop("CONSUME", rkt, partition);

	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);

	return 0;
}
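
/**
 * Fetch with fetch.message.max.bytes sized to roughly 1.5 messages so
 * that fetch responses frequently end in a partial message, exercising
 * the consumer's partial-fetch reassembly.
 */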
int main_0036_partial_fetch (int argc, char **argv) {
	const char *topic = test_mk_topic_name(__FUNCTION__, 1);
	const int partition = 0;
	const int msgcnt = 100;
	const int msgsize = 1000;
	uint64_t testid;
	rd_kafka_conf_t *conf;
	rd_kafka_t *rk;
	rd_kafka_topic_t *rkt;

	TEST_SAY("Producing %d messages of size %d to %s [%d]\n",
		 msgcnt, (int)msgsize, topic, partition);
	testid = test_id_generate();
	rk = test_create_producer();
	rkt = test_create_producer_topic(rk, topic, NULL);

	test_produce_msgs(rk, rkt, testid, partition, 0, msgcnt, NULL, msgsize);

	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);

	TEST_SAY("Creating consumer\n");
	test_conf_init(&conf, NULL, 0);
	/* This should fetch 1.5 messages per fetch, thus resulting in
	 * partial fetches, hopefully. */
	test_conf_set(conf, "fetch.message.max.bytes", "1500");
	
	rk = test_create_consumer(NULL, NULL, conf, NULL, NULL);
	rkt = rd_kafka_topic_new(rk, topic, NULL);

	test_consumer_start("CONSUME", rkt, partition,
			    RD_KAFKA_OFFSET_BEGINNING);
	test_consume_msgs("CONSUME", rkt, testid, partition, TEST_NO_SEEK,
			  0, msgcnt, 1);
	test_consumer_stop("CONSUME", rkt, partition);

	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);

	return 0;
}
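
/**
 * Basic throughput benchmark: produce ~128 MB of 100-byte messages
 * (8 MB under valgrind/helgrind/drd), consume them back, and report
 * MB/s and records/s for both directions.
 */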
int main_0038_performance (int argc, char **argv) {
	const char *topic = test_mk_topic_name(__FUNCTION__, 1);
	const int partition = 0;
	const int msgsize = 100;
	uint64_t testid;
	rd_kafka_conf_t *conf;
	rd_kafka_t *rk;
	rd_kafka_topic_t *rkt;
	test_timing_t t_create, t_produce, t_consume;
	int totsize = 1024*1024*128;
	int msgcnt;

	if (!strcmp(test_mode, "valgrind") || !strcmp(test_mode, "helgrind") ||
	    !strcmp(test_mode, "drd"))
		totsize = 1024*1024*8; /* 8 meg, valgrind is slow. */

	msgcnt = totsize / msgsize;

	TEST_SAY("Producing %d messages of size %d to %s [%d]\n",
		 msgcnt, (int)msgsize, topic, partition);
	testid = test_id_generate();
	test_conf_init(&conf, NULL, 120);
	rd_kafka_conf_set_dr_cb(conf, test_dr_cb);
	test_conf_set(conf, "queue.buffering.max.messages", "10000000");
	rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
	rkt = test_create_producer_topic(rk, topic, "acks", "1", NULL);

	/* First produce one message to create the topic, etc. This might take
	 * a while and we don't want it to affect the throughput timing. */
	TIMING_START(&t_create, "CREATE TOPIC");
	test_produce_msgs(rk, rkt, testid, partition, 0, 1, NULL, msgsize);
	TIMING_STOP(&t_create);

	TIMING_START(&t_produce, "PRODUCE");
	test_produce_msgs(rk, rkt, testid, partition, 1, msgcnt-1, NULL, msgsize);
	TIMING_STOP(&t_produce);

	TEST_SAY("Destroying producer\n");
	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);

	TEST_SAY("Creating consumer\n");
	test_conf_init(&conf, NULL, 120);
	rk = test_create_consumer(NULL, NULL, conf, NULL, NULL);
	rkt = rd_kafka_topic_new(rk, topic, NULL);

	test_consumer_start("CONSUME", rkt, partition,
			    RD_KAFKA_OFFSET_BEGINNING);
	TIMING_START(&t_consume, "CONSUME");
	test_consume_msgs("CONSUME", rkt, testid, partition, TEST_NO_SEEK,
			  0, msgcnt, 1);
	TIMING_STOP(&t_consume);
	test_consumer_stop("CONSUME", rkt, partition);

	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);

	TEST_REPORT("{ \"producer\": "
		    " { \"mb_per_sec\": %.2f, \"records_per_sec\": %.2f },"
		    " \"consumer\": "
		    "{ \"mb_per_sec\": %.2f, \"records_per_sec\": %.2f } "
		    "}",
		    (double)
		    (totsize/((double)TIMING_DURATION(&t_produce)/1000000.0f)) /
		    1000000.0f,
		    (float)
		    (msgcnt/((double)TIMING_DURATION(&t_produce)/1000000.0f)),
		    (double)
		    (totsize/((double)TIMING_DURATION(&t_consume)/1000000.0f)) /
		    1000000.0f,
		    (float)
		    (msgcnt/((double)TIMING_DURATION(&t_consume)/1000000.0f)));
	return 0;
}
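
/**
 * Consume the full range once to learn the offset span, then perform
 * random seek+consume "dances" across the range, verifying that each
 * seek yields the expected messages.
 */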
int main_0015_offsets_seek (int argc, char **argv) {
    const char *topic = test_mk_topic_name("0015", 1);
    rd_kafka_t *rk_p, *rk_c;
    rd_kafka_topic_t *rkt_p, *rkt_c;
    int msg_cnt = 1000;
    int msg_base = 0;
    int32_t partition = 0;
    int i;
    int64_t offset_last, offset_base;
    uint64_t testid;
    int dance_iterations = 10;
    int msgs_per_dance = 10;

    testid = test_id_generate();

    /* Produce messages */
    rk_p = test_create_producer();
    rkt_p = test_create_producer_topic(rk_p, topic, NULL);

    test_produce_msgs(rk_p, rkt_p, testid, partition, msg_base, msg_cnt,
                      NULL, 0);

    rd_kafka_topic_destroy(rkt_p);
    rd_kafka_destroy(rk_p);


    rk_c = test_create_consumer(NULL, NULL, NULL, NULL);
    rkt_c = test_create_consumer_topic(rk_c, topic);

    /* Start consumer tests */
    test_consumer_start("verify.all", rkt_c, partition,
                        RD_KAFKA_OFFSET_BEGINNING);
    /* Make sure all messages are available */
    offset_last = test_consume_msgs("verify.all", rkt_c,
                                    testid, partition, TEST_NO_SEEK,
                                    msg_base, msg_cnt, 1/* parse format*/);

    /* Compute the base offset (offset of the first message) from the
     * offset of the last consumed message. */
    offset_base = offset_last - msg_cnt + 1;

    TEST_SAY("%s [%"PRId32"]: Do random seek&consume for msgs #%d+%d with "
             "offsets %"PRId64"..%"PRId64"\n",
             rd_kafka_topic_name(rkt_c), partition,
             msg_base, msg_cnt, offset_base, offset_last);

    /* Now go dancing over the entire range with offset seeks. */
    for (i = 0 ; i < dance_iterations ; i++) {
        int64_t offset = jitter((int)offset_base,
                                (int)offset_base+msg_cnt);

        test_consume_msgs("dance", rkt_c,
                          testid, partition, offset,
                          msg_base + (int)(offset - offset_base),
                          RD_MIN(msgs_per_dance,
                                 (int)(offset_last - offset)),
                          1 /* parse format */);
    }

    test_consumer_stop("1", rkt_c, partition);

    rd_kafka_topic_destroy(rkt_c);
    rd_kafka_destroy(rk_c);

    return 0;
}
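
/**
 * Produce and consume msg_cnt messages per compression codec (none,
 * plus gzip/snappy/lz4 when compiled in), one topic per codec, and
 * verify that all messages round-trip intact.
 */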
int main_0017_compression(int argc, char **argv) {
    rd_kafka_t *rk_p, *rk_c;
    const int msg_cnt = 1000;
    int msg_base = 0;
    uint64_t testid;
#define CODEC_CNT 4
    const char *codecs[CODEC_CNT+1] = {
        "none",
#if WITH_ZLIB
        "gzip",
#endif
#if WITH_SNAPPY
        "snappy",
#endif
#if WITH_LZ4
        "lz4",
#endif
        NULL
    };
    const char *topics[CODEC_CNT];
    const int32_t partition = 0;
    int i;

    testid = test_id_generate();

    /* Produce messages */
    rk_p = test_create_producer();
    for (i = 0; codecs[i] != NULL ; i++) {
        rd_kafka_topic_t *rkt_p;

        /* test_mk_topic_name() returns a static buffer, so duplicate the
         * name since it must remain valid for the consume pass below. */
        topics[i] = rd_strdup(test_mk_topic_name(codecs[i], 1));
        TEST_SAY("Produce %d messages with %s compression to "
                 "topic %s\n",
                 msg_cnt, codecs[i], topics[i]);
        rkt_p = test_create_producer_topic(rk_p, topics[i],
                                           "compression.codec", codecs[i], NULL);

        /* Produce small message that will not decrease with
         * compression (issue #781) */
        test_produce_msgs(rk_p, rkt_p, testid, partition,
                          msg_base + (partition*msg_cnt), 1,
                          NULL, 5);

        /* Produce standard sized messages */
        test_produce_msgs(rk_p, rkt_p, testid, partition,
                          msg_base + (partition*msg_cnt) + 1, msg_cnt-1,
                          NULL, 512);
        rd_kafka_topic_destroy(rkt_p);
    }

    rd_kafka_destroy(rk_p);


    /* Consume messages */
    rk_c = test_create_consumer(NULL, NULL, NULL, NULL);

    for (i = 0; codecs[i] != NULL ; i++) {
        rd_kafka_topic_t *rkt_c = rd_kafka_topic_new(rk_c, topics[i], NULL);
        TEST_SAY("Consume %d messages from topic %s\n",
                 msg_cnt, topics[i]);
        /* Start consuming */
        test_consumer_start(codecs[i], rkt_c, partition,
                            RD_KAFKA_OFFSET_BEGINNING);

        /* Consume messages */
        test_consume_msgs(codecs[i], rkt_c, testid, partition,
                          /* Use offset 0 here, which is wrong, should
                           * be TEST_NO_SEEK, but it exposed a bug
                           * where the Offset query was postponed
                           * till after the seek, causing messages
                           * to be replayed. */
                          0,
                          msg_base, msg_cnt, 1 /* parse format */);

        test_consumer_stop(codecs[i], rkt_c, partition);
        rd_kafka_topic_destroy(rkt_c);
    }

    /* Free the duplicated topic names. */
    for (i = 0; codecs[i] != NULL ; i++)
            rd_free((void *)topics[i]);

    rd_kafka_destroy(rk_c);

    return 0;
}
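
/**
 * Compare the two watermark APIs: rd_kafka_query_watermark_offsets()
 * queries the broker directly, while rd_kafka_get_watermark_offsets()
 * returns values cached by the consumer once fetching has begun.
 */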
int main_0031_get_offsets (int argc, char **argv) {
	const char *topic = test_mk_topic_name(__FUNCTION__, 1);
	const int msgcnt = 100;
	rd_kafka_t *rk;
	rd_kafka_topic_t *rkt;
	int64_t qry_low = -1234, qry_high = -1235;
	int64_t get_low = -1234, get_high = -1235;
	rd_kafka_resp_err_t err;
	test_timing_t t_qry, t_get;
	uint64_t testid;

        /* Produce messages */
        testid = test_produce_msgs_easy(topic, 0, 0, msgcnt);

	/* Get offsets */
	rk = test_create_consumer(NULL, NULL, NULL, NULL);

	TIMING_START(&t_qry, "query_watermark_offsets");
	err = rd_kafka_query_watermark_offsets(rk, topic, 0,
					       &qry_low, &qry_high, 10*1000);
	TIMING_STOP(&t_qry);
	if (err)
		TEST_FAIL("query_watermark_offsets failed: %s\n",
			  rd_kafka_err2str(err));

	if (qry_low != 0 && qry_high != msgcnt)
		TEST_FAIL("Expected low,high %d,%d, but got "
			  "%"PRId64",%"PRId64,
			  0, msgcnt, qry_low, qry_high);

	TEST_SAY("query_watermark_offsets: "
		 "offsets %"PRId64", %"PRId64"\n", qry_low, qry_high);

	/* Now start consuming to update the offset cache, then query it
	 * with the get_ API. */
	rkt = test_create_topic_object(rk, topic, NULL);

	test_consumer_start("get", rkt, 0, RD_KAFKA_OFFSET_BEGINNING);
	test_consume_msgs("get", rkt, testid, 0, TEST_NO_SEEK,
			  0, msgcnt, 0);
	/* After at least one message has been consumed the
	 * watermarks are cached. */

	TIMING_START(&t_get, "get_watermark_offsets");
	err = rd_kafka_get_watermark_offsets(rk, topic, 0,
					     &get_low, &get_high);
	TIMING_STOP(&t_get);
	if (err)
		TEST_FAIL("get_watermark_offsets failed: %s\n",
			  rd_kafka_err2str(err));

	TEST_SAY("get_watermark_offsets: "
		 "offsets %"PRId64", %"PRId64"\n", get_low, get_high);

	if (get_high != qry_high)
		TEST_FAIL("query/get discrepancies: "
			  "low: %"PRId64"/%"PRId64", high: %"PRId64"/%"PRId64,
			  qry_low, get_low, qry_high, get_high);
	if (get_low >= get_high)
		TEST_FAIL("get_watermark_offsets: "
			  "low %"PRId64" >= high %"PRId64,
			  get_low, get_high);

	/* FIXME: We currently don't bother checking the get_low offset
	 *        since it requires stats to be enabled. */

	test_consumer_stop("get", rkt, 0);

	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);

	return 0;
}
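
/* A minimal standalone sketch, not part of the original tests, showing
 * how an application might use the same watermark API as
 * main_0031_get_offsets() to estimate a partition's message count.
 * "rk" is assumed to be an existing handle and "my_topic" a
 * hypothetical topic name. */
static void example_partition_msg_count (rd_kafka_t *rk) {
	int64_t low = 0, high = 0;
	rd_kafka_resp_err_t err;

	/* Blocking broker round-trip with a 10 second timeout. */
	err = rd_kafka_query_watermark_offsets(rk, "my_topic", 0,
					       &low, &high, 10*1000);
	if (err)
		fprintf(stderr, "query_watermark_offsets failed: %s\n",
			rd_kafka_err2str(err));
	else
		printf("my_topic [0] spans offsets %"PRId64"..%"PRId64
		       " (~%"PRId64" messages)\n", low, high, high - low);
}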