Example #1
int main_0021_rkt_destroy (int argc, char **argv) {
        const char *topic = test_mk_topic_name(__FUNCTION__, 0);
        rd_kafka_t *rk;
        rd_kafka_topic_t *rkt;
        const int msgcnt = 1000;
        uint64_t testid;
        int remains = 0;

        test_conf_init(NULL, NULL, 10);


        testid = test_id_generate();
        rk = test_create_producer();
        rkt = test_create_producer_topic(rk, topic, NULL);


        test_produce_msgs_nowait(rk, rkt, testid, RD_KAFKA_PARTITION_UA,
                                 0, msgcnt, NULL, 0, &remains);

        TEST_ASSERT(msgcnt == remains, "Only %d/%d messages produced",
                    remains, msgcnt);

        rd_kafka_topic_destroy(rkt);

        test_wait_delivery(rk, &remains);

        rd_kafka_destroy(rk);

        return 0;
}
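The point of this example is that delivery is owned by the rd_kafka_t instance, not the topic handle, so the handle can be destroyed while messages are still in flight. A minimal sketch of the same pattern against the public librdkafka API; the broker address, topic name and message count are placeholders, not taken from the test:

#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Sketch: produce without waiting, drop the topic handle early, then let
 * the rd_kafka_t instance finish delivery of the in-flight messages. */
static int produce_then_destroy_topic (const char *brokers, const char *topic) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        rd_kafka_t *rk;
        rd_kafka_topic_t *rkt;
        int i;

        if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers,
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                rd_kafka_conf_destroy(conf);
                return -1;
        }

        /* rd_kafka_new() takes ownership of conf on success. */
        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
        if (!rk)
                return -1;
        rkt = rd_kafka_topic_new(rk, topic, NULL);

        for (i = 0 ; i < 1000 ; i++) {
                char buf[64];
                int len = snprintf(buf, sizeof(buf), "msg %d", i);
                /* Enqueue only: delivery happens asynchronously. */
                rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY,
                                 buf, (size_t)len, NULL, 0, NULL);
                rd_kafka_poll(rk, 0); /* serve delivery reports */
        }

        /* The topic handle may go away while messages are still in flight. */
        rd_kafka_topic_destroy(rkt);

        /* Wait for outstanding deliveries before destroying the instance. */
        while (rd_kafka_outq_len(rk) > 0)
                rd_kafka_poll(rk, 100);
        rd_kafka_destroy(rk);
        return 0;
}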
Example #2
/**
 * @brief Test handling of implicit acks.
 *
 * @param batch_cnt Total number of batches (ProduceRequests) to send.
 * @param initial_fail_batch_cnt How many of the initial batches should
 *                               fail with an emulated network timeout.
 */
static void do_test_implicit_ack (const char *what,
                                  int batch_cnt, int initial_fail_batch_cnt) {
        rd_kafka_t *rk;
        const char *topic = test_mk_topic_name("0090_idempotence_impl_ack", 1);
        const int32_t partition = 0;
        uint64_t testid;
        int msgcnt = 10*batch_cnt;
        rd_kafka_conf_t *conf;
        rd_kafka_topic_t *rkt;
        test_msgver_t mv;

        TEST_SAY(_C_MAG "[ Test implicit ack: %s ]\n", what);

        rd_atomic32_init(&state.produce_cnt, 0);
        state.batch_cnt = batch_cnt;
        state.initial_fail_batch_cnt = initial_fail_batch_cnt;

        testid = test_id_generate();

        test_conf_init(&conf, NULL, 60);
        rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
        test_conf_set(conf, "enable.idempotence", "true");
        test_conf_set(conf, "batch.num.messages", "10");
        test_conf_set(conf, "linger.ms", "500");
        test_conf_set(conf, "retry.backoff.ms", "2000");

        /* The ProduceResponse handler will inject timed-out-in-flight
         * errors for the first N ProduceRequests, which will trigger retries
         * that in turn will result in OutOfSequence errors. */
        test_conf_set(conf, "ut_handle_ProduceResponse",
                      (char *)handle_ProduceResponse);

        test_create_topic(topic, 1, 1);

        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
        rkt = test_create_producer_topic(rk, topic, NULL);


        TEST_SAY("Producing %d messages\n", msgcnt);
        test_produce_msgs(rk, rkt, testid, -1, 0, msgcnt, NULL, 0);

        TEST_SAY("Flushing..\n");
        rd_kafka_flush(rk, 10000);

        rd_kafka_topic_destroy(rkt);
        rd_kafka_destroy(rk);

        TEST_SAY("Verifying messages with consumer\n");
        test_msgver_init(&mv, testid);
        test_consume_msgs_easy_mv(NULL, topic, partition,
                                  testid, 1, msgcnt, NULL, &mv);
        test_msgver_verify("verify", &mv, TEST_MSGVER_ALL, 0, msgcnt);
        test_msgver_clear(&mv);

        TEST_SAY(_C_GRN "[ Test implicit ack: %s : PASS ]\n", what);
}
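Outside the test harness, the idempotence setup used above is plain producer configuration. A minimal sketch with the public conf API; the broker address and the extra properties shown are illustrative, not requirements of the test:

#include <librdkafka/rdkafka.h>

/* Sketch: create an idempotent producer. The property names are the
 * standard librdkafka ones; the broker address is a placeholder. */
static rd_kafka_t *create_idempotent_producer (const char *brokers) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers,
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
            rd_kafka_conf_set(conf, "enable.idempotence", "true",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
            rd_kafka_conf_set(conf, "linger.ms", "500",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                rd_kafka_conf_destroy(conf);
                return NULL;
        }

        /* rd_kafka_new() takes ownership of conf on success. */
        return rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
}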
Example #3
/**
 * Create a producer, produce \p msgcnt messages to \p topic \p partition,
 * destroy the producer, and return the testid used.
 */
uint64_t
test_produce_msgs_easy (const char *topic, uint64_t testid,
                        int32_t partition, int msgcnt) {
        rd_kafka_t *rk;
        rd_kafka_topic_t *rkt;
        test_timing_t t_produce;

        if (!testid)
                testid = test_id_generate();
        rk = test_create_producer();
        rkt = test_create_producer_topic(rk, topic, NULL);

        TIMING_START(&t_produce, "PRODUCE");
        test_produce_msgs(rk, rkt, testid, partition, 0, msgcnt, NULL, 0);
        TIMING_STOP(&t_produce);
        rd_kafka_topic_destroy(rkt);
        rd_kafka_destroy(rk);

        return testid;
}
Example #4
int main_0041_fetch_max_bytes (int argc, char **argv) {
	const char *topic = test_mk_topic_name(__FUNCTION__, 1);
	const int partition = 0;
	const int msgcnt = 2*1000;
	const int MAX_BYTES = 100000;
	uint64_t testid;
	rd_kafka_conf_t *conf;
	rd_kafka_t *rk;
	rd_kafka_topic_t *rkt;

	test_conf_init(NULL, NULL, 60);
	
	testid = test_id_generate();
	rk = test_create_producer();
	rkt = test_create_producer_topic(rk, topic, NULL);

	test_produce_msgs(rk, rkt, testid, partition, 0, msgcnt/2, NULL, MAX_BYTES/10);
	test_produce_msgs(rk, rkt, testid, partition, msgcnt/2, msgcnt/2, NULL, MAX_BYTES*5);

	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);

	TEST_SAY("Creating consumer\n");
	test_conf_init(&conf, NULL, 0);

	test_conf_set(conf, "fetch.message.max.bytes", tsprintf("%d", MAX_BYTES));
	
	rk = test_create_consumer(NULL, NULL, conf, NULL);
	rkt = rd_kafka_topic_new(rk, topic, NULL);

	test_consumer_start("CONSUME", rkt, partition,
			    RD_KAFKA_OFFSET_BEGINNING);
	test_consume_msgs("CONSUME", rkt, testid, partition, TEST_NO_SEEK,
			  0, msgcnt, 1);
	test_consumer_stop("CONSUME", rkt, partition);

	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);

	return 0;
}
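The consumer-side knob exercised here, fetch.message.max.bytes, is an ordinary configuration property. A minimal sketch of building such a consumer with the public API; the broker address and the 100000-byte limit are placeholders:

#include <librdkafka/rdkafka.h>

/* Sketch: legacy consumer with a constrained fetch size, so that large
 * messages force additional fetch requests. */
static rd_kafka_t *create_small_fetch_consumer (const char *brokers) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers,
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
            rd_kafka_conf_set(conf, "fetch.message.max.bytes", "100000",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                rd_kafka_conf_destroy(conf);
                return NULL;
        }

        return rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
}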
Example #5
int main_0036_partial_fetch (int argc, char **argv) {
	const char *topic = test_mk_topic_name(__FUNCTION__, 1);
	const int partition = 0;
	const int msgcnt = 100;
	const int msgsize = 1000;
	uint64_t testid;
	rd_kafka_conf_t *conf;
	rd_kafka_t *rk;
	rd_kafka_topic_t *rkt;

	TEST_SAY("Producing %d messages of size %d to %s [%d]\n",
		 msgcnt, (int)msgsize, topic, partition);
	testid = test_id_generate();
	rk = test_create_producer();
	rkt = test_create_producer_topic(rk, topic, NULL);

	test_produce_msgs(rk, rkt, testid, partition, 0, msgcnt, NULL, msgsize);

	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);

	TEST_SAY("Creating consumer\n");
	test_conf_init(&conf, NULL, 0);
	/* With 1000-byte messages this fetches about 1.5 messages per
	 * request, which should result in partial fetches. */
	test_conf_set(conf, "fetch.message.max.bytes", "1500");
	
	rk = test_create_consumer(NULL, NULL, conf, NULL, NULL);
	rkt = rd_kafka_topic_new(rk, topic, NULL);

	test_consumer_start("CONSUME", rkt, partition,
			    RD_KAFKA_OFFSET_BEGINNING);
	test_consume_msgs("CONSUME", rkt, testid, partition, TEST_NO_SEEK,
			  0, msgcnt, 1);
	test_consumer_stop("CONSUME", rkt, partition);

	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);

	return 0;
}
Example #6
int main_0038_performance (int argc, char **argv) {
	const char *topic = test_mk_topic_name(__FUNCTION__, 1);
	const int partition = 0;
	const int msgsize = 100;
	uint64_t testid;
	rd_kafka_conf_t *conf;
	rd_kafka_t *rk;
	rd_kafka_topic_t *rkt;
	test_timing_t t_create, t_produce, t_consume;
	int totsize = 1024*1024*128;
	int msgcnt;

	if (!strcmp(test_mode, "valgrind") || !strcmp(test_mode, "helgrind") ||
	    !strcmp(test_mode, "drd"))
		totsize = 1024*1024*8; /* 8 meg, valgrind is slow. */

	msgcnt = totsize / msgsize;

	TEST_SAY("Producing %d messages of size %d to %s [%d]\n",
		 msgcnt, (int)msgsize, topic, partition);
	testid = test_id_generate();
	test_conf_init(&conf, NULL, 120);
	rd_kafka_conf_set_dr_cb(conf, test_dr_cb);
	test_conf_set(conf, "queue.buffering.max.messages", "10000000");
	rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
	rkt = test_create_producer_topic(rk, topic, "acks", "1", NULL);

	/* First produce one message to create the topic, etc.; this might take
	 * a while and we don't want it to affect the throughput timing. */
	TIMING_START(&t_create, "CREATE TOPIC");
	test_produce_msgs(rk, rkt, testid, partition, 0, 1, NULL, msgsize);
	TIMING_STOP(&t_create);

	TIMING_START(&t_produce, "PRODUCE");
	test_produce_msgs(rk, rkt, testid, partition, 1, msgcnt-1, NULL, msgsize);
	TIMING_STOP(&t_produce);

	TEST_SAY("Destroying producer\n");
	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);

	TEST_SAY("Creating consumer\n");
	test_conf_init(&conf, NULL, 120);
	rk = test_create_consumer(NULL, NULL, conf, NULL, NULL);
	rkt = rd_kafka_topic_new(rk, topic, NULL);

	test_consumer_start("CONSUME", rkt, partition,
			    RD_KAFKA_OFFSET_BEGINNING);
	TIMING_START(&t_consume, "CONSUME");
	test_consume_msgs("CONSUME", rkt, testid, partition, TEST_NO_SEEK,
			  0, msgcnt, 1);
	TIMING_STOP(&t_consume);
	test_consumer_stop("CONSUME", rkt, partition);

	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);

	TEST_REPORT("{ \"producer\": "
		    " { \"mb_per_sec\": %.2f, \"records_per_sec\": %.2f },"
		    " \"consumer\": "
		    "{ \"mb_per_sec\": %.2f, \"records_per_sec\": %.2f } "
		    "}",
		    (double)
		    (totsize/((double)TIMING_DURATION(&t_produce)/1000000.0f)) /
		    1000000.0f,
		    (float)
		    (msgcnt/((double)TIMING_DURATION(&t_produce)/1000000.0f)),
		    (double)
		    (totsize/((double)TIMING_DURATION(&t_consume)/1000000.0f)) /
		    1000000.0f,
		    (float)
		    (msgcnt/((double)TIMING_DURATION(&t_consume)/1000000.0f)));
	return 0;
}
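The TEST_REPORT above derives MB/s and records/s from the produce and consume wall-clock durations, which TIMING_DURATION() is treated as returning in microseconds. A small restatement of that arithmetic on plain variables, for reference; the function name is made up for illustration:

#include <stdio.h>

/* Sketch: the throughput math used by the report above.
 * duration_us is wall-clock time in microseconds. */
static void report_throughput (double total_bytes, double msgcnt,
                               double duration_us) {
        double secs = duration_us / 1000000.0;
        double mb_per_sec = (total_bytes / secs) / 1000000.0; /* MB = 1e6 bytes */
        double records_per_sec = msgcnt / secs;
        printf("%.2f MB/s, %.2f records/s\n", mb_per_sec, records_per_sec);
}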
Example #7
int main_0015_offsets_seek (int argc, char **argv) {
    const char *topic = test_mk_topic_name("0015", 1);
    rd_kafka_t *rk_p, *rk_c;
    rd_kafka_topic_t *rkt_p, *rkt_c;
    int msg_cnt = 1000;
    int msg_base = 0;
    int32_t partition = 0;
    int i;
    int64_t offset_last, offset_base;
    uint64_t testid;
    int dance_iterations = 10;
    int msgs_per_dance = 10;

    testid = test_id_generate();

    /* Produce messages */
    rk_p = test_create_producer();
    rkt_p = test_create_producer_topic(rk_p, topic, NULL);

    test_produce_msgs(rk_p, rkt_p, testid, partition, msg_base, msg_cnt,
                      NULL, 0);

    rd_kafka_topic_destroy(rkt_p);
    rd_kafka_destroy(rk_p);


    rk_c = test_create_consumer(NULL, NULL, NULL, NULL);
    rkt_c = test_create_consumer_topic(rk_c, topic);

    /* Start consumer tests */
    test_consumer_start("verify.all", rkt_c, partition,
                        RD_KAFKA_OFFSET_BEGINNING);
    /* Make sure all messages are available */
    offset_last = test_consume_msgs("verify.all", rkt_c,
                                    testid, partition, TEST_NO_SEEK,
                                    msg_base, msg_cnt, 1/* parse format*/);

    /* Rewind offset back to its base. */
    offset_base = offset_last - msg_cnt + 1;

    TEST_SAY("%s [%"PRId32"]: Do random seek&consume for msgs #%d+%d with "
             "offsets %"PRId64"..%"PRId64"\n",
             rd_kafka_topic_name(rkt_c), partition,
             msg_base, msg_cnt, offset_base, offset_last);

    /* Now go dancing over the entire range with offset seeks. */
    for (i = 0 ; i < dance_iterations ; i++) {
        int64_t offset = jitter((int)offset_base,
                                (int)offset_base+msg_cnt);

        test_consume_msgs("dance", rkt_c,
                          testid, partition, offset,
                          msg_base + (int)(offset - offset_base),
                          RD_MIN(msgs_per_dance,
                                 (int)(offset_last - offset)),
                          1 /* parse format */);
    }

    test_consumer_stop("1", rkt_c, partition);

    rd_kafka_topic_destroy(rkt_c);
    rd_kafka_destroy(rk_c);

    return 0;
}
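The seek performed inside each "dance" iteration corresponds to rd_kafka_seek() in the public API. A minimal sketch against a legacy consumer topic handle that has already been started with rd_kafka_consume_start(); names, timeouts and counts are placeholders:

#include <stdio.h>
#include <inttypes.h>
#include <librdkafka/rdkafka.h>

/* Sketch: seek a started legacy consumer to a given offset and read a
 * handful of messages from there. rd_kafka_consume_start() must already
 * have been called for this topic+partition. */
static void seek_and_consume (rd_kafka_topic_t *rkt, int32_t partition,
                              int64_t offset, int msgcnt) {
        int i;

        if (rd_kafka_seek(rkt, partition, offset, 2000 /*timeout (ms)*/))
                return; /* seek failed */

        for (i = 0 ; i < msgcnt ; i++) {
                rd_kafka_message_t *rkm = rd_kafka_consume(rkt, partition, 1000);
                if (!rkm)
                        continue; /* timeout */
                if (!rkm->err)
                        printf("consumed offset %"PRId64"\n", rkm->offset);
                rd_kafka_message_destroy(rkm);
        }
}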
Example #8
int main_0029_assign_offset (int argc, char **argv) {
	const char *topic = test_mk_topic_name(__FUNCTION__, 1);
	rd_kafka_t *rk;
	rd_kafka_topic_t *rkt;
	rd_kafka_topic_conf_t *tconf;
	rd_kafka_topic_partition_list_t *parts;
        uint64_t testid;
	int i;
	test_timing_t t_simple, t_hl;
	test_msgver_t mv;

	test_conf_init(NULL, &tconf, 20 + (test_session_timeout_ms * 3 / 1000));
	test_topic_conf_set(tconf, "auto.offset.reset", "smallest");

	/* Produce X messages to Y partitions so we get a
	 * nice seekable 0..X offset range on each partition. */
	testid = test_id_generate();
	rk = test_create_producer();
	rkt = test_create_producer_topic(rk, topic, NULL);

	parts = rd_kafka_topic_partition_list_new(partitions);

	for (i = 0 ; i < partitions ; i++) {
		test_produce_msgs(rk, rkt, testid, i, 0, msgcnt, NULL, 0);
		rd_kafka_topic_partition_list_add(parts, topic, i)->offset =
			msgcnt / 2;
	}

	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);


	/* Simple consumer */
	TIMING_START(&t_simple, "SIMPLE.CONSUMER");
	rk = test_create_consumer(topic, NULL, NULL, NULL);
	test_msgver_init(&mv, testid);
	test_consumer_assign("SIMPLE.ASSIGN", rk, parts);
	test_consumer_poll("SIMPLE.CONSUME", rk, testid, -1, 0,
			   partitions * (msgcnt / 2), &mv);
	for (i = 0 ; i < partitions ; i++)
		test_msgver_verify_part("HL.MSGS", &mv, TEST_MSGVER_ALL_PART,
					topic, i, msgcnt/2, msgcnt/2);
	test_msgver_clear(&mv);
	test_consumer_close(rk);
	rd_kafka_destroy(rk);
	TIMING_STOP(&t_simple);

	rd_kafka_topic_partition_list_destroy(parts);
	

	/* High-level consumer.
	 * Offsets are set in rebalance callback. */
	TIMING_START(&t_hl, "HL.CONSUMER");
	test_msgver_init(&mv, testid);
	rk = test_create_consumer(topic, rebalance_cb, NULL, NULL);
	test_consumer_subscribe(rk, topic);
	test_consumer_poll("HL.CONSUME", rk, testid, -1, 0,
			   partitions * (msgcnt / 2), &mv);
	for (i = 0 ; i < partitions ; i++)
		test_msgver_verify_part("HL.MSGS", &mv, TEST_MSGVER_ALL_PART,
					topic, i, msgcnt/2, msgcnt/2);
	test_msgver_clear(&mv);
	test_consumer_close(rk);
	rd_kafka_destroy(rk);
	TIMING_STOP(&t_hl);

	rd_kafka_topic_conf_destroy(tconf);

        return 0;
}
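Seeding a consumer at explicit per-partition offsets, as the test does through its helpers, boils down to filling a partition list and calling rd_kafka_assign(). A minimal sketch with the public API; the helper name and parameters are illustrative:

#include <librdkafka/rdkafka.h>

/* Sketch: assign a set of partitions with explicit start offsets so the
 * consumer begins fetching each partition at \p offset. */
static rd_kafka_resp_err_t assign_at_offsets (rd_kafka_t *rk, const char *topic,
                                              int partition_cnt, int64_t offset) {
        rd_kafka_topic_partition_list_t *parts;
        rd_kafka_resp_err_t err;
        int i;

        parts = rd_kafka_topic_partition_list_new(partition_cnt);
        for (i = 0 ; i < partition_cnt ; i++)
                rd_kafka_topic_partition_list_add(parts, topic, i)->offset = offset;

        err = rd_kafka_assign(rk, parts);
        rd_kafka_topic_partition_list_destroy(parts);
        return err;
}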
Example #9
/**
 * @brief Test that Metadata requests are retried properly when
 *        timing out due to high broker rtt.
 */
static void do_test_low_socket_timeout (const char *topic) {
        rd_kafka_t *rk;
        rd_kafka_conf_t *conf;
        rd_kafka_topic_t *rkt;
        rd_kafka_resp_err_t err;
        const struct rd_kafka_metadata *md;
        int res;

        mtx_init(&ctrl.lock, mtx_plain);
        cnd_init(&ctrl.cnd);

        TEST_SAY("Test Metadata request retries on timeout\n");

        test_conf_init(&conf, NULL, 60);
        test_conf_set(conf, "socket.timeout.ms", "1000");
        test_conf_set(conf, "socket.max.fails", "12345");
        test_conf_set(conf, "retry.backoff.ms", "5000");
        /* Keep ApiVersion requests (which have their own timeout) from
         * getting in the way of our test. */
        test_conf_set(conf, "api.version.request", "false");
        test_socket_enable(conf);
        test_curr->connect_cb = connect_cb;
        test_curr->is_fatal_cb = is_fatal_cb;

        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
        rkt = test_create_producer_topic(rk, topic, NULL);

        TEST_SAY("Waiting for sockem connect..\n");
        mtx_lock(&ctrl.lock);
        while (!ctrl.skm)
                cnd_wait(&ctrl.cnd, &ctrl.lock);
        mtx_unlock(&ctrl.lock);

        TEST_SAY("Connected, fire off a undelayed metadata() to "
                 "make sure connection is up\n");

        err = rd_kafka_metadata(rk, 0, rkt, &md, tmout_multip(2000));
        TEST_ASSERT(!err, "metadata(undelayed) failed: %s",
                    rd_kafka_err2str(err));
        rd_kafka_metadata_destroy(md);

        if (thrd_create(&ctrl.thrd, ctrl_thrd_main, NULL) != thrd_success)
                TEST_FAIL("Failed to create sockem ctrl thread");

        set_delay(0, 3000); /* Takes effect immediately */

        /* After two retries, remove the delay, the third retry
         * should kick in and work. */
        set_delay(((1000 /*socket.timeout.ms*/ +
                    5000 /*retry.backoff.ms*/) * 2) - 2000, 0);

        TEST_SAY("Calling metadata() again which should succeed after "
                 "3 internal retries\n");
        /* Metadata should be returned after the third retry */
        err = rd_kafka_metadata(rk, 0, rkt, &md,
                                ((1000 /*socket.timeout.ms*/ +
                                  5000 /*retry.backoff.ms*/) * 2) + 5000);
        TEST_SAY("metadata() returned %s\n", rd_kafka_err2str(err));
        TEST_ASSERT(!err, "metadata(undelayed) failed: %s",
                    rd_kafka_err2str(err));
        rd_kafka_metadata_destroy(md);

        rd_kafka_topic_destroy(rkt);
        rd_kafka_destroy(rk);

        /* Join controller thread */
        mtx_lock(&ctrl.lock);
        ctrl.term = 1;
        mtx_unlock(&ctrl.lock);
        thrd_join(ctrl.thrd, &res);

        cnd_destroy(&ctrl.cnd);
        mtx_destroy(&ctrl.lock);
}
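The retry handling under test is internal to librdkafka; from the application side a metadata request is simply a blocking call with a timeout. A minimal sketch using the public API; the 10s timeout is an arbitrary placeholder:

#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Sketch: fetch metadata for a single topic with a bounded timeout. */
static int print_partition_cnt (rd_kafka_t *rk, rd_kafka_topic_t *rkt) {
        const struct rd_kafka_metadata *md;
        rd_kafka_resp_err_t err;

        err = rd_kafka_metadata(rk, 0 /*only this topic*/, rkt, &md,
                                10 * 1000 /*10s timeout*/);
        if (err) {
                fprintf(stderr, "metadata failed: %s\n", rd_kafka_err2str(err));
                return -1;
        }

        if (md->topic_cnt > 0)
                printf("%s: %d partition(s)\n",
                       md->topics[0].topic, md->topics[0].partition_cnt);

        rd_kafka_metadata_destroy(md);
        return 0;
}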
Example #10
int main_0040_io_event (int argc, char **argv) {
	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *tconf;
	rd_kafka_t *rk_p, *rk_c;
	const char *topic;
	rd_kafka_topic_t *rkt_p;
	rd_kafka_queue_t *queue;
	uint64_t testid;
	int msgcnt = 100;
	int recvd = 0;
	int fds[2];
	int wait_multiplier = 1;
	struct pollfd pfd;
        int r;
	enum {
		_NOPE,
		_YEP,
		_REBALANCE
	} expecting_io = _REBALANCE;

	testid = test_id_generate();
	topic = test_mk_topic_name(__FUNCTION__, 1);

	rk_p = test_create_producer();
	rkt_p = test_create_producer_topic(rk_p, topic, NULL);
	test_auto_create_topic_rkt(rk_p, rkt_p);

	test_conf_init(&conf, &tconf, 0);
	rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE);
	test_conf_set(conf, "session.timeout.ms", "6000");
	test_conf_set(conf, "enable.partition.eof", "false");
	/* Speed up propagation of new topics */
	test_conf_set(conf, "metadata.max.age.ms", "5000");
	test_topic_conf_set(tconf, "auto.offset.reset", "earliest");
	rk_c = test_create_consumer(topic, NULL, conf, tconf);

	queue = rd_kafka_queue_get_consumer(rk_c);

	test_consumer_subscribe(rk_c, topic);

#ifndef _MSC_VER
        r = pipe(fds);
#else
        r = _pipe(fds, 2, _O_BINARY);
#endif
        if (r == -1)
		TEST_FAIL("pipe() failed: %s\n", strerror(errno));
	
	rd_kafka_queue_io_event_enable(queue, fds[1], "1", 1);

	pfd.fd = fds[0];
	pfd.events = POLLIN;
	pfd.revents = 0;

	/**
	 * 1) Wait for rebalance event
	 * 2) Wait 1 interval (1s) expecting no IO (nothing produced).
	 * 3) Produce half the messages
	 * 4) Expect IO
	 * 5) Consume the available messages
	 * 6) Wait 1 interval expecting no IO.
	 * 7) Produce remaining half
	 * 8) Expect IO
	 * 9) Done.
	 */
	while (recvd < msgcnt) {
		int r;

#ifndef _MSC_VER
		r = poll(&pfd, 1, 1000 * wait_multiplier);
#else
                r = WSAPoll(&pfd, 1, 1000 * wait_multiplier);
#endif
		if (r == -1) {
			TEST_FAIL("poll() failed: %s", strerror(errno));
			
		} else if (r == 1) {
			rd_kafka_event_t *rkev;
			char b;
			int eventcnt = 0;

			if (pfd.revents & POLLERR)
				TEST_FAIL("Poll error\n");
			if (!(pfd.revents & POLLIN)) {
				TEST_SAY("Stray event 0x%x\n", (int)pfd.revents);
				continue;
			}

			TEST_SAY("POLLIN\n");
                        /* Read signaling token to purge socket queue and
                         * eventually silence POLLIN */
#ifndef _MSC_VER
			r = read(pfd.fd, &b, 1);
#else
			r = _read((int)pfd.fd, &b, 1);
#endif
			if (r == -1)
				TEST_FAIL("read failed: %s\n", strerror(errno));

			if (!expecting_io)
				TEST_WARN("Got unexpected IO after %d/%d msgs\n",
					  recvd, msgcnt);

			while ((rkev = rd_kafka_queue_poll(queue, 0))) {
				eventcnt++;
				switch (rd_kafka_event_type(rkev))
				{
				case RD_KAFKA_EVENT_REBALANCE:
					TEST_SAY("Got %s: %s\n", rd_kafka_event_name(rkev),
						 rd_kafka_err2str(rd_kafka_event_error(rkev)));
					if (expecting_io != _REBALANCE)
						TEST_FAIL("Got Rebalance when expecting message\n");
					if (rd_kafka_event_error(rkev) == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
						rd_kafka_assign(rk_c, rd_kafka_event_topic_partition_list(rkev));
						expecting_io = _NOPE;
					} else
						rd_kafka_assign(rk_c, NULL);
					break;
					
				case RD_KAFKA_EVENT_FETCH:
					if (expecting_io != _YEP)
						TEST_FAIL("Did not expect more messages at %d/%d\n",
							  recvd, msgcnt);
					recvd++;
					if (recvd == (msgcnt / 2) || recvd == msgcnt)
						expecting_io = _NOPE;
					break;

				case RD_KAFKA_EVENT_ERROR:
					TEST_FAIL("Error: %s\n", rd_kafka_event_error_string(rkev));
					break;

				default:
					TEST_SAY("Ignoring event %s\n", rd_kafka_event_name(rkev));
				}
					
				rd_kafka_event_destroy(rkev);
			}
			TEST_SAY("%d events, Consumed %d/%d messages\n", eventcnt, recvd, msgcnt);

			wait_multiplier = 1;

		} else {
			if (expecting_io == _REBALANCE) {
				continue;
			} else if (expecting_io == _YEP) {
				TEST_FAIL("Did not see expected IO after %d/%d msgs\n",
					  recvd, msgcnt);
			}

			TEST_SAY("IO poll timeout (good)\n");

			TEST_SAY("Got idle period, producing\n");
			test_produce_msgs(rk_p, rkt_p, testid, 0, recvd, msgcnt/2,
					  NULL, 10);

			expecting_io = _YEP;
			/* When running slowly (e.g., valgrind) it might take
			 * some time before the first message is received
			 * after producing. */
			wait_multiplier = 3;
		}
	}
	TEST_SAY("Done\n");

	rd_kafka_topic_destroy(rkt_p);
	rd_kafka_destroy(rk_p);

	rd_kafka_queue_destroy(queue);
	rd_kafka_consumer_close(rk_c);
	rd_kafka_destroy(rk_c);

#ifndef _MSC_VER
	close(fds[0]);
	close(fds[1]);
#else
        _close(fds[0]);
        _close(fds[1]);
#endif

	return 0;
}
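The pipe wiring above is the general pattern for integrating librdkafka's event queue with a poll()-based loop: rd_kafka_queue_io_event_enable() writes the given payload to the file descriptor whenever the queue goes from empty to non-empty. A condensed, POSIX-only sketch of the same wiring; the timeout and single-shot structure are illustrative:

#include <unistd.h>
#include <poll.h>
#include <librdkafka/rdkafka.h>

/* Sketch: wake a poll() loop when the consumer queue becomes non-empty. */
static void drain_with_io_wakeup (rd_kafka_t *rk_consumer) {
        rd_kafka_queue_t *queue = rd_kafka_queue_get_consumer(rk_consumer);
        int fds[2];
        struct pollfd pfd;

        if (pipe(fds) == -1) {
                rd_kafka_queue_destroy(queue);
                return;
        }

        /* One "1" byte is written to fds[1] on each empty->non-empty
         * transition of the queue. */
        rd_kafka_queue_io_event_enable(queue, fds[1], "1", 1);

        pfd.fd = fds[0];
        pfd.events = POLLIN;

        if (poll(&pfd, 1, 5000) == 1 && (pfd.revents & POLLIN)) {
                char b;
                rd_kafka_event_t *rkev;

                if (read(fds[0], &b, 1) == -1)
                        b = 0; /* ignore read errors in this sketch */

                /* Drain the queue: FETCH, REBALANCE, ERROR, ... events. */
                while ((rkev = rd_kafka_queue_poll(queue, 0)))
                        rd_kafka_event_destroy(rkev);
        }

        rd_kafka_queue_destroy(queue);
        close(fds[0]);
        close(fds[1]);
}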
Example #11
int main_0017_compression(int argc, char **argv) {
    rd_kafka_t *rk_p, *rk_c;
    const int msg_cnt = 1000;
    int msg_base = 0;
    uint64_t testid;
#define CODEC_CNT 4
    const char *codecs[CODEC_CNT+1] = {
        "none",
#if WITH_ZLIB
        "gzip",
#endif
#if WITH_SNAPPY
        "snappy",
#endif
#if WITH_LZ4
        "lz4",
#endif
        NULL
    };
    const char *topics[CODEC_CNT];
    const int32_t partition = 0;
    int i;

    testid = test_id_generate();

    /* Produce messages */
    rk_p = test_create_producer();
    for (i = 0; codecs[i] != NULL ; i++) {
        rd_kafka_topic_t *rkt_p;

        topics[i] = test_mk_topic_name(codecs[i], 1);
        TEST_SAY("Produce %d messages with %s compression to "
                 "topic %s\n",
                 msg_cnt, codecs[i], topics[i]);
        rkt_p = test_create_producer_topic(rk_p, topics[i],
                                           "compression.codec", codecs[i], NULL);

        /* Produce a small message that will not shrink with
         * compression (issue #781) */
        test_produce_msgs(rk_p, rkt_p, testid, partition,
                          msg_base + (partition*msg_cnt), 1,
                          NULL, 5);

        /* Produce standard sized messages */
        test_produce_msgs(rk_p, rkt_p, testid, partition,
                          msg_base + (partition*msg_cnt) + 1, msg_cnt-1,
                          NULL, 512);
        rd_kafka_topic_destroy(rkt_p);
    }

    rd_kafka_destroy(rk_p);


    /* Consume messages */
    rk_c = test_create_consumer(NULL, NULL, NULL, NULL);

    for (i = 0; codecs[i] != NULL ; i++) {
        rd_kafka_topic_t *rkt_c = rd_kafka_topic_new(rk_c,
                                  topics[i], NULL);
        TEST_SAY("Consume %d messages from topic %s\n",
                 msg_cnt, topics[i]);
        /* Start consuming */
        test_consumer_start(codecs[i], rkt_c, partition,
                            RD_KAFKA_OFFSET_BEGINNING);

        /* Consume messages */
        test_consume_msgs(codecs[i], rkt_c, testid, partition,
                          /* Use offset 0 here, which is wrong, should
                           * be TEST_NO_SEEK, but it exposed a bug
                           * where the Offset query was postponed
                           * till after the seek, causing messages
                           * to be replayed. */
                          0,
                          msg_base, msg_cnt, 1 /* parse format */);

        test_consumer_stop(codecs[i], rkt_c, partition);
        rd_kafka_topic_destroy(rkt_c);
    }

    rd_kafka_destroy(rk_c);

    return 0;
}
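The per-topic codec the loop sets through the harness maps to the compression.codec topic configuration property. A minimal sketch with the public API; the producer handle is assumed to exist already:

#include <librdkafka/rdkafka.h>

/* Sketch: create a producer topic handle with a given compression codec
 * ("none", "gzip", "snappy" or "lz4", build permitting). */
static rd_kafka_topic_t *create_compressed_topic (rd_kafka_t *rk,
                                                  const char *topic,
                                                  const char *codec) {
        char errstr[512];
        rd_kafka_topic_conf_t *tconf = rd_kafka_topic_conf_new();

        if (rd_kafka_topic_conf_set(tconf, "compression.codec", codec,
                                    errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                rd_kafka_topic_conf_destroy(tconf);
                return NULL;
        }

        /* rd_kafka_topic_new() takes ownership of tconf on success. */
        return rd_kafka_topic_new(rk, topic, tconf);
}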
Example #12
int main_0029_assign_offset (int argc, char **argv) {
	const char *topic = test_mk_topic_name(__FUNCTION__, 1);
	rd_kafka_t *rk;
	rd_kafka_topic_t *rkt;
	rd_kafka_topic_partition_list_t *parts;
        uint64_t testid;
	int i;
	test_timing_t t_simple, t_hl;
	test_msgver_t mv;

	test_conf_init(NULL, NULL, 20 + (test_session_timeout_ms * 3 / 1000));

	/* Produce X messages to Y partitions so we get a
	 * nice seekable 0..X offset range on each partition. */
	testid = test_id_generate();
	rk = test_create_producer();
	rkt = test_create_producer_topic(rk, topic, NULL);

	parts = rd_kafka_topic_partition_list_new(partitions);

	for (i = 0 ; i < partitions ; i++) {
		test_produce_msgs(rk, rkt, testid, i, 0, msgcnt, NULL, 0);
		/* Set start offset */
		rd_kafka_topic_partition_list_add(parts, topic, i)->offset =
			msgcnt / 2;
	}

	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);


	/* Simple consumer */
	TIMING_START(&t_simple, "SIMPLE.CONSUMER");
	rk = test_create_consumer(topic, NULL, NULL, NULL);
	test_msgver_init(&mv, testid);
	test_consumer_assign("SIMPLE.ASSIGN", rk, parts);
	test_consumer_poll("SIMPLE.CONSUME", rk, testid, -1, 0,
			   partitions * (msgcnt / 2), &mv);
	for (i = 0 ; i < partitions ; i++)
		test_msgver_verify_part("HL.MSGS", &mv, TEST_MSGVER_ALL_PART,
					topic, i, msgcnt/2, msgcnt/2);
	test_msgver_clear(&mv);
	test_consumer_close(rk);
	rd_kafka_destroy(rk);
	TIMING_STOP(&t_simple);

	rd_kafka_topic_partition_list_destroy(parts);
	

	/* High-level consumer: method 1
	 * Offsets are set in rebalance callback. */
	if (test_broker_version >= TEST_BRKVER(0,9,0,0)) {
		reb_method = REB_METHOD_1;
		TIMING_START(&t_hl, "HL.CONSUMER");
		test_msgver_init(&mv, testid);
		rk = test_create_consumer(topic, rebalance_cb, NULL, NULL);
		test_consumer_subscribe(rk, topic);
		test_consumer_poll("HL.CONSUME", rk, testid, -1, 0,
				   partitions * (msgcnt / 2), &mv);
		for (i = 0 ; i < partitions ; i++)
			test_msgver_verify_part("HL.MSGS", &mv,
						TEST_MSGVER_ALL_PART,
						topic, i, msgcnt/2, msgcnt/2);
		test_msgver_clear(&mv);
		test_consumer_close(rk);
		rd_kafka_destroy(rk);
		TIMING_STOP(&t_hl);


		/* High-level consumer: method 2:
		 * the first two partitions use a fixed absolute offset, the rest
		 * use the auto/stored offset (which is now at the end).
		 * Offsets are set in the rebalance callback. */
		reb_method = REB_METHOD_2;
		TIMING_START(&t_hl, "HL.CONSUMER2");
		test_msgver_init(&mv, testid);
		rk = test_create_consumer(topic, rebalance_cb, NULL, NULL);
		test_consumer_subscribe(rk, topic);
		test_consumer_poll("HL.CONSUME2", rk, testid, partitions, 0,
				   2 * (msgcnt / 2), &mv);
		for (i = 0 ; i < partitions ; i++) {
			if (i < 2)
				test_msgver_verify_part("HL.MSGS2.A", &mv,
							TEST_MSGVER_ALL_PART,
							topic, i, msgcnt/2,
							msgcnt/2);
		}
		test_msgver_clear(&mv);
		test_consumer_close(rk);
		rd_kafka_destroy(rk);
		TIMING_STOP(&t_hl);
	}

        return 0;
}
Example #13
int main_0056_balanced_group_mt (int argc, char **argv) {
        const char *topic = test_mk_topic_name(__FUNCTION__, 1);
        rd_kafka_t *rk_p, *rk_c;
        rd_kafka_topic_t *rkt_p;
        int msg_cnt = 1000;
        int msg_base = 0;
        int partition_cnt = 2;
        int partition;
        uint64_t testid;
        rd_kafka_topic_conf_t *default_topic_conf;
        rd_kafka_topic_partition_list_t *sub, *topics;
        rd_kafka_resp_err_t err;
        test_timing_t t_assign, t_close, t_consume;
        int i;

        exp_msg_cnt = msg_cnt * partition_cnt;

        testid = test_id_generate();

        /* Produce messages */
        rk_p = test_create_producer();
        rkt_p = test_create_producer_topic(rk_p, topic, NULL);

        for (partition = 0; partition < partition_cnt; partition++) {
                test_produce_msgs(rk_p, rkt_p, testid, partition,
                                  msg_base + (partition * msg_cnt), msg_cnt,
                                  NULL, 0);
        }

        rd_kafka_topic_destroy(rkt_p);
        rd_kafka_destroy(rk_p);

        if (mtx_init(&lock, mtx_plain) != thrd_success)
                TEST_FAIL("Cannot create mutex.");

        test_conf_init(NULL, &default_topic_conf,
                       (test_session_timeout_ms * 3) / 1000);

        test_topic_conf_set(default_topic_conf, "auto.offset.reset",
                            "smallest");

        /* Fill in topic subscription set */
        topics = rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add(topics, topic, RD_KAFKA_PARTITION_UA);

        /* Create consumers and start subscription */
        rk_c = test_create_consumer(
                topic /*group_id*/, rebalance_cb, NULL,
                default_topic_conf);

        test_consumer_subscribe(rk_c, topic);

        rd_kafka_topic_partition_list_destroy(topics);

        /* Wait for both consumers to get an assignment */
        TIMING_START(&t_assign, "WAIT.ASSIGN");
        get_assignment(rk_c);
        TIMING_STOP(&t_assign);

        TIMING_START(&t_consume, "CONSUME.WAIT");
        for (i = 0; i < MAX_THRD_CNT; ++i) {
                if (tids[i] != 0)
                        thrd_join(tids[i], NULL);
        }
        TIMING_STOP(&t_consume);

        TEST_SAY("Closing remaining consumers\n");
        /* Query subscription */
        err = rd_kafka_subscription(rk_c, &sub);
        TEST_ASSERT(!err, "%s: subscription () failed: %s", rd_kafka_name(rk_c),
                    rd_kafka_err2str(err));
        TEST_SAY("%s: subscription (%d):\n", rd_kafka_name(rk_c), sub->cnt);
        for (i = 0; i < sub->cnt; ++i)
                TEST_SAY(" %s\n", sub->elems[i].topic);
        rd_kafka_topic_partition_list_destroy(sub);

        /* Run an explicit unsubscribe() (async) prior to close()
         * to trigger race condition issues on termination. */
        TEST_SAY("Unsubscribing instance %s\n", rd_kafka_name(rk_c));
        err = rd_kafka_unsubscribe(rk_c);
        TEST_ASSERT(!err, "%s: unsubscribe failed: %s", rd_kafka_name(rk_c),
                    rd_kafka_err2str(err));

        TEST_SAY("Closing %s\n", rd_kafka_name(rk_c));
        TIMING_START(&t_close, "CONSUMER.CLOSE");
        err = rd_kafka_consumer_close(rk_c);
        TIMING_STOP(&t_close);
        TEST_ASSERT(!err, "consumer_close failed: %s", rd_kafka_err2str(err));

        rd_kafka_destroy(rk_c);
        rk_c = NULL;

        TEST_SAY("%d/%d messages consumed\n", consumed_msg_cnt, exp_msg_cnt);
        TEST_ASSERT(consumed_msg_cnt >= exp_msg_cnt,
                    "Only %d/%d messages were consumed", consumed_msg_cnt,
                    exp_msg_cnt);

        if (consumed_msg_cnt > exp_msg_cnt)
                TEST_SAY("At least %d/%d messages were consumed "
                         "multiple times\n",
                         consumed_msg_cnt - exp_msg_cnt, exp_msg_cnt);

        mtx_destroy(&lock);

        return 0;
}
Example #14
int main_0018_cgrp_term (int argc, char **argv) {
	const char *topic = test_mk_topic_name(__FUNCTION__, 1);
#define _CONS_CNT 2
	rd_kafka_t *rk_p, *rk_c[_CONS_CNT];
        rd_kafka_topic_t *rkt_p;
	int msg_cnt = 1000;
	int msg_base = 0;
        int partition_cnt = 2;
        int partition;
	uint64_t testid;
        rd_kafka_topic_conf_t *default_topic_conf;
	rd_kafka_topic_partition_list_t *topics;
	rd_kafka_resp_err_t err;
	test_timing_t t_assign, t_consume;
	char errstr[512];
	int i;

	testid = test_id_generate();

	/* Produce messages */
	rk_p = test_create_producer();
	rkt_p = test_create_producer_topic(rk_p, topic, NULL);

        for (partition = 0 ; partition < partition_cnt ; partition++) {
                test_produce_msgs(rk_p, rkt_p, testid, partition,
                                  msg_base+(partition*msg_cnt), msg_cnt,
				  NULL, 0);
        }

	rd_kafka_topic_destroy(rkt_p);
	rd_kafka_destroy(rk_p);


        test_conf_init(NULL, &default_topic_conf,
		       (test_session_timeout_ms * 3) / 1000);
        if (rd_kafka_topic_conf_set(default_topic_conf, "auto.offset.reset",
				    "smallest", errstr, sizeof(errstr)) !=
	    RD_KAFKA_CONF_OK)
		TEST_FAIL("%s\n", errstr);

	/* Fill in topic subscription set */
	topics = rd_kafka_topic_partition_list_new(1);
	rd_kafka_topic_partition_list_add(topics, topic, -1);

	/* Create consumers and start subscription */
	for (i = 0 ; i < _CONS_CNT ; i++) {
		rk_c[i] = test_create_consumer(topic/*group_id*/,
					       rebalance_cb, NULL,
					       rd_kafka_topic_conf_dup(
						       default_topic_conf),
					       NULL);

		err = rd_kafka_poll_set_consumer(rk_c[i]);
		if (err)
			TEST_FAIL("poll_set_consumer: %s\n",
				  rd_kafka_err2str(err));

		err = rd_kafka_subscribe(rk_c[i], topics);
		if (err)
			TEST_FAIL("subscribe: %s\n", rd_kafka_err2str(err));
	}

        rd_kafka_topic_conf_destroy(default_topic_conf);

        rd_kafka_topic_partition_list_destroy(topics);


	/* Wait for both consumers to get an assignment */
	TIMING_START(&t_assign, "WAIT.ASSIGN");
	while (assign_cnt < _CONS_CNT)
		consume_all(rk_c, _CONS_CNT, msg_cnt,
			    test_session_timeout_ms + 3000);
	TIMING_STOP(&t_assign);

	/* Now close one of the consumers, this will cause a rebalance. */
	TEST_SAY("Closing down 1/%d consumer(s): %s\n", _CONS_CNT,
		 rd_kafka_name(rk_c[0]));
	err = rd_kafka_consumer_close(rk_c[0]);
	if (err)
		TEST_FAIL("consumer_close failed: %s\n", rd_kafka_err2str(err));
	rd_kafka_destroy(rk_c[0]);
	rk_c[0] = NULL;

	/* Let remaining consumers run for a while to take over the now
	 * lost partitions. */

	if (assign_cnt != _CONS_CNT-1)
		TEST_FAIL("assign_cnt %d, should be %d\n",
			  assign_cnt, _CONS_CNT-1);

	TIMING_START(&t_consume, "CONSUME.WAIT");
	consume_all(rk_c, _CONS_CNT, msg_cnt, test_session_timeout_ms + 3000);
	TIMING_STOP(&t_consume);

	TEST_SAY("Closing remaining consumers\n");
	for (i = 0 ; i < _CONS_CNT ; i++) {
		test_timing_t t_close;
                rd_kafka_topic_partition_list_t *sub;
                int j;

		if (!rk_c[i])
			continue;

                /* Query subscription */
                err = rd_kafka_subscription(rk_c[i], &sub);
                if (err)
                        TEST_FAIL("%s: subscription() failed: %s\n",
                                  rd_kafka_name(rk_c[i]),
                                  rd_kafka_err2str(err));
                TEST_SAY("%s: subscription (%d):\n",
                         rd_kafka_name(rk_c[i]), sub->cnt);
                for (j = 0 ; j < sub->cnt ; j++)
                        TEST_SAY(" %s\n", sub->elems[j].topic);
                rd_kafka_topic_partition_list_destroy(sub);

                /* Run an explicit unsubscribe() (async) prior to close()
                 * to trigger race condition issues on termination. */
                TEST_SAY("Unsubscribing instance %s\n", rd_kafka_name(rk_c[i]));
                err = rd_kafka_unsubscribe(rk_c[i]);
                if (err)
                        TEST_FAIL("%s: unsubscribe failed: %s\n",
                                  rd_kafka_name(rk_c[i]),
                                  rd_kafka_err2str(err));

		TEST_SAY("Closing %s\n", rd_kafka_name(rk_c[i]));
		TIMING_START(&t_close, "CONSUMER.CLOSE");
		err = rd_kafka_consumer_close(rk_c[i]);
		TIMING_STOP(&t_close);
		if (err)
			TEST_FAIL("consumer_close failed: %s\n",
				  rd_kafka_err2str(err));

		rd_kafka_destroy(rk_c[i]);
		rk_c[i] = NULL;
	}

	TEST_SAY("%d/%d messages consumed\n", consumed_msg_cnt, msg_cnt);
	if (consumed_msg_cnt < msg_cnt)
		TEST_FAIL("Only %d/%d messages were consumed\n",
			  consumed_msg_cnt, msg_cnt);
	else if (consumed_msg_cnt > msg_cnt)
		TEST_SAY("At least %d/%d messages were consumed "
			 "multiple times\n",
			 consumed_msg_cnt - msg_cnt, msg_cnt);
	
	return 0;
}
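For comparison, the balanced-consumer lifecycle exercised by the last two tests (subscribe, poll, then close to trigger a rebalance for the remaining members) looks like this against the public API alone. A minimal sketch; the consumer handle is assumed to have been created with a group.id, and error handling is reduced to the essentials:

#include <librdkafka/rdkafka.h>

/* Sketch: subscribe, poll until enough messages have been seen, then close
 * cleanly so the group rebalances the partitions to the remaining members. */
static void run_group_member (rd_kafka_t *rk, const char *topic, int max_msgs) {
        rd_kafka_topic_partition_list_t *topics;
        int recvd = 0;

        /* Route all callbacks through the consumer poll queue. */
        rd_kafka_poll_set_consumer(rk);

        topics = rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add(topics, topic, RD_KAFKA_PARTITION_UA);
        rd_kafka_subscribe(rk, topics);
        rd_kafka_topic_partition_list_destroy(topics);

        while (recvd < max_msgs) {
                rd_kafka_message_t *rkm = rd_kafka_consumer_poll(rk, 1000);
                if (!rkm)
                        continue; /* poll timeout */
                if (!rkm->err)
                        recvd++;
                rd_kafka_message_destroy(rkm);
        }

        /* Leaving the group triggers a rebalance for the other members. */
        rd_kafka_consumer_close(rk);
        rd_kafka_destroy(rk);
}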