Example #1
static void do_nonexist_commit (void) {
	rd_kafka_t *rk;
	char group_id[64];
	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *tconf;
	rd_kafka_topic_partition_list_t *offsets;
	const char *unk_topic = test_mk_topic_name(__FUNCTION__, 1);
	rd_kafka_resp_err_t err;

	test_conf_init(&conf, &tconf, 20);
	test_str_id_generate(group_id, sizeof(group_id));

	TEST_SAY(_C_MAG "[ do_nonexist_commit group.id %s ]\n", group_id);

	rk = test_create_consumer(group_id, NULL, conf, tconf, NULL);

	TEST_SAY("Try nonexist commit\n");
	offsets = rd_kafka_topic_partition_list_new(2);
	rd_kafka_topic_partition_list_add(offsets, unk_topic, 0)->offset = 123;
	rd_kafka_topic_partition_list_add(offsets, unk_topic, 1)->offset = 456;

	err = rd_kafka_commit_queue(rk, offsets, NULL,
				    nonexist_offset_commit_cb, NULL);
	TEST_SAY("nonexist commit returned %s\n", rd_kafka_err2str(err));
	if (err != RD_KAFKA_RESP_ERR_NO_ERROR)
		TEST_FAIL("commit() should succeed, not: %s",
			  rd_kafka_err2str(err));

	rd_kafka_topic_partition_list_destroy(offsets);

	test_consumer_close(rk);

	rd_kafka_destroy(rk);
}
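The nonexist_offset_commit_cb referenced above is not included in this excerpt. A minimal sketch of what such a callback could look like, matching the offset-commit callback signature that rd_kafka_commit_queue() takes; the per-partition error dump is an assumption about what the test checks:

/* Hypothetical reconstruction: the commit call itself succeeds, so any
 * UNKNOWN_TOPIC_OR_PART results are expected to show up as per-partition
 * errors in the offsets list. */
static void nonexist_offset_commit_cb (rd_kafka_t *rk,
				       rd_kafka_resp_err_t err,
				       rd_kafka_topic_partition_list_t *offsets,
				       void *opaque) {
	int i;

	TEST_SAY("offset commit returned: %s\n", rd_kafka_err2str(err));
	for (i = 0 ; i < offsets->cnt ; i++)
		TEST_SAY(" %s [%"PRId32"]: %s\n",
			 offsets->elems[i].topic,
			 offsets->elems[i].partition,
			 rd_kafka_err2str(offsets->elems[i].err));
}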
Example #2
int main_0042_many_topics (int argc, char **argv) {
	char **topics;
	const int topic_cnt = 20; /* increase as needed; topic creation
				   * takes time, so unless hunting a bug
				   * we keep this low to keep the
				   * test suite run time down. */
	uint64_t testid;
	int i;

	test_conf_init(NULL, NULL, 60);

	testid = test_id_generate();

	/* Generate unique topic names */
	topics = malloc(sizeof(*topics) * topic_cnt);
	for (i = 0 ; i < topic_cnt ; i++)
		topics[i] = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));

	produce_many(topics, topic_cnt, testid);
	legacy_consume_many(topics, topic_cnt, testid);
	if (test_broker_version >= TEST_BRKVER(0,9,0,0)) {
		subscribe_consume_many(topics, topic_cnt, testid);
		assign_consume_many(topics, topic_cnt, testid);
	}

	for (i = 0 ; i < topic_cnt ; i++)
		free(topics[i]);
	free(topics);

        return 0;
}
Example #3
static void test_producer_no_connection (void) {
	rd_kafka_t *rk;
	rd_kafka_conf_t *conf;
	rd_kafka_topic_t *rkt;
	int i;
	const int partition_cnt = 2;
	int msgcnt = 0;
	test_timing_t t_destroy;

	test_conf_init(&conf, NULL, 20);

	test_conf_set(conf, "bootstrap.servers", NULL);

	rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
	rkt = test_create_topic_object(rk, __FUNCTION__,
				       "message.timeout.ms", "5000", NULL);

	test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA, 0, 100,
				 NULL, 100, 0, &msgcnt);
	for (i = 0 ; i < partition_cnt ; i++)
		test_produce_msgs_nowait(rk, rkt, 0, i,
					 0, 100, NULL, 100, 0, &msgcnt);

	rd_kafka_poll(rk, 1000);

	TEST_SAY("%d messages in queue\n", rd_kafka_outq_len(rk));

	rd_kafka_topic_destroy(rkt);

	TIMING_START(&t_destroy, "rd_kafka_destroy()");
	rd_kafka_destroy(rk);
	TIMING_STOP(&t_destroy);
}
Example #4
int main_0035_api_version (int argc, char **argv) {
	rd_kafka_t *rk;
	rd_kafka_conf_t *conf;
	const struct rd_kafka_metadata *metadata;
	rd_kafka_resp_err_t err;
	test_timing_t t_meta;

	test_conf_init(&conf, NULL, 30);
	test_conf_set(conf, "socket.timeout.ms", "12000");
	rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

	TEST_SAY("Querying for metadata\n");
	TIMING_START(&t_meta, "metadata()");
	err = rd_kafka_metadata(rk, 0, NULL, &metadata, 10*1000);
	TIMING_STOP(&t_meta);
	if (err)
		TEST_FAIL("metadata() failed: %s",
			  rd_kafka_err2str(err));

	if (TIMING_DURATION(&t_meta) / 1000 > 11*1000)
		TEST_FAIL("metadata() took too long: %.3fms",
			  (float)TIMING_DURATION(&t_meta) / 1000.0f);

	rd_kafka_metadata_destroy(metadata);

	TEST_SAY("Metadata succeeded\n");

	rd_kafka_destroy(rk);

	return 0;
}
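Not part of the original test, but for illustration: the returned metadata object can be walked through its public struct fields. A small sketch:

/* Sketch (not from the test suite): dump broker and topic counts from
 * a metadata object obtained with rd_kafka_metadata() as above. */
static void dump_metadata (const struct rd_kafka_metadata *metadata) {
	int i;

	TEST_SAY("%d brokers, %d topics\n",
		 metadata->broker_cnt, metadata->topic_cnt);
	for (i = 0 ; i < metadata->topic_cnt ; i++)
		TEST_SAY("  topic %s: %d partitions\n",
			 metadata->topics[i].topic,
			 metadata->topics[i].partition_cnt);
}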
Example #5
static void assign_consume_many (char **topics, int topic_cnt, uint64_t testid){
	rd_kafka_t *rk;
	rd_kafka_topic_partition_list_t *parts;
	int i;
	test_msgver_t mv;

	TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__);

	test_conf_init(NULL, NULL, 60);
	rk = test_create_consumer(__FUNCTION__, NULL, NULL, NULL);

	parts = rd_kafka_topic_partition_list_new(topic_cnt);
	for (i = 0 ; i < topic_cnt ; i++)
		rd_kafka_topic_partition_list_add(parts, topics[i], 0)->
			offset = RD_KAFKA_OFFSET_TAIL(msgs_per_topic);

	test_consumer_assign("consume.assign", rk, parts);
	rd_kafka_topic_partition_list_destroy(parts);

	test_msgver_init(&mv, testid);
	test_consumer_poll("consume.assign", rk, testid,
			   -1, 0, msgs_per_topic * topic_cnt, &mv);

	for (i = 0 ; i < topic_cnt ; i++)
		test_msgver_verify_part("assign", &mv, TEST_MSGVER_ALL_PART,
					topics[i], 0, i * msgs_per_topic,
					msgs_per_topic);
	test_msgver_clear(&mv);

	test_consumer_close(rk);

	rd_kafka_destroy(rk);
}
Example #6
rd_kafka_topic_t *test_create_producer_topic (rd_kafka_t *rk,
	const char *topic, ...) {
	rd_kafka_topic_t *rkt;
	rd_kafka_topic_conf_t *topic_conf;
	char errstr[512];
	va_list ap;
	const char *name, *val;

	test_conf_init(NULL, &topic_conf, 20);

	va_start(ap, topic);
	while ((name = va_arg(ap, const char *)) &&
	       (val = va_arg(ap, const char *))) {
		if (rd_kafka_topic_conf_set(topic_conf, name, val,
			errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
			TEST_FAIL("Conf failed: %s\n", errstr);
	}
	va_end(ap);

	/* Make sure all replicas are in-sync after producing
	 * so that the consume test won't fail. */
        rd_kafka_topic_conf_set(topic_conf, "request.required.acks", "-1",
                                errstr, sizeof(errstr));


	rkt = rd_kafka_topic_new(rk, topic, topic_conf);
	if (!rkt)
		TEST_FAIL("Failed to create topic: %s\n",
                          rd_kafka_err2str(rd_kafka_errno2err(errno)));

	return rkt;

}
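Usage note: the name/value vararg list must be terminated with NULL. A hypothetical call:

/* Hypothetical usage: one extra topic-level property, NULL-terminated */
rkt = test_create_producer_topic(rk, topic,
				 "message.timeout.ms", "10000", NULL);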
Example #7
int main_0021_rkt_destroy (int argc, char **argv) {
	const char *topic = test_mk_topic_name(__FUNCTION__, 0);
	rd_kafka_t *rk;
        rd_kafka_topic_t *rkt;
        const int msgcnt = 1000;
        uint64_t testid;
        int remains = 0;

        test_conf_init(NULL, NULL, 10);


        testid = test_id_generate();
        rk = test_create_producer();
        rkt = test_create_producer_topic(rk, topic, NULL);


        test_produce_msgs_nowait(rk, rkt, testid, RD_KAFKA_PARTITION_UA,
                                 0, msgcnt, NULL, 0, &remains);

        TEST_ASSERT(msgcnt == remains, "Only %d/%d messages produced",
                    remains, msgcnt);

        rd_kafka_topic_destroy(rkt);

        test_wait_delivery(rk, &remains);

        rd_kafka_destroy(rk);

        return 0;
}
Example #8
static void consume_messages (uint64_t testid, const char *topic,
			      int32_t partition, int msg_base, int batch_cnt,
			      int msgcnt) {
	rd_kafka_t *rk;
	rd_kafka_topic_t *rkt;
	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *topic_conf;
	int i;

	test_conf_init(&conf, &topic_conf, 20);

	/* Create kafka instance */
	rk = test_create_handle(RD_KAFKA_CONSUMER, conf);

	rkt = rd_kafka_topic_new(rk, topic, topic_conf);
	if (!rkt)
		TEST_FAIL("Failed to create topic: %s\n",
                          rd_kafka_err2str(rd_kafka_last_error()));

	TEST_SAY("Consuming %i messages from partition %i\n",
		 batch_cnt, partition);

	/* Consume messages */
	if (rd_kafka_consume_start(rkt, partition,
			     RD_KAFKA_OFFSET_TAIL(batch_cnt)) == -1)
		TEST_FAIL("consume_start(%i, -%i) failed: %s",
			  (int)partition, batch_cnt,
			  rd_kafka_err2str(rd_kafka_last_error()));

	for (i = 0 ; i < batch_cnt ; i++) {
		rd_kafka_message_t *rkmessage;

		rkmessage = rd_kafka_consume(rkt, partition, tmout_multip(5000));
		if (!rkmessage)
			TEST_FAIL("Failed to consume message %i/%i from "
				  "partition %i: %s",
				  i, batch_cnt, (int)partition,
				  rd_kafka_err2str(rd_kafka_last_error()));
		if (rkmessage->err)
			TEST_FAIL("Consume message %i/%i from partition %i "
				  "has error: %s",
				  i, batch_cnt, (int)partition,
				  rd_kafka_err2str(rkmessage->err));

		verify_consumed_msg(testid, partition, msg_base+i, rkmessage);

		rd_kafka_message_destroy(rkmessage);
	}

	rd_kafka_consume_stop(rkt, partition);

	/* Destroy topic */
	rd_kafka_topic_destroy(rkt);

	/* Destroy rdkafka instance */
	TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
	rd_kafka_destroy(rk);
}
Example #9
/**
 * @brief Test handling of implicit acks.
 *
 * @param batch_cnt Total number of batches (ProduceRequests) sent.
 * @param initial_fail_batch_cnt How many of the initial batches should
 *                               fail with an emulated network timeout.
 */
static void do_test_implicit_ack (const char *what,
                                  int batch_cnt, int initial_fail_batch_cnt) {
        rd_kafka_t *rk;
        const char *topic = test_mk_topic_name("0090_idempotence_impl_ack", 1);
        const int32_t partition = 0;
        uint64_t testid;
        int msgcnt = 10*batch_cnt;
        rd_kafka_conf_t *conf;
        rd_kafka_topic_t *rkt;
        test_msgver_t mv;

        TEST_SAY(_C_MAG "[ Test implicit ack: %s ]\n", what);

        rd_atomic32_init(&state.produce_cnt, 0);
        state.batch_cnt = batch_cnt;
        state.initial_fail_batch_cnt = initial_fail_batch_cnt;

        testid = test_id_generate();

        test_conf_init(&conf, NULL, 60);
        rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
        test_conf_set(conf, "enable.idempotence", "true");
        test_conf_set(conf, "batch.num.messages", "10");
        test_conf_set(conf, "linger.ms", "500");
        test_conf_set(conf, "retry.backoff.ms", "2000");

        /* The ProduceResponse handler will inject timed-out-in-flight
         * errors for the first N ProduceRequests, which will trigger retries
         * that in turn will result in OutOfSequence errors. */
        test_conf_set(conf, "ut_handle_ProduceResponse",
                      (char *)handle_ProduceResponse);

        test_create_topic(topic, 1, 1);

        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
        rkt = test_create_producer_topic(rk, topic, NULL);


        TEST_SAY("Producing %d messages\n", msgcnt);
        test_produce_msgs(rk, rkt, testid, -1, 0, msgcnt, NULL, 0);

        TEST_SAY("Flushing..\n");
        rd_kafka_flush(rk, 10000);

        rd_kafka_topic_destroy(rkt);
        rd_kafka_destroy(rk);

        TEST_SAY("Verifying messages with consumer\n");
        test_msgver_init(&mv, testid);
        test_consume_msgs_easy_mv(NULL, topic, partition,
                                  testid, 1, msgcnt, NULL, &mv);
        test_msgver_verify("verify", &mv, TEST_MSGVER_ALL, 0, msgcnt);
        test_msgver_clear(&mv);

        TEST_SAY(_C_GRN "[ Test implicit ack: %s : PASS ]\n", what);
}
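The handle_ProduceResponse hook installed above is not shown in this excerpt. A hedged reconstruction of what it might look like, given the state fields the test initializes; the exact failure condition is an assumption:

/* Hypothetical reconstruction: fail the first initial_fail_batch_cnt
 * ProduceRequests with an emulated request timeout, then pass
 * responses through unchanged. */
static rd_kafka_resp_err_t
handle_ProduceResponse (rd_kafka_t *rk, int32_t brokerid,
                        uint64_t msgseq, rd_kafka_resp_err_t err) {
        int32_t cnt = rd_atomic32_add(&state.produce_cnt, 1);

        if (cnt <= state.initial_fail_batch_cnt)
                return RD_KAFKA_RESP_ERR__TIMED_OUT;

        return err;
}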
Example #10
static void do_test_non_exist_and_partchange (void) {
	char *topic_a = rd_strdup(test_mk_topic_name("topic_a", 1));
	rd_kafka_t *rk;
	rd_kafka_conf_t *conf;
	rd_kafka_queue_t *queue;

	/**
	 * Test #1:
	 * - Subscribe to non-existing topic.
	 * - Verify empty assignment
	 * - Create topic
	 * - Verify new assignment containing topic
	 */
	TEST_SAY("#1 & #2 testing\n");
	test_conf_init(&conf, NULL, 60);

	/* Decrease metadata interval to speed up topic change discovery. */
	test_conf_set(conf, "metadata.max.age.ms", "5000");

	rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE);
	rk = test_create_consumer(test_str_id_generate_tmp(),
				  NULL, conf, NULL, NULL);
	queue = rd_kafka_queue_get_consumer(rk);

	TEST_SAY("#1: Subscribing to %s\n", topic_a);
	test_consumer_subscribe(rk, topic_a);

	/* Should not see a rebalance since no topics are matched. */
	await_no_rebalance("#1: empty", rk, queue, 10000);

	TEST_SAY("#1: creating topic %s\n", topic_a);
	test_create_topic(topic_a, 2, 1);

	await_assignment("#1: proper", rk, queue, 1,
			 topic_a, 2);


	/**
	 * Test #2 (continue with #1 consumer)
	 * - Increase the partition count
	 * - Verify updated assignment
	 */
	test_kafka_topics("--alter --topic %s --partitions 4",
			  topic_a);
	await_revoke("#2", rk, queue);

	await_assignment("#2: more partitions", rk, queue, 1,
			 topic_a, 4);

	test_consumer_close(rk);
	rd_kafka_queue_destroy(queue);
	rd_kafka_destroy(rk);

	rd_free(topic_a);
}
Example #11
static void do_produce (const char *topic, int msgcnt) {
        rd_kafka_t *rk;
        rd_kafka_conf_t *conf;
        int i;
        rd_kafka_resp_err_t err;

        test_conf_init(&conf, NULL, 0);
        test_conf_set(conf, "acks", "all");
        rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);

        rd_kafka_conf_interceptor_add_on_new(conf, __FILE__, on_new, NULL);

        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

        /* First message is without headers (negative testing) */
        i = 0;
        err = rd_kafka_producev(
                rk,
                RD_KAFKA_V_TOPIC(topic),
                RD_KAFKA_V_PARTITION(0),
                RD_KAFKA_V_VALUE(&i, sizeof(i)),
                RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
                RD_KAFKA_V_END);
        TEST_ASSERT(!err,
                    "producev() failed: %s", rd_kafka_err2str(err));
        exp_msgid++;

        for (i = 1 ; i < msgcnt ; i++, exp_msgid++) {
                err = rd_kafka_producev(
                        rk,
                        RD_KAFKA_V_TOPIC(topic),
                        RD_KAFKA_V_PARTITION(0),
                        RD_KAFKA_V_VALUE(&i, sizeof(i)),
                        RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
                        RD_KAFKA_V_HEADER("msgid", &i, sizeof(i)),
                        RD_KAFKA_V_HEADER("static", "hey", -1),
                        RD_KAFKA_V_HEADER("multi", "multi1", -1),
                        RD_KAFKA_V_HEADER("multi", "multi2", 6),
                        RD_KAFKA_V_HEADER("multi", "multi3", strlen("multi3")),
                        RD_KAFKA_V_HEADER("null", NULL, 0),
                        RD_KAFKA_V_HEADER("empty", "", 0),
                        RD_KAFKA_V_END);
                TEST_ASSERT(!err,
                            "producev() failed: %s", rd_kafka_err2str(err));
        }

        /* Reset expected message id for dr */
        exp_msgid = 0;

        /* Wait for timeouts and delivery reports */
        rd_kafka_flush(rk, tmout_multip(5000));

        rd_kafka_destroy(rk);
}
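The consumer side that verifies these headers is not included here; a sketch of how consumed headers could be inspected with rd_kafka_message_headers() and rd_kafka_header_get_all(). The helper name and the message source are assumptions:

/* Sketch: iterate all headers of a consumed message; `rkmessage` is
 * assumed to come from rd_kafka_consumer_poll(). */
static void dump_headers (const rd_kafka_message_t *rkmessage) {
        rd_kafka_headers_t *hdrs;
        rd_kafka_resp_err_t err;
        size_t idx = 0;
        const char *name;
        const void *value;
        size_t size;

        err = rd_kafka_message_headers(rkmessage, &hdrs);
        if (err == RD_KAFKA_RESP_ERR__NOENT)
                return; /* Message has no headers */
        else if (err)
                TEST_FAIL("message_headers() failed: %s",
                          rd_kafka_err2str(err));

        while (!rd_kafka_header_get_all(hdrs, idx++, &name, &value, &size))
                TEST_SAY("  header \"%s\" (%"PRIusz" bytes)\n", name, size);
}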
Example #12
int main_0041_fetch_max_bytes (int argc, char **argv) {
	const char *topic = test_mk_topic_name(__FUNCTION__, 1);
	const int partition = 0;
	const int msgcnt = 2*1000;
	const int MAX_BYTES = 100000;
	uint64_t testid;
	rd_kafka_conf_t *conf;
	rd_kafka_t *rk;
	rd_kafka_topic_t *rkt;

	test_conf_init(NULL, NULL, 60);
	
	testid = test_id_generate();
	rk = test_create_producer();
	rkt = test_create_producer_topic(rk, topic, NULL);

	test_produce_msgs(rk, rkt, testid, partition, 0, msgcnt/2, NULL, MAX_BYTES/10);
	test_produce_msgs(rk, rkt, testid, partition, msgcnt/2, msgcnt/2, NULL, MAX_BYTES*5);

	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);

	TEST_SAY("Creating consumer\n");
	test_conf_init(&conf, NULL, 0);

	test_conf_set(conf, "fetch.message.max.bytes", tsprintf("%d", MAX_BYTES));
	
	rk = test_create_consumer(NULL, NULL, conf, NULL);
	rkt = rd_kafka_topic_new(rk, topic, NULL);

	test_consumer_start("CONSUME", rkt, partition,
			    RD_KAFKA_OFFSET_BEGINNING);
	test_consume_msgs("CONSUME", rkt, testid, partition, TEST_NO_SEEK,
			  0, msgcnt, 1);
	test_consumer_stop("CONSUME", rkt, partition);

	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);

	return 0;
}
Example #13
int main_0037_destroy_hang_local (int argc, char **argv) {
        int fails = 0;

	test_conf_init(NULL, NULL, 30);

	fails += legacy_consumer_early_destroy();

        if (fails > 0)
                TEST_FAIL("See %d previous error(s)\n", fails);

        return 0;
}
Example #14
static void do_test_apis (rd_kafka_type_t cltype) {
        rd_kafka_t *rk;
        rd_kafka_conf_t *conf;
        rd_kafka_queue_t *mainq;

        /* Get the available brokers, but use a separate rd_kafka_t instance
         * so we don't jinx the tests by having up-to-date metadata. */
        avail_brokers = test_get_broker_ids(NULL, &avail_broker_cnt);
        TEST_SAY("%"PRIusz" brokers in cluster "
                 "which will be used for replica sets\n",
                 avail_broker_cnt);

        do_test_unclean_destroy(cltype, 0/*tempq*/);
        do_test_unclean_destroy(cltype, 1/*mainq*/);

        test_conf_init(&conf, NULL, 60);
        test_conf_set(conf, "socket.timeout.ms", "10000");
        rk = test_create_handle(cltype, conf);

        mainq = rd_kafka_queue_get_main(rk);

        /* Create topics */
        do_test_CreateTopics("temp queue, op timeout 0",
                             rk, NULL, 0, 0);
        do_test_CreateTopics("temp queue, op timeout 15000",
                             rk, NULL, 15000, 0);
        do_test_CreateTopics("temp queue, op timeout 300, "
                             "validate only",
                             rk, NULL, 300, rd_true);
        do_test_CreateTopics("temp queue, op timeout 9000, validate_only",
                             rk, NULL, 9000, rd_true);
        do_test_CreateTopics("main queue, options", rk, mainq, -1, 0);

        /* Delete topics */
        do_test_DeleteTopics("temp queue, op timeout 0", rk, NULL, 0);
        do_test_DeleteTopics("main queue, op timeout 15000", rk, mainq, 1500);

        /* Create Partitions */
        do_test_CreatePartitions("temp queue, op timeout 6500", rk, NULL, 6500);
        do_test_CreatePartitions("main queue, op timeout 0", rk, mainq, 0);

        /* AlterConfigs */
        do_test_AlterConfigs(rk, mainq);

        /* DescribeConfigs */
        do_test_DescribeConfigs(rk, mainq);

        rd_kafka_queue_destroy(mainq);

        rd_kafka_destroy(rk);

        free(avail_brokers);
}
Example #15
/**
 * Enable statistics with a set interval, make sure the stats callbacks are
 * called within reasonable intervals.
 */
static void do_test_stats_timer (void) {
    rd_kafka_t *rk;
    rd_kafka_conf_t *conf;
    const int exp_calls = 10;
    char errstr[512];
    struct state state;
    test_timing_t t_new;

    memset(&state, 0, sizeof(state));

    state.interval = 600*1000;

    test_conf_init(&conf, NULL, 200);

    test_conf_set(conf, "statistics.interval.ms", "600");
    rd_kafka_conf_set_stats_cb(conf, stats_cb);
    rd_kafka_conf_set_opaque(conf, &state);


    TIMING_START(&t_new, "rd_kafka_new()");
    rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
    TIMING_STOP(&t_new);
    if (!rk)
        TEST_FAIL("Failed to create instance: %s\n", errstr);

    TEST_SAY("Starting wait loop for %d expected stats_cb calls "
             "with an interval of %dms\n",
             exp_calls, state.interval/1000);


    while (state.calls < exp_calls) {
        test_timing_t t_poll;
        TIMING_START(&t_poll, "rd_kafka_poll()");
        rd_kafka_poll(rk, 100);
        TIMING_STOP(&t_poll);

        if (TIMING_DURATION(&t_poll) > 150*1000)
            TEST_WARN("rd_kafka_poll(rk,100) "
                      "took more than 50%% extra\n");
    }

    rd_kafka_destroy(rk);

    if (state.calls > exp_calls)
        TEST_SAY("Got more calls than expected: %d > %d\n",
                 state.calls, exp_calls);

    if (state.fails)
        TEST_FAIL("%d/%d intervals failed\n", state.fails, state.calls);
    else
        TEST_SAY("All %d intervals okay\n", state.calls);
}
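The stats_cb registered above is not part of this excerpt. A hedged sketch of such a callback, matching the signature rd_kafka_conf_set_stats_cb() expects; the ts_last field tracking the previous invocation is an assumed addition to struct state:

/* Hypothetical stats_cb: count invocations and flag intervals that
 * deviate badly from the configured statistics.interval.ms.
 * Returning 0 lets librdkafka free the json buffer. */
static int stats_cb (rd_kafka_t *rk, char *json, size_t json_len,
                     void *opaque) {
    struct state *state = opaque;
    const int64_t now = test_clock(); /* microseconds */

    if (state->ts_last) {
        int64_t elapsed = now - state->ts_last;

        /* Assumed tolerance: half to double the configured interval */
        if (elapsed < state->interval / 2 ||
            elapsed > state->interval * 2) {
            TEST_WARN("unexpected stats interval %"PRId64"us\n", elapsed);
            state->fails++;
        }
    }

    state->ts_last = now;
    state->calls++;

    return 0;
}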
Example #16
static void do_test_apis (rd_kafka_type_t cltype) {
        rd_kafka_t *rk;
        char errstr[512];
        rd_kafka_queue_t *mainq, *backgroundq;
        rd_kafka_conf_t *conf;

        mtx_init(&last_event_lock, mtx_plain);
        cnd_init(&last_event_cnd);

        do_test_unclean_destroy(cltype, 0/*tempq*/);
        do_test_unclean_destroy(cltype, 1/*mainq*/);

        test_conf_init(&conf, NULL, 0);
        /* Remove brokers, if any, since this is a local test and we
         * rely on the controller not being found. */
        test_conf_set(conf, "bootstrap.servers", "");
        test_conf_set(conf, "socket.timeout.ms", MY_SOCKET_TIMEOUT_MS_STR);
        /* For use with the background queue */
        rd_kafka_conf_set_background_event_cb(conf, background_event_cb);

        rk = rd_kafka_new(cltype, conf, errstr, sizeof(errstr));
        TEST_ASSERT(rk, "kafka_new(%d): %s", cltype, errstr);

        mainq = rd_kafka_queue_get_main(rk);
        backgroundq = rd_kafka_queue_get_background(rk);

        do_test_options(rk);

        do_test_CreateTopics("temp queue, no options", rk, NULL, 0, 0);
        do_test_CreateTopics("temp queue, no options, background_event_cb",
                             rk, backgroundq, 1, 0);
        do_test_CreateTopics("temp queue, options", rk, NULL, 0, 1);
        do_test_CreateTopics("main queue, options", rk, mainq, 0, 1);

        do_test_DeleteTopics("temp queue, no options", rk, NULL, 0);
        do_test_DeleteTopics("temp queue, options", rk, NULL, 1);
        do_test_DeleteTopics("main queue, options", rk, mainq, 1);

        do_test_mix(rk, mainq);

        do_test_configs(rk, mainq);

        rd_kafka_queue_destroy(backgroundq);
        rd_kafka_queue_destroy(mainq);

        rd_kafka_destroy(rk);

        mtx_destroy(&last_event_lock);
        cnd_destroy(&last_event_cnd);

}
Example #17
int main_0001_multiobj (int argc, char **argv) {
	int partition = RD_KAFKA_PARTITION_UA; /* random */
	int i;
	const int NUM_ITER = 10;
        const char *topic = NULL;

	TEST_SAY("Creating and destroying %i kafka instances\n", NUM_ITER);

	/* Create, use and destroy NUM_ITER kafka instances. */
	for (i = 0 ; i < NUM_ITER ; i++) {
		rd_kafka_t *rk;
		rd_kafka_topic_t *rkt;
		rd_kafka_conf_t *conf;
		rd_kafka_topic_conf_t *topic_conf;
		char msg[128];
                test_timing_t t_destroy;

		test_conf_init(&conf, &topic_conf, 30);

                if (!topic)
                        topic = test_mk_topic_name("0001", 0);

		rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

		rkt = rd_kafka_topic_new(rk, topic, topic_conf);
		if (!rkt)
			TEST_FAIL("Failed to create topic for "
				  "rdkafka instance #%i: %s\n",
				  i, rd_kafka_err2str(rd_kafka_errno2err(errno)));

		rd_snprintf(msg, sizeof(msg), "%s test message for iteration #%i",
			 argv[0], i);

		/* Produce a message */
		rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
				 msg, strlen(msg), NULL, 0, NULL);
		
		/* Wait for it to be sent (and possibly acked) */
		rd_kafka_flush(rk, -1);

		/* Destroy topic */
		rd_kafka_topic_destroy(rkt);

		/* Destroy rdkafka instance */
                TIMING_START(&t_destroy, "rd_kafka_destroy()");
		rd_kafka_destroy(rk);
                TIMING_STOP(&t_destroy);
	}

	return 0;
}
Example #18
rd_kafka_topic_t *test_create_consumer_topic (rd_kafka_t *rk,
                                              const char *topic) {
	rd_kafka_topic_t *rkt;
	rd_kafka_topic_conf_t *topic_conf;

	test_conf_init(NULL, &topic_conf, 20);

	rkt = rd_kafka_topic_new(rk, topic, topic_conf);
	if (!rkt)
		TEST_FAIL("Failed to create topic: %s\n",
                          rd_kafka_err2str(rd_kafka_errno2err(errno)));

	return rkt;
}
Example #19
/**
 * @brief Verify that an unclean rd_kafka_destroy() does not hang.
 */
static void do_test_unclean_destroy (rd_kafka_type_t cltype, int with_mainq) {
        rd_kafka_t *rk;
        char errstr[512];
        rd_kafka_conf_t *conf;
        rd_kafka_queue_t *q;
        rd_kafka_event_t *rkev;
        rd_kafka_DeleteTopic_t *topic;
        test_timing_t t_destroy;

        test_conf_init(&conf, NULL, 0);
        /* Remove brokers, if any, since this is a local test and we
         * rely on the controller not being found. */
        test_conf_set(conf, "bootstrap.servers", "");
        test_conf_set(conf, "socket.timeout.ms", "60000");

        rk = rd_kafka_new(cltype, conf, errstr, sizeof(errstr));
        TEST_ASSERT(rk, "kafka_new(%d): %s", cltype, errstr);

        TEST_SAY(_C_MAG "[ Test unclean destroy for %s using %s]\n", rd_kafka_name(rk),
                 with_mainq ? "mainq" : "tempq");

        if (with_mainq)
                q = rd_kafka_queue_get_main(rk);
        else
                q = rd_kafka_queue_new(rk);

        topic = rd_kafka_DeleteTopic_new("test");
        rd_kafka_DeleteTopics(rk, &topic, 1, NULL, q);
        rd_kafka_DeleteTopic_destroy(topic);

        /* We're not expecting a result yet since DeleteTopics will attempt
         * to look up the controller for socket.timeout.ms (1 minute). */
        rkev = rd_kafka_queue_poll(q, 100);
        TEST_ASSERT(!rkev, "Did not expect result: %s", rd_kafka_event_name(rkev));

        rd_kafka_queue_destroy(q);

        TEST_SAY("Giving rd_kafka_destroy() 5s to finish, "
                 "despite Admin API request being processed\n");
        test_timeout_set(5);
        TIMING_START(&t_destroy, "rd_kafka_destroy()");
        rd_kafka_destroy(rk);
        TIMING_STOP(&t_destroy);

        /* Restore timeout */
        test_timeout_set(60);
}
Example #20
static void legacy_consume_many (char **topics, int topic_cnt, uint64_t testid){
	rd_kafka_t *rk;
        test_timing_t t_rkt_create;
        int i;
	rd_kafka_topic_t **rkts;
	int msg_base = 0;

	TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__);

	test_conf_init(NULL, NULL, 60);

	rk = test_create_consumer(NULL, NULL, NULL, NULL);

	TEST_SAY("Creating %d topic objects\n", topic_cnt);
		 
	rkts = malloc(sizeof(*rkts) * topic_cnt);
	TIMING_START(&t_rkt_create, "Topic object create");
	for (i = 0 ; i < topic_cnt ; i++)
		rkts[i] = test_create_topic_object(rk, topics[i], NULL);
	TIMING_STOP(&t_rkt_create);

	TEST_SAY("Start consumer for %d topics\n", topic_cnt);
	for (i = 0 ; i < topic_cnt ; i++)
		test_consumer_start("legacy", rkts[i], 0,
				    RD_KAFKA_OFFSET_BEGINNING);
	
	TEST_SAY("Consuming from %d messages from each %d topics\n",
		 msgs_per_topic, topic_cnt);
	for (i = 0 ; i < topic_cnt ; i++) {
		test_consume_msgs("legacy", rkts[i], testid, 0, TEST_NO_SEEK,
				  msg_base, msgs_per_topic, 1);
		msg_base += msgs_per_topic;
	}

	TEST_SAY("Stopping consumers\n");
	for (i = 0 ; i < topic_cnt ; i++)
		test_consumer_stop("legacy", rkts[i], 0);


	TEST_SAY("Destroying %d topic objects\n", topic_cnt);
	for (i = 0 ; i < topic_cnt ; i++)
		rd_kafka_topic_destroy(rkts[i]);

	free(rkts);

	rd_kafka_destroy(rk);
}
Example #21
/**
 * @brief Produce messages compressed with \p codec
 */
static void produce_msgs (const char *topic, int partition, uint64_t testid,
                          int msgcnt, const char *broker_version,
                          const char *codec) {
        rd_kafka_conf_t *conf;
        rd_kafka_t *rk;
        int i;
        char key[128], buf[100];
        int msgcounter = msgcnt;

        test_conf_init(&conf, NULL, 0);
        rd_kafka_conf_set_dr_cb(conf, test_dr_cb);
        test_conf_set(conf, "compression.codec", codec);
        test_conf_set(conf, "broker.version.fallback", broker_version);
        if (strstr(broker_version, "0.10."))
                test_conf_set(conf, "api.version.request", "true");
        else
                test_conf_set(conf, "api.version.request", "false");
        /* Make sure to trigger a bunch of MessageSets */
        test_conf_set(conf, "batch.num.messages", tsprintf("%d", msgcnt/5));
        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

        for (i = 0 ; i < msgcnt ; i++) {
                rd_kafka_resp_err_t err;

                test_prepare_msg(testid, partition, i,
                                 buf, sizeof(buf), key, sizeof(key));

                err = rd_kafka_producev(rk,
                                        RD_KAFKA_V_TOPIC(topic),
                                        RD_KAFKA_V_VALUE(buf, sizeof(buf)),
                                        RD_KAFKA_V_KEY(key, sizeof(key)),
                                        RD_KAFKA_V_TIMESTAMP(my_timestamp.min),
                                        RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
                                        RD_KAFKA_V_OPAQUE(&msgcounter),
                                        RD_KAFKA_V_END);
                if (err)
                        TEST_FAIL("producev() failed at msg #%d/%d: %s",
                                  i, msgcnt, rd_kafka_err2str(err));
        }

        TEST_SAY("Waiting for %d messages to be produced\n", msgcounter);
        while (msgcounter > 0)
                rd_kafka_poll(rk, 100);

        rd_kafka_destroy(rk);
}
Example #22
rd_kafka_t *test_create_consumer (const char *group_id,
				  void (*rebalance_cb) (
					  rd_kafka_t *rk,
					  rd_kafka_resp_err_t err,
					  rd_kafka_topic_partition_list_t
					  *partitions,
					  void *opaque),
                                  rd_kafka_topic_conf_t *default_topic_conf,
				  void *opaque) {
	rd_kafka_t *rk;
	rd_kafka_conf_t *conf;
	char errstr[512];
	char tmp[64];

	test_conf_init(&conf, NULL, 20);

        if (group_id) {
                if (rd_kafka_conf_set(conf, "group.id", group_id,
                                      errstr, sizeof(errstr)) !=
                    RD_KAFKA_CONF_OK)
                        TEST_FAIL("Conf failed: %s\n", errstr);
        }

	rd_snprintf(tmp, sizeof(tmp), "%d", test_session_timeout_ms);
	if (rd_kafka_conf_set(conf, "session.timeout.ms", tmp,
			      errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
		TEST_FAIL("Conf failed: %s\n", errstr);

	rd_kafka_conf_set_opaque(conf, opaque);

	if (rebalance_cb)
		rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);

        if (default_topic_conf)
                rd_kafka_conf_set_default_topic_conf(conf, default_topic_conf);

	/* Create kafka instance */
	rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
	if (!rk)
		TEST_FAIL("Failed to create rdkafka instance: %s\n", errstr);

	TEST_SAY("Created    kafka instance %s\n", rd_kafka_name(rk));

	return rk;
}
Example #23
/**
 * Trigger an empty cgrp commit (issue #803)
 */
static void do_empty_commit (void) {
	rd_kafka_t *rk;
	char group_id[64];
	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *tconf;
	rd_kafka_resp_err_t err, expect;

	test_conf_init(&conf, &tconf, 20);
	test_conf_set(conf, "enable.auto.commit", "false");
	test_topic_conf_set(tconf, "auto.offset.reset", "earliest");
	test_str_id_generate(group_id, sizeof(group_id));

	TEST_SAY(_C_MAG "[ do_empty_commit group.id %s ]\n", group_id);

	rk = test_create_consumer(group_id, NULL, conf, tconf, NULL);

	test_consumer_subscribe(rk, topic);

	test_consumer_poll("consume", rk, testid, -1, -1, 100, NULL);

	TEST_SAY("First commit\n");
	expect = RD_KAFKA_RESP_ERR_NO_ERROR;
	err = rd_kafka_commit_queue(rk, NULL, NULL,
				    empty_offset_commit_cb, &expect);
	if (err != expect)
		TEST_FAIL("commit failed: %s", rd_kafka_err2str(err));
	else
		TEST_SAY("First commit returned %s\n",
			 rd_kafka_err2str(err));

	TEST_SAY("Second commit, should be empty\n");
	expect = RD_KAFKA_RESP_ERR__NO_OFFSET;
	err = rd_kafka_commit_queue(rk, NULL, NULL,
				    empty_offset_commit_cb, &expect);
	if (err != RD_KAFKA_RESP_ERR__NO_OFFSET)
		TEST_FAIL("unexpected commit result, wanted NO_OFFSET, got: %s",
			  rd_kafka_err2str(err));
	else
		TEST_SAY("Second commit returned %s\n",
			 rd_kafka_err2str(err));

	test_consumer_close(rk);

	rd_kafka_destroy(rk);
}
Example #24
rd_kafka_t *test_create_producer (void) {
	rd_kafka_t *rk;
	rd_kafka_conf_t *conf;
	char errstr[512];

	test_conf_init(&conf, NULL, 20);

	rd_kafka_conf_set_dr_cb(conf, test_dr_cb);

	/* Create kafka instance */
	rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
	if (!rk)
		TEST_FAIL("Failed to create rdkafka instance: %s\n", errstr);

	TEST_SAY("Created    kafka instance %s\n", rd_kafka_name(rk));

	return rk;
}
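test_dr_cb itself is defined elsewhere in the test suite; a minimal callback with the same legacy dr_cb signature might look like this (illustrative only, the real helper also verifies message contents):

/* Illustrative dr_cb-style callback: fail the test on any
 * delivery error. */
static void my_dr_cb (rd_kafka_t *rk, void *payload, size_t len,
		      rd_kafka_resp_err_t err, void *opaque,
		      void *msg_opaque) {
	if (err)
		TEST_FAIL("Message delivery failed: %s",
			  rd_kafka_err2str(err));
}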
Example #25
static void do_test_consumer (const char *topic) {

        rd_kafka_conf_t *conf;
        int i;
        rd_kafka_t *rk;

        TEST_SAY(_C_MAG "[ %s ]\n" _C_CLR, __FUNCTION__);

        test_conf_init(&conf, NULL, 0);

        rd_kafka_conf_interceptor_add_on_new(conf, "on_new_consumer",
                                             on_new_consumer, NULL);

        test_conf_set(conf, "auto.offset.reset", "earliest");

        /* Create consumer */
        rk = test_create_consumer(topic, NULL, conf, NULL);

        test_consumer_subscribe(rk, topic);

        /* Consume messages (-1 for the one that failed producing) */
        test_consumer_poll("interceptors.consume", rk, 0, -1, -1, msgcnt-1,
                           NULL);

        /* Verify on_consume */
        for (i = 0 ; i < msgcnt-1 ; i++) {
                struct msg_state *msg = &msgs[i];
                msg_verify_ic_cnt(msg, "on_consume", msg->bits[_ON_CONSUME],
                                  consumer_ic_cnt);
        }

        /* Verify that the produce-failed message didn't have
         * interceptors called */
        msg_verify_ic_cnt(&msgs[msgcnt-1], "on_consume",
                          msgs[msgcnt-1].bits[_ON_CONSUME], 0);

        test_consumer_close(rk);

        verify_ic_cnt("on_commit", on_commit_bits, consumer_ic_cnt);

        rd_kafka_destroy(rk);
}
Example #26
static void subscribe_consume_many (char **topics, int topic_cnt,
				    uint64_t testid) {
	rd_kafka_t *rk;
        int i;
	rd_kafka_topic_conf_t *tconf;
	rd_kafka_topic_partition_list_t *parts;
	rd_kafka_resp_err_t err;
	test_msgver_t mv;

	TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__);

	test_conf_init(NULL, &tconf, 60);
	test_topic_conf_set(tconf, "auto.offset.reset", "earliest");
	rk = test_create_consumer(__FUNCTION__, NULL, NULL, tconf);

	parts = rd_kafka_topic_partition_list_new(topic_cnt);
	for (i = 0 ; i < topic_cnt ; i++)
		rd_kafka_topic_partition_list_add(parts, topics[i],
						  RD_KAFKA_PARTITION_UA);

	TEST_SAY("Subscribing to %d topics\n", topic_cnt);
	err = rd_kafka_subscribe(rk, parts);
	if (err)
		TEST_FAIL("subscribe() failed: %s\n", rd_kafka_err2str(err));

	rd_kafka_topic_partition_list_destroy(parts);

	test_msgver_init(&mv, testid);
	test_consumer_poll("consume.subscribe", rk, testid,
			   -1, 0, msgs_per_topic * topic_cnt, &mv);

	for (i = 0 ; i < topic_cnt ; i++)
		test_msgver_verify_part("subscribe", &mv, TEST_MSGVER_ALL_PART,
					topics[i], 0, i * msgs_per_topic,
					msgs_per_topic);
	test_msgver_clear(&mv);

	test_consumer_close(rk);

	rd_kafka_destroy(rk);
}
Example #27
int main_0036_partial_fetch (int argc, char **argv) {
	const char *topic = test_mk_topic_name(__FUNCTION__, 1);
	const int partition = 0;
	const int msgcnt = 100;
	const int msgsize = 1000;
	uint64_t testid;
	rd_kafka_conf_t *conf;
	rd_kafka_t *rk;
	rd_kafka_topic_t *rkt;

	TEST_SAY("Producing %d messages of size %d to %s [%d]\n",
		 msgcnt, (int)msgsize, topic, partition);
	testid = test_id_generate();
	rk = test_create_producer();
	rkt = test_create_producer_topic(rk, topic, NULL);

	test_produce_msgs(rk, rkt, testid, partition, 0, msgcnt, NULL, msgsize);

	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);

	TEST_SAY("Creating consumer\n");
	test_conf_init(&conf, NULL, 0);
	/* This should fetch 1.5 messages per fetch, thus resulting in
	 * partial fetches, hopefully. */
	test_conf_set(conf, "fetch.message.max.bytes", "1500");
	
	rk = test_create_consumer(NULL, NULL, conf, NULL, NULL);
	rkt = rd_kafka_topic_new(rk, topic, NULL);

	test_consumer_start("CONSUME", rkt, partition,
			    RD_KAFKA_OFFSET_BEGINNING);
	test_consume_msgs("CONSUME", rkt, testid, partition, TEST_NO_SEEK,
			  0, msgcnt, 1);
	test_consumer_stop("CONSUME", rkt, partition);

	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);

	return 0;
}
Example #28
/**
 * Create high-level consumer subscribing to \p topic from BEGINNING
 * and expects \p exp_msgcnt messages with matching \p testid
 * Destroys consumer when done.
 *
 * If \p group_id is NULL a new unique group is generated
 */
void
test_consume_msgs_easy (const char *group_id, const char *topic,
                        uint64_t testid, int exp_msgcnt) {
        rd_kafka_t *rk;
        rd_kafka_topic_conf_t *tconf;
        rd_kafka_resp_err_t err;
        rd_kafka_topic_partition_list_t *topics;
	char grpid0[64];

        test_conf_init(NULL, &tconf, 0);

	if (!group_id)
		group_id = test_str_id_generate(grpid0, sizeof(grpid0));

        test_topic_conf_set(tconf, "auto.offset.reset", "smallest");
        rk = test_create_consumer(group_id, NULL, tconf, NULL);

        rd_kafka_poll_set_consumer(rk);

        topics = rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add(topics, topic, RD_KAFKA_PARTITION_UA);

        TEST_SAY("Subscribing to topic %s in group %s "
                 "(expecting %d msgs with testid %"PRIu64")\n",
                 topic, group_id, exp_msgcnt, testid);

        err = rd_kafka_subscribe(rk, topics);
        if (err)
                TEST_FAIL("Failed to subscribe to %s: %s\n",
                          topic, rd_kafka_err2str(err));

        rd_kafka_topic_partition_list_destroy(topics);

        /* Consume messages */
        test_consumer_poll("consume.easy", rk, testid, -1, -1, exp_msgcnt);

        test_consumer_close(rk);

        rd_kafka_destroy(rk);
}
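A typical call, with hypothetical values from a preceding produce phase:

/* Hypothetical usage: NULL group_id makes the helper generate a
 * unique group; consume everything produced with this testid. */
test_consume_msgs_easy(NULL, topic, testid, msgcnt);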
Example #29
static void test_producer_partition_cnt_change (void) {
	rd_kafka_t *rk;
	rd_kafka_conf_t *conf;
	rd_kafka_topic_t *rkt;
	const char *topic = test_mk_topic_name(__FUNCTION__, 1);
	const int partition_cnt = 4;
	int msgcnt = 100000;
	test_timing_t t_destroy;
	int produced = 0;

	test_kafka_topics("--create --topic %s --replication-factor 1 "
			  "--partitions %d",
			  topic, partition_cnt/2);

	test_conf_init(&conf, NULL, 20);

	rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
	rkt = test_create_topic_object(rk, topic,
				       "message.timeout.ms",
                                       tsprintf("%d", tmout_multip(5000)),
                                       NULL);

	test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA, 0, msgcnt/2,
				 NULL, 100, &produced);

	test_kafka_topics("--alter --topic %s --partitions %d",
			  topic, partition_cnt);

	test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA,
				 msgcnt/2, msgcnt/2,
				 NULL, 100, &produced);

	test_wait_delivery(rk, &produced);

	rd_kafka_topic_destroy(rkt);

	TIMING_START(&t_destroy, "rd_kafka_destroy()");
	rd_kafka_destroy(rk);
	TIMING_STOP(&t_destroy);
}
Example #30
/**
 * @brief Verify that an unclean rd_kafka_destroy() does not hang.
 */
static void do_test_unclean_destroy (rd_kafka_type_t cltype, int with_mainq) {
        rd_kafka_t *rk;
        char errstr[512];
        rd_kafka_conf_t *conf;
        rd_kafka_queue_t *q;
        rd_kafka_NewTopic_t *topic;
        test_timing_t t_destroy;

        test_conf_init(&conf, NULL, 0);

        rk = rd_kafka_new(cltype, conf, errstr, sizeof(errstr));
        TEST_ASSERT(rk, "kafka_new(%d): %s", cltype, errstr);

        TEST_SAY(_C_MAG "[ Test unclean destroy for %s using %s]\n", rd_kafka_name(rk),
                 with_mainq ? "mainq" : "tempq");

        if (with_mainq)
                q = rd_kafka_queue_get_main(rk);
        else
                q = rd_kafka_queue_new(rk);

        topic = rd_kafka_NewTopic_new(test_mk_topic_name(__FUNCTION__, 1),
                                      3, 1, NULL, 0);
        rd_kafka_CreateTopics(rk, &topic, 1, NULL, q);
        rd_kafka_NewTopic_destroy(topic);

        rd_kafka_queue_destroy(q);

        TEST_SAY("Giving rd_kafka_destroy() 5s to finish, "
                 "despite Admin API request being processed\n");
        test_timeout_set(5);
        TIMING_START(&t_destroy, "rd_kafka_destroy()");
        rd_kafka_destroy(rk);
        TIMING_STOP(&t_destroy);

        /* Restore timeout */
        test_timeout_set(60);
}