Example #1
/**
 * @brief Test a mix of APIs using the same replyq.
 *
 *  - Create topics A,B
 *  - Delete topic B
 *  - Create topic C
 *  - Create extra partitions for topic D
 */
static void do_test_mix (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) {
        char *topics[] = { "topicA", "topicB", "topicC" };
        int cnt = 0;
        struct waiting {
                rd_kafka_event_type_t evtype;
                int seen;
        };
        struct waiting id1 = {RD_KAFKA_EVENT_CREATETOPICS_RESULT};
        struct waiting id2 = {RD_KAFKA_EVENT_DELETETOPICS_RESULT};
        struct waiting id3 = {RD_KAFKA_EVENT_CREATETOPICS_RESULT};
        struct waiting id4 = {RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT};

        TEST_SAY(_C_MAG "[ Mixed mode test on %s]\n", rd_kafka_name(rk));

        test_CreateTopics_simple(rk, rkqu, topics, 2, 1, &id1);
        test_DeleteTopics_simple(rk, rkqu, &topics[1], 1, &id2);
        test_CreateTopics_simple(rk, rkqu, &topics[2], 1, 1, &id3);
        test_CreatePartitions_simple(rk, rkqu, "topicD", 15, &id4);

        while (cnt < 4) {
                rd_kafka_event_t *rkev;
                struct waiting *w;

                rkev = rd_kafka_queue_poll(rkqu, -1);
                TEST_ASSERT(rkev);

                TEST_SAY("Got event %s: %s\n",
                         rd_kafka_event_name(rkev),
                         rd_kafka_event_error_string(rkev));

                w = rd_kafka_event_opaque(rkev);
                TEST_ASSERT(w);

                TEST_ASSERT(w->evtype == rd_kafka_event_type(rkev),
                            "Expected evtype %d, not %d (%s)",
                            w->evtype, rd_kafka_event_type(rkev),
                            rd_kafka_event_name(rkev));

                TEST_ASSERT(w->seen == 0, "Duplicate results");

                w->seen++;
                cnt++;

                rd_kafka_event_destroy(rkev);
        }
}
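The test_CreateTopics_simple()/test_DeleteTopics_simple() helpers are defined elsewhere in the test suite; matching result events back to the struct waiting instances relies on a per-request opaque that rd_kafka_event_opaque() returns on the result event. A minimal sketch of how such an opaque can be attached through AdminOptions (the function name is illustrative, not the actual helper implementation):

static void create_topic_with_opaque (rd_kafka_t *rk, rd_kafka_queue_t *rkqu,
                                      const char *topic, void *opaque) {
        rd_kafka_NewTopic_t *nt =
                rd_kafka_NewTopic_new(topic, 1 /*num_partitions*/,
                                      -1 /*default replication*/, NULL, 0);
        rd_kafka_AdminOptions_t *options =
                rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATETOPICS);

        /* The opaque is returned verbatim by rd_kafka_event_opaque() on the
         * corresponding CREATETOPICS_RESULT event. */
        rd_kafka_AdminOptions_set_opaque(options, opaque);

        rd_kafka_CreateTopics(rk, &nt, 1, options, rkqu);

        /* Both objects are copied by CreateTopics and may be destroyed now. */
        rd_kafka_AdminOptions_destroy(options);
        rd_kafka_NewTopic_destroy(nt);
}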
Example #2
/**
 * @brief Local test: test event generation
 */
int main_0039_event (int argc, char **argv) {
        rd_kafka_t *rk;
        rd_kafka_conf_t *conf;
        rd_kafka_queue_t *eventq;
        int waitevent = 1;

        /* Set up a config with ERROR events enabled and
         * configure an invalid broker so that _TRANSPORT or ALL_BROKERS_DOWN
         * is promptly generated. */

        conf = rd_kafka_conf_new();

        rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_ERROR);
        rd_kafka_conf_set(conf, "bootstrap.servers", "0:65534", NULL, 0);

        /* Create kafka instance */
        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

        eventq = rd_kafka_queue_get_main(rk);

        while (waitevent) {
                rd_kafka_event_t *rkev;
                rkev = rd_kafka_queue_poll(eventq, 1000);
                switch (rd_kafka_event_type(rkev))
                {
                case RD_KAFKA_EVENT_ERROR:
                        TEST_SAY("Got %s%s event: %s: %s\n",
                                 rd_kafka_event_error_is_fatal(rkev) ?
                                 "FATAL " : "",
                                 rd_kafka_event_name(rkev),
                                 rd_kafka_err2name(rd_kafka_event_error(rkev)),
                                 rd_kafka_event_error_string(rkev));
                        waitevent = 0;
                        break;
                default:
                        TEST_SAY("Unhandled event: %s\n",
                                 rd_kafka_event_name(rkev));
                        break;
                }
                rd_kafka_event_destroy(rkev);
        }

        rd_kafka_queue_destroy(eventq);

        /* Destroy rdkafka instance */
        TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
        rd_kafka_destroy(rk);

        return 0;
}
Example #3
static void do_test_CreateTopics (const char *what,
                                  rd_kafka_t *rk, rd_kafka_queue_t *useq,
                                  int op_timeout, rd_bool_t validate_only) {
        rd_kafka_queue_t *q = useq ? useq : rd_kafka_queue_new(rk);
#define MY_NEW_TOPICS_CNT 6
        char *topics[MY_NEW_TOPICS_CNT];
        rd_kafka_NewTopic_t *new_topics[MY_NEW_TOPICS_CNT];
        rd_kafka_AdminOptions_t *options = NULL;
        rd_kafka_resp_err_t exp_topicerr[MY_NEW_TOPICS_CNT] = {0};
        rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
        /* Expected topics in metadata */
        rd_kafka_metadata_topic_t exp_mdtopics[MY_NEW_TOPICS_CNT] = {{0}};
        int exp_mdtopic_cnt = 0;
        /* Not expected topics in metadata */
        rd_kafka_metadata_topic_t exp_not_mdtopics[MY_NEW_TOPICS_CNT] = {{0}};
        int exp_not_mdtopic_cnt = 0;
        int i;
        char errstr[512];
        const char *errstr2;
        rd_kafka_resp_err_t err;
        test_timing_t timing;
        rd_kafka_event_t *rkev;
        const rd_kafka_CreateTopics_result_t *res;
        const rd_kafka_topic_result_t **restopics;
        size_t restopic_cnt;
        int metadata_tmout;
        int num_replicas = (int)avail_broker_cnt;
        int32_t *replicas;

        /* Set up replicas */
        replicas = rd_alloca(sizeof(*replicas) * num_replicas);
        for (i = 0 ; i < num_replicas ; i++)
                replicas[i] = avail_brokers[i];

        TEST_SAY(_C_MAG "[ %s CreateTopics with %s, "
                 "op_timeout %d, validate_only %d ]\n",
                 rd_kafka_name(rk), what, op_timeout, validate_only);

        /**
         * Construct NewTopic array with different properties for
         * different partitions.
         */
        for (i = 0 ; i < MY_NEW_TOPICS_CNT ; i++) {
                char *topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
                int num_parts = i * 7 + 1;
                int set_config = (i & 1);
                int add_invalid_config = (i == 1);
                int set_replicas = !(i % 3);
                rd_kafka_resp_err_t this_exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;

                topics[i] = topic;
                new_topics[i] = rd_kafka_NewTopic_new(topic,
                                                      num_parts,
                                                      set_replicas ? -1 :
                                                      num_replicas,
                                                      NULL, 0);

                if (set_config) {
                        /*
                         * Add various configuration properties
                         */
                        err = rd_kafka_NewTopic_set_config(
                                new_topics[i], "compression.type", "lz4");
                        TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));

                        err = rd_kafka_NewTopic_set_config(
                                new_topics[i], "delete.retention.ms", "900");
                        TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
                }

                if (add_invalid_config) {
                        /* Add invalid config property */
                        err = rd_kafka_NewTopic_set_config(
                                new_topics[i],
                                "dummy.doesntexist",
                                "broker is verifying this");
                        TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
                        this_exp_err = RD_KAFKA_RESP_ERR_INVALID_CONFIG;
                }

                TEST_SAY("Expected result for topic #%d: %s "
                         "(set_config=%d, add_invalid_config=%d, "
                         "set_replicas=%d)\n",
                         i, rd_kafka_err2name(this_exp_err),
                         set_config, add_invalid_config, set_replicas);

                if (set_replicas) {
                        int32_t p;

                        /*
                         * Set valid replica assignments
                         */
                        for (p = 0 ; p < num_parts ; p++) {
                                err = rd_kafka_NewTopic_set_replica_assignment(
                                        new_topics[i], p,
                                        replicas, num_replicas,
                                        errstr, sizeof(errstr));
                                TEST_ASSERT(!err, "%s", errstr);
                        }
                }

                if (this_exp_err || validate_only) {
                        exp_topicerr[i] = this_exp_err;
                        exp_not_mdtopics[exp_not_mdtopic_cnt++].topic = topic;

                } else {
                        exp_mdtopics[exp_mdtopic_cnt].topic = topic;
                        exp_mdtopics[exp_mdtopic_cnt].partition_cnt =
                                num_parts;
                        exp_mdtopic_cnt++;
                }
        }

        if (op_timeout != -1 || validate_only) {
                options = rd_kafka_AdminOptions_new(
                        rk, RD_KAFKA_ADMIN_OP_CREATETOPICS);

                if (op_timeout != -1) {
                        err = rd_kafka_AdminOptions_set_operation_timeout(
                                options, op_timeout, errstr, sizeof(errstr));
                        TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
                }

                if (validate_only) {
                        err = rd_kafka_AdminOptions_set_validate_only(
                                options, validate_only, errstr, sizeof(errstr));
                        TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
                }
        }

        TIMING_START(&timing, "CreateTopics");
        TEST_SAY("Call CreateTopics\n");
        rd_kafka_CreateTopics(rk, new_topics, MY_NEW_TOPICS_CNT,
                              options, q);
        TIMING_ASSERT_LATER(&timing, 0, 50);

        /* Poll result queue for CreateTopics result.
         * Print but otherwise ignore other event types
         * (typically generic Error events). */
        TIMING_START(&timing, "CreateTopics.queue_poll");
        do {
                rkev = rd_kafka_queue_poll(q, tmout_multip(20*1000));
                TEST_SAY("CreateTopics: got %s in %.3fms\n",
                         rd_kafka_event_name(rkev),
                         TIMING_DURATION(&timing) / 1000.0f);
                if (rd_kafka_event_error(rkev))
                        TEST_SAY("%s: %s\n",
                                 rd_kafka_event_name(rkev),
                                 rd_kafka_event_error_string(rkev));
        } while (rd_kafka_event_type(rkev) !=
                 RD_KAFKA_EVENT_CREATETOPICS_RESULT);

        /* Convert event to proper result */
        res = rd_kafka_event_CreateTopics_result(rkev);
        TEST_ASSERT(res, "expected CreateTopics_result, not %s",
                    rd_kafka_event_name(rkev));

        /* Expecting error */
        err = rd_kafka_event_error(rkev);
        errstr2 = rd_kafka_event_error_string(rkev);
        TEST_ASSERT(err == exp_err,
                    "expected CreateTopics to return %s, not %s (%s)",
                    rd_kafka_err2str(exp_err),
                    rd_kafka_err2str(err),
                    err ? errstr2 : "n/a");

        TEST_SAY("CreateTopics: returned %s (%s)\n",
                 rd_kafka_err2str(err), err ? errstr2 : "n/a");

        /* Extract topics */
        restopics = rd_kafka_CreateTopics_result_topics(res, &restopic_cnt);


        /* Scan topics for proper fields and expected failures. */
        for (i = 0 ; i < (int)restopic_cnt ; i++) {
                const rd_kafka_topic_result_t *terr = restopics[i];

                /* Verify that topic order matches our request. */
                if (strcmp(rd_kafka_topic_result_name(terr), topics[i]))
                        TEST_FAIL_LATER("Topic result order mismatch at #%d: "
                                        "expected %s, got %s",
                                        i, topics[i],
                                        rd_kafka_topic_result_name(terr));

                TEST_SAY("CreateTopics result: #%d: %s: %s: %s\n",
                         i,
                         rd_kafka_topic_result_name(terr),
                         rd_kafka_err2name(rd_kafka_topic_result_error(terr)),
                         rd_kafka_topic_result_error_string(terr));
                if (rd_kafka_topic_result_error(terr) != exp_topicerr[i])
                        TEST_FAIL_LATER(
                                "Expected %s, not %d: %s",
                                rd_kafka_err2name(exp_topicerr[i]),
                                rd_kafka_topic_result_error(terr),
                                rd_kafka_err2name(rd_kafka_topic_result_error(
                                                          terr)));
        }

        /**
         * Verify that the expected topics are created and the non-expected
         * are not. Allow it some time to propagate.
         */
        if (validate_only) {
                /* No topics should have been created, give it some time
                 * before checking. */
                rd_sleep(2);
                metadata_tmout = 5 * 1000;
        } else {
                if (op_timeout > 0)
                        metadata_tmout = op_timeout + 1000;
                else
                        metadata_tmout = 10 * 1000;
        }

        test_wait_metadata_update(rk,
                                  exp_mdtopics,
                                  exp_mdtopic_cnt,
                                  exp_not_mdtopics,
                                  exp_not_mdtopic_cnt,
                                  metadata_tmout);

        rd_kafka_event_destroy(rkev);

        for (i = 0 ; i < MY_NEW_TOPICS_CNT ; i++) {
                rd_kafka_NewTopic_destroy(new_topics[i]);
                rd_free(topics[i]);
        }

        if (options)
                rd_kafka_AdminOptions_destroy(options);

        if (!useq)
                rd_kafka_queue_destroy(q);

#undef MY_NEW_TOPICS_CNT
}
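The do/while loop above polls for the CREATETOPICS_RESULT event and merely prints any other events it encounters. A generic helper that waits for a specific event type and destroys everything else along the way could be factored out like this (a sketch, not part of the test suite):

static rd_kafka_event_t *
wait_event_type (rd_kafka_queue_t *q, rd_kafka_event_type_t want,
                 int timeout_ms) {
        rd_kafka_event_t *rkev;

        while ((rkev = rd_kafka_queue_poll(q, timeout_ms))) {
                if (rd_kafka_event_type(rkev) == want)
                        return rkev;

                if (rd_kafka_event_error(rkev))
                        TEST_SAY("%s: %s\n",
                                 rd_kafka_event_name(rkev),
                                 rd_kafka_event_error_string(rkev));

                /* Not the event we are waiting for: free it and keep polling. */
                rd_kafka_event_destroy(rkev);
        }

        return NULL; /* timed out */
}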
Example #4
/**
 * @brief Test deletion of topics
 */
static void do_test_DeleteTopics (const char *what,
                                  rd_kafka_t *rk, rd_kafka_queue_t *useq,
                                  int op_timeout) {
        rd_kafka_queue_t *q = useq ? useq : rd_kafka_queue_new(rk);
        const int skip_topic_cnt = 2;
#define MY_DEL_TOPICS_CNT 9
        char *topics[MY_DEL_TOPICS_CNT];
        rd_kafka_DeleteTopic_t *del_topics[MY_DEL_TOPICS_CNT];
        rd_kafka_AdminOptions_t *options = NULL;
        rd_kafka_resp_err_t exp_topicerr[MY_DEL_TOPICS_CNT] = {0};
        rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
        /* Expected topics in metadata */
        rd_kafka_metadata_topic_t exp_mdtopics[MY_DEL_TOPICS_CNT] = {{0}};
        int exp_mdtopic_cnt = 0;
        /* Not expected topics in metadata */
        rd_kafka_metadata_topic_t exp_not_mdtopics[MY_DEL_TOPICS_CNT] = {{0}};
        int exp_not_mdtopic_cnt = 0;
        int i;
        char errstr[512];
        const char *errstr2;
        rd_kafka_resp_err_t err;
        test_timing_t timing;
        rd_kafka_event_t *rkev;
        const rd_kafka_DeleteTopics_result_t *res;
        const rd_kafka_topic_result_t **restopics;
        size_t restopic_cnt;
        int metadata_tmout;

        TEST_SAY(_C_MAG "[ %s DeleteTopics with %s, op_timeout %d ]\n",
                 rd_kafka_name(rk), what, op_timeout);

        /**
         * Construct DeleteTopic array
         */
        for (i = 0 ; i < MY_DEL_TOPICS_CNT ; i++) {
                char *topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
                int notexist_topic = i >= MY_DEL_TOPICS_CNT - skip_topic_cnt;

                topics[i] = topic;

                del_topics[i] = rd_kafka_DeleteTopic_new(topic);

                if (notexist_topic)
                        exp_topicerr[i] =
                                RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
                else {
                        exp_topicerr[i] =
                                RD_KAFKA_RESP_ERR_NO_ERROR;

                        exp_mdtopics[exp_mdtopic_cnt++].topic = topic;
                }

                exp_not_mdtopics[exp_not_mdtopic_cnt++].topic = topic;
        }

        if (op_timeout != -1) {
                options = rd_kafka_AdminOptions_new(
                        rk, RD_KAFKA_ADMIN_OP_ANY);

                err = rd_kafka_AdminOptions_set_operation_timeout(
                        options, op_timeout, errstr, sizeof(errstr));
                TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
        }


        /* Create the topics first, minus the skip count. */
        test_CreateTopics_simple(rk, NULL, topics,
                                 MY_DEL_TOPICS_CNT-skip_topic_cnt,
                                 2/*num_partitions*/,
                                 NULL);

        /* Verify that topics are reported by metadata */
        test_wait_metadata_update(rk,
                                  exp_mdtopics, exp_mdtopic_cnt,
                                  NULL, 0,
                                  15*1000);

        TIMING_START(&timing, "DeleteTopics");
        TEST_SAY("Call DeleteTopics\n");
        rd_kafka_DeleteTopics(rk, del_topics, MY_DEL_TOPICS_CNT,
                              options, q);
        TIMING_ASSERT_LATER(&timing, 0, 50);

        /* Poll result queue for DeleteTopics result.
         * Print but otherwise ignore other event types
         * (typically generic Error events). */
        TIMING_START(&timing, "DeleteTopics.queue_poll");
        while (1) {
                rkev = rd_kafka_queue_poll(q, tmout_multip(20*1000));
                TEST_SAY("DeleteTopics: got %s in %.3fms\n",
                         rd_kafka_event_name(rkev),
                         TIMING_DURATION(&timing) / 1000.0f);
                if (rd_kafka_event_error(rkev))
                        TEST_SAY("%s: %s\n",
                                 rd_kafka_event_name(rkev),
                                 rd_kafka_event_error_string(rkev));

                if (rd_kafka_event_type(rkev) ==
                    RD_KAFKA_EVENT_DELETETOPICS_RESULT)
                        break;

                rd_kafka_event_destroy(rkev);
        }

        /* Convert event to proper result */
        res = rd_kafka_event_DeleteTopics_result(rkev);
        TEST_ASSERT(res, "expected DeleteTopics_result, not %s",
                    rd_kafka_event_name(rkev));

        /* Expecting error */
        err = rd_kafka_event_error(rkev);
        errstr2 = rd_kafka_event_error_string(rkev);
        TEST_ASSERT(err == exp_err,
                    "expected DeleteTopics to return %s, not %s (%s)",
                    rd_kafka_err2str(exp_err),
                    rd_kafka_err2str(err),
                    err ? errstr2 : "n/a");

        TEST_SAY("DeleteTopics: returned %s (%s)\n",
                 rd_kafka_err2str(err), err ? errstr2 : "n/a");

        /* Extract topics */
        restopics = rd_kafka_DeleteTopics_result_topics(res, &restopic_cnt);


        /* Scan topics for proper fields and expected failures. */
        for (i = 0 ; i < (int)restopic_cnt ; i++) {
                const rd_kafka_topic_result_t *terr = restopics[i];

                /* Verify that topic order matches our request. */
                if (strcmp(rd_kafka_topic_result_name(terr), topics[i]))
                        TEST_FAIL_LATER("Topic result order mismatch at #%d: "
                                        "expected %s, got %s",
                                        i, topics[i],
                                        rd_kafka_topic_result_name(terr));

                TEST_SAY("DeleteTopics result: #%d: %s: %s: %s\n",
                         i,
                         rd_kafka_topic_result_name(terr),
                         rd_kafka_err2name(rd_kafka_topic_result_error(terr)),
                         rd_kafka_topic_result_error_string(terr));
                if (rd_kafka_topic_result_error(terr) != exp_topicerr[i])
                        TEST_FAIL_LATER(
                                "Expected %s, not %d: %s",
                                rd_kafka_err2name(exp_topicerr[i]),
                                rd_kafka_topic_result_error(terr),
                                rd_kafka_err2name(rd_kafka_topic_result_error(
                                                          terr)));
        }

        /**
         * Verify that the expected topics are deleted and the non-expected
         * are not. Allow it some time to propagate.
         */
        if (op_timeout > 0)
                metadata_tmout = op_timeout + 1000;
        else
                metadata_tmout = 10 * 1000;

        test_wait_metadata_update(rk,
                                  NULL, 0,
                                  exp_not_mdtopics,
                                  exp_not_mdtopic_cnt,
                                  metadata_tmout);

        rd_kafka_event_destroy(rkev);

        for (i = 0 ; i < MY_DEL_TOPICS_CNT ; i++) {
                rd_kafka_DeleteTopic_destroy(del_topics[i]);
                rd_free(topics[i]);
        }

        if (options)
                rd_kafka_AdminOptions_destroy(options);

        if (!useq)
                rd_kafka_queue_destroy(q);

#undef MY_DEL_TOPICS_CNT
}
Example #5
int main_0040_io_event (int argc, char **argv) {
	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *tconf;
	rd_kafka_t *rk_p, *rk_c;
	const char *topic;
	rd_kafka_topic_t *rkt_p;
	rd_kafka_queue_t *queue;
	uint64_t testid;
	int msgcnt = 100;
	int recvd = 0;
	int fds[2];
	int wait_multiplier = 1;
	struct pollfd pfd;
        int r;
	enum {
		_NOPE,
		_YEP,
		_REBALANCE
	} expecting_io = _REBALANCE;

	testid = test_id_generate();
	topic = test_mk_topic_name(__FUNCTION__, 1);

	rk_p = test_create_producer();
	rkt_p = test_create_producer_topic(rk_p, topic, NULL);
	test_auto_create_topic_rkt(rk_p, rkt_p);

	test_conf_init(&conf, &tconf, 0);
	rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE);
	test_conf_set(conf, "session.timeout.ms", "6000");
	test_conf_set(conf, "enable.partition.eof", "false");
	/* Speed up propagation of new topics */
	test_conf_set(conf, "metadata.max.age.ms", "5000");
	test_topic_conf_set(tconf, "auto.offset.reset", "earliest");
	rk_c = test_create_consumer(topic, NULL, conf, tconf);

	queue = rd_kafka_queue_get_consumer(rk_c);

	test_consumer_subscribe(rk_c, topic);

#ifndef _MSC_VER
        r = pipe(fds);
#else
        r = _pipe(fds, 2, _O_BINARY);
#endif
        if (r == -1)
		TEST_FAIL("pipe() failed: %s\n", strerror(errno));
	
	rd_kafka_queue_io_event_enable(queue, fds[1], "1", 1);

	pfd.fd = fds[0];
	pfd.events = POLLIN;
	pfd.revents = 0;

	/**
	 * 1) Wait for rebalance event
	 * 2) Wait 1 interval (1s) expecting no IO (nothing produced).
	 * 3) Produce half the messages
	 * 4) Expect IO
	 * 5) Consume the available messages
	 * 6) Wait 1 interval expecting no IO.
	 * 7) Produce remaining half
	 * 8) Expect IO
	 * 9) Done.
	 */
	while (recvd < msgcnt) {
		int r;

#ifndef _MSC_VER
		r = poll(&pfd, 1, 1000 * wait_multiplier);
#else
                r = WSAPoll(&pfd, 1, 1000 * wait_multiplier);
#endif
		if (r == -1) {
			TEST_FAIL("poll() failed: %s", strerror(errno));
			
		} else if (r == 1) {
			rd_kafka_event_t *rkev;
			char b;
			int eventcnt = 0;

			if (pfd.revents & POLLERR)
				TEST_FAIL("Poll error\n");
			if (!(pfd.revents & POLLIN)) {
				TEST_SAY("Stray event 0x%x\n", (int)pfd.revents);
				continue;
			}

			TEST_SAY("POLLIN\n");
                        /* Read signaling token to purge socket queue and
                         * eventually silence POLLIN */
#ifndef _MSC_VER
			r = read(pfd.fd, &b, 1);
#else
			r = _read((int)pfd.fd, &b, 1);
#endif
			if (r == -1)
				TEST_FAIL("read failed: %s\n", strerror(errno));

			if (!expecting_io)
				TEST_WARN("Got unexpected IO after %d/%d msgs\n",
					  recvd, msgcnt);

			while ((rkev = rd_kafka_queue_poll(queue, 0))) {
				eventcnt++;
				switch (rd_kafka_event_type(rkev))
				{
				case RD_KAFKA_EVENT_REBALANCE:
					TEST_SAY("Got %s: %s\n", rd_kafka_event_name(rkev),
						 rd_kafka_err2str(rd_kafka_event_error(rkev)));
					if (expecting_io != _REBALANCE)
						TEST_FAIL("Got Rebalance when expecting message\n");
					if (rd_kafka_event_error(rkev) == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
						rd_kafka_assign(rk_c, rd_kafka_event_topic_partition_list(rkev));
						expecting_io = _NOPE;
					} else
						rd_kafka_assign(rk_c, NULL);
					break;
					
				case RD_KAFKA_EVENT_FETCH:
					if (expecting_io != _YEP)
						TEST_FAIL("Did not expect more messages at %d/%d\n",
							  recvd, msgcnt);
					recvd++;
					if (recvd == (msgcnt / 2) || recvd == msgcnt)
						expecting_io = _NOPE;
					break;

				case RD_KAFKA_EVENT_ERROR:
					TEST_FAIL("Error: %s\n", rd_kafka_event_error_string(rkev));
					break;

				default:
					TEST_SAY("Ignoring event %s\n", rd_kafka_event_name(rkev));
				}
					
				rd_kafka_event_destroy(rkev);
			}
			TEST_SAY("%d events, Consumed %d/%d messages\n", eventcnt, recvd, msgcnt);

			wait_multiplier = 1;

		} else {
			if (expecting_io == _REBALANCE) {
				continue;
			} else if (expecting_io == _YEP) {
				TEST_FAIL("Did not see expected IO after %d/%d msgs\n",
					  recvd, msgcnt);
			}

			TEST_SAY("IO poll timeout (good)\n");

			TEST_SAY("Got idle period, producing\n");
			test_produce_msgs(rk_p, rkt_p, testid, 0, recvd, msgcnt/2,
					  NULL, 10);

			expecting_io = _YEP;
			/* When running slowly (e.g., valgrind) it might take
			 * some time before the first message is received
			 * after producing. */
			wait_multiplier = 3;
		}
	}
	TEST_SAY("Done\n");

	rd_kafka_topic_destroy(rkt_p);
	rd_kafka_destroy(rk_p);

	rd_kafka_queue_destroy(queue);
	rd_kafka_consumer_close(rk_c);
	rd_kafka_destroy(rk_c);

#ifndef _MSC_VER
	close(fds[0]);
	close(fds[1]);
#else
        _close(fds[0]);
        _close(fds[1]);
#endif

	return 0;
}
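The reusable core of this test is the rd_kafka_queue_io_event_enable() pattern: librdkafka writes the given payload to the supplied file descriptor whenever the queue becomes non-empty, so an ordinary poll() loop can multiplex Kafka events with other IO. A condensed POSIX-only sketch of that pattern (needs <poll.h> and <unistd.h>; function name and loop structure are illustrative, not taken from the test):

static void serve_queue_via_pipe (rd_kafka_queue_t *queue) {
        int fds[2];
        struct pollfd pfd;

        if (pipe(fds) == -1)
                return;

        /* Write the single byte "1" to fds[1] whenever the queue
         * goes from empty to non-empty. */
        rd_kafka_queue_io_event_enable(queue, fds[1], "1", 1);

        pfd.fd = fds[0];
        pfd.events = POLLIN;

        while (poll(&pfd, 1, 1000) >= 0) {
                char b;
                rd_kafka_event_t *rkev;

                if (!(pfd.revents & POLLIN))
                        continue;

                /* Drain the signaling byte, then serve all queued events. */
                if (read(fds[0], &b, 1) == -1)
                        break;

                while ((rkev = rd_kafka_queue_poll(queue, 0))) {
                        /* Dispatch on rd_kafka_event_type(rkev) here. */
                        rd_kafka_event_destroy(rkev);
                }
        }

        rd_kafka_queue_io_event_enable(queue, -1, NULL, 0); /* disable */
        close(fds[0]);
        close(fds[1]);
}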
Example #6
/**
 * @brief Test delivery report events
 */
int main_0039_event_dr (int argc, char **argv) {
	int partition = 0;
	int r;
	rd_kafka_t *rk;
	rd_kafka_topic_t *rkt;
	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *topic_conf;
	char msg[128];
	int msgcnt = test_on_ci ? 5000 : 50000;
	int i;
        test_timing_t t_produce, t_delivery;
	rd_kafka_queue_t *eventq;

	test_conf_init(&conf, &topic_conf, 10);

	/* Set delivery report callback */
	rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);

	rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_DR);

	/* Create kafka instance */
	rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

	eventq = rd_kafka_queue_get_main(rk);

	rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0005", 0),
                                 topic_conf);
	if (!rkt)
		TEST_FAIL("Failed to create topic: %s\n",
			  rd_strerror(errno));

	/* Produce messages */
        TIMING_START(&t_produce, "PRODUCE");
	for (i = 0 ; i < msgcnt ; i++) {
		int *msgidp = malloc(sizeof(*msgidp));
		*msgidp = i;
		rd_snprintf(msg, sizeof(msg), "%s test message #%i", argv[0], i);
		r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
				     msg, strlen(msg), NULL, 0, msgidp);
		if (r == -1)
			TEST_FAIL("Failed to produce message #%i: %s\n",
				  i, rd_strerror(errno));
	}
        TIMING_STOP(&t_produce);
	TEST_SAY("Produced %i messages, waiting for deliveries\n", msgcnt);

	/* Wait for messages to be delivered */
        TIMING_START(&t_delivery, "DELIVERY");
	while (rd_kafka_outq_len(rk) > 0) {
		rd_kafka_event_t *rkev;
		rkev = rd_kafka_queue_poll(eventq, 1000);
		switch (rd_kafka_event_type(rkev))
		{
		case RD_KAFKA_EVENT_DR:
                        TEST_SAYL(3, "%s event with %zd messages\n",
                                  rd_kafka_event_name(rkev),
                                  rd_kafka_event_message_count(rkev));
			handle_drs(rkev);
			break;
		default:
			TEST_SAY("Unhandled event: %s\n",
				 rd_kafka_event_name(rkev));
			break;
		}
		rd_kafka_event_destroy(rkev);
	}
        TIMING_STOP(&t_delivery);

	if (fails)
		TEST_FAIL("%i failures, see previous errors", fails);

	if (msgid_next != msgcnt)
		TEST_FAIL("Still waiting for messages: next %i != end %i\n",
			  msgid_next, msgcnt);

	rd_kafka_queue_destroy(eventq);

	/* Destroy topic */
	rd_kafka_topic_destroy(rkt);

	/* Destroy rdkafka instance */
	TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
	rd_kafka_destroy(rk);

	return 0;
}
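handle_drs(), fails and msgid_next are defined elsewhere in the test file. The essential API is rd_kafka_event_message_next(), which iterates the delivery reports carried by a single RD_KAFKA_EVENT_DR event; a hedged sketch of such a handler (the name and checks are illustrative, not the test's actual implementation):

static void handle_drs_sketch (rd_kafka_event_t *rkev) {
        const rd_kafka_message_t *rkmessage;

        while ((rkmessage = rd_kafka_event_message_next(rkev))) {
                /* _private is the msg_opaque (msgidp) passed to
                 * rd_kafka_produce() above. */
                int msgid = *(int *)rkmessage->_private;

                if (rkmessage->err)
                        TEST_FAIL("Delivery of message #%d failed: %s",
                                  msgid, rd_kafka_err2str(rkmessage->err));

                free(rkmessage->_private);
        }
}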
Example #7
int main_0062_stats_event (int argc, char **argv) {
    rd_kafka_t *rk;
    rd_kafka_conf_t *conf;
    test_timing_t t_delivery;
    rd_kafka_queue_t *eventq;
    const int iterations = 5;
    int i;
    test_conf_init(NULL, NULL, 10);

    /* Set up a global config object */
    conf = rd_kafka_conf_new();
    rd_kafka_conf_set(conf, "statistics.interval.ms", "100", NULL, 0);

    rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_STATS);

    /* Create kafka instance */
    rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

    eventq = rd_kafka_queue_get_main(rk);

    /* Wait for stats event */
    for (i = 0 ; i < iterations ; i++) {
            TIMING_START(&t_delivery, "STATS_EVENT");
            stats_count = 0;
            while (stats_count == 0) {
                    rd_kafka_event_t *rkev;
                    rkev = rd_kafka_queue_poll(eventq, 100);
                    switch (rd_kafka_event_type(rkev))
                    {
                    case RD_KAFKA_EVENT_STATS:
                            TEST_SAY("%s event\n", rd_kafka_event_name(rkev));
                            handle_stats(rkev);
                            break;
                    case RD_KAFKA_EVENT_NONE:
                            break;
                    default:
                            TEST_SAY("Ignore event: %s\n",
                                     rd_kafka_event_name(rkev));
                            break;
                    }
                    rd_kafka_event_destroy(rkev);
            }
            TIMING_STOP(&t_delivery);

            if (!strcmp(test_mode, "bare")) {
                    /* valgrind is too slow to make this meaningful. */
                    if (TIMING_DURATION(&t_delivery) < 1000 * 100 * 0.8 ||
                        TIMING_DURATION(&t_delivery) > 1000 * 100 * 1.2)
                            TEST_FAIL("Stats duration %.3fms is >= 20%% "
                                      "outside statistics.interval.ms 100",
                                      (float)TIMING_DURATION(&t_delivery)/
                                      1000.0f);
            }
    }

    rd_kafka_queue_destroy(eventq);

    rd_kafka_destroy(rk);

    return 0;
}
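handle_stats() and stats_count are defined elsewhere in the test file; the essential call is rd_kafka_event_stats(), which returns the statistics JSON document owned by the event (valid until the event is destroyed). A minimal sketch of such a handler (illustrative, not necessarily the test's implementation):

static int stats_count_sketch = 0;

static void handle_stats_sketch (rd_kafka_event_t *rkev) {
        const char *stats_json = rd_kafka_event_stats(rkev);

        if (!stats_json)
                TEST_FAIL("rd_kafka_event_stats() returned NULL");

        /* Print only the beginning of the (potentially large) JSON blob. */
        TEST_SAY("Stats JSON: %.60s...\n", stats_json);
        stats_count_sketch++;
}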