Code Example #1
File: 0035-api_version.c Project: BDeus/librdkafka
int main_0035_api_version (int argc, char **argv) {
	rd_kafka_t *rk;
	rd_kafka_conf_t *conf;
	const struct rd_kafka_metadata *metadata;
	rd_kafka_resp_err_t err;
	test_timing_t t_meta;

	test_conf_init(&conf, NULL, 30);
	test_conf_set(conf, "socket.timeout.ms", "12000");
	rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

	TEST_SAY("Querying for metadata\n");
	TIMING_START(&t_meta, "metadata()");
	err = rd_kafka_metadata(rk, 0, NULL, &metadata, 10*1000);
	TIMING_STOP(&t_meta);
	if (err)
		TEST_FAIL("metadata() failed: %s",
			  rd_kafka_err2str(err));

	if (TIMING_DURATION(&t_meta) / 1000 > 11*1000)
		TEST_FAIL("metadata() took too long: %.3fms",
			  (float)TIMING_DURATION(&t_meta) / 1000.0f);

	rd_kafka_metadata_destroy(metadata);

	TEST_SAY("Metadata succeeded\n");

	rd_kafka_destroy(rk);

	return 0;
}
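
The test harness helpers above (test_conf_init(), test_create_handle(), TEST_SAY()) are not part of librdkafka's public API. A minimal sketch of the same metadata query using only public calls might look like this; the broker address is a placeholder and error handling is abbreviated:

#include <stdio.h>
#include <librdkafka/rdkafka.h>

static int query_metadata (void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        const struct rd_kafka_metadata *metadata;
        rd_kafka_resp_err_t err;
        rd_kafka_t *rk;

        rd_kafka_conf_set(conf, "bootstrap.servers", "localhost:9092",
                          errstr, sizeof(errstr));

        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
        if (!rk) {
                rd_kafka_conf_destroy(conf); /* conf still owned on failure */
                return -1;
        }

        /* all_topics=0, only_rkt=NULL: locally known topics only */
        err = rd_kafka_metadata(rk, 0, NULL, &metadata, 10*1000);
        if (!err) {
                printf("%d broker(s), %d topic(s)\n",
                       metadata->broker_cnt, metadata->topic_cnt);
                rd_kafka_metadata_destroy(metadata);
        }

        rd_kafka_destroy(rk);
        return err ? -1 : 0;
}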
Code Example #2
static void do_nonexist_commit (void) {
	rd_kafka_t *rk;
	char group_id[64];
	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *tconf;
	rd_kafka_topic_partition_list_t *offsets;
	const char *unk_topic = test_mk_topic_name(__FUNCTION__, 1);
	rd_kafka_resp_err_t err;

	test_conf_init(&conf, &tconf, 20);
	test_str_id_generate(group_id, sizeof(group_id));

	TEST_SAY(_C_MAG "[ do_nonexist_commit group.id %s ]\n", group_id);

	rk = test_create_consumer(group_id, NULL, conf, tconf, NULL);

	TEST_SAY("Try nonexist commit\n");
	offsets = rd_kafka_topic_partition_list_new(2);
	rd_kafka_topic_partition_list_add(offsets, unk_topic, 0)->offset = 123;
	rd_kafka_topic_partition_list_add(offsets, unk_topic, 1)->offset = 456;

	err = rd_kafka_commit_queue(rk, offsets, NULL,
				    nonexist_offset_commit_cb, NULL);
	TEST_SAY("nonexist commit returned %s\n", rd_kafka_err2str(err));
	if (err != RD_KAFKA_RESP_ERR_NO_ERROR)
		TEST_FAIL("commit() should succeed, not: %s",
			  rd_kafka_err2str(err));

	rd_kafka_topic_partition_list_destroy(offsets);

	test_consumer_close(rk);

	rd_kafka_destroy(rk);
}
Code Example #3
/**
 * Verify that all groups in 'groups' are seen; if so, returns group_cnt,
 * else returns -1.
 */
static int verify_groups (const struct rd_kafka_group_list *grplist,
                          char **groups, int group_cnt) {
        int i;
        int seen = 0;

        for (i = 0 ; i < grplist->group_cnt ; i++) {
                const struct rd_kafka_group_info *gi = &grplist->groups[i];
                int j;

                for (j = 0 ; j < group_cnt ; j++) {
                        if (strcmp(gi->group, groups[j]))
                                continue;

                        if (gi->err)
                                TEST_SAY("Group %s has broker-reported "
                                         "error: %s\n", gi->group,
                                         rd_kafka_err2str(gi->err));

                        seen++;
                }
        }

        TEST_SAY("Found %d/%d desired groups in list of %d groups\n",
                 seen, group_cnt, grplist->group_cnt);

        if (seen != group_cnt)
                return -1;
        else
                return seen;
}
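
For context, this helper is driven by rd_kafka_list_groups(); Code Example #15 shows the full flow with retries. A minimal hedged call sequence, assuming rk, groups and group_cnt already exist:

        const struct rd_kafka_group_list *grplist;

        if (!rd_kafka_list_groups(rk, NULL /* all groups */,
                                  &grplist, 5000)) {
                if (verify_groups(grplist, groups, group_cnt) == -1)
                        fprintf(stderr, "not all expected groups seen\n");
                rd_kafka_group_list_destroy(grplist);
        }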
Code Example #4
/**
 * Commit non-existent topic (issue #704)
 */
static void nonexist_offset_commit_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err,
				       rd_kafka_topic_partition_list_t *offsets,
				       void *opaque) {
	int i;
	int failed_offsets = 0;

	TEST_SAY("Offset commit callback for %d partitions: %s\n",
		 offsets ? offsets->cnt : 0,
		 rd_kafka_err2str(err));

	TEST_ASSERT(offsets != NULL);

	for (i = 0 ; i < offsets->cnt ; i++) {
		TEST_SAY("committed: %s [%"PRId32"] offset %"PRId64
			 ": %s\n",
			 offsets->elems[i].topic,
			 offsets->elems[i].partition,
			 offsets->elems[i].offset,
			 rd_kafka_err2str(offsets->elems[i].err));
		failed_offsets += offsets->elems[i].err ? 1 : 0;
	}

	TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NO_ERROR,
		    "expected global success, not %s", rd_kafka_err2str(err));
	TEST_ASSERT(offsets->cnt == 2, "expected 2 offsets, not %d", offsets->cnt);
	TEST_ASSERT(failed_offsets == offsets->cnt,
		    "expected %d offsets to have failed, got %d",
		    offsets->cnt, failed_offsets);
}
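
This function matches librdkafka's offset-commit callback signature, so it can be attached per call, as Code Example #2 does, or globally on the configuration. A hedged sketch of both hookups (conf, rk and offsets are assumed to exist):

	/* Global: fires for each automatic or asynchronous commit. */
	rd_kafka_conf_set_offset_commit_cb(conf, nonexist_offset_commit_cb);

	/* Per call: with a NULL queue a temporary queue is used and the
	 * call blocks until the callback has been served. */
	rd_kafka_commit_queue(rk, offsets, NULL,
			      nonexist_offset_commit_cb, NULL /* opaque */);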
Code Example #5
static void verify_consumed_msg_check0 (const char *func, int line) {
	int i;
	int fails = 0;

	if (cons_msgs_cnt < cons_msgs_size) {
		TEST_SAY("Missing %i messages in consumer\n",
			 cons_msgs_size - cons_msgs_cnt);
		fails++;
	}

	qsort(cons_msgs, cons_msgs_size, sizeof(*cons_msgs), int_cmp);

	for (i = 0 ; i < cons_msgs_size ; i++) {
		if (cons_msgs[i] != i) {
			TEST_SAY("Consumed message #%i is wrong, "
				 "expected #%i\n",
				 cons_msgs[i], i);
			fails++;
		}
	}

	if (fails)
		TEST_FAIL("See above error(s)");

	verify_consumed_msg_reset(0);
}
Code Example #6
File: test-1.c Project: antoniocorreia/cprojects
/**
 * Wait 'timeout' seconds for rdkafka to kill all its threads and clean up.
 */
void test_wait_exit (int timeout) {
	int r;
        time_t start = time(NULL);

	while ((r = rd_kafka_thread_cnt()) && timeout-- >= 0) {
		TEST_SAY("%i thread(s) in use by librdkafka, waiting...\n", r);
		rd_sleep(1);
	}

	TEST_SAY("%i thread(s) in use by librdkafka\n", r);

	if (r > 0) {
		TEST_FAIL("%i thread(s) still active in librdkafka", r);
	}

        timeout -= (int)(time(NULL) - start);
        if (timeout > 0) {
		TEST_SAY("Waiting %d seconds for all librdkafka memory "
			 "to be released\n", timeout);
                if (rd_kafka_wait_destroyed(timeout * 1000) == -1)
			TEST_FAIL("Not all internal librdkafka "
				  "objects destroyed\n");
	}

}
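
Both rd_kafka_thread_cnt() and rd_kafka_wait_destroyed() are public librdkafka calls, so the same pattern works outside the test harness; a minimal application-side sketch:

#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void shutdown_and_wait (rd_kafka_t *rk) {
        rd_kafka_destroy(rk);
        /* Wait up to 5s for all internal librdkafka objects to be freed. */
        if (rd_kafka_wait_destroyed(5000) == -1)
                fprintf(stderr, "%i librdkafka thread(s) still active\n",
                        rd_kafka_thread_cnt());
}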
Code Example #7
File: test-1.c Project: antoniocorreia/cprojects
static int run_test0 (struct run_args *run_args) {
	test_timing_t t_run;
	int r;

	test_curr = run_args->testname;
	TEST_SAY("================= Running test %s =================\n",
		 run_args->testname);
	TIMING_START(&t_run, run_args->testname);
	test_start = t_run.ts_start;
	r = run_args->test_main(run_args->argc, run_args->argv);
	TIMING_STOP(&t_run);

	if (r)
		TEST_SAY("\033[31m"
			 "================= Test %s FAILED ================="
			 "\033[0m\n",
			 run_args->testname);
	else
		TEST_SAY("\033[32m"
			 "================= Test %s PASSED ================="
			 "\033[0m\n",
			 run_args->testname);

	return r;
}
Code Example #8
static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err,
			  rd_kafka_topic_partition_list_t *parts, void *opaque) {
	int i;

	TEST_SAY("rebalance_cb: %s:\n", rd_kafka_err2str(err));
	test_print_partition_list(parts);

	if (parts->cnt < partitions)
		TEST_FAIL("rebalance_cb: Expected %d partitions, not %d",
			  partitions, parts->cnt);

	switch (err)
	{
	case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
		for (i = 0 ; i < parts->cnt ; i++) {
			if (i < partitions)
				parts->elems[i].offset = msgcnt / 2;
			else
				parts->elems[i].offset = RD_KAFKA_OFFSET_END;
		}
		TEST_SAY("Use these offsets:\n");
		test_print_partition_list(parts);
		test_consumer_assign("HL.REBALANCE", rk, parts);
		break;

	case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
		test_consumer_unassign("HL.REBALANCE", rk);
		break;

	default:
		TEST_FAIL("rebalance_cb: error: %s", rd_kafka_err2str(err));
	}
}
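
For reference, a rebalance callback like this is registered on the configuration before the consumer is created. A hedged sketch with placeholder group id and broker address:

	rd_kafka_conf_t *conf = rd_kafka_conf_new();
	char errstr[512];

	rd_kafka_conf_set(conf, "group.id", "mygroup",
			  errstr, sizeof(errstr));
	rd_kafka_conf_set(conf, "bootstrap.servers", "localhost:9092",
			  errstr, sizeof(errstr));
	rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);

	/* After rd_kafka_new(RD_KAFKA_CONSUMER, conf, ...) and
	 * rd_kafka_subscribe(), the callback fires from
	 * rd_kafka_consumer_poll(). */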
Code Example #9
File: 0013-null-msgs.c Project: Kitter/librdkafka
static void consume_messages (uint64_t testid, const char *topic,
			      int32_t partition, int msg_base, int batch_cnt,
			      int msgcnt) {
	rd_kafka_t *rk;
	rd_kafka_topic_t *rkt;
	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *topic_conf;
	int i;

	test_conf_init(&conf, &topic_conf, 20);

	/* Create kafka instance */
	rk = test_create_handle(RD_KAFKA_CONSUMER, conf);

	rkt = rd_kafka_topic_new(rk, topic, topic_conf);
	if (!rkt)
		TEST_FAIL("Failed to create topic: %s\n",
                          rd_kafka_err2str(rd_kafka_last_error()));

	TEST_SAY("Consuming %i messages from partition %i\n",
		 batch_cnt, partition);

	/* Consume messages */
	if (rd_kafka_consume_start(rkt, partition,
			     RD_KAFKA_OFFSET_TAIL(batch_cnt)) == -1)
		TEST_FAIL("consume_start(%i, -%i) failed: %s",
			  (int)partition, batch_cnt,
			  rd_kafka_err2str(rd_kafka_last_error()));

	for (i = 0 ; i < batch_cnt ; i++) {
		rd_kafka_message_t *rkmessage;

		rkmessage = rd_kafka_consume(rkt, partition, tmout_multip(5000));
		if (!rkmessage)
			TEST_FAIL("Failed to consume message %i/%i from "
				  "partition %i: %s",
				  i, batch_cnt, (int)partition,
				  rd_kafka_err2str(rd_kafka_last_error()));
		if (rkmessage->err)
			TEST_FAIL("Consume message %i/%i from partition %i "
				  "has error: %s",
				  i, batch_cnt, (int)partition,
				  rd_kafka_err2str(rkmessage->err));

		verify_consumed_msg(testid, partition, msg_base+i, rkmessage);

		rd_kafka_message_destroy(rkmessage);
	}

	rd_kafka_consume_stop(rkt, partition);

	/* Destroy topic */
	rd_kafka_topic_destroy(rkt);

	/* Destroy rdkafka instance */
	TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
	rd_kafka_destroy(rk);
}
Code Example #10
File: 0013-null-msgs.c Project: Kitter/librdkafka
static void verify_consumed_msg0 (const char *func, int line,
				  uint64_t testid, int32_t partition,
				  int msgnum,
				  rd_kafka_message_t *rkmessage) {
	uint64_t in_testid;
	int in_part;
	int in_msgnum;
	char buf[128];

        if (rkmessage->len != 0)
                TEST_FAIL("Incoming message not NULL: %i bytes",
                          (int)rkmessage->len);

	if (rkmessage->key_len +1 >= sizeof(buf))
		TEST_FAIL("Incoming message key too large (%i): "
			  "not sourced by this test",
			  (int)rkmessage->key_len);

	rd_snprintf(buf, sizeof(buf), "%.*s",
		 (int)rkmessage->key_len, (char *)rkmessage->key);

	if (sscanf(buf, "testid=%"SCNu64", partition=%i, msg=%i",
		   &in_testid, &in_part, &in_msgnum) != 3)
		TEST_FAIL("Incorrect key format: %s", buf);

	if (testid != in_testid ||
	    (partition != -1 && partition != in_part) ||
	    (msgnum != -1 && msgnum != in_msgnum) ||
	    (in_msgnum < 0 || in_msgnum > cons_msgs_size))
		goto fail_match;

	if (test_level > 2) {
		TEST_SAY("%s:%i: Our testid %"PRIu64", part %i (%i), "
			 "msg %i/%i did "
			 "match message's key: \"%s\"\n",
			 func, line,
			 testid, (int)partition, (int)rkmessage->partition,
			 msgnum, cons_msgs_size, buf);
	}

	if (cons_msgs_cnt == cons_msgs_size) {
		TEST_SAY("Too many messages in cons_msgs (%i) while reading "
			 "message key \"%s\"\n",
			 cons_msgs_cnt, buf);
		verify_consumed_msg_check();
		TEST_FAIL("See above error(s)");
	}

	cons_msgs[cons_msgs_cnt++] = in_msgnum;

	return;

 fail_match:
	TEST_FAIL("%s:%i: Our testid %"PRIu64", part %i, msg %i/%i did "
		  "not match message's key: \"%s\"\n",
		  func, line,
		  testid, (int)partition, msgnum, cons_msgs_size, buf);
}
Code Example #11
File: test.c Project: lindsay-show/librdkafka
int test_consumer_poll (const char *what, rd_kafka_t *rk, uint64_t testid,
                        int exp_eof_cnt, int exp_msg_base, int exp_cnt) {
        int eof_cnt = 0;
        int cnt = 0;
        test_timing_t t_cons;

        TEST_SAY("%s: consume %d messages\n", what, exp_cnt);

        TIMING_START(&t_cons, "CONSUME");

        while ((exp_eof_cnt == -1 || eof_cnt < exp_eof_cnt) &&
               (cnt < exp_cnt)) {
                rd_kafka_message_t *rkmessage;

                rkmessage = rd_kafka_consumer_poll(rk, 10*1000);
                if (!rkmessage) /* Shouldn't take this long to get a msg */
                        TEST_FAIL("%s: consumer_poll() timeout\n", what);


                if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
                        TEST_SAY("%s [%"PRId32"] reached EOF at "
                                 "offset %"PRId64"\n",
                                 rd_kafka_topic_name(rkmessage->rkt),
                                 rkmessage->partition,
                                 rkmessage->offset);
                        eof_cnt++;

                } else if (rkmessage->err) {
                        TEST_SAY("%s [%"PRId32"] error (offset %"PRId64"): %s",
                                 rkmessage->rkt ?
                                 rd_kafka_topic_name(rkmessage->rkt) :
                                 "(no-topic)",
                                 rkmessage->partition,
                                 rkmessage->offset,
                                 rd_kafka_message_errstr(rkmessage));

                } else {
			if (test_level > 2)
				TEST_SAY("%s [%"PRId32"] "
					 "message at offset %"PRId64"\n",
					 rd_kafka_topic_name(rkmessage->rkt),
					 rkmessage->partition,
					 rkmessage->offset);

                        test_verify_rkmessage(rkmessage, testid, -1, -1);
                        cnt++;
                }

                rd_kafka_message_destroy(rkmessage);
        }

        TIMING_STOP(&t_cons);

        TEST_SAY("%s: consumed %d/%d messages (%d/%d EOFs)\n",
                 what, cnt, exp_cnt, eof_cnt, exp_eof_cnt);
        return cnt;
}
Code Example #12
File: 0090-idempotence.c Project: Whissi/librdkafka
/**
 * @brief Test handling of implicit acks.
 *
 * @param batch_cnt Total number of batches, ProduceRequests, sent.
 * @param initial_fail_batch_cnt How many of the initial batches should
 *                               fail with an emulated network timeout.
 */
static void do_test_implicit_ack (const char *what,
                                  int batch_cnt, int initial_fail_batch_cnt) {
        rd_kafka_t *rk;
        const char *topic = test_mk_topic_name("0090_idempotence_impl_ack", 1);
        const int32_t partition = 0;
        uint64_t testid;
        int msgcnt = 10*batch_cnt;
        rd_kafka_conf_t *conf;
        rd_kafka_topic_t *rkt;
        test_msgver_t mv;

        TEST_SAY(_C_MAG "[ Test implicit ack: %s ]\n", what);

        rd_atomic32_init(&state.produce_cnt, 0);
        state.batch_cnt = batch_cnt;
        state.initial_fail_batch_cnt = initial_fail_batch_cnt;

        testid = test_id_generate();

        test_conf_init(&conf, NULL, 60);
        rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
        test_conf_set(conf, "enable.idempotence", "true");
        test_conf_set(conf, "batch.num.messages", "10");
        test_conf_set(conf, "linger.ms", "500");
        test_conf_set(conf, "retry.backoff.ms", "2000");

        /* The ProduceResponse handler will inject timed-out-in-flight
         * errors for the first N ProduceRequests, which will trigger retries
         * that in turn will result in OutOfSequence errors. */
        test_conf_set(conf, "ut_handle_ProduceResponse",
                      (char *)handle_ProduceResponse);

        test_create_topic(topic, 1, 1);

        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
        rkt = test_create_producer_topic(rk, topic, NULL);


        TEST_SAY("Producing %d messages\n", msgcnt);
        test_produce_msgs(rk, rkt, testid, -1, 0, msgcnt, NULL, 0);

        TEST_SAY("Flushing..\n");
        rd_kafka_flush(rk, 10000);

        rd_kafka_topic_destroy(rkt);
        rd_kafka_destroy(rk);

        TEST_SAY("Verifying messages with consumer\n");
        test_msgver_init(&mv, testid);
        test_consume_msgs_easy_mv(NULL, topic, partition,
                                  testid, 1, msgcnt, NULL, &mv);
        test_msgver_verify("verify", &mv, TEST_MSGVER_ALL, 0, msgcnt);
        test_msgver_clear(&mv);

        TEST_SAY(_C_GRN "[ Test implicit ack: %s : PASS ]\n", what);
}
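
Stripped of the harness, the heart of this test is one configuration property. A minimal sketch of creating an idempotent producer with the public API (error handling abbreviated):

        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        char errstr[512];

        /* Enables exactly-once, in-order delivery per partition within
         * the producer session; librdkafka adjusts related settings
         * (e.g. retries) accordingly. */
        if (rd_kafka_conf_set(conf, "enable.idempotence", "true",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
                fprintf(stderr, "%s\n", errstr);

        rd_kafka_t *rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                                      errstr, sizeof(errstr));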
Code Example #13
static void do_test_non_exist_and_partchange (void) {
	char *topic_a = rd_strdup(test_mk_topic_name("topic_a", 1));
	rd_kafka_t *rk;
	rd_kafka_conf_t *conf;
	rd_kafka_queue_t *queue;

	/**
	 * Test #1:
	 * - Subscribe to non-existing topic.
	 * - Verify empty assignment
	 * - Create topic
	 * - Verify new assignment containing topic
	 */
	TEST_SAY("#1 & #2 testing\n");
	test_conf_init(&conf, NULL, 60);

	/* Decrease metadata interval to speed up topic change discovery. */
	test_conf_set(conf, "metadata.max.age.ms", "5000");

	rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE);
	rk = test_create_consumer(test_str_id_generate_tmp(),
				  NULL, conf, NULL, NULL);
	queue = rd_kafka_queue_get_consumer(rk);

	TEST_SAY("#1: Subscribing to %s\n", topic_a);
	test_consumer_subscribe(rk, topic_a);

	/* Should not see a rebalance since no topics are matched. */
	await_no_rebalance("#1: empty", rk, queue, 10000);

	TEST_SAY("#1: creating topic %s\n", topic_a);
	test_create_topic(topic_a, 2, 1);

	await_assignment("#1: proper", rk, queue, 1,
			 topic_a, 2);


	/**
	 * Test #2 (continue with #1 consumer)
	 * - Increase the partition count
	 * - Verify updated assignment
	 */
	test_kafka_topics("--alter --topic %s --partitions 4",
			  topic_a);
	await_revoke("#2", rk, queue);

	await_assignment("#2: more partitions", rk, queue, 1,
			 topic_a, 4);

	test_consumer_close(rk);
	rd_kafka_queue_destroy(queue);
	rd_kafka_destroy(rk);

	rd_free(topic_a);
}
Code Example #14
File: test.c Project: lindsay-show/librdkafka
static int run_test0 (struct run_args *run_args) {
        struct test *test = run_args->test;
	test_timing_t t_run;
	int r;
        char stats_file[256];

        rd_snprintf(stats_file, sizeof(stats_file), "stats_%s_%"PRIu64".json",
                    test->name, test_id_generate());
        if (!(test->stats_fp = fopen(stats_file, "w+")))
                TEST_SAY("=== Failed to create stats file %s: %s ===\n",
                         stats_file, strerror(errno));

	test_curr = test;
	TEST_SAY("================= Running test %s =================\n",
		 test->name);
        if (test->stats_fp)
                TEST_SAY("==== Stats written to file %s ====\n", stats_file);
	TIMING_START(&t_run, test->name);
        test->start = t_run.ts_start;
	r = test->mainfunc(run_args->argc, run_args->argv);
	TIMING_STOP(&t_run);

        TEST_LOCK();
        test->duration = TIMING_DURATION(&t_run);
	if (r) {
                test->state = TEST_FAILED;
		TEST_SAY("\033[31m"
			 "================= Test %s FAILED ================="
			 "\033[0m\n",
                         run_args->test->name);
        } else {
                test->state = TEST_PASSED;
		TEST_SAY("\033[32m"
			 "================= Test %s PASSED ================="
			 "\033[0m\n",
                         run_args->test->name);
        }
        TEST_UNLOCK();

        if (test->stats_fp) {
                long pos = ftell(test->stats_fp);
                fclose(test->stats_fp);
                test->stats_fp = NULL;
                /* Delete file if nothing was written */
                if (pos == 0) {
#ifndef _MSC_VER
                        unlink(stats_file);
#else
                        _unlink(stats_file);
#endif
                }
        }

	return r;
}
Code Example #15
/**
 * List groups by:
 *   - List all groups, check that the groups in 'groups' are seen.
 *   - List each group in 'groups', one by one.
 *
 * Returns 'group_cnt' if all groups in 'groups' were seen by both
 * methods, else 0, or -1 on error.
 */
static int list_groups (rd_kafka_t *rk, char **groups, int group_cnt,
                        const char *desc) {
        rd_kafka_resp_err_t err = 0;
        const struct rd_kafka_group_list *grplist;
        int i, r;
        int fails = 0;
        int seen = 0;
        int seen_all = 0;
	int retries = 5;

        TEST_SAY("List groups (expect %d): %s\n", group_cnt, desc);

	/* FIXME: Wait for broker to come up. This should really be abstracted
	 *        by librdkafka. */
	do {
		if (err) {
			TEST_SAY("Retrying group list in 1s because of: %s\n",
				 rd_kafka_err2str(err));
			rd_sleep(1);
		}
		err = rd_kafka_list_groups(rk, NULL, &grplist, 5000);
	} while ((err == RD_KAFKA_RESP_ERR__TRANSPORT ||
		  err == RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS) &&
		 retries-- > 0);

        if (err) {
                TEST_SAY("Failed to list all groups: %s\n",
                         rd_kafka_err2str(err));
                return -1;
        }

        seen_all = verify_groups(grplist, groups, group_cnt);
        rd_kafka_group_list_destroy(grplist);

        for (i = 0 ; i < group_cnt ; i++) {
                err = rd_kafka_list_groups(rk, groups[i], &grplist, 5000);
                if (err) {
                        TEST_SAY("Failed to list group %s: %s\n",
                                 groups[i], rd_kafka_err2str(err));
                        fails++;
                        continue;
                }

                r = verify_groups(grplist, &groups[i], 1);
                if (r == 1)
                        seen++;
                rd_kafka_group_list_destroy(grplist);
        }


        if (seen_all != seen)
                return 0;

        return seen;
}
Code Example #16
File: 0025-timers.c Project: sfdazsdf/librdkafka
/**
 * Enable statistics with a set interval, make sure the stats callbacks are
 * called within reasonable intervals.
 */
static void do_test_stats_timer (void) {
    rd_kafka_t *rk;
    rd_kafka_conf_t *conf;
    const int exp_calls = 10;
    char errstr[512];
    struct state state;
    test_timing_t t_new;

    memset(&state, 0, sizeof(state));

    state.interval = 600*1000;

    test_conf_init(&conf, NULL, 200);

    test_conf_set(conf, "statistics.interval.ms", "600");
    rd_kafka_conf_set_stats_cb(conf, stats_cb);
    rd_kafka_conf_set_opaque(conf, &state);


    TIMING_START(&t_new, "rd_kafka_new()");
    rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
    TIMING_STOP(&t_new);
    if (!rk)
        TEST_FAIL("Failed to create instance: %s\n", errstr);

    TEST_SAY("Starting wait loop for %d expected stats_cb calls "
             "with an interval of %dms\n",
             exp_calls, state.interval/1000);


    while (state.calls < exp_calls) {
        test_timing_t t_poll;
        TIMING_START(&t_poll, "rd_kafka_poll()");
        rd_kafka_poll(rk, 100);
        TIMING_STOP(&t_poll);

        if (TIMING_DURATION(&t_poll) > 150*1000)
            TEST_WARN("rd_kafka_poll(rk,100) "
                      "took more than 50%% extra\n");
    }

    rd_kafka_destroy(rk);

    if (state.calls > exp_calls)
        TEST_SAY("Got more calls than expected: %d > %d\n",
                 state.calls, exp_calls);

    if (state.fails)
        TEST_FAIL("%d/%d intervals failed\n", state.fails, state.calls);
    else
        TEST_SAY("All %d intervals okay\n", state.calls);
}
Code Example #17
/**
 * Wait for REBALANCE ASSIGN event and perform assignment
 *
 * Va-args are \p topic_cnt tuples of the expected assignment:
 *   { const char *topic, int partition_cnt }
 */
static void await_assignment (const char *pfx, rd_kafka_t *rk,
			      rd_kafka_queue_t *queue,
			      int topic_cnt, ...) {
	rd_kafka_event_t *rkev;
	rd_kafka_topic_partition_list_t *tps;
	int i;
	va_list ap;
	int fails = 0;
	int exp_part_cnt = 0;

	TEST_SAY("%s: waiting for assignment\n", pfx);
	rkev = test_wait_event(queue, RD_KAFKA_EVENT_REBALANCE, 30000);
	if (!rkev)
		TEST_FAIL("timed out waiting for assignment");
	TEST_ASSERT(rd_kafka_event_error(rkev) ==
		    RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
		    "expected ASSIGN, got %s",
		    rd_kafka_err2str(rd_kafka_event_error(rkev)));
	tps = rd_kafka_event_topic_partition_list(rkev);

	TEST_SAY("%s: assignment:\n", pfx);
	test_print_partition_list(tps);

	va_start(ap, topic_cnt);
	for (i = 0 ; i < topic_cnt ; i++) {
		const char *topic = va_arg(ap, const char *);
		int partition_cnt = va_arg(ap, int);
		int p;
		TEST_SAY("%s: expecting %s with %d partitions\n",
			 pfx, topic, partition_cnt);
		for (p = 0 ; p < partition_cnt ; p++) {
			if (!rd_kafka_topic_partition_list_find(tps, topic, p)) {
				TEST_FAIL_LATER("%s: expected partition %s [%d] "
						"not found in assginment",
						pfx, topic, p);
				fails++;
			}
		}
		exp_part_cnt += partition_cnt;
	}
	va_end(ap);

	TEST_ASSERT(exp_part_cnt == tps->cnt,
		    "expected assignment of %d partitions, got %d",
		    exp_part_cnt, tps->cnt);

	if (fails > 0)
		TEST_FAIL("%s: assignment mismatch: see above", pfx);

	rd_kafka_assign(rk, tps);
	rd_kafka_event_destroy(rkev);
}
Code Example #18
File: 0039-event.c Project: Whissi/librdkafka
/**
 * @brief Local test: test event generation
 */
int main_0039_event (int argc, char **argv) {
        rd_kafka_t *rk;
        rd_kafka_conf_t *conf;
        rd_kafka_queue_t *eventq;
        int waitevent = 1;

        /* Set up a config with ERROR events enabled and
         * configure an invalid broker so that _TRANSPORT or ALL_BROKERS_DOWN
         * is promptly generated. */

        conf = rd_kafka_conf_new();

        rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_ERROR);
        rd_kafka_conf_set(conf, "bootstrap.servers", "0:65534", NULL, 0);

        /* Create kafka instance */
        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

        eventq = rd_kafka_queue_get_main(rk);

        while (waitevent) {
                rd_kafka_event_t *rkev;
                rkev = rd_kafka_queue_poll(eventq, 1000);
                switch (rd_kafka_event_type(rkev))
                {
                case RD_KAFKA_EVENT_ERROR:
                        TEST_SAY("Got %s%s event: %s: %s\n",
                                 rd_kafka_event_error_is_fatal(rkev) ?
                                 "FATAL " : "",
                                 rd_kafka_event_name(rkev),
                                 rd_kafka_err2name(rd_kafka_event_error(rkev)),
                                 rd_kafka_event_error_string(rkev));
                        waitevent = 0;
                        break;
                default:
                        TEST_SAY("Unhandled event: %s\n",
                                 rd_kafka_event_name(rkev));
                        break;
                }
                rd_kafka_event_destroy(rkev);
        }

        rd_kafka_queue_destroy(eventq);

        /* Destroy rdkafka instance */
        TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
        rd_kafka_destroy(rk);

        return 0;
}
Code Example #19
File: test.c Project: neurodrone/librdkafka
/**
 * Wait 'timeout' seconds for rdkafka to kill all its threads and clean up.
 */
void test_wait_exit (int timeout) {
	int r;

	while ((r = rd_kafka_thread_cnt()) && timeout-- >= 0) {
		TEST_SAY("%i thread(s) in use by librdkafka, waiting...\n", r);
		sleep(1);
	}

	TEST_SAY("%i thread(s) in use by librdkafka\n", r);

	if (r > 0) {
		/* Note: assert(0) aborts (dumping core) before TEST_FAIL()
		 * is ever reached, unless built with NDEBUG. */
		assert(0);
		TEST_FAIL("%i thread(s) still active in librdkafka", r);
	}
}
Code Example #20
File: 0025-timers.c Project: sfdazsdf/librdkafka
static int stats_cb (rd_kafka_t *rk, char *json, size_t json_len,
                     void *opaque) {
    struct state *state = opaque;
    const int64_t now = test_clock();
    /* Fake the first elapsed time since we don't really know how
     * long rd_kafka_new() takes and at what time the timer is started. */
    const int64_t elapsed = state->ts_last ?
                            now - state->ts_last : state->interval;
    const int64_t overshoot = elapsed - state->interval;
    const int wiggleroom = (state->interval * 0.2);

    TEST_SAY("Call #%d: after %"PRId64"ms, %.0f%% outside "
             "interval %"PRId64" +-%d\n",
             state->calls, elapsed / 1000,
             ((double)overshoot / state->interval) * 100.0,
             (int64_t)state->interval / 1000, wiggleroom / 1000);

    if (overshoot < -wiggleroom || overshoot > wiggleroom) {
        TEST_WARN("^ outside range\n");
        state->fails++;
    }

    state->ts_last = now;
    state->calls++;

    return 0;
}
Code Example #21
int main_0028_long_topicnames (int argc, char **argv) {
        const int msgcnt = 1000;
        uint64_t testid;
	char topic[256];
	rd_kafka_t *rk_c;

	memset(topic, 'a', sizeof(topic)-1);
	topic[sizeof(topic)-1] = '\0';

	strncpy(topic, test_mk_topic_name(topic, 1), sizeof(topic)-1);

	TEST_SAY("Using topic name of %d bytes: %s\n",
		 (int)strlen(topic), topic);

	/* Create topic */
	test_create_topic(topic, 1, 1);

	/* First try a non-verifying consumer. The consumer has been known
	 * to crash when the broker bug kicks in. */
	rk_c = test_create_consumer(topic, NULL, NULL, NULL, NULL);
	test_consumer_subscribe(rk_c, topic);
	test_consumer_poll_no_msgs("consume.nomsgs", rk_c, 0, 5000);
	test_consumer_close(rk_c);

        /* Produce messages */
        testid = test_produce_msgs_easy(topic, 0,
                                        RD_KAFKA_PARTITION_UA, msgcnt);

	/* Consume messages */
	test_consume_msgs_easy(NULL, topic, testid, msgcnt);

        return 0;
}
Code Example #22
static void assign_consume_many (char **topics, int topic_cnt, uint64_t testid){
	rd_kafka_t *rk;
	rd_kafka_topic_partition_list_t *parts;
	int i;
	test_msgver_t mv;

	TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__);

	test_conf_init(NULL, NULL, 60);
	rk = test_create_consumer(__FUNCTION__, NULL, NULL, NULL);

	parts = rd_kafka_topic_partition_list_new(topic_cnt);
	for (i = 0 ; i < topic_cnt ; i++)
		rd_kafka_topic_partition_list_add(parts, topics[i], 0)->
			offset = RD_KAFKA_OFFSET_TAIL(msgs_per_topic);

	test_consumer_assign("consume.assign", rk, parts);
	rd_kafka_topic_partition_list_destroy(parts);

	test_msgver_init(&mv, testid);
	test_consumer_poll("consume.assign", rk, testid,
			   -1, 0, msgs_per_topic * topic_cnt, &mv);

	for (i = 0 ; i < topic_cnt ; i++)
		test_msgver_verify_part("assign", &mv, TEST_MSGVER_ALL_PART,
					topics[i], 0, i * msgs_per_topic,
					msgs_per_topic);
	test_msgver_clear(&mv);

	test_consumer_close(rk);

	rd_kafka_destroy(rk);
}
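
RD_KAFKA_OFFSET_TAIL(n), used above, is a logical offset meaning "start n messages before the end of the partition". A short sketch of the logical offsets accepted by assign()/consume_start(); "mytopic" is a placeholder:

	rd_kafka_topic_partition_list_t *parts =
		rd_kafka_topic_partition_list_new(1);

	rd_kafka_topic_partition_list_add(parts, "mytopic", 0)->offset =
		RD_KAFKA_OFFSET_TAIL(100);  /* 100 messages before EOF */
	/* Alternatives: RD_KAFKA_OFFSET_BEGINNING, RD_KAFKA_OFFSET_END,
	 * RD_KAFKA_OFFSET_STORED (resume from committed offset). */

	rd_kafka_assign(rk, parts);
	rd_kafka_topic_partition_list_destroy(parts);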
Code Example #23
static void test_producer_no_connection (void) {
	rd_kafka_t *rk;
	rd_kafka_conf_t *conf;
	rd_kafka_topic_t *rkt;
	int i;
	const int partition_cnt = 2;
	int msgcnt = 0;
	test_timing_t t_destroy;

	test_conf_init(&conf, NULL, 20);

	test_conf_set(conf, "bootstrap.servers", NULL);

	rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
	rkt = test_create_topic_object(rk, __FUNCTION__,
				       "message.timeout.ms", "5000", NULL);

	test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA, 0, 100,
				 NULL, 100, 0, &msgcnt);
	for (i = 0 ; i < partition_cnt ; i++)
		test_produce_msgs_nowait(rk, rkt, 0, i,
					 0, 100, NULL, 100, 0, &msgcnt);

	rd_kafka_poll(rk, 1000);

	TEST_SAY("%d messages in queue\n", rd_kafka_outq_len(rk));

	rd_kafka_topic_destroy(rkt);

	TIMING_START(&t_destroy, "rd_kafka_destroy()");
	rd_kafka_destroy(rk);
	TIMING_STOP(&t_destroy);
}
Code Example #24
/**
 * Issue #530:
 * "Legacy Consumer. Delete hangs if done right after RdKafka::Consumer::create.
 *  But If I put a start and stop in between, there is no issue."
 */
static int legacy_consumer_early_destroy (void) {
	rd_kafka_t *rk;
	rd_kafka_topic_t *rkt;
	int pass;
	const char *topic = test_mk_topic_name(__FUNCTION__, 0);

	for (pass = 0 ; pass < 2 ; pass++) {
		TEST_SAY("%s: pass #%d\n", __FUNCTION__, pass);

		rk = test_create_handle(RD_KAFKA_CONSUMER, NULL);

		if (pass == 1) {
			/* Second pass, create a topic too. */
			rkt = rd_kafka_topic_new(rk, topic, NULL);
			TEST_ASSERT(rkt, "failed to create topic: %s",
				    rd_kafka_err2str(
					    rd_kafka_errno2err(errno)));
			rd_sleep(1);
			rd_kafka_topic_destroy(rkt);
		}

		rd_kafka_destroy(rk);
	}

	return 0;
}
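
The workaround quoted from the issue ("a start and stop in between") would look roughly like this on the second pass; partition 0 and the starting offset are placeholder choices:

	rkt = rd_kafka_topic_new(rk, topic, NULL);
	rd_kafka_consume_start(rkt, 0, RD_KAFKA_OFFSET_BEGINNING);
	rd_kafka_consume_stop(rkt, 0);
	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);  /* reportedly no longer hangs */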
Code Example #25
File: 0090-idempotence.c Project: Whissi/librdkafka
/**
 * @brief This is called prior to parsing the ProduceResponse,
 *        we use it to inject errors.
 *
 * @locality an internal rdkafka thread
 */
static rd_kafka_resp_err_t handle_ProduceResponse (rd_kafka_t *rk,
                                                   int32_t brokerid,
                                                   uint64_t msgseq,
                                                   rd_kafka_resp_err_t err) {
        rd_kafka_resp_err_t new_err = err;
        int n;

        if (err == RD_KAFKA_RESP_ERR__RETRY)
                return err; /* Skip internal retries, such as triggered by
                             * rd_kafka_broker_bufq_purge_by_toppar() */

        n = rd_atomic32_add(&state.produce_cnt, 1);

        /* Let the first N ProduceRequests fail with request timeout.
         * Do allow the first request through. */
        if (n > 1 && n <= state.initial_fail_batch_cnt) {
                if (err)
                        TEST_WARN("First %d ProduceRequests should not "
                                  "have failed, this is #%d with error %s for "
                                  "brokerid %"PRId32" and msgseq %"PRIu64"\n",
                                  state.initial_fail_batch_cnt, n,
                                  rd_kafka_err2name(err), brokerid, msgseq);
                assert(!err &&
                       *"First N ProduceRequests should not have failed");
                new_err = RD_KAFKA_RESP_ERR__TIMED_OUT;
        }

        TEST_SAY("handle_ProduceResponse(broker %"PRId32
                 ", MsgSeq %"PRId64", Error %s) -> new Error %s\n",
                 brokerid, msgseq,
                 rd_kafka_err2name(err),
                 rd_kafka_err2name(new_err));

        return new_err;
}
Code Example #26
static void rebalance_cb (rd_kafka_t *rk,
			  rd_kafka_resp_err_t err,
			  rd_kafka_topic_partition_list_t *partitions,
			  void *opaque) {
        char *memberid = rd_kafka_memberid(rk);

	TEST_SAY("%s: MemberId \"%s\": Consumer group rebalanced: %s\n",
		 rd_kafka_name(rk), memberid, rd_kafka_err2str(err));

        if (memberid)
                free(memberid);

	test_print_partition_list(partitions);

	switch (err)
	{
	case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
		assign_cnt++;
		rd_kafka_assign(rk, partitions);
		break;

	case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
		if (assign_cnt == 0)
			TEST_FAIL("asymetric rebalance_cb\n");
		assign_cnt--;
		rd_kafka_assign(rk, NULL);
		break;

	default:
		TEST_FAIL("rebalance failed: %s\n",
			  rd_kafka_err2str(err));
		break;
	}
}
Code Example #27
File: 0075-retry.c Project: Whissi/librdkafka
/**
 * @brief Set socket delay to kick in after \p after ms
 */
static void set_delay (int after, int delay) {
        TEST_SAY("Set delay to %dms (after %dms)\n", delay, after);

        mtx_lock(&ctrl.lock);
        ctrl.cmd.ts_at = test_clock() + (after*1000);
        ctrl.cmd.delay = delay;
        ctrl.cmd.ack = 0;
        cnd_broadcast(&ctrl.cnd);

        /* Wait for ack from sockem thread */
        while (!ctrl.cmd.ack) {
                TEST_SAY("Waiting for sockem control ack\n");
                cnd_timedwait_ms(&ctrl.cnd, &ctrl.lock, 1000);
        }
        mtx_unlock(&ctrl.lock);
}
Code Example #28
File: 0080-admin_ut.c Project: Whissi/librdkafka
/**
 * @brief Verify that an unclean rd_kafka_destroy() does not hang.
 */
static void do_test_unclean_destroy (rd_kafka_type_t cltype, int with_mainq) {
        rd_kafka_t *rk;
        char errstr[512];
        rd_kafka_conf_t *conf;
        rd_kafka_queue_t *q;
        rd_kafka_event_t *rkev;
        rd_kafka_DeleteTopic_t *topic;
        test_timing_t t_destroy;

        test_conf_init(&conf, NULL, 0);
        /* Remove brokers, if any, since this is a local test and we
         * rely on the controller not being found. */
        test_conf_set(conf, "bootstrap.servers", "");
        test_conf_set(conf, "socket.timeout.ms", "60000");

        rk = rd_kafka_new(cltype, conf, errstr, sizeof(errstr));
        TEST_ASSERT(rk, "kafka_new(%d): %s", cltype, errstr);

        TEST_SAY(_C_MAG "[ Test unclean destroy for %s using %s]\n", rd_kafka_name(rk),
                 with_mainq ? "mainq" : "tempq");

        if (with_mainq)
                q = rd_kafka_queue_get_main(rk);
        else
                q = rd_kafka_queue_new(rk);

        topic = rd_kafka_DeleteTopic_new("test");
        rd_kafka_DeleteTopics(rk, &topic, 1, NULL, q);
        rd_kafka_DeleteTopic_destroy(topic);

        /* We're not expecting a result yet since DeleteTopics will attempt
         * to look up the controller for socket.timeout.ms (1 minute). */
        rkev = rd_kafka_queue_poll(q, 100);
        TEST_ASSERT(!rkev, "Did not expect result: %s", rd_kafka_event_name(rkev));

        rd_kafka_queue_destroy(q);

        TEST_SAY("Giving rd_kafka_destroy() 5s to finish, "
                 "despite Admin API request being processed\n");
        test_timeout_set(5);
        TIMING_START(&t_destroy, "rd_kafka_destroy()");
        rd_kafka_destroy(rk);
        TIMING_STOP(&t_destroy);

        /* Restore timeout */
        test_timeout_set(60);
}
Code Example #29
File: 0080-admin_ut.c Project: Whissi/librdkafka
/**
 * @brief Test a mix of APIs using the same replyq.
 *
 *  - Create topics A,B
 *  - Delete topic B
 *  - Create topic C
 *  - Create extra partitions for topic D
 */
static void do_test_mix (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) {
        char *topics[] = { "topicA", "topicB", "topicC" };
        int cnt = 0;
        struct waiting {
                rd_kafka_event_type_t evtype;
                int seen;
        };
        struct waiting id1 = {RD_KAFKA_EVENT_CREATETOPICS_RESULT};
        struct waiting id2 = {RD_KAFKA_EVENT_DELETETOPICS_RESULT};
        struct waiting id3 = {RD_KAFKA_EVENT_CREATETOPICS_RESULT};
        struct waiting id4 = {RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT};

        TEST_SAY(_C_MAG "[ Mixed mode test on %s]\n", rd_kafka_name(rk));

        test_CreateTopics_simple(rk, rkqu, topics, 2, 1, &id1);
        test_DeleteTopics_simple(rk, rkqu, &topics[1], 1, &id2);
        test_CreateTopics_simple(rk, rkqu, &topics[2], 1, 1, &id3);
        test_CreatePartitions_simple(rk, rkqu, "topicD", 15, &id4);

        while (cnt < 4) {
                rd_kafka_event_t *rkev;
                struct waiting *w;

                rkev = rd_kafka_queue_poll(rkqu, -1);
                TEST_ASSERT(rkev);

                TEST_SAY("Got event %s: %s\n",
                         rd_kafka_event_name(rkev),
                         rd_kafka_event_error_string(rkev));

                w = rd_kafka_event_opaque(rkev);
                TEST_ASSERT(w);

                TEST_ASSERT(w->evtype == rd_kafka_event_type(rkev),
                            "Expected evtype %d, not %d (%s)",
                            w->evtype, rd_kafka_event_type(rkev),
                            rd_kafka_event_name(rkev));

                TEST_ASSERT(w->seen == 0, "Duplicate results");

                w->seen++;
                cnt++;

                rd_kafka_event_destroy(rkev);
        }
}
Code Example #30
File: test-1.c Project: antoniocorreia/cprojects
int test_consumer_poll (const char *what, rd_kafka_t *rk, uint64_t testid,
                        int exp_eof_cnt, int exp_msg_base, int exp_cnt) {
        int eof_cnt = 0;
        int cnt = 0;

        while (eof_cnt < exp_eof_cnt) {
                rd_kafka_message_t *rkmessage;

                rkmessage = rd_kafka_consumer_poll(rk, 10*1000);
                if (!rkmessage) /* Shouldn't take this long to get a msg */
                        TEST_FAIL("%s: consumer_poll() timeout\n", what);


                if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
                        TEST_SAY("%s [%"PRId32"] reached EOF at "
                                 "offset %"PRId64"\n",
                                 rd_kafka_topic_name(rkmessage->rkt),
                                 rkmessage->partition,
                                 rkmessage->offset);
                        eof_cnt++;

                } else if (rkmessage->err) {
                        TEST_SAY("%s [%"PRId32"] error (offset %"PRId64"): %s",
                                 rkmessage->rkt ?
                                 rd_kafka_topic_name(rkmessage->rkt) :
                                 "(no-topic)",
                                 rkmessage->partition,
                                 rkmessage->offset,
                                 rd_kafka_message_errstr(rkmessage));

                } else {
			if (test_level > 2)
				TEST_SAY("%s [%"PRId32"] "
					 "message at offset %"PRId64"\n",
					 rd_kafka_topic_name(rkmessage->rkt),
					 rkmessage->partition,
					 rkmessage->offset);
                        cnt++;
                }

                rd_kafka_message_destroy(rkmessage);
        }

        return cnt;
}