/**
 * Issue #530:
 * "Legacy Consumer. Delete hangs if done right after RdKafka::Consumer::create.
 *  But If I put a start and stop in between, there is no issue."
 */
static int legacy_consumer_early_destroy (void) {
        rd_kafka_t *rk;
        rd_kafka_topic_t *rkt;
        int pass;
        const char *topic = test_mk_topic_name(__FUNCTION__, 0);

        for (pass = 0 ; pass < 2 ; pass++) {
                TEST_SAY("%s: pass #%d\n", __FUNCTION__, pass);

                rk = test_create_handle(RD_KAFKA_CONSUMER, NULL);

                if (pass == 1) {
                        /* Second pass, create a topic too. */
                        rkt = rd_kafka_topic_new(rk, topic, NULL);
                        TEST_ASSERT(rkt, "failed to create topic: %s",
                                    rd_kafka_err2str(
                                            rd_kafka_errno2err(errno)));
                        rd_sleep(1);
                        rd_kafka_topic_destroy(rkt);
                }

                rd_kafka_destroy(rk);
        }

        return 0;
}
/**
 * Wait 'timeout' seconds for rdkafka to kill all its threads and clean up.
 */
void test_wait_exit (int timeout) {
        int r;
        time_t start = time(NULL);

        while ((r = rd_kafka_thread_cnt()) && timeout-- >= 0) {
                TEST_SAY("%i thread(s) in use by librdkafka, waiting...\n", r);
                rd_sleep(1);
        }

        TEST_SAY("%i thread(s) in use by librdkafka\n", r);

        if (r > 0) {
                TEST_FAIL("%i thread(s) still active in librdkafka", r);
        }

        timeout -= (int)(time(NULL) - start);
        if (timeout > 0) {
                TEST_SAY("Waiting %d seconds for all librdkafka memory "
                         "to be released\n", timeout);
                if (rd_kafka_wait_destroyed(timeout * 1000) == -1)
                        TEST_FAIL("Not all internal librdkafka "
                                  "objects destroyed\n");
        }
}
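For context, a minimal usage sketch (hypothetical test body, not from the original file): all client handles must be destroyed first, since test_wait_exit() only waits for librdkafka's own wind-down.

/* Sketch: destroy our last handle, then give librdkafka a few seconds
 * to terminate its background threads before checking for leaks. */
static int example_clean_exit (void) {
        rd_kafka_t *rk = test_create_handle(RD_KAFKA_PRODUCER, NULL);
        /* ... exercise the handle ... */
        rd_kafka_destroy(rk); /* release our last reference */
        test_wait_exit(5);    /* fails the test if threads remain */
        return 0;
}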
/**
 * List groups by:
 * - List all groups, check that the groups in 'groups' are seen.
 * - List each group in 'groups', one by one.
 *
 * Returns 'group_cnt' if all groups in 'groups' were seen by both
 * methods, else 0, or -1 on error.
 */
static int list_groups (rd_kafka_t *rk, char **groups, int group_cnt,
                        const char *desc) {
        rd_kafka_resp_err_t err = 0;
        const struct rd_kafka_group_list *grplist;
        int i, r;
        int fails = 0;
        int seen = 0;
        int seen_all = 0;
        int retries = 5;

        TEST_SAY("List groups (expect %d): %s\n", group_cnt, desc);

        /* FIXME: Wait for broker to come up. This should really be abstracted
         *        by librdkafka. */
        do {
                if (err) {
                        TEST_SAY("Retrying group list in 1s because of: %s\n",
                                 rd_kafka_err2str(err));
                        rd_sleep(1);
                }
                err = rd_kafka_list_groups(rk, NULL, &grplist, 5000);
        } while ((err == RD_KAFKA_RESP_ERR__TRANSPORT ||
                  err == RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS) &&
                 retries-- > 0);

        if (err) {
                TEST_SAY("Failed to list all groups: %s\n",
                         rd_kafka_err2str(err));
                return -1;
        }

        seen_all = verify_groups(grplist, groups, group_cnt);
        rd_kafka_group_list_destroy(grplist);

        for (i = 0 ; i < group_cnt ; i++) {
                err = rd_kafka_list_groups(rk, groups[i], &grplist, 5000);
                if (err) {
                        TEST_SAY("Failed to list group %s: %s\n",
                                 groups[i], rd_kafka_err2str(err));
                        fails++;
                        continue;
                }

                r = verify_groups(grplist, &groups[i], 1);
                if (r == 1)
                        seen++;
                rd_kafka_group_list_destroy(grplist);
        }

        if (seen_all != seen)
                return 0;

        return seen;
}
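verify_groups() is referenced above but not included in this excerpt. A minimal sketch of what it might look like, inferred from the call sites (hypothetical implementation; the real helper may also log details):

/* Hypothetical helper: count how many of the expected group ids in
 * 'groups' appear in the broker's group listing 'grplist'. */
static int verify_groups (const struct rd_kafka_group_list *grplist,
                          char **groups, int group_cnt) {
        int i, g;
        int seen = 0;

        for (i = 0 ; i < grplist->group_cnt ; i++)
                for (g = 0 ; g < group_cnt ; g++)
                        if (!strcmp(grplist->groups[i].group, groups[g]))
                                seen++;

        return seen;
}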
/**
 * @brief The background event callback is called automatically
 *        by librdkafka from a background thread.
 */
static void background_event_cb (rd_kafka_t *rk, rd_kafka_event_t *rkev,
                                 void *opaque) {
        mtx_lock(&last_event_lock);
        TEST_ASSERT(!last_event,
                    "Multiple events seen in background_event_cb "
                    "(existing %s, new %s)",
                    rd_kafka_event_name(last_event),
                    rd_kafka_event_name(rkev));
        last_event = rkev;
        mtx_unlock(&last_event_lock);
        cnd_broadcast(&last_event_cnd);
        rd_sleep(1);
}
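The callback hands the event off through last_event/last_event_cnd, whose declarations and waiting counterpart aren't shown here. A sketch of how the other side might look (the shared-state names are taken from the callback above; the waiter itself is hypothetical):

/* Shared state assumed by background_event_cb (sketch). */
static mtx_t last_event_lock;
static cnd_t last_event_cnd;
static rd_kafka_event_t *last_event;

/* Hypothetical waiter: block until the background thread has
 * delivered an event, then take ownership of it. */
static rd_kafka_event_t *wait_background_event (void) {
        rd_kafka_event_t *rkev;

        mtx_lock(&last_event_lock);
        while (!(rkev = last_event))
                cnd_wait(&last_event_cnd, &last_event_lock);
        last_event = NULL;
        mtx_unlock(&last_event_lock);

        return rkev; /* caller destroys the event */
}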
static void do_consume (struct _consumer *cons, int timeout_s) {
        rd_kafka_message_t *rkm;

        rkm = rd_kafka_consumer_poll(cons->rk, 100 + (timeout_s * 1000));
        if (!rkm)
                return;

        TEST_ASSERT(!rkm->err,
                    "%s consumer error: %s (last poll was %dms ago)",
                    rd_kafka_name(cons->rk),
                    rd_kafka_message_errstr(rkm),
                    (int)((test_clock() - cons->last) / 1000));

        rd_kafka_message_destroy(rkm);

        cons->cnt++;
        cons->last = test_clock();

        if (timeout_s > 0) {
                TEST_SAY("%s: simulate processing by sleeping for %ds\n",
                         rd_kafka_name(cons->rk), timeout_s);
                rd_sleep(timeout_s);
        }
}
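do_consume() relies on a struct _consumer that this excerpt does not define. A minimal layout consistent with the fields used above (the real struct may carry additional bookkeeping):

/* Per-consumer bookkeeping, inferred from the fields do_consume()
 * touches: the handle, a message counter, and the timestamp of the
 * last successful poll (test_clock(), in microseconds, hence the
 * /1000 to get milliseconds in the error message above). */
struct _consumer {
        rd_kafka_t *rk;
        int64_t last;
        int cnt;
};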
static int run_test (struct test *test, int argc, char **argv) {
        thrd_t thr;
        struct run_args *run_args = calloc(1, sizeof(*run_args));
        int wait_cnt = 0;

        run_args->test = test;
        run_args->argc = argc;
        run_args->argv = argv;

        TEST_LOCK();
        while (tests_running_cnt >= test_concurrent_max) {
                if (!(wait_cnt++ % 10))
                        TEST_SAY("Too many tests running (%d >= %d): "
                                 "postponing %s start...\n",
                                 tests_running_cnt, test_concurrent_max,
                                 test->name);
                TEST_UNLOCK();

                rd_sleep(1);

                TEST_LOCK();
        }
        tests_running_cnt++;
        test->state = TEST_RUNNING;
        TEST_UNLOCK();

        if (thrd_create(&thr, run_test_from_thread, run_args) !=
            thrd_success) {
                TEST_LOCK();
                tests_running_cnt--;
                test->state = TEST_FAILED;
                TEST_UNLOCK();

                TEST_FAIL("Failed to start thread for test %s\n",
                          test->name);
        }

        return 0;
}
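run_test_from_thread() is the thread entry point passed to thrd_create() above but is not part of this excerpt. A sketch of the shape it would need, assuming struct test carries a mainfunc pointer (that field is an assumption, not shown in the original):

/* Hypothetical thread trampoline: run the test's main function,
 * then update the shared state under TEST_LOCK. */
static int run_test_from_thread (void *arg) {
        struct run_args *run_args = arg;
        struct test *test = run_args->test;
        int r;

        r = test->mainfunc(run_args->argc, run_args->argv); /* assumed field */

        TEST_LOCK();
        test->state = r ? TEST_FAILED : TEST_PASSED;
        tests_running_cnt--;
        TEST_UNLOCK();

        free(run_args);

        return r;
}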
static void do_test_CreateTopics (const char *what,
                                  rd_kafka_t *rk, rd_kafka_queue_t *useq,
                                  int op_timeout, rd_bool_t validate_only) {
        rd_kafka_queue_t *q = useq ? useq : rd_kafka_queue_new(rk);
#define MY_NEW_TOPICS_CNT 6
        char *topics[MY_NEW_TOPICS_CNT];
        rd_kafka_NewTopic_t *new_topics[MY_NEW_TOPICS_CNT];
        rd_kafka_AdminOptions_t *options = NULL;
        rd_kafka_resp_err_t exp_topicerr[MY_NEW_TOPICS_CNT] = {0};
        rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
        /* Expected topics in metadata */
        rd_kafka_metadata_topic_t exp_mdtopics[MY_NEW_TOPICS_CNT] = {{0}};
        int exp_mdtopic_cnt = 0;
        /* Not expected topics in metadata */
        rd_kafka_metadata_topic_t exp_not_mdtopics[MY_NEW_TOPICS_CNT] = {{0}};
        int exp_not_mdtopic_cnt = 0;
        int i;
        char errstr[512];
        const char *errstr2;
        rd_kafka_resp_err_t err;
        test_timing_t timing;
        rd_kafka_event_t *rkev;
        const rd_kafka_CreateTopics_result_t *res;
        const rd_kafka_topic_result_t **restopics;
        size_t restopic_cnt;
        int metadata_tmout;
        int num_replicas = (int)avail_broker_cnt;
        int32_t *replicas;

        /* Set up replicas */
        replicas = rd_alloca(sizeof(*replicas) * num_replicas);
        for (i = 0 ; i < num_replicas ; i++)
                replicas[i] = avail_brokers[i];

        TEST_SAY(_C_MAG "[ %s CreateTopics with %s, "
                 "op_timeout %d, validate_only %d ]\n",
                 rd_kafka_name(rk), what, op_timeout, validate_only);

        /**
         * Construct NewTopic array with different properties for
         * different partitions.
         */
        for (i = 0 ; i < MY_NEW_TOPICS_CNT ; i++) {
                char *topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
                int num_parts = i * 7 + 1;
                int set_config = (i & 1);
                int add_invalid_config = (i == 1);
                int set_replicas = !(i % 3);
                rd_kafka_resp_err_t this_exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;

                topics[i] = topic;
                new_topics[i] = rd_kafka_NewTopic_new(topic, num_parts,
                                                      set_replicas ? -1 :
                                                      num_replicas,
                                                      NULL, 0);

                if (set_config) {
                        /*
                         * Add various configuration properties
                         */
                        err = rd_kafka_NewTopic_set_config(
                                new_topics[i], "compression.type", "lz4");
                        TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));

                        err = rd_kafka_NewTopic_set_config(
                                new_topics[i], "delete.retention.ms", "900");
                        TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
                }

                if (add_invalid_config) {
                        /* Add invalid config property */
                        err = rd_kafka_NewTopic_set_config(
                                new_topics[i], "dummy.doesntexist",
                                "broker is verifying this");
                        TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
                        this_exp_err = RD_KAFKA_RESP_ERR_INVALID_CONFIG;
                }

                TEST_SAY("Expected result for topic #%d: %s "
                         "(set_config=%d, add_invalid_config=%d, "
                         "set_replicas=%d)\n",
                         i, rd_kafka_err2name(this_exp_err),
                         set_config, add_invalid_config, set_replicas);

                if (set_replicas) {
                        int32_t p;

                        /*
                         * Set valid replica assignments
                         */
                        for (p = 0 ; p < num_parts ; p++) {
                                err = rd_kafka_NewTopic_set_replica_assignment(
                                        new_topics[i], p,
                                        replicas, num_replicas,
                                        errstr, sizeof(errstr));
                                TEST_ASSERT(!err, "%s", errstr);
                        }
                }

                if (this_exp_err || validate_only) {
                        exp_topicerr[i] = this_exp_err;
                        exp_not_mdtopics[exp_not_mdtopic_cnt++].topic = topic;
                } else {
                        exp_mdtopics[exp_mdtopic_cnt].topic = topic;
                        exp_mdtopics[exp_mdtopic_cnt].partition_cnt =
                                num_parts;
                        exp_mdtopic_cnt++;
                }
        }

        if (op_timeout != -1 || validate_only) {
                options = rd_kafka_AdminOptions_new(
                        rk, RD_KAFKA_ADMIN_OP_CREATETOPICS);

                if (op_timeout != -1) {
                        err = rd_kafka_AdminOptions_set_operation_timeout(
                                options, op_timeout, errstr, sizeof(errstr));
                        TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
                }

                if (validate_only) {
                        err = rd_kafka_AdminOptions_set_validate_only(
                                options, validate_only,
                                errstr, sizeof(errstr));
                        TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
                }
        }

        TIMING_START(&timing, "CreateTopics");
        TEST_SAY("Call CreateTopics\n");
        rd_kafka_CreateTopics(rk, new_topics, MY_NEW_TOPICS_CNT, options, q);
        TIMING_ASSERT_LATER(&timing, 0, 50);

        /* Poll result queue for CreateTopics result.
         * Print but otherwise ignore other event types
         * (typically generic Error events). */
        TIMING_START(&timing, "CreateTopics.queue_poll");
        do {
                rkev = rd_kafka_queue_poll(q, tmout_multip(20*1000));
                TEST_SAY("CreateTopics: got %s in %.3fms\n",
                         rd_kafka_event_name(rkev),
                         TIMING_DURATION(&timing) / 1000.0f);
                if (rd_kafka_event_error(rkev))
                        TEST_SAY("%s: %s\n",
                                 rd_kafka_event_name(rkev),
                                 rd_kafka_event_error_string(rkev));
        } while (rd_kafka_event_type(rkev) !=
                 RD_KAFKA_EVENT_CREATETOPICS_RESULT);

        /* Convert event to proper result */
        res = rd_kafka_event_CreateTopics_result(rkev);
        TEST_ASSERT(res, "expected CreateTopics_result, not %s",
                    rd_kafka_event_name(rkev));

        /* Expecting error */
        err = rd_kafka_event_error(rkev);
        errstr2 = rd_kafka_event_error_string(rkev);
        TEST_ASSERT(err == exp_err,
                    "expected CreateTopics to return %s, not %s (%s)",
                    rd_kafka_err2str(exp_err),
                    rd_kafka_err2str(err),
                    err ? errstr2 : "n/a");

        TEST_SAY("CreateTopics: returned %s (%s)\n",
                 rd_kafka_err2str(err), err ? errstr2 : "n/a");

        /* Extract topics */
        restopics = rd_kafka_CreateTopics_result_topics(res, &restopic_cnt);

        /* Scan topics for proper fields and expected failures. */
        for (i = 0 ; i < (int)restopic_cnt ; i++) {
                const rd_kafka_topic_result_t *terr = restopics[i];

                /* Verify that topic order matches our request. */
                if (strcmp(rd_kafka_topic_result_name(terr), topics[i]))
                        TEST_FAIL_LATER("Topic result order mismatch at #%d: "
                                        "expected %s, got %s",
                                        i, topics[i],
                                        rd_kafka_topic_result_name(terr));

                TEST_SAY("CreateTopics result: #%d: %s: %s: %s\n",
                         i,
                         rd_kafka_topic_result_name(terr),
                         rd_kafka_err2name(rd_kafka_topic_result_error(terr)),
                         rd_kafka_topic_result_error_string(terr));

                if (rd_kafka_topic_result_error(terr) != exp_topicerr[i])
                        TEST_FAIL_LATER(
                                "Expected %s, not %d: %s",
                                rd_kafka_err2name(exp_topicerr[i]),
                                rd_kafka_topic_result_error(terr),
                                rd_kafka_err2name(
                                        rd_kafka_topic_result_error(terr)));
        }

        /**
         * Verify that the expected topics are created and the non-expected
         * are not. Allow it some time to propagate.
         */
        if (validate_only) {
                /* No topics should have been created, give it some time
                 * before checking. */
                rd_sleep(2);
                metadata_tmout = 5 * 1000;
        } else {
                if (op_timeout > 0)
                        metadata_tmout = op_timeout + 1000;
                else
                        metadata_tmout = 10 * 1000;
        }

        test_wait_metadata_update(rk,
                                  exp_mdtopics, exp_mdtopic_cnt,
                                  exp_not_mdtopics, exp_not_mdtopic_cnt,
                                  metadata_tmout);

        rd_kafka_event_destroy(rkev);

        for (i = 0 ; i < MY_NEW_TOPICS_CNT ; i++) {
                rd_kafka_NewTopic_destroy(new_topics[i]);
                rd_free(topics[i]);
        }

        if (options)
                rd_kafka_AdminOptions_destroy(options);

        if (!useq)
                rd_kafka_queue_destroy(q);
#undef MY_NEW_TOPICS_CNT
}
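For reference, a helper like this would typically be driven several times with different option combinations. A sketch (the labels and the 'mainq' queue handle are illustrative, not taken from the original; the full test file may use different combinations):

/* Sketch: exercise CreateTopics with and without an operation
 * timeout, in validate_only (dry-run) mode, on a temporary queue
 * (useq = NULL) as well as a caller-provided queue. */
do_test_CreateTopics("temp queue, op timeout 0", rk, NULL, 0, rd_false);
do_test_CreateTopics("temp queue, op timeout 15000", rk, NULL, 15000,
                     rd_false);
do_test_CreateTopics("temp queue, validate_only", rk, NULL, 0, rd_true);
do_test_CreateTopics("main queue, op timeout 0", rk, mainq, 0, rd_false);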
int main(int argc, char **argv) {
        int r = 0;
        const char *tests_to_run = NULL; /* all */
        int i;
        test_timing_t t_all;

#ifndef _MSC_VER
        tests_to_run = getenv("TESTS");
#endif

        for (i = 1 ; i < argc ; i++) {
                if (!strcmp(argv[i], "-p"))
                        tests_run_in_parallel = 1;
                else if (i == 1)
                        tests_to_run = argv[i];
                else {
                        printf("Unknown option: %s\n"
                               "\n"
                               "Usage: %s [options] [<test-match-substr>]\n"
                               "Options:\n"
                               "  -p     Run tests in parallel\n"
                               "\n",
                               argv[0], argv[i]);
                        exit(1);
                }
        }

        test_curr = "<MAIN>";
        test_start = test_clock();

        TEST_SAY("Tests to run: %s\n", tests_to_run ? tests_to_run : "all");

#define RUN_TEST(NAME) do {                                             \
        extern int main_ ## NAME (int, char **);                        \
        if (!tests_to_run || strstr(# NAME, tests_to_run)) {            \
                r |= run_test(# NAME, main_ ## NAME, argc, argv);       \
        } else {                                                        \
                TEST_SAY("================= Skipping test %s "          \
                         "================\n", # NAME);                 \
        }                                                               \
        } while (0)

        TIMING_START(&t_all, "ALL-TESTS");
        RUN_TEST(0001_multiobj);
        RUN_TEST(0002_unkpart);
        RUN_TEST(0003_msgmaxsize);
        RUN_TEST(0004_conf);
        RUN_TEST(0005_order);
        RUN_TEST(0006_symbols);
        RUN_TEST(0007_autotopic);
        RUN_TEST(0008_reqacks);
        RUN_TEST(0011_produce_batch);
        RUN_TEST(0012_produce_consume);
        RUN_TEST(0013_null_msgs);
        RUN_TEST(0014_reconsume_191);
        RUN_TEST(0015_offsets_seek);
        RUN_TEST(0017_compression);
        RUN_TEST(0018_cgrp_term);

        if (tests_run_in_parallel) {
                while (tests_running_cnt > 0)
                        rd_sleep(1);
        }

        TIMING_STOP(&t_all);

        /* Wait for everything to be cleaned up since broker destroys are
         * handled in their own threads. */
        test_wait_exit(tests_run_in_parallel ? 10 : 5);

        /* If we haven't failed at this point then
         * there were no threads leaked */
        TEST_SAY("\n============== ALL TESTS PASSED ==============\n");

        return r;
}
int main(int argc, char **argv) {
        const char *tests_to_run = NULL; /* all */
        int test_flags = 0;
        int i, r;
        test_timing_t t_all;

        mtx_init(&test_mtx, mtx_plain);

        test_init();

#ifndef _MSC_VER
        tests_to_run = getenv("TESTS");
#endif

        for (i = 1 ; i < argc ; i++) {
                if (!strncmp(argv[i], "-p", 2) && strlen(argv[i]) > 2)
                        test_concurrent_max = strtod(argv[i]+2, NULL);
                else if (!strcmp(argv[i], "-l"))
                        test_flags |= TEST_F_LOCAL;
                else if (!strcmp(argv[i], "-a"))
                        test_assert_on_fail = 1;
                else if (*argv[i] != '-')
                        tests_to_run = argv[i];
                else {
                        printf("Unknown option: %s\n"
                               "\n"
                               "Usage: %s [options] [<test-match-substr>]\n"
                               "Options:\n"
                               "  -p<N>  Run N tests in parallel\n"
                               "  -l     Only run local tests (no broker needed)\n"
                               "  -a     Assert on failures\n"
                               "\n",
                               argv[0], argv[i]);
                        exit(1);
                }
        }

        test_curr = &tests[0];
        test_curr->state = TEST_PASSED;
        test_curr->start = test_clock();

        TEST_SAY("Tests to run: %s\n", tests_to_run ? tests_to_run : "all");
        TEST_SAY("Test filter: %s\n",
                 (test_flags & TEST_F_LOCAL) ?
                 "local tests only" : "no filter");
        TEST_SAY("Action on test failure: %s\n",
                 test_assert_on_fail ?
                 "assert crash" : "continue other tests");

        test_timeout_set(20);

        TIMING_START(&t_all, "ALL-TESTS");

        run_tests(tests_to_run, test_flags, argc, argv);

        TEST_LOCK();
        while (tests_running_cnt > 0) {
                struct test *test;

                TEST_SAY("%d test(s) running:", tests_running_cnt);
                for (test = tests ; test->name ; test++)
                        if (test->state == TEST_RUNNING)
                                TEST_SAY0(" %s", test->name);
                TEST_SAY0("\n");
                TEST_UNLOCK();

                rd_sleep(1);
                TEST_LOCK();
        }

        TIMING_STOP(&t_all);

        test_curr = &tests[0];
        test_curr->duration = test_clock() - test_curr->start;

        TEST_UNLOCK();

        /* Wait for everything to be cleaned up since broker destroys are
         * handled in their own threads. */
        test_wait_exit(10);

        r = test_summary(1/*lock*/) ? 1 : 0;

        /* If we haven't failed at this point then
         * there were no threads leaked */
        if (r == 0)
                TEST_SAY("\n============== ALL TESTS PASSED "
                         "==============\n");

        return r;
}
int main_0088_produce_metadata_timeout (int argc, char **argv) {
        int64_t testid;
        rd_kafka_t *rk;
        rd_kafka_topic_t *rkt;
        const char *topic =
                test_mk_topic_name("0088_produce_metadata_timeout", 1);
        int msgcnt = 0;
        rd_kafka_conf_t *conf;

        testid = test_id_generate();

        /* Create topic with single partition, for simplicity. */
        test_create_topic(topic, 1, 1);

        test_conf_init(&conf, NULL, 15*60*2); // msgcnt * 2);
        rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
        test_conf_set(conf, "metadata.max.age.ms", "10000");
        test_conf_set(conf, "topic.metadata.refresh.interval.ms", "-1");
        test_conf_set(conf, "linger.ms", "5000");
        test_conf_set(conf, "batch.num.messages", "5");

        test_socket_enable(conf);
        test_curr->connect_cb = connect_cb;
        test_curr->is_fatal_cb = is_fatal_cb;

        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

        rkt = rd_kafka_topic_new(rk, topic, NULL);

        /* Produce first set of messages and wait for delivery */
        test_produce_msgs_nowait(rk, rkt, testid, RD_KAFKA_PARTITION_UA,
                                 msgcnt, 20, NULL, 0, &msgcnt);
        while (msg_dr_cnt < 5)
                rd_kafka_poll(rk, 1000);

        TEST_SAY(_C_YEL "Disconnecting sockets and "
                 "refusing future connections\n");
        rd_atomic32_set(&refuse_connect, 1);
        test_socket_close_all(test_curr, 1/*reinit*/);

        /* Wait for metadata timeout */
        TEST_SAY("Waiting for metadata timeout\n");
        rd_sleep(10+5);

        /* These messages will be put on the UA queue */
        test_produce_msgs_nowait(rk, rkt, testid, RD_KAFKA_PARTITION_UA,
                                 msgcnt, 20, NULL, 0, &msgcnt);

        /* Restore the connection(s) when metadata has timed out. */
        TEST_SAY(_C_YEL "Allowing connections\n");
        rd_atomic32_set(&refuse_connect, 0);

        rd_sleep(3);
        test_produce_msgs_nowait(rk, rkt, testid, RD_KAFKA_PARTITION_UA,
                                 msgcnt, 20, NULL, 0, &msgcnt);

        test_flush(rk, 2*5*1000); /* linger.ms * 2 */

        TEST_ASSERT(msg_dr_cnt == msgcnt,
                    "expected %d, got %d", msgcnt, msg_dr_cnt);
        TEST_ASSERT(msg_dr_fail_cnt == 0,
                    "expected %d dr failures, got %d",
                    0, msg_dr_fail_cnt);

        rd_kafka_topic_destroy(rkt);
        rd_kafka_destroy(rk);

        return 0;
}
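The sockem hooks connect_cb and is_fatal_cb installed above are not part of this excerpt. A sketch of what they might look like, inferred from the refuse_connect flag the test flips (hypothetical; the real callbacks may whitelist a different set of errors):

/* Hypothetical sockem hooks: refuse new connections while
 * 'refuse_connect' is set, and don't treat the resulting
 * transport/timeout errors as fatal to the test. */
static rd_atomic32_t refuse_connect;

static int connect_cb (struct test *test, sockem_t *skm, const char *id) {
        if (rd_atomic32_get(&refuse_connect) > 0)
                return -1; /* refuse the connection */
        return 0;
}

static int is_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err,
                        const char *reason) {
        /* Connectivity errors are expected while the "network" is down. */
        return err != RD_KAFKA_RESP_ERR__TRANSPORT &&
                err != RD_KAFKA_RESP_ERR__MSG_TIMED_OUT;
}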
/* @remark This test will fail if auto topic creation is enabled on the broker
 *         since the client will issue a topic-creating metadata request to
 *         find a new leader when the topic is removed.
 *
 * To run with trivup, do:
 *   ./interactive_broker_version.py .. -conf '{"auto_create_topics":"false"}' ..
 *   TESTS=0045 ./run-test.sh -k ./merged
 */
static void do_test_topic_remove (void) {
        char *topic_f = rd_strdup(test_mk_topic_name("topic_f", 1));
        char *topic_g = rd_strdup(test_mk_topic_name("topic_g", 1));
        int parts_f = 5;
        int parts_g = 9;
        rd_kafka_t *rk;
        rd_kafka_conf_t *conf;
        rd_kafka_queue_t *queue;
        rd_kafka_topic_partition_list_t *topics;
        rd_kafka_resp_err_t err;

        /**
         * Topic removal test:
         * - Create topic f & g
         * - Subscribe to f & g
         * - Verify f & g assignment
         * - Remove topic f
         * - Verify g assignment
         * - Remove topic g
         * - Verify empty assignment
         */

        TEST_SAY("Topic removal testing\n");
        test_conf_init(&conf, NULL, 60);

        /* Decrease metadata interval to speed up topic change discovery. */
        test_conf_set(conf, "metadata.max.age.ms", "5000");

        rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE);
        rk = test_create_consumer(test_str_id_generate_tmp(),
                                  NULL, conf, NULL, NULL);
        queue = rd_kafka_queue_get_consumer(rk);

        TEST_SAY("Topic removal: creating topic %s (subscribed)\n", topic_f);
        test_create_topic(topic_f, parts_f, 1);

        TEST_SAY("Topic removal: creating topic %s (subscribed)\n", topic_g);
        test_create_topic(topic_g, parts_g, 1);

        rd_sleep(1); // FIXME: do check&wait loop instead

        TEST_SAY("Topic removal: Subscribing to %s & %s\n",
                 topic_f, topic_g);
        topics = rd_kafka_topic_partition_list_new(2);
        rd_kafka_topic_partition_list_add(topics, topic_f,
                                          RD_KAFKA_PARTITION_UA);
        rd_kafka_topic_partition_list_add(topics, topic_g,
                                          RD_KAFKA_PARTITION_UA);
        err = rd_kafka_subscribe(rk, topics);
        TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NO_ERROR,
                    "%s", rd_kafka_err2str(err));
        rd_kafka_topic_partition_list_destroy(topics);

        await_assignment("Topic removal: both topics exist", rk, queue, 2,
                         topic_f, parts_f,
                         topic_g, parts_g);

        TEST_SAY("Topic removal: removing %s\n", topic_f);
        test_kafka_topics("--delete --topic %s", topic_f);

        await_revoke("Topic removal: rebalance after topic removal",
                     rk, queue);

        await_assignment("Topic removal: one topic exists", rk, queue, 1,
                         topic_g, parts_g);

        TEST_SAY("Topic removal: removing %s\n", topic_g);
        test_kafka_topics("--delete --topic %s", topic_g);

        await_revoke("Topic removal: rebalance after 2nd topic removal",
                     rk, queue);

        /* Should not see another rebalance since all topics now removed */
        await_no_rebalance("Topic removal: empty", rk, queue, 10000);

        test_consumer_close(rk);
        rd_kafka_queue_destroy(queue);
        rd_kafka_destroy(rk);

        rd_free(topic_f);
        rd_free(topic_g);
}
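await_assignment(), await_revoke() and await_no_rebalance() are helpers defined elsewhere in the test file. As a sketch, the "no rebalance" check might look like this (hypothetical implementation: poll the redirected consumer queue for a quiet period and fail on any rebalance event):

/* Hypothetical helper: poll the consumer's event queue for
 * 'timeout_ms' and fail if a rebalance event shows up. */
static void await_no_rebalance (const char *pfx, rd_kafka_t *rk,
                                rd_kafka_queue_t *queue, int timeout_ms) {
        rd_kafka_event_t *rkev;

        TEST_SAY("%s: waiting %dms to verify no rebalance is triggered\n",
                 pfx, timeout_ms);
        rkev = rd_kafka_queue_poll(queue, timeout_ms);
        if (!rkev)
                return; /* quiet period: no rebalance, as expected */

        TEST_ASSERT(rd_kafka_event_type(rkev) != RD_KAFKA_EVENT_REBALANCE,
                    "%s: unexpected rebalance event: %s",
                    pfx, rd_kafka_err2str(rd_kafka_event_error(rkev)));
        rd_kafka_event_destroy(rkev);
}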
static void do_test_regex (void) {
        char *base_topic = rd_strdup(test_mk_topic_name("topic", 1));
        char *topic_b = rd_strdup(tsprintf("%s_b", base_topic));
        char *topic_c = rd_strdup(tsprintf("%s_c", base_topic));
        char *topic_d = rd_strdup(tsprintf("%s_d", base_topic));
        char *topic_e = rd_strdup(tsprintf("%s_e", base_topic));
        rd_kafka_t *rk;
        rd_kafka_conf_t *conf;
        rd_kafka_queue_t *queue;

        /**
         * Regex test:
         * - Create topic b
         * - Subscribe to b & d & e
         * - Verify b assignment
         * - Create topic c
         * - Verify no rebalance
         * - Create topic d
         * - Verify b & d assignment
         */

        TEST_SAY("Regex testing\n");
        test_conf_init(&conf, NULL, 60);

        /* Decrease metadata interval to speed up topic change discovery. */
        test_conf_set(conf, "metadata.max.age.ms", "5000");

        rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE);
        rk = test_create_consumer(test_str_id_generate_tmp(),
                                  NULL, conf, NULL, NULL);
        queue = rd_kafka_queue_get_consumer(rk);

        TEST_SAY("Regex: creating topic %s (subscribed)\n", topic_b);
        test_create_topic(topic_b, 2, 1);
        rd_sleep(1); // FIXME: do check&wait loop instead

        TEST_SAY("Regex: Subscribing to %s & %s & %s\n",
                 topic_b, topic_d, topic_e);
        test_consumer_subscribe(rk, tsprintf("^%s_[bde]$", base_topic));

        await_assignment("Regex: just one topic exists", rk, queue, 1,
                         topic_b, 2);

        TEST_SAY("Regex: creating topic %s (not subscribed)\n", topic_c);
        test_create_topic(topic_c, 4, 1);

        /* Should not see a rebalance since the new topic is not matched
         * by the regex subscription. */
        await_no_rebalance("Regex: empty", rk, queue, 10000);

        TEST_SAY("Regex: creating topic %s (subscribed)\n", topic_d);
        test_create_topic(topic_d, 1, 1);

        await_revoke("Regex: rebalance after topic creation", rk, queue);

        await_assignment("Regex: two topics exist", rk, queue, 2,
                         topic_b, 2,
                         topic_d, 1);

        test_consumer_close(rk);
        rd_kafka_queue_destroy(queue);
        rd_kafka_destroy(rk);

        rd_free(base_topic);
        rd_free(topic_b);
        rd_free(topic_c);
        rd_free(topic_d);
        rd_free(topic_e);
}
int main_0049_consume_conn_close (int argc, char **argv) {
        rd_kafka_t *rk;
        const char *topic = test_mk_topic_name("0049_consume_conn_close", 1);
        uint64_t testid;
        int msgcnt = test_on_ci ? 1000 : 10000;
        test_msgver_t mv;
        rd_kafka_conf_t *conf;
        rd_kafka_topic_conf_t *tconf;
        rd_kafka_topic_partition_list_t *assignment;
        rd_kafka_resp_err_t err;

        if (!test_conf_match(NULL, "sasl.mechanisms", "GSSAPI")) {
                TEST_SKIP("KNOWN ISSUE: ApiVersionRequest+SaslHandshake "
                          "will not play well with sudden disconnects\n");
                return 0;
        }

        test_conf_init(&conf, &tconf, 60);

        /* Want an even number so it is divisible by two without surprises */
        msgcnt = (msgcnt / (int)test_timeout_multiplier) & ~1;

        testid = test_id_generate();
        test_produce_msgs_easy(topic, testid, RD_KAFKA_PARTITION_UA, msgcnt);

        test_socket_enable(conf);
        test_curr->connect_cb = connect_cb;
        test_curr->is_fatal_cb = is_fatal_cb;

        test_topic_conf_set(tconf, "auto.offset.reset", "smallest");

        rk = test_create_consumer(topic, NULL, conf, tconf);

        test_consumer_subscribe(rk, topic);

        test_msgver_init(&mv, testid);

        test_consumer_poll("consume.up", rk, testid, -1, 0, msgcnt/2, &mv);

        err = rd_kafka_assignment(rk, &assignment);
        TEST_ASSERT(!err, "assignment() failed: %s", rd_kafka_err2str(err));
        TEST_ASSERT(assignment->cnt > 0, "empty assignment");

        TEST_SAY("Bringing down the network\n");

        TEST_LOCK();
        simulate_network_down = 1;
        TEST_UNLOCK();
        test_socket_close_all(test_curr, 1/*reinit*/);

        TEST_SAY("Waiting for session timeout to expire (6s), "
                 "and then some\n");

        /* Commit an offset, which should fail, to trigger the offset commit
         * callback fallback (CONSUMER_ERR) */
        assignment->elems[0].offset = 123456789;
        TEST_SAY("Committing offsets while down, should fail eventually\n");
        err = rd_kafka_commit(rk, assignment, 1/*async*/);
        TEST_ASSERT(!err, "async commit failed: %s", rd_kafka_err2str(err));
        rd_kafka_topic_partition_list_destroy(assignment);

        rd_sleep(10);

        TEST_SAY("Bringing network back up\n");
        TEST_LOCK();
        simulate_network_down = 0;
        TEST_UNLOCK();

        TEST_SAY("Continuing to consume..\n");
        test_consumer_poll("consume.up2", rk, testid, -1, msgcnt/2, msgcnt/2,
                           &mv);

        test_msgver_verify("consume", &mv, TEST_MSGVER_ORDER|TEST_MSGVER_DUP,
                           0, msgcnt);

        test_msgver_clear(&mv);

        test_consumer_close(rk);
        rd_kafka_destroy(rk);

        return 0;
}
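The connect_cb and simulate_network_down flag used above come from elsewhere in this test file. A sketch of how the sockem gate might be wired, consistent with the TEST_LOCK-protected flag (hypothetical implementation):

/* Hypothetical sockem connect gate: refuse connections while the
 * test simulates a network outage. */
static int simulate_network_down = 0;

static int connect_cb (struct test *test, sockem_t *skm, const char *id) {
        int r;

        TEST_LOCK();
        r = simulate_network_down ? -1 : 0; /* -1 refuses the connect */
        TEST_UNLOCK();

        return r;
}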
int main_0002_unkpart (int argc, char **argv) {
        int partition = 99; /* non-existent */
        int r;
        rd_kafka_t *rk;
        rd_kafka_topic_t *rkt;
        rd_kafka_conf_t *conf;
        rd_kafka_topic_conf_t *topic_conf;
        char msg[128];
        int msgcnt = 10;
        int i;
        int fails = 0;
        const struct rd_kafka_metadata *metadata;

        test_conf_init(&conf, &topic_conf, 10);

        /* Set delivery report callback */
        rd_kafka_conf_set_dr_cb(conf, dr_cb);

        /* Create kafka instance */
        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

        rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0002", 0),
                                 topic_conf);
        if (!rkt)
                TEST_FAIL("Failed to create topic: %s\n",
                          rd_strerror(errno));

        /* Request metadata so that we know the cluster is up before producing
         * messages, otherwise erroneous partitions will not fail immediately.*/
        if ((r = rd_kafka_metadata(rk, 0, rkt, &metadata,
                                   tmout_multip(15000))) !=
            RD_KAFKA_RESP_ERR_NO_ERROR)
                TEST_FAIL("Failed to acquire metadata: %s\n",
                          rd_kafka_err2str(r));

        rd_kafka_metadata_destroy(metadata);

        /* Produce messages */
        for (i = 0 ; i < msgcnt ; i++) {
                int *msgidp = malloc(sizeof(*msgidp));
                *msgidp = i;
                rd_snprintf(msg, sizeof(msg), "%s test message #%i",
                            argv[0], i);
                r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
                                     msg, strlen(msg), NULL, 0, msgidp);
                if (r == -1) {
                        if (errno == ESRCH)
                                TEST_SAY("Failed to produce message #%i: "
                                         "unknown partition: good!\n", i);
                        else
                                TEST_FAIL("Failed to produce message "
                                          "#%i: %s\n",
                                          i, rd_kafka_err2str(
                                                  rd_kafka_errno2err(errno)));
                        free(msgidp);
                } else {
                        if (i > 5) {
                                fails++;
                                TEST_SAY("Message #%i produced: "
                                         "should've failed\n", i);
                        }
                        msgs_wait |= (1 << i);
                }

                /* After half the messages: sleep to allow the metadata
                 * to be fetched from broker and update the actual partition
                 * count: this will make subsequent produce() calls fail
                 * immediately. */
                if (i == 5)
                        rd_sleep(2);
        }

        /* Wait for messages to time out */
        rd_kafka_flush(rk, -1);

        if (msgs_wait != 0)
                TEST_FAIL("Still waiting for messages: 0x%x\n", msgs_wait);

        if (fails > 0)
                TEST_FAIL("See previous error(s)\n");

        /* Destroy topic */
        rd_kafka_topic_destroy(rkt);

        /* Destroy rdkafka instance */
        TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
        rd_kafka_destroy(rk);

        return 0;
}
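The dr_cb delivery report callback and the msgs_wait bitmask are defined elsewhere in the test. A sketch of how they might fit together, consistent with the bit-twiddling above (hypothetical; the real callback may also verify the specific error code):

/* Hypothetical delivery report callback: each in-flight message sets
 * its bit in 'msgs_wait' when produced; the report clears it again. */
static int msgs_wait = 0; /* bitmask of messages awaiting delivery report */

static void dr_cb (rd_kafka_t *rk, void *payload, size_t len,
                   rd_kafka_resp_err_t err, void *opaque, void *msg_opaque) {
        int msgid = *(int *)msg_opaque;

        free(msg_opaque);

        if (!(msgs_wait & (1 << msgid)))
                TEST_FAIL("Unwanted delivery report for message #%i\n",
                          msgid);

        TEST_SAY("Delivery report for message #%i: %s\n",
                 msgid, rd_kafka_err2str(err));
        msgs_wait &= ~(1 << msgid);
}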
int main_0019_list_groups (int argc, char **argv) {
        const char *topic = test_mk_topic_name(__FUNCTION__, 1);
#define _CONS_CNT 2
        char *groups[_CONS_CNT];
        rd_kafka_t *rk, *rk_c[_CONS_CNT];
        rd_kafka_topic_partition_list_t *topics;
        rd_kafka_resp_err_t err;
        test_timing_t t_grps;
        int i;
        int groups_seen;
        rd_kafka_topic_t *rkt;

        /* Handle for group listings */
        rk = test_create_producer();

        /* Produce messages so that topic is auto created */
        rkt = test_create_topic_object(rk, topic, NULL);
        test_produce_msgs(rk, rkt, 0, 0, 0, 10, NULL, 64);
        rd_kafka_topic_destroy(rkt);

        /* Query groups before creation, should not list our groups. */
        groups_seen = list_groups(rk, NULL, 0, "should be none");
        if (groups_seen != 0)
                TEST_FAIL("Saw %d groups when there wasn't "
                          "supposed to be any\n", groups_seen);

        /* Fill in topic subscription set */
        topics = rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add(topics, topic, -1);

        /* Create consumers and start subscription */
        for (i = 0 ; i < _CONS_CNT ; i++) {
                groups[i] = malloc(32);
                test_str_id_generate(groups[i], 32);
                rk_c[i] = test_create_consumer(groups[i], NULL, NULL, NULL);

                err = rd_kafka_poll_set_consumer(rk_c[i]);
                if (err)
                        TEST_FAIL("poll_set_consumer: %s\n",
                                  rd_kafka_err2str(err));

                err = rd_kafka_subscribe(rk_c[i], topics);
                if (err)
                        TEST_FAIL("subscribe: %s\n", rd_kafka_err2str(err));
        }

        rd_kafka_topic_partition_list_destroy(topics);

        TIMING_START(&t_grps, "WAIT.GROUPS");

        /* Query groups again until both groups are seen. */
        while (1) {
                int groups_seen = list_groups(rk, (char **)groups,
                                              _CONS_CNT,
                                              "should see my groups");
                if (groups_seen == _CONS_CNT)
                        break;
                rd_sleep(1);
        }

        TIMING_STOP(&t_grps);

        TEST_SAY("Closing remaining consumers\n");
        for (i = 0 ; i < _CONS_CNT ; i++) {
                test_timing_t t_close;

                if (!rk_c[i])
                        continue;

                TEST_SAY("Closing %s\n", rd_kafka_name(rk_c[i]));
                TIMING_START(&t_close, "CONSUMER.CLOSE");
                err = rd_kafka_consumer_close(rk_c[i]);
                TIMING_STOP(&t_close);
                if (err)
                        TEST_FAIL("consumer_close failed: %s\n",
                                  rd_kafka_err2str(err));

                rd_kafka_destroy(rk_c[i]);
                rk_c[i] = NULL;

                free(groups[i]);
        }

        rd_kafka_destroy(rk);

        return 0;
}
static void do_test_consumer_lag (void) {
  const int msgcnt = 10;
  std::string errstr;
  RdKafka::ErrorCode err;

  topic = Test::mk_topic_name("0061-consumer_lag", 1);

  test_produce_msgs_easy(topic.c_str(), 0, 0, msgcnt);

  /*
   * Create consumer
   */
  RdKafka::Conf *conf;
  Test::conf_init(&conf, NULL, 10);
  StatsCb stats;
  if (conf->set("event_cb", &stats, errstr) != RdKafka::Conf::CONF_OK)
    Test::Fail("set event_cb failed: " + errstr);
  Test::conf_set(conf, "group.id", topic);
  Test::conf_set(conf, "enable.auto.commit", "false");
  Test::conf_set(conf, "enable.partition.eof", "false");
  Test::conf_set(conf, "auto.offset.reset", "earliest");
  Test::conf_set(conf, "statistics.interval.ms", "100");

  RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
  if (!c)
    Test::Fail("Failed to create KafkaConsumer: " + errstr);
  delete conf;

  /* Assign the single partition */
  std::vector<RdKafka::TopicPartition*> parts;
  parts.push_back(RdKafka::TopicPartition::create(topic, 0));
  if ((err = c->assign(parts)))
    Test::Fail("assign failed: " + RdKafka::err2str(err));
  RdKafka::TopicPartition::destroy(parts);

  /* Start consuming */
  Test::Say("Consuming topic " + topic + "\n");
  int cnt = 0;
  while (cnt < msgcnt) {
    RdKafka::Message *msg = c->consume(tmout_multip(1000));

    switch (msg->err()) {
    case RdKafka::ERR__TIMED_OUT:
      break;
    case RdKafka::ERR__PARTITION_EOF:
      Test::Fail(tostr() << "Consume error after " << cnt << "/" << msgcnt
                 << " messages: " << msg->errstr());
      break;
    case RdKafka::ERR_NO_ERROR:
      /* Proper message. Update calculated lag for later
       * checking in stats callback */
      stats.calc_lag = msgcnt - (msg->offset()+1);
      cnt++;
      Test::Say(2, tostr() << "Received message #" << cnt << "/" << msgcnt
                << " at offset " << msg->offset()
                << " (calc lag " << stats.calc_lag << ")\n");
      /* Slow down message "processing" to make sure we get
       * at least one stats callback per message. */
      if (cnt < msgcnt)
        rd_sleep(1);
      break;
    default:
      Test::Fail("Consume error: " + msg->errstr());
      break;
    }

    delete msg;
  }

  Test::Say(tostr() << "Done, lag was valid "
            << stats.lag_valid << " times\n");
  if (stats.lag_valid == 0)
    Test::Fail("No valid consumer_lag in statistics seen");

  c->close();
  delete c;
}