static int run_test (const char *testname,
                     int (*test_main) (int, char **),
                     int argc, char **argv) {
        int r;

        if (tests_run_in_parallel) {
#ifdef _MSC_VER
                TEST_FAIL("Parallel runs not supported on this platform, yet\n");
#else
                pthread_t thr;
                struct run_args *run_args = calloc(1, sizeof(*run_args));
                run_args->testname = testname;
                run_args->test_main = test_main;
                run_args->argc = argc;
                run_args->argv = argv;

                pthread_mutex_lock(&test_lock);
                tests_running_cnt++;
                pthread_mutex_unlock(&test_lock);

                r = pthread_create(&thr, NULL, run_test_from_thread, run_args);
                if (r != 0) {
                        pthread_mutex_lock(&test_lock);
                        tests_running_cnt--;
                        pthread_mutex_unlock(&test_lock);

                        TEST_FAIL("Failed to start thread for test %s: %s\n",
                                  testname, strerror(r));
                }
#endif
        } else {
                struct run_args run_args = { .testname = testname,
                                             .test_main = test_main,
                                             .argc = argc,
                                             .argv = argv };

                tests_running_cnt++;
                r = run_test0(&run_args);
                tests_running_cnt--;

                /* Wait for everything to be cleaned up since broker
                 * destroys are handled in its own thread. */
                test_wait_exit(5);

                test_curr = NULL;
        }

        return r;
}
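/* A minimal sketch of what the run_test_from_thread() helper referenced
 * above might look like; the actual implementation lives elsewhere in
 * test.c and may differ. It reuses struct run_args, run_test0(),
 * test_lock and tests_running_cnt from the surrounding code and simply
 * runs the test, then drops the running-count that run_test() bumped
 * before spawning the thread. The pthread_detach() call is an assumption
 * (run_test() never joins the thread handle it creates). */
static void *run_test_from_thread (void *arg) {
        struct run_args *run_args = arg;

        /* Nobody joins this thread, so detach it. */
        pthread_detach(pthread_self());

        run_test0(run_args);

        /* Account for this test having finished. */
        pthread_mutex_lock(&test_lock);
        tests_running_cnt--;
        pthread_mutex_unlock(&test_lock);

        free(run_args);

        return NULL;
}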
/**
 * Produce to two partitions.
 * Consume with standard interface from both, one after the other.
 * Consume with queue interface from both, simultaneously.
 */
static void test_produce_consume (void) {
        const char *topic = "rdkafkatest0012";
        int msgcnt = 10000;
        int partition_cnt = 2;
        int i;
        uint64_t testid;
        int msg_base = 0;

        /* Generate a testid so we can differentiate messages
         * from other tests */
        testid = test_id_generate();
        TEST_SAY("Topic %s, testid %"PRIu64"\n", topic, testid);

        /* Produce messages */
        produce_messages(testid, topic, partition_cnt, msgcnt);

        /* Consume messages with standard interface */
        verify_consumed_msg_reset(msgcnt);
        for (i = 0 ; i < partition_cnt ; i++) {
                consume_messages(testid, topic, i, msg_base,
                                 msgcnt / partition_cnt, msgcnt);
                msg_base += msgcnt / partition_cnt;
        }
        verify_consumed_msg_check();

        /* Consume messages with queue interface */
        verify_consumed_msg_reset(msgcnt);
        consume_messages_with_queues(testid, topic, partition_cnt, msgcnt);
        verify_consumed_msg_check();

        /* Wait for everything to be cleaned up since broker destroys are
         * handled in its own thread. */
        test_wait_exit(10);

        /* If we haven't failed at this point then
         * there were no threads leaked */
        return;
}
int main (int argc, char **argv) {
        rd_kafka_t *rk;
        rd_kafka_topic_t *rkt;
        rd_kafka_conf_t *ignore_conf, *conf, *conf2;
        rd_kafka_topic_conf_t *ignore_topic_conf, *tconf, *tconf2;
        char errstr[512];
        const char **arr_orig, **arr_dup;
        size_t cnt_orig, cnt_dup;
        int i;
        const char *topic;
        static const char *gconfs[] = {
                "message.max.bytes", "12345",    /* int property */
                "client.id", "my id",            /* string property */
                "debug", "topic,metadata",       /* S2F property */
                "compression.codec", "gzip",     /* S2I property */
                NULL
        };
        static const char *tconfs[] = {
                "request.required.acks", "-1",   /* int */
                "auto.commit.enable", "false",   /* bool */
                "auto.offset.reset", "error",    /* S2I */
                "offset.store.path", "my/path",  /* string */
                NULL
        };

        test_conf_init(&ignore_conf, &ignore_topic_conf, 10);
        rd_kafka_conf_destroy(ignore_conf);
        rd_kafka_topic_conf_destroy(ignore_topic_conf);

        topic = test_mk_topic_name("generic", 0);

        /* Set up a global config object */
        conf = rd_kafka_conf_new();
        rd_kafka_conf_set_dr_cb(conf, dr_cb);
        rd_kafka_conf_set_error_cb(conf, error_cb);

        for (i = 0 ; gconfs[i] ; i += 2) {
                if (rd_kafka_conf_set(conf, gconfs[i], gconfs[i+1],
                                      errstr, sizeof(errstr)) !=
                    RD_KAFKA_CONF_OK)
                        TEST_FAIL("%s\n", errstr);
        }

        /* Set up a topic config object */
        tconf = rd_kafka_topic_conf_new();
        rd_kafka_topic_conf_set_partitioner_cb(tconf, partitioner);
        rd_kafka_topic_conf_set_opaque(tconf, (void *)0xbeef);

        for (i = 0 ; tconfs[i] ; i += 2) {
                if (rd_kafka_topic_conf_set(tconf, tconfs[i], tconfs[i+1],
                                            errstr, sizeof(errstr)) !=
                    RD_KAFKA_CONF_OK)
                        TEST_FAIL("%s\n", errstr);
        }

        /* Verify global config */
        arr_orig = rd_kafka_conf_dump(conf, &cnt_orig);
        conf_verify(__LINE__, arr_orig, cnt_orig, gconfs);

        /* Verify copied global config */
        conf2 = rd_kafka_conf_dup(conf);
        arr_dup = rd_kafka_conf_dump(conf2, &cnt_dup);
        conf_verify(__LINE__, arr_dup, cnt_dup, gconfs);
        conf_cmp("global", arr_orig, cnt_orig, arr_dup, cnt_dup);
        rd_kafka_conf_dump_free(arr_orig, cnt_orig);
        rd_kafka_conf_dump_free(arr_dup, cnt_dup);

        /* Verify topic config */
        arr_orig = rd_kafka_topic_conf_dump(tconf, &cnt_orig);
        conf_verify(__LINE__, arr_orig, cnt_orig, tconfs);

        /* Verify copied topic config */
        tconf2 = rd_kafka_topic_conf_dup(tconf);
        arr_dup = rd_kafka_topic_conf_dump(tconf2, &cnt_dup);
        conf_verify(__LINE__, arr_dup, cnt_dup, tconfs);
        conf_cmp("topic", arr_orig, cnt_orig, arr_dup, cnt_dup);
        rd_kafka_conf_dump_free(arr_orig, cnt_orig);
        rd_kafka_conf_dump_free(arr_dup, cnt_dup);

        /*
         * Create kafka instances using original and copied confs
         */

        /* original */
        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
        if (!rk)
                TEST_FAIL("Failed to create rdkafka instance: %s\n", errstr);

        rkt = rd_kafka_topic_new(rk, topic, tconf);
        if (!rkt)
                TEST_FAIL("Failed to create topic: %s\n", strerror(errno));

        rd_kafka_topic_destroy(rkt);
        rd_kafka_destroy(rk);

        /* copied */
        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf2, errstr, sizeof(errstr));
        if (!rk)
                TEST_FAIL("Failed to create rdkafka instance: %s\n", errstr);

        rkt = rd_kafka_topic_new(rk, topic, tconf2);
        if (!rkt)
                TEST_FAIL("Failed to create topic: %s\n", strerror(errno));

        rd_kafka_topic_destroy(rkt);
        rd_kafka_destroy(rk);

        /* Wait for everything to be cleaned up since broker destroys are
         * handled in its own thread. */
        test_wait_exit(2);

        /* If we haven't failed at this point then
         * there were no threads leaked */
        return 0;
}
int main(int argc, char **argv) {
        int r = 0;
        const char *tests_to_run = NULL; /* all */
        int i;
        test_timing_t t_all;

#ifndef _MSC_VER
        tests_to_run = getenv("TESTS");
#endif

        for (i = 1 ; i < argc ; i++) {
                if (!strcmp(argv[i], "-p"))
                        tests_run_in_parallel = 1;
                else if (i == 1)
                        tests_to_run = argv[i];
                else {
                        printf("Unknown option: %s\n"
                               "\n"
                               "Usage: %s [options] [<test-match-substr>]\n"
                               "Options:\n"
                               "  -p     Run tests in parallel\n"
                               "\n",
                               argv[i], argv[0]);
                        exit(1);
                }
        }

        test_curr = "<MAIN>";
        test_start = test_clock();

        TEST_SAY("Tests to run: %s\n", tests_to_run ? tests_to_run : "all");

#define RUN_TEST(NAME) do {                                             \
        extern int main_ ## NAME (int, char **);                        \
        if (!tests_to_run || strstr(# NAME, tests_to_run)) {            \
                r |= run_test(# NAME, main_ ## NAME, argc, argv);       \
        } else {                                                        \
                TEST_SAY("================= Skipping test %s "          \
                         "================\n", # NAME );                \
        }                                                               \
        } while (0)

        TIMING_START(&t_all, "ALL-TESTS");

        RUN_TEST(0001_multiobj);
        RUN_TEST(0002_unkpart);
        RUN_TEST(0003_msgmaxsize);
        RUN_TEST(0004_conf);
        RUN_TEST(0005_order);
        RUN_TEST(0006_symbols);
        RUN_TEST(0007_autotopic);
        RUN_TEST(0008_reqacks);
        RUN_TEST(0011_produce_batch);
        RUN_TEST(0012_produce_consume);
        RUN_TEST(0013_null_msgs);
        RUN_TEST(0014_reconsume_191);
        RUN_TEST(0015_offsets_seek);
        RUN_TEST(0017_compression);
        RUN_TEST(0018_cgrp_term);

        if (tests_run_in_parallel) {
                while (tests_running_cnt > 0)
                        rd_sleep(1);
        }

        TIMING_STOP(&t_all);

        /* Wait for everything to be cleaned up since broker destroys are
         * handled in its own thread. */
        test_wait_exit(tests_run_in_parallel ? 10 : 5);

        /* If we haven't failed at this point then
         * there were no threads leaked */

        TEST_SAY("\n============== ALL TESTS PASSED ==============\n");

        return r;
}
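/* For reference, the RUN_TEST() macro in the preceding main() expands
 * mechanically; RUN_TEST(0001_multiobj) becomes roughly the following
 * (illustrative only, whitespace adjusted, not compiled): */
#if 0
do {
        extern int main_0001_multiobj (int, char **);
        if (!tests_to_run || strstr("0001_multiobj", tests_to_run)) {
                r |= run_test("0001_multiobj", main_0001_multiobj,
                              argc, argv);
        } else {
                TEST_SAY("================= Skipping test %s "
                         "================\n", "0001_multiobj");
        }
} while (0);
#endif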
int main(int argc, char **argv) {
        const char *tests_to_run = NULL; /* all */
        int test_flags = 0;
        int i, r;
        test_timing_t t_all;

        mtx_init(&test_mtx, mtx_plain);

        test_init();

#ifndef _MSC_VER
        tests_to_run = getenv("TESTS");
#endif

        for (i = 1 ; i < argc ; i++) {
                if (!strncmp(argv[i], "-p", 2) && strlen(argv[i]) > 2)
                        test_concurrent_max = strtod(argv[i]+2, NULL);
                else if (!strcmp(argv[i], "-l"))
                        test_flags |= TEST_F_LOCAL;
                else if (!strcmp(argv[i], "-a"))
                        test_assert_on_fail = 1;
                else if (*argv[i] != '-')
                        tests_to_run = argv[i];
                else {
                        printf("Unknown option: %s\n"
                               "\n"
                               "Usage: %s [options] [<test-match-substr>]\n"
                               "Options:\n"
                               "  -p<N>  Run N tests in parallel\n"
                               "  -l     Only run local tests (no broker needed)\n"
                               "  -a     Assert on failures\n"
                               "\n",
                               argv[i], argv[0]);
                        exit(1);
                }
        }

        test_curr = &tests[0];
        test_curr->state = TEST_PASSED;
        test_curr->start = test_clock();

        TEST_SAY("Tests to run: %s\n", tests_to_run ? tests_to_run : "all");
        TEST_SAY("Test filter: %s\n",
                 (test_flags & TEST_F_LOCAL) ?
                 "local tests only" : "no filter");
        TEST_SAY("Action on test failure: %s\n",
                 test_assert_on_fail ? "assert crash" : "continue other tests");

        test_timeout_set(20);

        TIMING_START(&t_all, "ALL-TESTS");

        run_tests(tests_to_run, test_flags, argc, argv);

        TEST_LOCK();
        while (tests_running_cnt > 0) {
                struct test *test;

                TEST_SAY("%d test(s) running:", tests_running_cnt);
                for (test = tests ; test->name ; test++)
                        if (test->state == TEST_RUNNING)
                                TEST_SAY0(" %s", test->name);
                TEST_SAY0("\n");
                TEST_UNLOCK();

                rd_sleep(1);
                TEST_LOCK();
        }

        TIMING_STOP(&t_all);

        test_curr = &tests[0];
        test_curr->duration = test_clock() - test_curr->start;

        TEST_UNLOCK();

        /* Wait for everything to be cleaned up since broker destroys are
         * handled in its own thread. */
        test_wait_exit(10);

        r = test_summary(1/*lock*/) ? 1 : 0;

        /* If we haven't failed at this point then
         * there were no threads leaked */
        if (r == 0)
                TEST_SAY("\n============== ALL TESTS PASSED ==============\n");

        return r;
}
/* Produce a batch of messages to a single partition. */
static void test_single_partition (void) {
        const char *topic = "rdkafkatest1";
        int partition = 0;
        int r;
        rd_kafka_t *rk;
        rd_kafka_topic_t *rkt;
        rd_kafka_conf_t *conf;
        rd_kafka_topic_conf_t *topic_conf;
        char errstr[512];
        char msg[128];
        int msgcnt = 100000;
        int failcnt = 0;
        int i;
        rd_kafka_message_t *rkmessages;

        msgid_next = 0;

        test_conf_init(&conf, &topic_conf, 20);

        /* Set delivery report callback */
        rd_kafka_conf_set_dr_cb(conf, dr_single_partition_cb);

        /* Create kafka instance */
        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
        if (!rk)
                TEST_FAIL("Failed to create rdkafka instance: %s\n", errstr);

        TEST_SAY("Created kafka instance %s\n", rd_kafka_name(rk));

        rkt = rd_kafka_topic_new(rk, topic, topic_conf);
        if (!rkt)
                TEST_FAIL("Failed to create topic: %s\n", strerror(errno));

        /* Create messages */
        rkmessages = calloc(msgcnt, sizeof(*rkmessages));
        for (i = 0 ; i < msgcnt ; i++) {
                int *msgidp = malloc(sizeof(*msgidp));
                *msgidp = i;
                snprintf(msg, sizeof(msg), "%s:%s test message #%i",
                         __FILE__, __FUNCTION__, i);

                rkmessages[i].payload  = strdup(msg);
                rkmessages[i].len      = strlen(msg);
                rkmessages[i]._private = msgidp;
        }

        r = rd_kafka_produce_batch(rkt, partition, RD_KAFKA_MSG_F_FREE,
                                   rkmessages, msgcnt);

        /* Scan through messages to check for errors. */
        for (i = 0 ; i < msgcnt ; i++) {
                if (rkmessages[i].err) {
                        failcnt++;
                        if (failcnt < 100)
                                TEST_SAY("Message #%i failed: %s\n", i,
                                         rd_kafka_err2str(rkmessages[i].err));
                }
        }

        /* All messages should've been produced. */
        if (r < msgcnt) {
                TEST_SAY("Not all messages were accepted "
                         "by produce_batch(): %i < %i\n", r, msgcnt);
                if (msgcnt - r != failcnt)
                        TEST_SAY("Discrepancy between failed messages (%i) "
                                 "and return value %i (%i - %i)\n",
                                 failcnt, msgcnt - r, msgcnt, r);
                TEST_FAIL("%i/%i messages failed\n", msgcnt - r, msgcnt);
        }

        free(rkmessages);

        TEST_SAY("Single partition: "
                 "Produced %i messages, waiting for deliveries\n", r);

        /* Wait for messages to be delivered */
        while (rd_kafka_outq_len(rk) > 0)
                rd_kafka_poll(rk, 50);

        if (fails)
                TEST_FAIL("%i failures, see previous errors", fails);

        if (msgid_next != msgcnt)
                TEST_FAIL("Still waiting for messages: next %i != end %i\n",
                          msgid_next, msgcnt);

        /* Destroy topic */
        rd_kafka_topic_destroy(rkt);

        /* Destroy rdkafka instance */
        TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
        rd_kafka_destroy(rk);

        /* Wait for everything to be cleaned up since broker destroys are
         * handled in its own thread. */
        test_wait_exit(10);

        /* If we haven't failed at this point then
         * there were no threads leaked */
        return;
}
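/* A plausible sketch of the dr_single_partition_cb() delivery report
 * callback registered above; the real callback is defined elsewhere in
 * this test file and may differ. The idea it illustrates: the per-message
 * msgid stored in _private (delivered here as msg_opaque) is used to
 * verify in-order delivery, driving the msgid_next/fails checks at the
 * end of test_single_partition(). */
static void dr_single_partition_cb (rd_kafka_t *rk, void *payload, size_t len,
                                    rd_kafka_resp_err_t err,
                                    void *opaque, void *msg_opaque) {
        int msgid = *(int *)msg_opaque;

        free(msg_opaque);

        if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
                /* Delivery failed: count it and let the main loop fail. */
                TEST_SAY("Delivery failed for message #%i: %s\n",
                         msgid, rd_kafka_err2str(err));
                fails++;
        } else if (msgid != msgid_next) {
                /* Out-of-order delivery within a single partition. */
                TEST_SAY("Delivered message #%i, expected #%i\n",
                         msgid, msgid_next);
                fails++;
        } else
                msgid_next = msgid + 1;
}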
int main (int argc, char **argv) {
        const char *topic = "rdkafkatest1";
        int partition = 0;
        int r;
        rd_kafka_t *rk;
        rd_kafka_topic_t *rkt;
        rd_kafka_conf_t *conf;
        rd_kafka_topic_conf_t *topic_conf;
        char errstr[512];
        char msg[100000];
        int msgcnt = 10;
        int i;

        test_conf_init(&conf, &topic_conf, 10);

        /* Set a small maximum message size. */
        if (rd_kafka_conf_set(conf, "message.max.bytes", "100000",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
                TEST_FAIL("%s\n", errstr);

        /* Set delivery report callback */
        rd_kafka_conf_set_dr_cb(conf, dr_cb);

        /* Create kafka instance */
        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
        if (!rk)
                TEST_FAIL("Failed to create rdkafka instance: %s\n", errstr);

        TEST_SAY("Created kafka instance %s\n", rd_kafka_name(rk));

        rkt = rd_kafka_topic_new(rk, topic, topic_conf);
        if (!rkt)
                TEST_FAIL("Failed to create topic: %s\n", strerror(errno));

        memset(msg, 0, sizeof(msg));

        /* Produce 'msgcnt' messages, size odd ones larger than max.bytes,
         * and even ones smaller than max.bytes. */
        for (i = 0 ; i < msgcnt ; i++) {
                int *msgidp = malloc(sizeof(*msgidp));
                size_t len;
                int toobig = i & 1;

                *msgidp = i;
                if (toobig) {
                        /* Too big */
                        len = 200000;
                } else {
                        /* Good size */
                        len = 5000;
                        msgs_wait |= (1 << i);
                }

                snprintf(msg, sizeof(msg), "%s test message #%i", argv[0], i);
                r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
                                     msg, len, NULL, 0, msgidp);

                if (toobig) {
                        if (r != -1)
                                TEST_FAIL("Succeeded to produce too "
                                          "large message #%i\n", i);
                        free(msgidp);
                } else if (r == -1)
                        TEST_FAIL("Failed to produce message #%i: %s\n",
                                  i, strerror(errno));
        }

        /* Wait for messages to be delivered. */
        while (rd_kafka_outq_len(rk) > 0)
                rd_kafka_poll(rk, 50);

        if (msgs_wait != 0)
                TEST_FAIL("Still waiting for messages: 0x%x\n", msgs_wait);

        /* Destroy topic */
        rd_kafka_topic_destroy(rkt);

        /* Destroy rdkafka instance */
        TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
        rd_kafka_destroy(rk);

        /* Wait for everything to be cleaned up since broker destroys are
         * handled in its own thread. */
        test_wait_exit(10);

        /* If we haven't failed at this point then
         * there were no threads leaked */
        return 0;
}
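/* A hedged sketch of the dr_cb() delivery report callback that the C
 * produce tests in this section register; the actual callback is defined
 * elsewhere in each test file and may differ per test. It illustrates the
 * msgs_wait bookkeeping used above: each produced message sets its bit in
 * msgs_wait, and each delivery report clears it again, so a non-zero mask
 * after draining the out-queue means deliveries went missing. */
static void dr_cb (rd_kafka_t *rk, void *payload, size_t len,
                   rd_kafka_resp_err_t err, void *opaque, void *msg_opaque) {
        int msgid = *(int *)msg_opaque;

        free(msg_opaque);

        if (err)
                TEST_FAIL("Message #%i delivery failed: %s\n",
                          msgid, rd_kafka_err2str(err));

        if (!(msgs_wait & (1 << msgid)))
                TEST_FAIL("Message #%i was not expected\n", msgid);

        /* Mark this message as delivered. */
        msgs_wait &= ~(1 << msgid);
}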
/**
 * Generate a unique topic name (use the C helper in test.h for that).
 * Query metadata for that topic.
 * Wait one second.
 * Query again; it should now have ISRs and everything.
 */
static void test_metadata_cpp (void) {
        RdKafka::Conf *conf =
                RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
        /* @TODO: Do we need to merge with C test_conf_init()? */
        RdKafka::Conf *tconf =
                RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
        /* @TODO: Same as above */

        RdKafka::Metadata *metadata;
        RdKafka::ErrorCode err;
        int msgcnt = 10000;
        int partition_cnt = 2;
        int i;
        uint64_t testid;
        int msg_base = 0;
        std::string errstr;
        const char *topic_str = test_mk_topic_name("0013", 1);
        /* if (!topic) { TEST_FAIL() } */

        //const RdKafka::Conf::ConfResult confResult =
        //        conf->set("debug", "all", errstr);
        //if (confResult != RdKafka::Conf::CONF_OK) {
        //        std::stringstream errstring;
        //        errstring << "Can't set config: " << errstr;
        //        TEST_FAIL(errstring.str().c_str());
        //}

        TEST_SAY("Topic %s.\n", topic_str);

        const RdKafka::Conf::ConfResult confBrokerResult =
                conf->set("metadata.broker.list", "localhost:9092", errstr);
        if (confBrokerResult != RdKafka::Conf::CONF_OK) {
                std::stringstream errstring;
                errstring << "Can't set broker: " << errstr;
                TEST_FAIL(errstring.str().c_str());
        }

        /* Create a producer to fetch metadata */
        RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
        if (!producer) {
                std::stringstream errstring;
                errstring << "Can't create producer: " << errstr;
                TEST_FAIL(errstring.str().c_str());
        }

        /*
         * Create topic handle.
         */
        RdKafka::Topic *topic = NULL;
        topic = RdKafka::Topic::create(producer, topic_str, tconf, errstr);
        if (!topic) {
                std::stringstream errstring;
                errstring << "Can't create topic: " << errstr;
                TEST_FAIL(errstring.str().c_str());
        }

        /* First metadata request: it should succeed, but the new topic
         * should not have any partitions yet. */
        err = producer->metadata(topic != NULL, topic, &metadata, 5000);
        if (err != RdKafka::ERR_NO_ERROR) {
                std::stringstream errstring;
                errstring << "Can't request first metadata: " << errstr;
                TEST_FAIL(errstring.str().c_str());
        }

        /* It's a new topic, it should have no partitions */
        if (metadata->topics()->at(0)->partitions()->size() != 0) {
                TEST_FAIL("ISRS != 0");
        }

        sleep(1);

        /* Second metadata request: it should succeed and the topic
         * should now have partitions. */
        err = producer->metadata(topic != NULL, topic, &metadata, 5000);

        /* It should now have partitions */
        if (metadata->topics()->at(0)->partitions()->size() == 0) {
                TEST_FAIL("ISRS == 0");
        }

        delete topic;
        delete producer;
        delete tconf;
        delete conf;

        /* Wait for everything to be cleaned up since broker destroys are
         * handled in its own thread. */
        test_wait_exit(10);

        /* If we haven't failed at this point then
         * there were no threads leaked */
        return;
}
int main (int argc, char **argv) {
        const char *topic = "rdkafkatest1";
        int partition = 99; /* non-existent */
        int r;
        rd_kafka_t *rk;
        rd_kafka_topic_t *rkt;
        rd_kafka_conf_t *conf;
        rd_kafka_topic_conf_t *topic_conf;
        char errstr[512];
        char msg[128];
        int msgcnt = 10;
        time_t t_start, t_spent;
        int i;

        /* Socket hangups are gracefully handled in librdkafka on socket error
         * without the use of signals, so SIGPIPE should be ignored by the
         * calling program. */
        signal(SIGPIPE, SIG_IGN);

        test_conf_init(&conf, &topic_conf, 10);

        /* Set message.timeout.ms configuration for topic */
        if (rd_kafka_topic_conf_set(topic_conf, "message.timeout.ms", "2000",
                                    errstr, sizeof(errstr)) !=
            RD_KAFKA_CONF_OK)
                TEST_FAIL("%s\n", errstr);

        /* Set delivery report callback */
        rd_kafka_conf_set_dr_cb(conf, dr_cb);

        /* Create kafka instance */
        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
        if (!rk)
                TEST_FAIL("Failed to create rdkafka instance: %s\n", errstr);

        TEST_SAY("Created kafka instance %s\n", rd_kafka_name(rk));

        rkt = rd_kafka_topic_new(rk, topic, topic_conf);
        if (!rkt)
                TEST_FAIL("Failed to create topic: %s\n", strerror(errno));

        /* Produce messages */
        for (i = 0 ; i < msgcnt ; i++) {
                int *msgidp = malloc(sizeof(*msgidp));
                *msgidp = i;
                msgs_wait |= (1 << i);
                snprintf(msg, sizeof(msg), "%s test message #%i", argv[0], i);
                r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
                                     msg, strlen(msg), NULL, 0, msgidp);
                if (r == -1)
                        TEST_FAIL("Failed to produce message #%i: %s\n",
                                  i, strerror(errno));
        }

        t_start = time(NULL);

        /* Wait for messages to time out */
        while (rd_kafka_outq_len(rk) > 0)
                rd_kafka_poll(rk, 50);

        t_spent = time(NULL) - t_start;

        if (msgs_wait != 0)
                TEST_FAIL("Still waiting for messages: 0x%x\n", msgs_wait);

        if (t_spent > 5 /* 2000ms + cruft */)
                TEST_FAIL("Messages timed out too slowly (%i seconds > 5)\n",
                          (int)t_spent);

        /* Destroy topic */
        rd_kafka_topic_destroy(rkt);

        /* Destroy rdkafka instance */
        TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
        rd_kafka_destroy(rk);

        /* Wait for everything to be cleaned up since broker destroys are
         * handled in its own thread. */
        test_wait_exit(10);

        /* If we haven't failed at this point then
         * there were no threads leaked */
        return 0;
}
int main (int argc, char **argv) {
        int partition = 99; /* non-existent */
        int r;
        rd_kafka_t *rk;
        rd_kafka_topic_t *rkt;
        rd_kafka_conf_t *conf;
        rd_kafka_topic_conf_t *topic_conf;
        char errstr[512];
        char msg[128];
        int msgcnt = 10;
        int i;
        const struct rd_kafka_metadata *metadata;

        test_conf_init(&conf, &topic_conf, 10);

        /* Set delivery report callback */
        rd_kafka_conf_set_dr_cb(conf, dr_cb);

        /* Create kafka instance */
        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
        if (!rk)
                TEST_FAIL("Failed to create rdkafka instance: %s\n", errstr);

        TEST_SAY("Created kafka instance %s\n", rd_kafka_name(rk));

        rkt = rd_kafka_topic_new(rk, test_mk_topic_name("generic", 0),
                                 topic_conf);
        if (!rkt)
                TEST_FAIL("Failed to create topic: %s\n", strerror(errno));

        /* Request metadata so that we know the cluster is up before producing
         * messages, otherwise erroneous partitions will not fail
         * immediately. */
        if ((r = rd_kafka_metadata(rk, 0, rkt, &metadata, 2000)) !=
            RD_KAFKA_RESP_ERR_NO_ERROR)
                TEST_FAIL("Failed to acquire metadata: %s\n",
                          rd_kafka_err2str(r));
        rd_kafka_metadata_destroy(metadata);

        /* Produce messages */
        for (i = 0 ; i < msgcnt ; i++) {
                int *msgidp = malloc(sizeof(*msgidp));
                *msgidp = i;
                snprintf(msg, sizeof(msg), "%s test message #%i", argv[0], i);
                r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
                                     msg, strlen(msg), NULL, 0, msgidp);
                if (r == -1) {
                        if (errno == ESRCH)
                                TEST_SAY("Failed to produce message #%i: "
                                         "unknown partition: good!\n", i);
                        else
                                TEST_FAIL("Failed to produce message #%i: "
                                          "%s\n", i, strerror(errno));
                        free(msgidp);
                } else {
                        if (i > 5)
                                TEST_FAIL("Message #%i produced: "
                                          "should've failed\n", i);
                        msgs_wait |= (1 << i);
                }

                /* After half the messages: sleep to allow the metadata
                 * to be fetched from broker and update the actual partition
                 * count: this will make subsequent produce() calls fail
                 * immediately. */
                if (i == 5)
                        sleep(2);
        }

        /* Wait for messages to time out */
        while (rd_kafka_outq_len(rk) > 0)
                rd_kafka_poll(rk, 50);

        if (msgs_wait != 0)
                TEST_FAIL("Still waiting for messages: 0x%x\n", msgs_wait);

        /* Destroy topic */
        rd_kafka_topic_destroy(rkt);

        /* Destroy rdkafka instance */
        TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
        rd_kafka_destroy(rk);

        /* Wait for everything to be cleaned up since broker destroys are
         * handled in its own thread. */
        test_wait_exit(10);

        /* If we haven't failed at this point then
         * there were no threads leaked */
        return 0;
}