/**
 * @brief Verify subscription behavior across topic lifecycle changes:
 *        subscribing to a not-yet-existing topic yields no assignment,
 *        creating the topic triggers an assignment, and growing the
 *        partition count triggers a revoke + re-assignment.
 *        Tests #1 and #2 share a single consumer instance.
 */
static void do_test_non_exist_and_partchange (void) {
        char *topic_a = rd_strdup(test_mk_topic_name("topic_a", 1));
        rd_kafka_t *rk;
        rd_kafka_conf_t *conf;
        rd_kafka_queue_t *queue;

        /**
         * Test #1:
         * - Subscribe to non-existing topic.
         * - Verify empty assignment
         * - Create topic
         * - Verify new assignment containing topic
         */
        TEST_SAY("#1 & #2 testing\n");
        test_conf_init(&conf, NULL, 60);

        /* Decrease metadata interval to speed up topic change discovery. */
        test_conf_set(conf, "metadata.max.age.ms", "5000");

        /* Rebalances are consumed as events from the consumer queue
         * (await_* helpers) rather than through a rebalance callback. */
        rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE);

        rk = test_create_consumer(test_str_id_generate_tmp(), NULL, conf,
                                  NULL, NULL);
        queue = rd_kafka_queue_get_consumer(rk);

        TEST_SAY("#1: Subscribing to %s\n", topic_a);
        test_consumer_subscribe(rk, topic_a);

        /* Should not see a rebalance since no topics are matched. */
        await_no_rebalance("#1: empty", rk, queue, 10000);

        TEST_SAY("#1: creating topic %s\n", topic_a);
        test_create_topic(topic_a, 2, 1);

        await_assignment("#1: proper", rk, queue, 1,
                         topic_a, 2);

        /**
         * Test #2 (continue with #1 consumer)
         * - Increase the partition count
         * - Verify updated assignment
         */
        test_kafka_topics("--alter --topic %s --partitions 4", topic_a);

        await_revoke("#2", rk, queue);

        await_assignment("#2: more partitions", rk, queue, 1,
                         topic_a, 4);

        test_consumer_close(rk);
        rd_kafka_queue_destroy(queue);
        rd_kafka_destroy(rk);

        rd_free(topic_a);
}
/**
 * @brief tp_init slot for the Consumer type: build the librdkafka
 *        configuration from Python args/kwargs and create the underlying
 *        rd_kafka_t consumer handle.
 *
 * @returns 0 on success, or -1 with a Python exception set on failure.
 */
static int Consumer_init (PyObject *selfobj, PyObject *args, PyObject *kwargs) {
        Handle *self = (Handle *)selfobj;
        rd_kafka_conf_t *rkconf;
        char errbuf[256];

        /* Guard against re-initializing an already live handle. */
        if (self->rk) {
                PyErr_SetString(PyExc_RuntimeError,
                                "Consumer already initialized");
                return -1;
        }

        self->type = RD_KAFKA_CONSUMER;

        rkconf = common_conf_setup(RD_KAFKA_CONSUMER, self, args, kwargs);
        if (!rkconf)
                return -1; /* Exception raised by ..conf_setup() */

        /* Route rebalance and offset-commit notifications through
         * our C callbacks so they can be surfaced to Python. */
        rd_kafka_conf_set_rebalance_cb(rkconf, Consumer_rebalance_cb);
        rd_kafka_conf_set_offset_commit_cb(rkconf, Consumer_offset_commit_cb);

        self->rk = rd_kafka_new(RD_KAFKA_CONSUMER, rkconf,
                                errbuf, sizeof(errbuf));
        if (!self->rk) {
                cfl_PyErr_Format(rd_kafka_last_error(),
                                 "Failed to create consumer: %s", errbuf);
                rd_kafka_conf_destroy(rkconf);
                return -1;
        }

        /* Forward log messages to main queue which is then forwarded
         * to the consumer queue */
        if (self->logger)
                rd_kafka_set_log_queue(self->rk, NULL);

        rd_kafka_poll_set_consumer(self->rk);

        self->u.Consumer.rkqu = rd_kafka_queue_get_consumer(self->rk);
        assert(self->u.Consumer.rkqu);

        return 0;
}
/* @remark This test will fail if auto topic creation is enabled on the broker
 * since the client will issue a topic-creating metadata request to find
 * a new leader when the topic is removed.
 *
 * To run with trivup, do:
 * ./interactive_broker_version.py .. -conf '{"auto_create_topics":"false"}' ..
 * TESTS=0045 ./run-test.sh -k ./merged
 */

/**
 * @brief Verify that deleting subscribed topics triggers the expected
 *        rebalances: first a shrunk assignment, then an empty one with
 *        no further spurious rebalances.
 */
static void do_test_topic_remove (void) {
        char *topic_f = rd_strdup(test_mk_topic_name("topic_f", 1));
        char *topic_g = rd_strdup(test_mk_topic_name("topic_g", 1));
        int parts_f = 5;
        int parts_g = 9;
        rd_kafka_t *rk;
        rd_kafka_conf_t *conf;
        rd_kafka_queue_t *queue;
        rd_kafka_topic_partition_list_t *topics;
        rd_kafka_resp_err_t err;

        /**
         * Topic removal test:
         * - Create topic f & g
         * - Subscribe to f & g
         * - Verify f & g assignment
         * - Remove topic f
         * - Verify g assignment
         * - Remove topic g
         * - Verify empty assignment
         */
        TEST_SAY("Topic removal testing\n");
        test_conf_init(&conf, NULL, 60);

        /* Decrease metadata interval to speed up topic change discovery. */
        test_conf_set(conf, "metadata.max.age.ms", "5000");

        /* Rebalances are consumed as events from the consumer queue
         * (await_* helpers) rather than through a rebalance callback. */
        rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE);

        rk = test_create_consumer(test_str_id_generate_tmp(), NULL, conf,
                                  NULL, NULL);
        queue = rd_kafka_queue_get_consumer(rk);

        TEST_SAY("Topic removal: creating topic %s (subscribed)\n", topic_f);
        test_create_topic(topic_f, parts_f, 1);

        TEST_SAY("Topic removal: creating topic %s (subscribed)\n", topic_g);
        test_create_topic(topic_g, parts_g, 1);

        /* Allow the topics to propagate across brokers before subscribing. */
        rd_sleep(1); // FIXME: do check&wait loop instead

        TEST_SAY("Topic removal: Subscribing to %s & %s\n", topic_f, topic_g);
        topics = rd_kafka_topic_partition_list_new(2);
        rd_kafka_topic_partition_list_add(topics, topic_f,
                                          RD_KAFKA_PARTITION_UA);
        rd_kafka_topic_partition_list_add(topics, topic_g,
                                          RD_KAFKA_PARTITION_UA);
        err = rd_kafka_subscribe(rk, topics);
        TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NO_ERROR,
                    "%s", rd_kafka_err2str(err));
        rd_kafka_topic_partition_list_destroy(topics);

        await_assignment("Topic removal: both topics exist", rk, queue, 2,
                         topic_f, parts_f,
                         topic_g, parts_g);

        TEST_SAY("Topic removal: removing %s\n", topic_f);
        test_kafka_topics("--delete --topic %s", topic_f);

        await_revoke("Topic removal: rebalance after topic removal",
                     rk, queue);

        await_assignment("Topic removal: one topic exists", rk, queue, 1,
                         topic_g, parts_g);

        TEST_SAY("Topic removal: removing %s\n", topic_g);
        test_kafka_topics("--delete --topic %s", topic_g);

        await_revoke("Topic removal: rebalance after 2nd topic removal",
                     rk, queue);

        /* Should not see another rebalance since all topics now removed */
        await_no_rebalance("Topic removal: empty", rk, queue, 10000);

        test_consumer_close(rk);
        rd_kafka_queue_destroy(queue);
        rd_kafka_destroy(rk);

        rd_free(topic_f);
        rd_free(topic_g);
}
/**
 * @brief Verify that a regex subscription (^<base>_[bde]$) picks up
 *        matching topics as they are created and ignores non-matching
 *        ones: topic_b exists initially, topic_c (non-matching) must not
 *        cause a rebalance, topic_d must be added to the assignment.
 *        topic_e is part of the pattern but intentionally never created.
 */
static void do_test_regex (void) {
        char *base_topic = rd_strdup(test_mk_topic_name("topic", 1));
        char *topic_b = rd_strdup(tsprintf("%s_b", base_topic));
        char *topic_c = rd_strdup(tsprintf("%s_c", base_topic));
        char *topic_d = rd_strdup(tsprintf("%s_d", base_topic));
        char *topic_e = rd_strdup(tsprintf("%s_e", base_topic));
        rd_kafka_t *rk;
        rd_kafka_conf_t *conf;
        rd_kafka_queue_t *queue;

        /**
         * Regex test:
         * - Create topic b
         * - Subscribe to b & d & e
         * - Verify b assignment
         * - Create topic c
         * - Verify no rebalance
         * - Create topic d
         * - Verify b & d assignment
         */
        TEST_SAY("Regex testing\n");
        test_conf_init(&conf, NULL, 60);

        /* Decrease metadata interval to speed up topic change discovery. */
        test_conf_set(conf, "metadata.max.age.ms", "5000");

        /* Rebalances are consumed as events from the consumer queue
         * (await_* helpers) rather than through a rebalance callback. */
        rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE);

        rk = test_create_consumer(test_str_id_generate_tmp(), NULL, conf,
                                  NULL, NULL);
        queue = rd_kafka_queue_get_consumer(rk);

        TEST_SAY("Regex: creating topic %s (subscribed)\n", topic_b);
        test_create_topic(topic_b, 2, 1);

        /* Allow the topic to propagate across brokers before subscribing. */
        rd_sleep(1); // FIXME: do check&wait loop instead

        TEST_SAY("Regex: Subscribing to %s & %s & %s\n",
                 topic_b, topic_d, topic_e);
        test_consumer_subscribe(rk, tsprintf("^%s_[bde]$", base_topic));

        await_assignment("Regex: just one topic exists", rk, queue, 1,
                         topic_b, 2);

        TEST_SAY("Regex: creating topic %s (not subscribed)\n", topic_c);
        test_create_topic(topic_c, 4, 1);

        /* Should not see a rebalance since topic_c does not match
         * the subscription regex. */
        await_no_rebalance("Regex: empty", rk, queue, 10000);

        TEST_SAY("Regex: creating topic %s (subscribed)\n", topic_d);
        test_create_topic(topic_d, 1, 1);

        await_revoke("Regex: rebalance after topic creation", rk, queue);

        await_assignment("Regex: two topics exist", rk, queue, 2,
                         topic_b, 2,
                         topic_d, 1);

        test_consumer_close(rk);
        rd_kafka_queue_destroy(queue);
        rd_kafka_destroy(rk);

        rd_free(base_topic);
        rd_free(topic_b);
        rd_free(topic_c);
        rd_free(topic_d);
        rd_free(topic_e);
}
int main_0040_io_event (int argc, char **argv) { rd_kafka_conf_t *conf; rd_kafka_topic_conf_t *tconf; rd_kafka_t *rk_p, *rk_c; const char *topic; rd_kafka_topic_t *rkt_p; rd_kafka_queue_t *queue; uint64_t testid; int msgcnt = 100; int recvd = 0; int fds[2]; int wait_multiplier = 1; struct pollfd pfd; int r; enum { _NOPE, _YEP, _REBALANCE } expecting_io = _REBALANCE; testid = test_id_generate(); topic = test_mk_topic_name(__FUNCTION__, 1); rk_p = test_create_producer(); rkt_p = test_create_producer_topic(rk_p, topic, NULL); test_auto_create_topic_rkt(rk_p, rkt_p); test_conf_init(&conf, &tconf, 0); rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE); test_conf_set(conf, "session.timeout.ms", "6000"); test_conf_set(conf, "enable.partition.eof", "false"); /* Speed up propagation of new topics */ test_conf_set(conf, "metadata.max.age.ms", "5000"); test_topic_conf_set(tconf, "auto.offset.reset", "earliest"); rk_c = test_create_consumer(topic, NULL, conf, tconf); queue = rd_kafka_queue_get_consumer(rk_c); test_consumer_subscribe(rk_c, topic); #ifndef _MSC_VER r = pipe(fds); #else r = _pipe(fds, 2, _O_BINARY); #endif if (r == -1) TEST_FAIL("pipe() failed: %s\n", strerror(errno)); rd_kafka_queue_io_event_enable(queue, fds[1], "1", 1); pfd.fd = fds[0]; pfd.events = POLLIN; pfd.revents = 0; /** * 1) Wait for rebalance event * 2) Wait 1 interval (1s) expecting no IO (nothing produced). * 3) Produce half the messages * 4) Expect IO * 5) Consume the available messages * 6) Wait 1 interval expecting no IO. * 7) Produce remaing half * 8) Expect IO * 9) Done. 
*/ while (recvd < msgcnt) { int r; #ifndef _MSC_VER r = poll(&pfd, 1, 1000 * wait_multiplier); #else r = WSAPoll(&pfd, 1, 1000 * wait_multiplier); #endif if (r == -1) { TEST_FAIL("poll() failed: %s", strerror(errno)); } else if (r == 1) { rd_kafka_event_t *rkev; char b; int eventcnt = 0; if (pfd.events & POLLERR) TEST_FAIL("Poll error\n"); if (!(pfd.events & POLLIN)) { TEST_SAY("Stray event 0x%x\n", (int)pfd.events); continue; } TEST_SAY("POLLIN\n"); /* Read signaling token to purge socket queue and * eventually silence POLLIN */ #ifndef _MSC_VER r = read(pfd.fd, &b, 1); #else r = _read((int)pfd.fd, &b, 1); #endif if (r == -1) TEST_FAIL("read failed: %s\n", strerror(errno)); if (!expecting_io) TEST_WARN("Got unexpected IO after %d/%d msgs\n", recvd, msgcnt); while ((rkev = rd_kafka_queue_poll(queue, 0))) { eventcnt++; switch (rd_kafka_event_type(rkev)) { case RD_KAFKA_EVENT_REBALANCE: TEST_SAY("Got %s: %s\n", rd_kafka_event_name(rkev), rd_kafka_err2str(rd_kafka_event_error(rkev))); if (expecting_io != _REBALANCE) TEST_FAIL("Got Rebalance when expecting message\n"); if (rd_kafka_event_error(rkev) == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) { rd_kafka_assign(rk_c, rd_kafka_event_topic_partition_list(rkev)); expecting_io = _NOPE; } else rd_kafka_assign(rk_c, NULL); break; case RD_KAFKA_EVENT_FETCH: if (expecting_io != _YEP) TEST_FAIL("Did not expect more messages at %d/%d\n", recvd, msgcnt); recvd++; if (recvd == (msgcnt / 2) || recvd == msgcnt) expecting_io = _NOPE; break; case RD_KAFKA_EVENT_ERROR: TEST_FAIL("Error: %s\n", rd_kafka_event_error_string(rkev)); break; default: TEST_SAY("Ignoring event %s\n", rd_kafka_event_name(rkev)); } rd_kafka_event_destroy(rkev); } TEST_SAY("%d events, Consumed %d/%d messages\n", eventcnt, recvd, msgcnt); wait_multiplier = 1; } else { if (expecting_io == _REBALANCE) { continue; } else if (expecting_io == _YEP) { TEST_FAIL("Did not see expected IO after %d/%d msgs\n", recvd, msgcnt); } TEST_SAY("IO poll timeout (good)\n"); 
TEST_SAY("Got idle period, producing\n"); test_produce_msgs(rk_p, rkt_p, testid, 0, recvd, msgcnt/2, NULL, 10); expecting_io = _YEP; /* When running slowly (e.g., valgrind) it might take * some time before the first message is received * after producing. */ wait_multiplier = 3; } } TEST_SAY("Done\n"); rd_kafka_topic_destroy(rkt_p); rd_kafka_destroy(rk_p); rd_kafka_queue_destroy(queue); rd_kafka_consumer_close(rk_c); rd_kafka_destroy(rk_c); #ifndef _MSC_VER close(fds[0]); close(fds[1]); #else _close(fds[0]); _close(fds[1]); #endif return 0; }