/**
 * @brief Replace the consumer's subscription with the given topic names.
 *
 * Builds a temporary C topic+partition list from \p topics, passes it to
 * rd_kafka_subscribe() and destroys the list again (the underlying C
 * library keeps its own copy of the subscription).
 *
 * @returns the C error code mapped onto RdKafka::ErrorCode.
 */
RdKafka::ErrorCode
RdKafka::KafkaConsumerImpl::subscribe (const std::vector<std::string> &topics) {
        rd_kafka_topic_partition_list_t *c_topics =
                rd_kafka_topic_partition_list_new((int)topics.size());

        /* Partition is left unassigned (UA): the group protocol decides
         * the actual partition assignment. */
        for (std::vector<std::string>::const_iterator it = topics.begin() ;
             it != topics.end() ; ++it)
                rd_kafka_topic_partition_list_add(c_topics, it->c_str(),
                                                  RD_KAFKA_PARTITION_UA);

        rd_kafka_resp_err_t err = rd_kafka_subscribe(rk_, c_topics);

        rd_kafka_topic_partition_list_destroy(c_topics);

        return static_cast<RdKafka::ErrorCode>(err);
}
static void subscribe_consume_many (char **topics, int topic_cnt, uint64_t testid) { rd_kafka_t *rk; int i; rd_kafka_topic_conf_t *tconf; rd_kafka_topic_partition_list_t *parts; rd_kafka_resp_err_t err; test_msgver_t mv; TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__); test_conf_init(NULL, &tconf, 60); test_topic_conf_set(tconf, "auto.offset.reset", "earliest"); rk = test_create_consumer(__FUNCTION__, NULL, NULL, tconf); parts = rd_kafka_topic_partition_list_new(topic_cnt); for (i = 0 ; i < topic_cnt ; i++) rd_kafka_topic_partition_list_add(parts, topics[i], RD_KAFKA_PARTITION_UA); TEST_SAY("Subscribing to %d topics\n", topic_cnt); err = rd_kafka_subscribe(rk, parts); if (err) TEST_FAIL("subscribe() failed: %s\n", rd_kafka_err2str(err)); rd_kafka_topic_partition_list_destroy(parts); test_msgver_init(&mv, testid); test_consumer_poll("consume.subscribe", rk, testid, -1, 0, msgs_per_topic * topic_cnt, &mv); for (i = 0 ; i < topic_cnt ; i++) test_msgver_verify_part("subscribe", &mv, TEST_MSGVER_ALL_PART, topics[i], 0, i * msgs_per_topic, msgs_per_topic); test_msgver_clear(&mv); test_consumer_close(rk); rd_kafka_destroy(rk); }
/** * Create high-level consumer subscribing to \p topic from BEGINNING * and expects \d exp_msgcnt with matching \p testid * Destroys consumer when done. * * If \p group_id is NULL a new unique group is generated */ void test_consume_msgs_easy (const char *group_id, const char *topic, uint64_t testid, int exp_msgcnt) { rd_kafka_t *rk; rd_kafka_topic_conf_t *tconf; rd_kafka_resp_err_t err; rd_kafka_topic_partition_list_t *topics; char grpid0[64]; test_conf_init(NULL, &tconf, 0); if (!group_id) group_id = test_str_id_generate(grpid0, sizeof(grpid0)); test_topic_conf_set(tconf, "auto.offset.reset", "smallest"); rk = test_create_consumer(group_id, NULL, tconf, NULL); rd_kafka_poll_set_consumer(rk); topics = rd_kafka_topic_partition_list_new(1); rd_kafka_topic_partition_list_add(topics, topic, RD_KAFKA_PARTITION_UA); TEST_SAY("Subscribing to topic %s in group %s " "(expecting %d msgs with testid %"PRIu64")\n", topic, group_id, exp_msgcnt, testid); err = rd_kafka_subscribe(rk, topics); if (err) TEST_FAIL("Failed to subscribe to %s: %s\n", topic, rd_kafka_err2str(err)); rd_kafka_topic_partition_list_destroy(topics); /* Consume messages */ test_consumer_poll("consume.easy", rk, testid, -1, -1, exp_msgcnt); test_consumer_close(rk); rd_kafka_destroy(rk); }
/* @remark This test will fail if auto topic creation is enabled on the broker
 * since the client will issue a topic-creating metadata request to find
 * a new leader when the topic is removed.
 *
 * To run with trivup, do:
 * ./interactive_broker_version.py .. -conf '{"auto_create_topics":"false"}' ..
 * TESTS=0045 ./run-test.sh -k ./merged
 */
static void do_test_topic_remove (void) {
        char *topic_f = rd_strdup(test_mk_topic_name("topic_f", 1));
        char *topic_g = rd_strdup(test_mk_topic_name("topic_g", 1));
        int parts_f = 5;
        int parts_g = 9;
        rd_kafka_t *rk;
        rd_kafka_conf_t *conf;
        rd_kafka_queue_t *queue;
        rd_kafka_topic_partition_list_t *topics;
        rd_kafka_resp_err_t err;

        /**
         * Topic removal test:
         * - Create topic f & g
         * - Subscribe to f & g
         * - Verify f & g assignment
         * - Remove topic f
         * - Verify g assignment
         * - Remove topic g
         * - Verify empty assignment
         */

        TEST_SAY("Topic removal testing\n");
        test_conf_init(&conf, NULL, 60);

        /* Decrease metadata interval to speed up topic change discovery. */
        test_conf_set(conf, "metadata.max.age.ms", "5000");

        /* Rebalance events are consumed explicitly off the consumer queue
         * via the await_* helpers below rather than through a callback. */
        rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE);

        rk = test_create_consumer(test_str_id_generate_tmp(),
                                  NULL, conf, NULL, NULL);
        queue = rd_kafka_queue_get_consumer(rk);

        TEST_SAY("Topic removal: creating topic %s (subscribed)\n", topic_f);
        test_create_topic(topic_f, parts_f, 1);

        TEST_SAY("Topic removal: creating topic %s (subscribed)\n", topic_g);
        test_create_topic(topic_g, parts_g, 1);

        rd_sleep(1); // FIXME: do check&wait loop instead

        TEST_SAY("Topic removal: Subscribing to %s & %s\n", topic_f, topic_g);
        topics = rd_kafka_topic_partition_list_new(2);
        rd_kafka_topic_partition_list_add(topics, topic_f,
                                          RD_KAFKA_PARTITION_UA);
        rd_kafka_topic_partition_list_add(topics, topic_g,
                                          RD_KAFKA_PARTITION_UA);
        err = rd_kafka_subscribe(rk, topics);
        TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NO_ERROR,
                    "%s", rd_kafka_err2str(err));
        rd_kafka_topic_partition_list_destroy(topics);

        /* Both topics exist: expect an assignment covering all partitions
         * of both f and g. */
        await_assignment("Topic removal: both topics exist", rk, queue, 2,
                         topic_f, parts_f,
                         topic_g, parts_g);

        TEST_SAY("Topic removal: removing %s\n", topic_f);
        test_kafka_topics("--delete --topic %s", topic_f);

        /* Removing f triggers a revoke followed by a reassignment
         * containing only g's partitions. */
        await_revoke("Topic removal: rebalance after topic removal",
                     rk, queue);

        await_assignment("Topic removal: one topic exists", rk, queue, 1,
                         topic_g, parts_g);

        TEST_SAY("Topic removal: removing %s\n", topic_g);
        test_kafka_topics("--delete --topic %s", topic_g);

        await_revoke("Topic removal: rebalance after 2nd topic removal",
                     rk, queue);

        /* Should not see another rebalance since all topics now removed */
        await_no_rebalance("Topic removal: empty", rk, queue, 10000);

        test_consumer_close(rk);
        rd_kafka_queue_destroy(queue);
        rd_kafka_destroy(rk);

        rd_free(topic_f);
        rd_free(topic_g);
}
int main (int argc, char **argv) { char mode = 'C'; char *brokers = "localhost:9092"; int opt; rd_kafka_conf_t *conf; rd_kafka_topic_conf_t *topic_conf; char errstr[512]; const char *debug = NULL; int do_conf_dump = 0; char tmp[16]; rd_kafka_resp_err_t err; char *group = NULL; rd_kafka_topic_partition_list_t *topics; int i; quiet = !isatty(STDIN_FILENO); /* Kafka configuration */ conf = rd_kafka_conf_new(); /* Set logger */ rd_kafka_conf_set_log_cb(conf, logger); /* Quick termination */ snprintf(tmp, sizeof(tmp), "%i", SIGIO); rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0); /* Topic configuration */ topic_conf = rd_kafka_topic_conf_new(); while ((opt = getopt(argc, argv, "g:b:qd:eX:As:DO")) != -1) { switch (opt) { case 'b': brokers = optarg; break; case 'g': group = optarg; break; case 'e': exit_eof = 1; break; case 'd': debug = optarg; break; case 'q': quiet = 1; break; case 'A': output = OUTPUT_RAW; break; case 'X': { char *name, *val; rd_kafka_conf_res_t res; if (!strcmp(optarg, "list") || !strcmp(optarg, "help")) { rd_kafka_conf_properties_show(stdout); exit(0); } if (!strcmp(optarg, "dump")) { do_conf_dump = 1; continue; } name = optarg; if (!(val = strchr(name, '='))) { fprintf(stderr, "%% Expected " "-X property=value, not %s\n", name); exit(1); } *val = '\0'; val++; res = RD_KAFKA_CONF_UNKNOWN; /* Try "topic." prefixed properties on topic * conf first, and then fall through to global if * it didnt match a topic configuration property. 
*/ if (!strncmp(name, "topic.", strlen("topic."))) res = rd_kafka_topic_conf_set(topic_conf, name+ strlen("topic."), val, errstr, sizeof(errstr)); if (res == RD_KAFKA_CONF_UNKNOWN) res = rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)); if (res != RD_KAFKA_CONF_OK) { fprintf(stderr, "%% %s\n", errstr); exit(1); } } break; case 'D': case 'O': mode = opt; break; default: goto usage; } } if (do_conf_dump) { const char **arr; size_t cnt; int pass; for (pass = 0 ; pass < 2 ; pass++) { if (pass == 0) { arr = rd_kafka_conf_dump(conf, &cnt); printf("# Global config\n"); } else { printf("# Topic config\n"); arr = rd_kafka_topic_conf_dump(topic_conf, &cnt); } for (i = 0 ; i < (int)cnt ; i += 2) printf("%s = %s\n", arr[i], arr[i+1]); printf("\n"); rd_kafka_conf_dump_free(arr, cnt); } exit(0); } if (strchr("OC", mode) && optind == argc) { usage: fprintf(stderr, "Usage: %s [options] <topic[:part]> <topic[:part]>..\n" "\n" "librdkafka version %s (0x%08x)\n" "\n" " Options:\n" " -g <group> Consumer group (%s)\n" " -b <brokers> Broker address (%s)\n" " -e Exit consumer when last message\n" " in partition has been received.\n" " -D Describe group.\n" " -O Get commmitted offset(s)\n" " -d [facs..] 
Enable debugging contexts:\n" " %s\n" " -q Be quiet\n" " -A Raw payload output (consumer)\n" " -X <prop=name> Set arbitrary librdkafka " "configuration property\n" " Properties prefixed with \"topic.\" " "will be set on topic object.\n" " Use '-X list' to see the full list\n" " of supported properties.\n" "\n" "\n", argv[0], rd_kafka_version_str(), rd_kafka_version(), group, brokers, RD_KAFKA_DEBUG_CONTEXTS); exit(1); } signal(SIGINT, stop); signal(SIGUSR1, sig_usr1); if (debug && rd_kafka_conf_set(conf, "debug", debug, errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { fprintf(stderr, "%% Debug configuration failed: %s: %s\n", errstr, debug); exit(1); } /* * Client/Consumer group */ if (strchr("CO", mode)) { /* Consumer groups require a group id */ if (!group) group = "rdkafka_consumer_example"; if (rd_kafka_conf_set(conf, "group.id", group, errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { fprintf(stderr, "%% %s\n", errstr); exit(1); } /* Consumer groups always use broker based offset storage */ if (rd_kafka_topic_conf_set(topic_conf, "offset.store.method", "broker", errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { fprintf(stderr, "%% %s\n", errstr); exit(1); } /* Set default topic config for pattern-matched topics. */ rd_kafka_conf_set_default_topic_conf(conf, topic_conf); /* Callback called on partition assignment changes */ rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb); } /* Create Kafka handle */ if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr)))) { fprintf(stderr, "%% Failed to create new consumer: %s\n", errstr); exit(1); } rd_kafka_set_log_level(rk, LOG_DEBUG); /* Add brokers */ if (rd_kafka_brokers_add(rk, brokers) == 0) { fprintf(stderr, "%% No valid brokers specified\n"); exit(1); } if (mode == 'D') { int r; /* Describe groups */ r = describe_groups(rk, group); rd_kafka_destroy(rk); exit(r == -1 ? 
1 : 0); } /* Redirect rd_kafka_poll() to consumer_poll() */ rd_kafka_poll_set_consumer(rk); topics = rd_kafka_topic_partition_list_new(argc - optind); for (i = optind ; i < argc ; i++) { /* Parse "topic[:part] */ char *topic = argv[i]; char *t; int32_t partition = -1; if ((t = strstr(topic, ":"))) { *t = '\0'; partition = atoi(t+1); } rd_kafka_topic_partition_list_add(topics, topic, partition); } if (mode == 'O') { /* Offset query */ err = rd_kafka_position(rk, topics, 5000); if (err) { fprintf(stderr, "%% Failed to fetch offsets: %s\n", rd_kafka_err2str(err)); exit(1); } for (i = 0 ; i < topics->cnt ; i++) { rd_kafka_topic_partition_t *p = &topics->elems[i]; printf("Topic \"%s\" partition %"PRId32, p->topic, p->partition); if (p->err) printf(" error %s", rd_kafka_err2str(p->err)); else { printf(" offset %"PRId64"", p->offset); if (p->metadata_size) printf(" (%d bytes of metadata)", (int)p->metadata_size); } printf("\n"); } goto done; } if ((err = rd_kafka_subscribe(rk, topics))) { fprintf(stderr, "%% Failed to start consuming topics: %s\n", rd_kafka_err2str(err)); exit(1); } while (run) { rd_kafka_message_t *rkmessage; rkmessage = rd_kafka_consumer_poll(rk, 1000); if (rkmessage) { msg_consume(rkmessage, NULL); rd_kafka_message_destroy(rkmessage); } } done: err = rd_kafka_consumer_close(rk); if (err) fprintf(stderr, "%% Failed to close consumer: %s\n", rd_kafka_err2str(err)); else fprintf(stderr, "%% Consumer closed\n"); rd_kafka_topic_partition_list_destroy(topics); /* Destroy handle */ rd_kafka_destroy(rk); /* Let background threads clean up and terminate cleanly. */ run = 5; while (run-- > 0 && rd_kafka_wait_destroyed(1000) == -1) printf("Waiting for librdkafka to decommission\n"); if (run <= 0) rd_kafka_dump(stdout, rk); return 0; }
/**
 * @brief Verify rd_kafka_list_groups(): first expect no matching groups,
 *        then create _CONS_CNT consumers in fresh groups and poll the
 *        group list until all of them are reported.
 */
int main_0019_list_groups (int argc, char **argv) {
        const char *topic = test_mk_topic_name(__FUNCTION__, 1);
#define _CONS_CNT 2
        char *groups[_CONS_CNT];
        rd_kafka_t *rk, *rk_c[_CONS_CNT];
        rd_kafka_topic_partition_list_t *topics;
        rd_kafka_resp_err_t err;
        test_timing_t t_grps;
        int i;
        int groups_seen;
        rd_kafka_topic_t *rkt;

        /* Handle for group listings */
        rk = test_create_producer();

        /* Produce messages so that topic is auto created */
        rkt = test_create_topic_object(rk, topic, NULL);
        test_produce_msgs(rk, rkt, 0, 0, 0, 10, NULL, 64);
        rd_kafka_topic_destroy(rkt);

        /* Query groups before creation, should not list our groups. */
        groups_seen = list_groups(rk, NULL, 0, "should be none");
        if (groups_seen != 0)
                TEST_FAIL("Saw %d groups when there wasn't "
                          "supposed to be any\n", groups_seen);

        /* Fill in topic subscription set */
        topics = rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add(topics, topic, -1);

        /* Create consumers and start subscription */
        for (i = 0 ; i < _CONS_CNT ; i++) {
                /* Each consumer gets its own randomly generated group id
                 * (freed in the close loop at the bottom). */
                groups[i] = malloc(32);
                test_str_id_generate(groups[i], 32);
                rk_c[i] = test_create_consumer(groups[i],
                                               NULL, NULL, NULL);

                err = rd_kafka_poll_set_consumer(rk_c[i]);
                if (err)
                        TEST_FAIL("poll_set_consumer: %s\n",
                                  rd_kafka_err2str(err));

                err = rd_kafka_subscribe(rk_c[i], topics);
                if (err)
                        TEST_FAIL("subscribe: %s\n",
                                  rd_kafka_err2str(err));
        }

        rd_kafka_topic_partition_list_destroy(topics);

        TIMING_START(&t_grps, "WAIT.GROUPS");
        /* Query groups again until both groups are seen.
         * NOTE(review): the inner groups_seen below shadows the outer
         * variable of the same name. */
        while (1) {
                int groups_seen = list_groups(rk, (char **)groups, _CONS_CNT,
                                              "should see my groups");
                if (groups_seen == _CONS_CNT)
                        break;
                /* Group coordinator may not have registered us yet;
                 * back off and retry. */
                rd_sleep(1);
        }
        TIMING_STOP(&t_grps);

        TEST_SAY("Closing remaining consumers\n");
        for (i = 0 ; i < _CONS_CNT ; i++) {
                test_timing_t t_close;
                if (!rk_c[i])
                        continue;

                TEST_SAY("Closing %s\n", rd_kafka_name(rk_c[i]));

                TIMING_START(&t_close, "CONSUMER.CLOSE");
                err = rd_kafka_consumer_close(rk_c[i]);
                TIMING_STOP(&t_close);
                if (err)
                        TEST_FAIL("consumer_close failed: %s\n",
                                  rd_kafka_err2str(err));

                rd_kafka_destroy(rk_c[i]);
                rk_c[i] = NULL;

                free(groups[i]);
        }

        rd_kafka_destroy(rk);

        return 0;
}
/**
 * @brief Link-time symbol check: reference every exported public API
 *        symbol so the build fails if a symbol is missing from the
 *        library. The guard is always false, so nothing is executed.
 */
int main_0006_symbols (int argc, char **argv) {

        if (argc < 0 /* always false */) {
                /* Version and error-code helpers */
                rd_kafka_version();
                rd_kafka_version_str();
                rd_kafka_get_debug_contexts();
                rd_kafka_get_err_descs(NULL, NULL);
                rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR);
                rd_kafka_err2name(RD_KAFKA_RESP_ERR_NO_ERROR);
                rd_kafka_errno2err(EINVAL);
                rd_kafka_errno();
                rd_kafka_last_error();
                /* Global configuration object */
                rd_kafka_conf_new();
                rd_kafka_conf_destroy(NULL);
                rd_kafka_conf_dup(NULL);
                rd_kafka_conf_set(NULL, NULL, NULL, NULL, 0);
                rd_kafka_conf_set_dr_cb(NULL, NULL);
                rd_kafka_conf_set_dr_msg_cb(NULL, NULL);
                rd_kafka_conf_set_error_cb(NULL, NULL);
                rd_kafka_conf_set_stats_cb(NULL, NULL);
                rd_kafka_conf_set_log_cb(NULL, NULL);
                rd_kafka_conf_set_socket_cb(NULL, NULL);
                rd_kafka_conf_set_rebalance_cb(NULL, NULL);
                rd_kafka_conf_set_offset_commit_cb(NULL, NULL);
                rd_kafka_conf_set_throttle_cb(NULL, NULL);
                rd_kafka_conf_set_default_topic_conf(NULL, NULL);
                rd_kafka_conf_get(NULL, NULL, NULL, NULL);
#ifndef _MSC_VER
                /* open_cb is not available on Windows builds */
                rd_kafka_conf_set_open_cb(NULL, NULL);
#endif
                rd_kafka_conf_set_opaque(NULL, NULL);
                rd_kafka_opaque(NULL);
                rd_kafka_conf_dump(NULL, NULL);
                rd_kafka_topic_conf_dump(NULL, NULL);
                rd_kafka_conf_dump_free(NULL, 0);
                rd_kafka_conf_properties_show(NULL);
                /* Topic configuration object */
                rd_kafka_topic_conf_new();
                rd_kafka_topic_conf_dup(NULL);
                rd_kafka_topic_conf_destroy(NULL);
                rd_kafka_topic_conf_set(NULL, NULL, NULL, NULL, 0);
                rd_kafka_topic_conf_set_opaque(NULL, NULL);
                rd_kafka_topic_conf_get(NULL, NULL, NULL, NULL);
                rd_kafka_topic_conf_set_partitioner_cb(NULL, NULL);
                rd_kafka_topic_partition_available(NULL, 0);
                rd_kafka_topic_opaque(NULL);
                /* Built-in partitioners */
                rd_kafka_msg_partitioner_random(NULL, NULL, 0, 0, NULL, NULL);
                rd_kafka_msg_partitioner_consistent(NULL, NULL, 0, 0, NULL, NULL);
                rd_kafka_msg_partitioner_consistent_random(NULL, NULL, 0, 0, NULL, NULL);
                /* Handle and topic objects */
                rd_kafka_new(0, NULL, NULL, 0);
                rd_kafka_destroy(NULL);
                rd_kafka_name(NULL);
                rd_kafka_memberid(NULL);
                rd_kafka_topic_new(NULL, NULL, NULL);
                rd_kafka_topic_destroy(NULL);
                rd_kafka_topic_name(NULL);
                /* Message object */
                rd_kafka_message_destroy(NULL);
                rd_kafka_message_errstr(NULL);
                rd_kafka_message_timestamp(NULL, NULL);
                /* Legacy (simple) consumer API */
                rd_kafka_consume_start(NULL, 0, 0);
                rd_kafka_consume_stop(NULL, 0);
                rd_kafka_consume(NULL, 0, 0);
                rd_kafka_consume_batch(NULL, 0, 0, NULL, 0);
                rd_kafka_consume_callback(NULL, 0, 0, NULL, NULL);
                rd_kafka_offset_store(NULL, 0, 0);
                /* Producer API */
                rd_kafka_produce(NULL, 0, 0, NULL, 0, NULL, 0, NULL);
                rd_kafka_produce_batch(NULL, 0, 0, NULL, 0);
                /* Main handle utilities */
                rd_kafka_poll(NULL, 0);
                rd_kafka_brokers_add(NULL, NULL);
                /* DEPRECATED: rd_kafka_set_logger(NULL, NULL); */
                rd_kafka_set_log_level(NULL, 0);
                rd_kafka_log_print(NULL, 0, NULL, NULL);
#ifndef _MSC_VER
                /* syslog logging is not available on Windows builds */
                rd_kafka_log_syslog(NULL, 0, NULL, NULL);
#endif
                rd_kafka_outq_len(NULL);
                rd_kafka_dump(NULL, NULL);
                rd_kafka_thread_cnt();
                rd_kafka_wait_destroyed(0);
                rd_kafka_metadata(NULL, 0, NULL, NULL, 0);
                rd_kafka_metadata_destroy(NULL);
                /* Queue-based consumption */
                rd_kafka_queue_destroy(NULL);
                rd_kafka_consume_start_queue(NULL, 0, 0, NULL);
                rd_kafka_consume_queue(NULL, 0);
                rd_kafka_consume_batch_queue(NULL, 0, NULL, 0);
                rd_kafka_consume_callback_queue(NULL, 0, NULL, NULL);
                rd_kafka_seek(NULL, 0, 0, 0);
                rd_kafka_yield(NULL);
                rd_kafka_mem_free(NULL, NULL);
                /* Group listing */
                rd_kafka_list_groups(NULL, NULL, NULL, 0);
                rd_kafka_group_list_destroy(NULL);

                /* KafkaConsumer API */
                rd_kafka_subscribe(NULL, NULL);
                rd_kafka_unsubscribe(NULL);
                rd_kafka_subscription(NULL, NULL);
                rd_kafka_consumer_poll(NULL, 0);
                rd_kafka_consumer_close(NULL);
                rd_kafka_assign(NULL, NULL);
                rd_kafka_assignment(NULL, NULL);
                rd_kafka_commit(NULL, NULL, 0);
                rd_kafka_commit_message(NULL, NULL, 0);
                rd_kafka_committed(NULL, NULL, 0);
                rd_kafka_position(NULL, NULL);

                /* TopicPartition */
                rd_kafka_topic_partition_list_new(0);
                rd_kafka_topic_partition_list_destroy(NULL);
                rd_kafka_topic_partition_list_add(NULL, NULL, 0);
                rd_kafka_topic_partition_list_add_range(NULL, NULL, 0, 0);
                rd_kafka_topic_partition_list_del(NULL, NULL, 0);
                rd_kafka_topic_partition_list_del_by_idx(NULL, 0);
                rd_kafka_topic_partition_list_copy(NULL);
                rd_kafka_topic_partition_list_set_offset(NULL, NULL, 0, 0);
                rd_kafka_topic_partition_list_find(NULL, NULL, 0);
                /* Watermark offsets */
                rd_kafka_query_watermark_offsets(NULL, NULL, 0, NULL, NULL, 0);
                rd_kafka_get_watermark_offsets(NULL, NULL, 0, NULL, NULL);
        }

        return 0;
}
/**
 * @brief Consumer group termination test: produce to multiple partitions,
 *        start _CONS_CNT consumers in the same group, close one to force
 *        a rebalance, then verify the survivors consume everything and
 *        can unsubscribe+close cleanly.
 *        Relies on file-scope \c assign_cnt / \c consumed_msg_cnt updated
 *        by \c rebalance_cb and \c consume_all (defined elsewhere).
 */
int main_0018_cgrp_term (int argc, char **argv) {
        const char *topic = test_mk_topic_name(__FUNCTION__, 1);
#define _CONS_CNT 2
        rd_kafka_t *rk_p, *rk_c[_CONS_CNT];
        rd_kafka_topic_t *rkt_p;
        int msg_cnt = 1000;
        int msg_base = 0;
        int partition_cnt = 2;
        int partition;
        uint64_t testid;
        rd_kafka_topic_conf_t *default_topic_conf;
        rd_kafka_topic_partition_list_t *topics;
        rd_kafka_resp_err_t err;
        test_timing_t t_assign, t_consume;
        char errstr[512];
        int i;

        testid = test_id_generate();

        /* Produce messages */
        rk_p = test_create_producer();
        rkt_p = test_create_producer_topic(rk_p, topic, NULL);

        /* msg_cnt messages per partition, with per-partition base offsets
         * so each message is uniquely identifiable. */
        for (partition = 0 ; partition < partition_cnt ; partition++) {
                test_produce_msgs(rk_p, rkt_p, testid, partition,
                                  msg_base+(partition*msg_cnt), msg_cnt,
                                  NULL, 0);
        }

        rd_kafka_topic_destroy(rkt_p);
        rd_kafka_destroy(rk_p);

        test_conf_init(NULL, &default_topic_conf,
                       (test_session_timeout_ms * 3) / 1000);
        if (rd_kafka_topic_conf_set(default_topic_conf, "auto.offset.reset",
                                    "smallest",
                                    errstr, sizeof(errstr)) !=
            RD_KAFKA_CONF_OK)
                TEST_FAIL("%s\n", errstr);

        /* Fill in topic subscription set */
        topics = rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add(topics, topic, -1);

        /* Create consumers and start subscription */
        for (i = 0 ; i < _CONS_CNT ; i++) {
                /* The topic name doubles as the group id so all consumers
                 * join the same group. */
                rk_c[i] = test_create_consumer(topic/*group_id*/,
                                               rebalance_cb, NULL,
                                               rd_kafka_topic_conf_dup(
                                                       default_topic_conf),
                                               NULL);

                err = rd_kafka_poll_set_consumer(rk_c[i]);
                if (err)
                        TEST_FAIL("poll_set_consumer: %s\n",
                                  rd_kafka_err2str(err));

                err = rd_kafka_subscribe(rk_c[i], topics);
                if (err)
                        TEST_FAIL("subscribe: %s\n", rd_kafka_err2str(err));
        }

        rd_kafka_topic_conf_destroy(default_topic_conf);
        rd_kafka_topic_partition_list_destroy(topics);

        /* Wait for both consumers to get an assignment */
        TIMING_START(&t_assign, "WAIT.ASSIGN");
        while (assign_cnt < _CONS_CNT)
                consume_all(rk_c, _CONS_CNT, msg_cnt,
                            test_session_timeout_ms + 3000);
        TIMING_STOP(&t_assign);

        /* Now close one of the consumers, this will cause a rebalance. */
        TEST_SAY("Closing down 1/%d consumer(s): %s\n", _CONS_CNT,
                 rd_kafka_name(rk_c[0]));
        err = rd_kafka_consumer_close(rk_c[0]);
        if (err)
                TEST_FAIL("consumer_close failed: %s\n",
                          rd_kafka_err2str(err));
        rd_kafka_destroy(rk_c[0]);
        rk_c[0] = NULL;

        /* Let remaining consumers run for a while to take over the now
         * lost partitions. */

        if (assign_cnt != _CONS_CNT-1)
                TEST_FAIL("assign_cnt %d, should be %d\n",
                          assign_cnt, _CONS_CNT-1);

        TIMING_START(&t_consume, "CONSUME.WAIT");
        consume_all(rk_c, _CONS_CNT, msg_cnt, test_session_timeout_ms + 3000);
        TIMING_STOP(&t_consume);

        TEST_SAY("Closing remaining consumers\n");
        for (i = 0 ; i < _CONS_CNT ; i++) {
                test_timing_t t_close;
                rd_kafka_topic_partition_list_t *sub;
                int j;

                if (!rk_c[i])
                        continue;

                /* Query subscription */
                err = rd_kafka_subscription(rk_c[i], &sub);
                if (err)
                        TEST_FAIL("%s: subscription() failed: %s\n",
                                  rd_kafka_name(rk_c[i]),
                                  rd_kafka_err2str(err));
                TEST_SAY("%s: subscription (%d):\n",
                         rd_kafka_name(rk_c[i]), sub->cnt);
                for (j = 0 ; j < sub->cnt ; j++)
                        TEST_SAY(" %s\n", sub->elems[j].topic);
                rd_kafka_topic_partition_list_destroy(sub);

                /* Run an explicit unsubscribe() (async) prior to close()
                 * to trigger race condition issues on termination. */
                TEST_SAY("Unsubscribing instance %s\n",
                         rd_kafka_name(rk_c[i]));
                err = rd_kafka_unsubscribe(rk_c[i]);
                if (err)
                        TEST_FAIL("%s: unsubscribe failed: %s\n",
                                  rd_kafka_name(rk_c[i]),
                                  rd_kafka_err2str(err));

                TEST_SAY("Closing %s\n", rd_kafka_name(rk_c[i]));
                TIMING_START(&t_close, "CONSUMER.CLOSE");
                err = rd_kafka_consumer_close(rk_c[i]);
                TIMING_STOP(&t_close);
                if (err)
                        TEST_FAIL("consumer_close failed: %s\n",
                                  rd_kafka_err2str(err));

                rd_kafka_destroy(rk_c[i]);
                rk_c[i] = NULL;
        }

        /* Duplicates are tolerated (rebalance may replay uncommitted
         * messages) but missing messages are a failure. */
        TEST_SAY("%d/%d messages consumed\n", consumed_msg_cnt, msg_cnt);
        if (consumed_msg_cnt < msg_cnt)
                TEST_FAIL("Only %d/%d messages were consumed\n",
                          consumed_msg_cnt, msg_cnt);
        else if (consumed_msg_cnt > msg_cnt)
                TEST_SAY("At least %d/%d messages were consumed "
                         "multiple times\n",
                         consumed_msg_cnt - msg_cnt, msg_cnt);

        return 0;
}
/**
 * @brief Consumer.subscribe(topics, [on_assign], [on_revoke])
 *
 * Replaces the consumer's subscription with the given list of topic
 * name strings and (re)installs the optional rebalance callbacks.
 *
 * Returns None on success, or NULL with a Python exception set.
 */
static PyObject *Consumer_subscribe (Handle *self, PyObject *args,
                                     PyObject *kwargs) {

        rd_kafka_topic_partition_list_t *topics;
        static char *kws[] = { "topics", "on_assign", "on_revoke", NULL };
        PyObject *tlist, *on_assign = NULL, *on_revoke = NULL;
        Py_ssize_t pos = 0;
        rd_kafka_resp_err_t err;

        if (!self->rk) {
                PyErr_SetString(PyExc_RuntimeError, "Consumer closed");
                return NULL;
        }

        if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|OO", kws,
                                         &tlist, &on_assign, &on_revoke))
                return NULL;

        /* Validate all arguments before allocating anything. */
        if (!PyList_Check(tlist)) {
                PyErr_Format(PyExc_TypeError,
                             "expected list of topic unicode strings");
                return NULL;
        }

        if (on_assign && !PyCallable_Check(on_assign)) {
                PyErr_Format(PyExc_TypeError,
                             "on_assign expects a callable");
                return NULL;
        }

        if (on_revoke && !PyCallable_Check(on_revoke)) {
                PyErr_Format(PyExc_TypeError,
                             "on_revoke expects a callable");
                return NULL;
        }

        /* Convert the Python list to a C topic+partition list. */
        topics = rd_kafka_topic_partition_list_new((int)PyList_Size(tlist));

        for (pos = 0 ; pos < PyList_Size(tlist) ; pos++) {
                /* PyList_GetItem returns a borrowed reference. */
                PyObject *o = PyList_GetItem(tlist, pos);
                PyObject *uo, *uo8;
                if (!(uo = cfl_PyObject_Unistr(o))) {
                        PyErr_Format(PyExc_TypeError,
                                     "expected list of unicode strings");
                        rd_kafka_topic_partition_list_destroy(topics);
                        return NULL;
                }
                /* NOTE(review): the cfl_PyUnistr_AsUTF8() result is
                 * assumed non-NULL here — confirm it cannot fail for a
                 * valid unicode object. */
                rd_kafka_topic_partition_list_add(topics,
                                                  cfl_PyUnistr_AsUTF8(uo,
                                                                      &uo8),
                                                  RD_KAFKA_PARTITION_UA);
                Py_XDECREF(uo8);
                Py_DECREF(uo);
        }

        err = rd_kafka_subscribe(self->rk, topics);

        rd_kafka_topic_partition_list_destroy(topics);

        if (err) {
                cfl_PyErr_Format(err,
                                 "Failed to set subscription: %s",
                                 rd_kafka_err2str(err));
                return NULL;
        }

        /*
         * Update rebalance callbacks
         */
        /* Drop any previously installed callback before storing the new
         * one; new callbacks get an owned reference via Py_INCREF. */
        if (self->u.Consumer.on_assign) {
                Py_DECREF(self->u.Consumer.on_assign);
                self->u.Consumer.on_assign = NULL;
        }
        if (on_assign) {
                self->u.Consumer.on_assign = on_assign;
                Py_INCREF(self->u.Consumer.on_assign);
        }

        if (self->u.Consumer.on_revoke) {
                Py_DECREF(self->u.Consumer.on_revoke);
                self->u.Consumer.on_revoke = NULL;
        }
        if (on_revoke) {
                self->u.Consumer.on_revoke = on_revoke;
                Py_INCREF(self->u.Consumer.on_revoke);
        }

        Py_RETURN_NONE;
}