Kafka::Kafka(std::string brokers, std::string topic) {
  std::string err_str;

  m_partition = RdKafka::Topic::PARTITION_UA;

  RdKafka::Conf *conf  = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
  RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);

  conf->set("metadata.broker.list", brokers, err_str);

  /* The callback objects must outlive the producer, so they cannot be
   * plain locals that die when the constructor returns; making them
   * static is the minimal fix here (class members would be cleaner). */
  static MyEventCb my_event_cb;
  conf->set("event_cb", &my_event_cb, err_str);

  static MyDeliveryReportCb my_dr_cb;
  conf->set("dr_cb", &my_dr_cb, err_str);

  k_producer = RdKafka::Producer::create(conf, err_str);
  if (!k_producer) {
    std::cerr << "Failed to create producer: " << err_str << std::endl;
    exit(1);
  }

  std::cout << "% Created producer " << k_producer->name() << std::endl;

  k_topic = RdKafka::Topic::create(k_producer, topic, tconf, err_str);
  if (!k_topic) {
    std::cerr << "Failed to create topic: " << err_str << std::endl;
    exit(1);
  }

  /* The conf objects are copied by create() and can be freed now. */
  delete conf;
  delete tconf;
}
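/*
 * The constructor above registers MyEventCb and MyDeliveryReportCb, but
 * their definitions are not part of this snippet. A minimal sketch of what
 * such callback classes typically look like (the bodies are illustrative
 * assumptions, not the original implementations):
 */
class MyEventCb : public RdKafka::EventCb {
 public:
  void event_cb(RdKafka::Event &event) {
    /* Log errors surfaced by librdkafka's background machinery */
    if (event.type() == RdKafka::Event::EVENT_ERROR)
      std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): "
                << event.str() << std::endl;
  }
};

class MyDeliveryReportCb : public RdKafka::DeliveryReportCb {
 public:
  void dr_cb(RdKafka::Message &message) {
    /* Called from poll() once per produced message, success or failure */
    std::cerr << "Message delivery (" << message.len() << " bytes): "
              << message.errstr() << std::endl;
  }
};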
int main_0095_all_brokers_down (int argc, char **argv) {
  RdKafka::Conf *conf;
  std::string errstr;

  Test::conf_init(&conf, NULL, 20);
  /* Two broker addresses that will quickly reject the connection */
  Test::conf_set(conf, "bootstrap.servers", "127.0.0.1:1,127.0.0.1:2");

  /*
   * First test producer
   */
  errorEventCb pEvent = errorEventCb();

  if (conf->set("event_cb", &pEvent, errstr) != RdKafka::Conf::CONF_OK)
    Test::Fail(errstr);

  Test::Say("Test Producer\n");

  RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
  if (!p)
    Test::Fail("Failed to create Producer: " + errstr);

  /* Wait for all brokers down */
  while (!pEvent.error_seen)
    p->poll(1000);

  delete p;

  /*
   * Test high-level consumer that has a logical broker (group coord),
   * which has caused AllBrokersDown generation problems (#2259)
   */
  errorEventCb cEvent = errorEventCb();

  Test::conf_set(conf, "group.id", "test");

  if (conf->set("event_cb", &cEvent, errstr) != RdKafka::Conf::CONF_OK)
    Test::Fail(errstr);

  Test::Say("Test KafkaConsumer\n");

  RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
  if (!c)
    Test::Fail("Failed to create KafkaConsumer: " + errstr);

  /* Wait for all brokers down */
  while (!cEvent.error_seen) {
    RdKafka::Message *m = c->consume(1000);
    if (m)
      delete m;
  }

  c->close();
  delete c;

  return 0;
}
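/*
 * The test relies on an errorEventCb class that is not shown in this
 * excerpt. A minimal sketch of what it needs to provide: an EventCb that
 * raises error_seen once ERR__ALL_BROKERS_DOWN is reported (an assumption
 * consistent with how the test busy-polls on error_seen):
 */
class errorEventCb : public RdKafka::EventCb {
 public:
  bool error_seen;

  errorEventCb() : error_seen(false) {}

  void event_cb(RdKafka::Event &event) {
    if (event.type() == RdKafka::Event::EVENT_ERROR) {
      Test::Say(tostr() << "Error: " << RdKafka::err2str(event.err())
                        << ": " << event.str() << "\n");
      if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN)
        error_seen = true;
    }
  }
};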
void KafkaLogProducer::configure() {
  // TODO: modify with ConfigManager
  ConfigManager& configManager = ConfigManager::getInstance();
  LogConfig* logConfig = (LogConfig*)configManager.get(CONFIG_LOG);
  topicName = logConfig->kafkaTopic;
  logKey = logConfig->kafkaKey;
  std::string brokers = logConfig->kafkaBroker;

  RdKafka::Conf *conf  = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
  RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
  std::string errstr;
  RdKafka::Conf::ConfResult res;

  if ((res = conf->set("metadata.broker.list", brokers, errstr)) != RdKafka::Conf::CONF_OK ||
      (res = conf->set("event_cb", &eventCb, errstr)) != RdKafka::Conf::CONF_OK ||
      (res = conf->set("dr_cb", &drCb, errstr)) != RdKafka::Conf::CONF_OK ||
      (res = conf->set("queue.buffering.max.messages", logConfig->kafkaMQMaxSize, errstr)) != RdKafka::Conf::CONF_OK ||
      (res = conf->set("message.send.max.retries", "3", errstr)) != RdKafka::Conf::CONF_OK ||
      (res = conf->set("retry.backoff.ms", "500", errstr)) != RdKafka::Conf::CONF_OK) {
    LOG_ERROR << "error occurred while configuring Kafka log producer: " << errstr;
    throw LogClientException(errstr, -1);
  }

  producer = RdKafka::Producer::create(conf, errstr);
  if (!producer) {
    LOG_ERROR << "error occurred while creating Kafka log producer: " << errstr;
    throw LogClientException(errstr, -1);
  }

  topic = RdKafka::Topic::create(producer, topicName, tconf, errstr);
  if (!topic) {
    LOG_ERROR << "error occurred while creating Kafka log topic: " << errstr;
    throw LogClientException(errstr, -1);
  }
}
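/*
 * configure() only builds the producer and topic objects; the send path is
 * elsewhere. A hypothetical send() companion (the method name is an
 * assumption) sketching how the producer, topic, and logKey set up above
 * would typically be used:
 */
void KafkaLogProducer::send(const std::string &msg) {
  RdKafka::ErrorCode resp =
      producer->produce(topic, RdKafka::Topic::PARTITION_UA,
                        RdKafka::Producer::RK_MSG_COPY, /* copy the payload */
                        const_cast<char *>(msg.c_str()), msg.size(),
                        &logKey, /* message key from the log config */
                        NULL);
  if (resp != RdKafka::ERR_NO_ERROR)
    LOG_ERROR << "produce failed: " << RdKafka::err2str(resp);

  producer->poll(0); /* serve queued delivery reports */
}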
int main (int argc, char **argv) {
  std::string brokers = "localhost";
  std::string errstr;
  std::vector<std::string> topics;
  std::string conf_file;
  std::string mode = "P";
  int throughput = 0;
  int32_t partition = RdKafka::Topic::PARTITION_UA;
  bool do_conf_dump = false;
  MyHashPartitionerCb hash_partitioner;

  /*
   * Create configuration objects
   */
  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
  RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);

  {
    char hostname[128];
    gethostname(hostname, sizeof(hostname)-1);
    conf->set("client.id", std::string("rdkafka@") + hostname, errstr);
  }

  conf->set("debug", "cgrp,topic", errstr);

  for (int i = 1 ; i < argc ; i++) {
    const char *name = argv[i];
    const char *val = i+1 < argc ? argv[i+1] : NULL;

    if (val && !strncmp(val, "--", 2))
      val = NULL;

    std::cout << now() << ": argument: " << name << " "
              << (val ? val : "") << std::endl;

    if (val) {
      if (!strcmp(name, "--topic"))
        topics.push_back(val);
      else if (!strcmp(name, "--broker-list"))
        brokers = val;
      else if (!strcmp(name, "--max-messages"))
        state.maxMessages = atoi(val);
      else if (!strcmp(name, "--throughput"))
        throughput = atoi(val);
      else if (!strcmp(name, "--producer.config") ||
               !strcmp(name, "--consumer.config"))
        read_conf_file(val);
      else if (!strcmp(name, "--group-id"))
        conf->set("group.id", val, errstr);
      else if (!strcmp(name, "--session-timeout"))
        conf->set("session.timeout.ms", val, errstr);
      else if (!strcmp(name, "--reset-policy")) {
        if (tconf->set("auto.offset.reset", val, errstr)) {
          std::cerr << now() << ": " << errstr << std::endl;
          exit(1);
        }
      } else if (!strcmp(name, "--debug")) {
        conf->set("debug", val, errstr);
      } else {
        std::cerr << now() << ": Unknown option " << name << std::endl;
        exit(1);
      }

      i++;
    } else {
      if (!strcmp(name, "--consumer"))
        mode = "C";
      else if (!strcmp(name, "--producer"))
        mode = "P";
      else if (!strcmp(name, "--enable-autocommit")) {
        state.consumer.useAutoCommit = true;
        conf->set("enable.auto.commit", "true", errstr);
      } else {
        std::cerr << now() << ": Unknown option or missing argument to "
                  << name << std::endl;
        exit(1);
      }
    }
  }

  if (topics.empty() || brokers.empty()) {
    std::cerr << now() << ": Missing --topic and --broker-list" << std::endl;
    exit(1);
  }

  /*
   * Set configuration properties
   */
  conf->set("metadata.broker.list", brokers, errstr);

  ExampleEventCb ex_event_cb;
  conf->set("event_cb", &ex_event_cb, errstr);

  if (do_conf_dump) {
    int pass;

    for (pass = 0 ; pass < 2 ; pass++) {
      std::list<std::string> *dump;
      if (pass == 0) {
        dump = conf->dump();
        std::cerr << now() << ": # Global config" << std::endl;
      } else {
        dump = tconf->dump();
        std::cerr << now() << ": # Topic config" << std::endl;
      }

      for (std::list<std::string>::iterator it = dump->begin();
           it != dump->end(); ) {
        std::cerr << *it << " = ";
        it++;
        std::cerr << *it << std::endl;
        it++;
      }
      std::cerr << std::endl;
    }
    exit(0);
  }

  signal(SIGINT, sigterm);
  signal(SIGTERM, sigterm);
  signal(SIGALRM, sigwatchdog);

  if (mode == "P") {
    /*
     * Producer mode
     */
    ExampleDeliveryReportCb ex_dr_cb;

    /* Set delivery report callback */
    conf->set("dr_cb", &ex_dr_cb, errstr);

    /*
     * Create producer using accumulated global configuration.
     */
    RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
    if (!producer) {
      std::cerr << now() << ": Failed to create producer: " << errstr
                << std::endl;
      exit(1);
    }

    std::cerr << now() << ": % Created producer " << producer->name()
              << std::endl;

    /*
     * Create topic handle.
     */
    RdKafka::Topic *topic = RdKafka::Topic::create(producer, topics[0],
                                                   tconf, errstr);
    if (!topic) {
      std::cerr << now() << ": Failed to create topic: " << errstr
                << std::endl;
      exit(1);
    }

    static const int delay_us = throughput ? 1000000/throughput : 0;

    if (state.maxMessages == -1)
      state.maxMessages = 1000000; /* Avoid infinite produce */

    for (int i = 0 ; run && i < state.maxMessages ; i++) {
      /*
       * Produce message
       */
      std::ostringstream msg;
      msg << i;
      RdKafka::ErrorCode resp =
          producer->produce(topic, partition,
                            RdKafka::Producer::RK_MSG_COPY /* Copy payload */,
                            const_cast<char *>(msg.str().c_str()),
                            msg.str().size(), NULL, NULL);
      if (resp != RdKafka::ERR_NO_ERROR) {
        errorString("producer_send_error", RdKafka::err2str(resp),
                    topic->name(), NULL, msg.str());
        state.producer.numErr++;
      } else {
        std::cerr << now() << ": % Produced message (" << msg.str().size()
                  << " bytes)" << std::endl;
        state.producer.numSent++;
      }

      producer->poll(delay_us / 1000);
      watchdog_kick();
    }
    run = true;

    while (run && producer->outq_len() > 0) {
      std::cerr << now() << ": Waiting for " << producer->outq_len()
                << std::endl;
      producer->poll(50);
      watchdog_kick();
    }

    std::cerr << now() << ": " << state.producer.numAcked << "/"
              << state.producer.numSent << "/" << state.maxMessages
              << " msgs acked/sent/max, " << state.producer.numErr
              << " errored" << std::endl;

    delete topic;
    delete producer;

  } else if (mode == "C") {
    /*
     * Consumer mode
     */
    tconf->set("auto.offset.reset", "smallest", errstr);

    /* Set default topic config */
    conf->set("default_topic_conf", tconf, errstr);

    ExampleRebalanceCb ex_rebalance_cb;
    conf->set("rebalance_cb", &ex_rebalance_cb, errstr);

    ExampleOffsetCommitCb ex_offset_commit_cb;
    conf->set("offset_commit_cb", &ex_offset_commit_cb, errstr);

    /*
     * Create consumer using accumulated global configuration.
     */
    consumer = RdKafka::KafkaConsumer::create(conf, errstr);
    if (!consumer) {
      std::cerr << now() << ": Failed to create consumer: " << errstr
                << std::endl;
      exit(1);
    }

    std::cerr << now() << ": % Created consumer " << consumer->name()
              << std::endl;

    /*
     * Subscribe to topic(s)
     */
    RdKafka::ErrorCode resp = consumer->subscribe(topics);
    if (resp != RdKafka::ERR_NO_ERROR) {
      std::cerr << now() << ": Failed to subscribe to " << topics.size()
                << " topics: " << RdKafka::err2str(resp) << std::endl;
      exit(1);
    }

    /*
     * Consume messages
     */
    while (run) {
      RdKafka::Message *msg = consumer->consume(500);
      msg_consume(consumer, msg, NULL);
      delete msg;
      watchdog_kick();
    }

    /* Final commit */
    do_commit(consumer, 1);

    /*
     * Stop consumer
     */
    consumer->close();

    delete consumer;
  }

  /*
   * Wait for RdKafka to decommission.
   * This is not strictly needed (when outq_len() is checked above), but it
   * allows RdKafka to clean up all its resources before the application
   * exits so that memory profilers such as valgrind won't complain about
   * memory leaks.
   */
  RdKafka::wait_destroyed(5000);

  std::cerr << now() << ": EXITING WITH RETURN VALUE 0" << std::endl;
  return 0;
}
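/*
 * The consumer branch above ends with do_commit(), which is outside this
 * excerpt. A rough sketch of what such a helper usually does (the
 * "immediate" parameter semantics are an assumption): commit explicitly
 * unless auto-commit was enabled via --enable-autocommit.
 */
static void do_commit(RdKafka::KafkaConsumer *consumer, int immediate) {
  if (state.consumer.useAutoCommit)
    return; /* librdkafka commits in the background */

  RdKafka::ErrorCode err =
      immediate ? consumer->commitSync() : consumer->commitAsync();
  if (err != RdKafka::ERR_NO_ERROR)
    std::cerr << now() << ": commit failed: " << RdKafka::err2str(err)
              << std::endl;
}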
int main (int argc, char **argv) {
  std::string brokers = "localhost";
  std::string errstr;
  std::string topic_str;
  std::string mode;
  std::string debug;
  std::vector<std::string> topics;
  bool do_conf_dump = false;
  int opt;
  int use_ccb = 0;

  /*
   * Create configuration objects
   */
  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
  RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);

  ExampleRebalanceCb ex_rebalance_cb;
  conf->set("rebalance_cb", &ex_rebalance_cb, errstr);

  while ((opt = getopt(argc, argv, "g:b:z:qd:eX:AM:f:qv")) != -1) {
    switch (opt) {
    case 'g':
      if (conf->set("group.id", optarg, errstr) != RdKafka::Conf::CONF_OK) {
        std::cerr << errstr << std::endl;
        exit(1);
      }
      break;
    case 'b':
      brokers = optarg;
      break;
    case 'z':
      if (conf->set("compression.codec", optarg, errstr) !=
          RdKafka::Conf::CONF_OK) {
        std::cerr << errstr << std::endl;
        exit(1);
      }
      break;
    case 'e':
      exit_eof = true;
      break;
    case 'd':
      debug = optarg;
      break;
    case 'M':
      if (conf->set("statistics.interval.ms", optarg, errstr) !=
          RdKafka::Conf::CONF_OK) {
        std::cerr << errstr << std::endl;
        exit(1);
      }
      break;
    case 'X':
      {
        char *name, *val;

        if (!strcmp(optarg, "dump")) {
          do_conf_dump = true;
          continue;
        }

        name = optarg;
        if (!(val = strchr(name, '='))) {
          std::cerr << "%% Expected -X property=value, not " << name
                    << std::endl;
          exit(1);
        }

        *val = '\0';
        val++;

        /* Try "topic." prefixed properties on topic
         * conf first, and then fall through to global if
         * it didn't match a topic configuration property. */
        RdKafka::Conf::ConfResult res = RdKafka::Conf::CONF_UNKNOWN;
        if (!strncmp(name, "topic.", strlen("topic.")))
          res = tconf->set(name+strlen("topic."), val, errstr);
        if (res == RdKafka::Conf::CONF_UNKNOWN)
          res = conf->set(name, val, errstr);

        if (res != RdKafka::Conf::CONF_OK) {
          std::cerr << errstr << std::endl;
          exit(1);
        }
      }
      break;
    case 'f':
      if (!strcmp(optarg, "ccb"))
        use_ccb = 1;
      else {
        std::cerr << "Unknown option: " << optarg << std::endl;
        exit(1);
      }
      break;
    case 'q':
      verbosity--;
      break;
    case 'v':
      verbosity++;
      break;
    default:
      goto usage;
    }
  }

  for (; optind < argc ; optind++)
    topics.push_back(std::string(argv[optind]));

  if (topics.empty() || optind != argc) {
  usage:
    fprintf(stderr,
            "Usage: %s -g <group-id> [options] topic1 topic2..\n"
            "\n"
            "librdkafka version %s (0x%08x)\n"
            "\n"
            " Options:\n"
            "  -g <group-id>   Consumer group id\n"
            "  -b <brokers>    Broker address (localhost:9092)\n"
            "  -z <codec>      Enable compression:\n"
            "                  none|gzip|snappy\n"
            "  -e              Exit consumer when last message\n"
            "                  in partition has been received.\n"
            "  -d [facs..]     Enable debugging contexts:\n"
            "                  %s\n"
            "  -M <intervalms> Enable statistics\n"
            "  -X <prop=name>  Set arbitrary librdkafka "
            "configuration property\n"
            "                  Properties prefixed with \"topic.\" "
            "will be set on topic object.\n"
            "                  Use '-X list' to see the full list\n"
            "                  of supported properties.\n"
            "  -f <flag>       Set option:\n"
            "                     ccb - use consume_callback\n"
            "  -q              Quiet / Decrease verbosity\n"
            "  -v              Increase verbosity\n"
            "\n"
            "\n",
            argv[0],
            RdKafka::version_str().c_str(), RdKafka::version(),
            RdKafka::get_debug_contexts().c_str());
    exit(1);
  }

  /*
   * Set configuration properties
   */
  conf->set("metadata.broker.list", brokers, errstr);

  if (!debug.empty()) {
    if (conf->set("debug", debug, errstr) != RdKafka::Conf::CONF_OK) {
      std::cerr << errstr << std::endl;
      exit(1);
    }
  }

  ExampleEventCb ex_event_cb;
  conf->set("event_cb", &ex_event_cb, errstr);

  conf->set("default_topic_conf", tconf, errstr);

  if (do_conf_dump) {
    int pass;

    for (pass = 0 ; pass < 2 ; pass++) {
      std::list<std::string> *dump;
      if (pass == 0) {
        dump = conf->dump();
        std::cout << "# Global config" << std::endl;
      } else {
        dump = tconf->dump();
        std::cout << "# Topic config" << std::endl;
      }

      for (std::list<std::string>::iterator it = dump->begin();
           it != dump->end(); ) {
        std::cout << *it << " = ";
        it++;
        std::cout << *it << std::endl;
        it++;
      }
      std::cout << std::endl;
    }
    exit(0);
  }

  /* The topic conf was copied into the global conf above, but it must stay
   * alive until after the dump; deleting it before dumping it (as the
   * original ordering did) is a use-after-free. */
  delete tconf;

  signal(SIGINT, sigterm);
  signal(SIGTERM, sigterm);

  /*
   * Consumer mode
   */

  /*
   * Create consumer using accumulated global configuration.
   */
  RdKafka::KafkaConsumer *consumer =
      RdKafka::KafkaConsumer::create(conf, errstr);
  if (!consumer) {
    std::cerr << "Failed to create consumer: " << errstr << std::endl;
    exit(1);
  }

  delete conf;

  std::cout << "% Created consumer " << consumer->name() << std::endl;

  /*
   * Subscribe to topics
   */
  RdKafka::ErrorCode err = consumer->subscribe(topics);
  if (err) {
    std::cerr << "Failed to subscribe to " << topics.size() << " topics: "
              << RdKafka::err2str(err) << std::endl;
    exit(1);
  }

  /*
   * Consume messages
   */
  while (run) {
    if (use_ccb) {
      std::cerr << "Use callback: Not implemented" << std::endl;
      break;
    }
    RdKafka::Message *msg = consumer->consume(1000);
    msg_consume(msg, NULL);
    delete msg;
  }

  alarm(10);

  /*
   * Stop consumer
   */
  consumer->close();
  delete consumer;

  std::cerr << "% Consumed " << msg_cnt << " messages ("
            << msg_bytes << " bytes)" << std::endl;

  /*
   * Wait for RdKafka to decommission.
   * This is not strictly needed (when outq_len() is checked above), but it
   * allows RdKafka to clean up all its resources before the application
   * exits so that memory profilers such as valgrind won't complain about
   * memory leaks.
   */
  RdKafka::wait_destroyed(5000);

  return 0;
}
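/*
 * The consume loop above hands each message to msg_consume(), which is not
 * part of this excerpt. A plausible sketch following the same pattern as
 * librdkafka's consumer example (the globals msg_cnt, msg_bytes, verbosity,
 * exit_eof, and run come from the surrounding file):
 */
void msg_consume(RdKafka::Message *message, void *opaque) {
  switch (message->err()) {
  case RdKafka::ERR__TIMED_OUT:
    /* No message within the timeout; nothing to do */
    break;

  case RdKafka::ERR_NO_ERROR:
    /* Real message */
    msg_cnt++;
    msg_bytes += message->len();
    if (verbosity >= 2)
      printf("%.*s\n", static_cast<int>(message->len()),
             static_cast<const char *>(message->payload()));
    break;

  case RdKafka::ERR__PARTITION_EOF:
    /* Reached the end of the partition */
    if (exit_eof)
      run = false;
    break;

  default:
    std::cerr << "Consume failed: " << message->errstr() << std::endl;
    run = false;
  }
}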
int main (int argc, char **argv) {
  std::string brokers = "localhost";
  std::string errstr;
  std::string topic_str;
  std::string mode;
  std::string debug;
  int32_t partition = RdKafka::Topic::PARTITION_UA;
  int64_t start_offset = RdKafka::Topic::OFFSET_BEGINNING;
  bool do_conf_dump = false;
  int opt;
  MyHashPartitionerCb hash_partitioner;
  int use_ccb = 0;

  /*
   * Create configuration objects
   */
  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
  RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);

  while ((opt = getopt(argc, argv, "PCLt:p:b:z:qd:o:eX:AM:f:")) != -1) {
    switch (opt) {
    case 'P':
    case 'C':
    case 'L':
      mode = opt;
      break;
    case 't':
      topic_str = optarg;
      break;
    case 'p':
      if (!strcmp(optarg, "random"))
        /* default */;
      else if (!strcmp(optarg, "hash")) {
        if (tconf->set("partitioner_cb", &hash_partitioner, errstr) !=
            RdKafka::Conf::CONF_OK) {
          std::cerr << errstr << std::endl;
          exit(1);
        }
      } else
        partition = std::atoi(optarg);
      break;
    case 'b':
      brokers = optarg;
      break;
    case 'z':
      if (conf->set("compression.codec", optarg, errstr) !=
          RdKafka::Conf::CONF_OK) {
        std::cerr << errstr << std::endl;
        exit(1);
      }
      break;
    case 'o':
      if (!strcmp(optarg, "end"))
        start_offset = RdKafka::Topic::OFFSET_END;
      else if (!strcmp(optarg, "beginning"))
        start_offset = RdKafka::Topic::OFFSET_BEGINNING;
      else if (!strcmp(optarg, "stored"))
        start_offset = RdKafka::Topic::OFFSET_STORED;
      else
        start_offset = strtoll(optarg, NULL, 10);
      break;
    case 'e':
      exit_eof = true;
      break;
    case 'd':
      debug = optarg;
      break;
    case 'M':
      if (conf->set("statistics.interval.ms", optarg, errstr) !=
          RdKafka::Conf::CONF_OK) {
        std::cerr << errstr << std::endl;
        exit(1);
      }
      break;
    case 'X':
      {
        char *name, *val;

        if (!strcmp(optarg, "dump")) {
          do_conf_dump = true;
          continue;
        }

        name = optarg;
        if (!(val = strchr(name, '='))) {
          std::cerr << "%% Expected -X property=value, not " << name
                    << std::endl;
          exit(1);
        }

        *val = '\0';
        val++;

        /* Try "topic." prefixed properties on topic
         * conf first, and then fall through to global if
         * it didn't match a topic configuration property. */
        RdKafka::Conf::ConfResult res;
        if (!strncmp(name, "topic.", strlen("topic.")))
          res = tconf->set(name+strlen("topic."), val, errstr);
        else
          res = conf->set(name, val, errstr);

        if (res != RdKafka::Conf::CONF_OK) {
          std::cerr << errstr << std::endl;
          exit(1);
        }
      }
      break;
    case 'f':
      if (!strcmp(optarg, "ccb"))
        use_ccb = 1;
      else {
        std::cerr << "Unknown option: " << optarg << std::endl;
        exit(1);
      }
      break;
    default:
      goto usage;
    }
  }

  if (mode.empty() || (topic_str.empty() && mode != "L") || optind != argc) {
  usage:
    std::string features;
    conf->get("builtin.features", features);
    fprintf(stderr,
            "Usage: %s [-C|-P] -t <topic> "
            "[-p <partition>] [-b <host1:port1,host2:port2,..>]\n"
            "\n"
            "librdkafka version %s (0x%08x, builtin.features \"%s\")\n"
            "\n"
            " Options:\n"
            "  -C | -P         Consumer or Producer mode\n"
            "  -L              Metadata list mode\n"
            "  -t <topic>      Topic to fetch / produce\n"
            "  -p <num>        Partition (random partitioner)\n"
            "  -p <func>       Use partitioner:\n"
            "                  random (default), hash\n"
            "  -b <brokers>    Broker address (localhost:9092)\n"
            "  -z <codec>      Enable compression:\n"
            "                  none|gzip|snappy\n"
            "  -o <offset>     Start offset (consumer)\n"
            "  -e              Exit consumer when last message\n"
            "                  in partition has been received.\n"
            "  -d [facs..]     Enable debugging contexts:\n"
            "                  %s\n"
            "  -M <intervalms> Enable statistics\n"
            "  -X <prop=name>  Set arbitrary librdkafka "
            "configuration property\n"
            "                  Properties prefixed with \"topic.\" "
            "will be set on topic object.\n"
            "                  Use '-X list' to see the full list\n"
            "                  of supported properties.\n"
            "  -f <flag>       Set option:\n"
            "                     ccb - use consume_callback\n"
            "\n"
            " In Consumer mode:\n"
            "  writes fetched messages to stdout\n"
            " In Producer mode:\n"
            "  reads messages from stdin and sends to broker\n"
            "\n"
            "\n"
            "\n",
            argv[0],
            RdKafka::version_str().c_str(), RdKafka::version(),
            features.c_str(),
            RdKafka::get_debug_contexts().c_str());
    exit(1);
  }

  /*
   * Set configuration properties
   */
  conf->set("metadata.broker.list", brokers, errstr);

  if (!debug.empty()) {
    if (conf->set("debug", debug, errstr) != RdKafka::Conf::CONF_OK) {
      std::cerr << errstr << std::endl;
      exit(1);
    }
  }

  ExampleEventCb ex_event_cb;
  conf->set("event_cb", &ex_event_cb, errstr);

  if (do_conf_dump) {
    int pass;

    for (pass = 0 ; pass < 2 ; pass++) {
      std::list<std::string> *dump;
      if (pass == 0) {
        dump = conf->dump();
        std::cout << "# Global config" << std::endl;
      } else {
        dump = tconf->dump();
        std::cout << "# Topic config" << std::endl;
      }

      for (std::list<std::string>::iterator it = dump->begin();
           it != dump->end(); ) {
        std::cout << *it << " = ";
        it++;
        std::cout << *it << std::endl;
        it++;
      }
      std::cout << std::endl;
    }
    exit(0);
  }

  signal(SIGINT, sigterm);
  signal(SIGTERM, sigterm);

  if (mode == "P") {
    /*
     * Producer mode
     */
    if (topic_str.empty())
      goto usage;

    ExampleDeliveryReportCb ex_dr_cb;

    /* Set delivery report callback */
    conf->set("dr_cb", &ex_dr_cb, errstr);

    /*
     * Create producer using accumulated global configuration.
     */
    RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
    if (!producer) {
      std::cerr << "Failed to create producer: " << errstr << std::endl;
      exit(1);
    }

    std::cout << "% Created producer " << producer->name() << std::endl;

    /*
     * Create topic handle.
     */
    RdKafka::Topic *topic = RdKafka::Topic::create(producer, topic_str,
                                                   tconf, errstr);
    if (!topic) {
      std::cerr << "Failed to create topic: " << errstr << std::endl;
      exit(1);
    }

    /*
     * Read messages from stdin and produce to broker.
     */
    for (std::string line; run && std::getline(std::cin, line);) {
      if (line.empty()) {
        producer->poll(0);
        continue;
      }

      /*
       * Produce message
       */
      RdKafka::ErrorCode resp =
          producer->produce(topic, partition,
                            RdKafka::Producer::RK_MSG_COPY /* Copy payload */,
                            const_cast<char *>(line.c_str()), line.size(),
                            NULL, NULL);
      if (resp != RdKafka::ERR_NO_ERROR)
        std::cerr << "% Produce failed: " << RdKafka::err2str(resp)
                  << std::endl;
      else
        std::cerr << "% Produced message (" << line.size() << " bytes)"
                  << std::endl;

      producer->poll(0);
    }
    run = true;

    while (run && producer->outq_len() > 0) {
      std::cerr << "Waiting for " << producer->outq_len() << std::endl;
      producer->poll(1000);
    }

    delete topic;
    delete producer;

  } else if (mode == "C") {
    /*
     * Consumer mode
     */
    if (topic_str.empty())
      goto usage;

    /*
     * Create consumer using accumulated global configuration.
     */
    RdKafka::Consumer *consumer = RdKafka::Consumer::create(conf, errstr);
    if (!consumer) {
      std::cerr << "Failed to create consumer: " << errstr << std::endl;
      exit(1);
    }

    std::cout << "% Created consumer " << consumer->name() << std::endl;

    /*
     * Create topic handle.
     */
    RdKafka::Topic *topic = RdKafka::Topic::create(consumer, topic_str,
                                                   tconf, errstr);
    if (!topic) {
      std::cerr << "Failed to create topic: " << errstr << std::endl;
      exit(1);
    }

    /*
     * Start consumer for topic+partition at start offset
     */
    RdKafka::ErrorCode resp = consumer->start(topic, partition, start_offset);
    if (resp != RdKafka::ERR_NO_ERROR) {
      std::cerr << "Failed to start consumer: " << RdKafka::err2str(resp)
                << std::endl;
      exit(1);
    }

    ExampleConsumeCb ex_consume_cb;

    /*
     * Consume messages
     */
    while (run) {
      if (use_ccb) {
        consumer->consume_callback(topic, partition, 1000, &ex_consume_cb,
                                   &use_ccb);
      } else {
        RdKafka::Message *msg = consumer->consume(topic, partition, 1000);
        msg_consume(msg, NULL);
        delete msg;
      }
      consumer->poll(0);
    }

    /*
     * Stop consumer
     */
    consumer->stop(topic, partition);

    consumer->poll(1000);

    delete topic;
    delete consumer;

  } else {
    /* Metadata mode */

    /*
     * Create producer using accumulated global configuration.
     */
    RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
    if (!producer) {
      std::cerr << "Failed to create producer: " << errstr << std::endl;
      exit(1);
    }

    std::cout << "% Created producer " << producer->name() << std::endl;

    /*
     * Create topic handle.
     */
    RdKafka::Topic *topic = NULL;
    if (!topic_str.empty()) {
      topic = RdKafka::Topic::create(producer, topic_str, tconf, errstr);
      if (!topic) {
        std::cerr << "Failed to create topic: " << errstr << std::endl;
        exit(1);
      }
    }

    while (run) {
      class RdKafka::Metadata *metadata;

      /* Fetch metadata */
      RdKafka::ErrorCode err = producer->metadata(topic!=NULL, topic,
                                                  &metadata, 5000);
      if (err != RdKafka::ERR_NO_ERROR) {
        std::cerr << "%% Failed to acquire metadata: "
                  << RdKafka::err2str(err) << std::endl;
        run = 0;
        break;
      }

      metadata_print(topic_str, metadata);

      delete metadata;
      run = 0;
    }
  }

  /*
   * Wait for RdKafka to decommission.
   * This is not strictly needed (when outq_len() is checked above), but it
   * allows RdKafka to clean up all its resources before the application
   * exits so that memory profilers such as valgrind won't complain about
   * memory leaks.
   */
  RdKafka::wait_destroyed(5000);

  return 0;
}
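/*
 * Metadata mode calls metadata_print(), which is defined elsewhere in the
 * file. A plausible sketch of such a helper using the RdKafka::Metadata
 * accessors (the output formatting is an assumption):
 */
static void metadata_print(const std::string &topic,
                           const RdKafka::Metadata *metadata) {
  std::cout << "Metadata for " << (topic.empty() ? "all topics" : topic)
            << " (from broker " << metadata->orig_broker_id() << ":"
            << metadata->orig_broker_name() << ")" << std::endl;

  /* Iterate brokers */
  std::cout << " " << metadata->brokers()->size() << " brokers:" << std::endl;
  for (RdKafka::Metadata::BrokerMetadataIterator ib =
           metadata->brokers()->begin();
       ib != metadata->brokers()->end(); ++ib)
    std::cout << "  broker " << (*ib)->id() << " at " << (*ib)->host()
              << ":" << (*ib)->port() << std::endl;

  /* Iterate topics and their partition counts */
  std::cout << " " << metadata->topics()->size() << " topics:" << std::endl;
  for (RdKafka::Metadata::TopicMetadataIterator it =
           metadata->topics()->begin();
       it != metadata->topics()->end(); ++it)
    std::cout << "  topic \"" << (*it)->topic() << "\" with "
              << (*it)->partitions()->size() << " partitions, err "
              << RdKafka::err2str((*it)->err()) << std::endl;
}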
void Consumer::ensureSetup() {
  if (!topicPtr.load(std::memory_order_acquire)) {
    CriticalBlock block(lock);

    if (!topicPtr.load(std::memory_order_relaxed)) {
      initFileOffsetIfNotExist();

      std::string errStr;
      RdKafka::Conf* globalConfig = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);

      if (globalConfig) {
        // Set global configuration parameters, used mainly at the consumer level
        globalConfig->set("metadata.broker.list", brokers, errStr);
        globalConfig->set("compression.codec", "snappy", errStr);
        globalConfig->set("queued.max.messages.kbytes", "10000000", errStr);
        globalConfig->set("fetch.message.max.bytes", "10000000", errStr);

        // Set any global configurations from file, allowing
        // overrides of above settings
        applyConfig("kafka_global.conf", globalConfig, traceLevel);

        // Set consumer callbacks
        globalConfig->set("event_cb", static_cast<RdKafka::EventCb*>(this), errStr);

        // Create the consumer
        consumerPtr = RdKafka::Consumer::create(globalConfig, errStr);

        if (consumerPtr) {
          RdKafka::Conf* topicConfPtr = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);

          // Set the per-topic configuration parameters
          topicConfPtr->set("group.id", consumerGroup, errStr);
          topicConfPtr->set("auto.offset.reset", "smallest", errStr);

          // Set any topic configurations from file, allowing
          // overrides of above settings
          std::string confName = "kafka_consumer_topic_" + topic + ".conf";
          applyConfig(confName.c_str(), topicConfPtr, traceLevel);

          // Ensure that some items are set a certain way
          // by setting them after loading the external conf
          topicConfPtr->set("auto.commit.enable", "false", errStr);

          // Create the topic
          topicPtr.store(RdKafka::Topic::create(consumerPtr, topic, topicConfPtr, errStr), std::memory_order_release);

          if (!topicPtr) {
            throw MakeStringException(-1, "Kafka: Unable to create consumer topic object for topic '%s'; error: '%s'", topic.c_str(), errStr.c_str());
          }
        } else {
          throw MakeStringException(-1, "Kafka: Unable to create consumer object for brokers '%s'; error: '%s'", brokers.c_str(), errStr.c_str());
        }
      } else {
        throw MakeStringException(-1, "Kafka: Unable to create consumer global configuration object for brokers '%s'; error: '%s'", brokers.c_str(), errStr.c_str());
      }
    }
  }
}
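/*
 * ensureSetup() only builds the consumer and topic objects. A hypothetical
 * companion (the method name, partition, and offset handling are
 * assumptions) sketching how they would be used with the legacy
 * RdKafka::Consumer API:
 */
RdKafka::Message *Consumer::getOneMessage(int32_t partition, int64_t offset) {
  ensureSetup();

  RdKafka::Topic *topicObj = topicPtr.load(std::memory_order_acquire);

  if (consumerPtr->start(topicObj, partition, offset) != RdKafka::ERR_NO_ERROR)
    return NULL;

  /* Wait up to one second for a single message */
  RdKafka::Message *msg = consumerPtr->consume(topicObj, partition, 1000);

  consumerPtr->stop(topicObj, partition);

  return msg; /* caller inspects msg->err() and deletes it */
}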
void Publisher::ensureSetup() {
  if (!topicPtr.load(std::memory_order_acquire)) {
    CriticalBlock block(lock);

    if (!topicPtr.load(std::memory_order_relaxed)) {
      std::string errStr;
      RdKafka::Conf* globalConfig = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);

      if (globalConfig) {
        // Set global configuration parameters, used mainly at the producer level
        globalConfig->set("metadata.broker.list", brokers, errStr);
        globalConfig->set("queue.buffering.max.messages", "1000000", errStr);
        globalConfig->set("compression.codec", "snappy", errStr);
        globalConfig->set("message.send.max.retries", "3", errStr);
        globalConfig->set("retry.backoff.ms", "500", errStr);

        // Set any global configurations from file, allowing
        // overrides of above settings
        applyConfig("kafka_global.conf", globalConfig, traceLevel);

        // Set producer callbacks
        globalConfig->set("event_cb", static_cast<RdKafka::EventCb*>(this), errStr);
        globalConfig->set("dr_cb", static_cast<RdKafka::DeliveryReportCb*>(this), errStr);

        // Create the producer
        producerPtr = RdKafka::Producer::create(globalConfig, errStr);

        if (producerPtr) {
          RdKafka::Conf* topicConfPtr = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);

          // Set any topic configurations from file
          std::string confName = "kafka_publisher_topic_" + topic + ".conf";
          applyConfig(confName.c_str(), topicConfPtr, traceLevel);

          // Create the topic
          topicPtr.store(RdKafka::Topic::create(producerPtr, topic, topicConfPtr, errStr), std::memory_order_release);

          if (topicPtr) {
            // Start the attached background poller
            pollerPtr->start();
          } else {
            throw MakeStringException(-1, "Kafka: Unable to create producer topic object for topic '%s'; error: '%s'", topic.c_str(), errStr.c_str());
          }
        } else {
          throw MakeStringException(-1, "Kafka: Unable to create producer object for brokers '%s'; error: '%s'", brokers.c_str(), errStr.c_str());
        }
      } else {
        throw MakeStringException(-1, "Kafka: Unable to create producer global configuration object for brokers '%s'; error: '%s'", brokers.c_str(), errStr.c_str());
      }
    }
  }
}
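/*
 * The producer setup ends by starting an attached background poller
 * (pollerPtr->start()); its implementation is not in this excerpt. A
 * minimal sketch of such a poller (the class shape is an assumption): it
 * keeps calling poll() so the dr_cb and event_cb registered above fire.
 */
#include <atomic>
#include <thread>

class BackgroundPoller {
 public:
  explicit BackgroundPoller(RdKafka::Producer *producer)
      : producer(producer), running(false) {}

  void start() {
    running = true;
    worker = std::thread([this]() {
      while (running)
        producer->poll(1000); /* serves delivery reports and events */
    });
  }

  void stop() {
    running = false;
    if (worker.joinable())
      worker.join();
  }

 private:
  RdKafka::Producer *producer;
  std::atomic<bool> running;
  std::thread worker;
};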
/**
 * Generate a unique topic name (there is a C function for that in test.h which you should use)
 * Query metadata for that topic
 * Wait one second
 * Query again; it should now have ISRs and everything
 */
static void test_metadata_cpp (void) {
  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); /* @TODO: Do we need to merge with C test_conf_init()? */
  RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC); /* @TODO: Same as above */

  RdKafka::Metadata *metadata;
  RdKafka::ErrorCode err;
  int msgcnt = 10000;
  int partition_cnt = 2;
  int i;
  uint64_t testid;
  int msg_base = 0;
  std::string errstr;
  const char *topic_str = test_mk_topic_name("0013", 1);

  /* if (!topic) { TEST_FAIL() } */

  //const RdKafka::Conf::ConfResult confResult = conf->set("debug", "all", errstr);
  //if (confResult != RdKafka::Conf::CONF_OK) {
  //  std::stringstream errstring;
  //  errstring << "Can't set config" << errstr;
  //  TEST_FAIL(errstring.str().c_str());
  //}

  TEST_SAY("Topic %s.\n", topic_str);

  const RdKafka::Conf::ConfResult confBrokerResult =
      conf->set("metadata.broker.list", "localhost:9092", errstr);
  if (confBrokerResult != RdKafka::Conf::CONF_OK) {
    std::stringstream errstring;
    errstring << "Can't set broker" << errstr;
    TEST_FAIL(errstring.str().c_str());
  }

  /* Create a producer to fetch metadata */
  RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
  if (!producer) {
    std::stringstream errstring;
    errstring << "Can't create producer" << errstr;
    TEST_FAIL(errstring.str().c_str());
  }

  /*
   * Create topic handle.
   */
  RdKafka::Topic *topic = NULL;
  topic = RdKafka::Topic::create(producer, topic_str, tconf, errstr);
  if (!topic) {
    std::stringstream errstring;
    errstring << "Can't create topic" << errstr;
    TEST_FAIL(errstring.str().c_str());
  }

  /* First metadata request: the request itself must succeed,
   * but the brand-new topic should not have any partitions yet. */
  err = producer->metadata(topic!=NULL, topic, &metadata, 5000);
  if (err != RdKafka::ERR_NO_ERROR) {
    std::stringstream errstring;
    errstring << "Can't request first metadata: " << RdKafka::err2str(err);
    TEST_FAIL(errstring.str().c_str());
  }

  /* It's a new topic, it should have no partitions */
  if (metadata->topics()->at(0)->partitions()->size() != 0) {
    TEST_FAIL("ISRS != 0");
  }

  delete metadata; /* free the first response before requesting again */

  sleep(1);

  /* Second metadata request: it must succeed and the topic
   * should now have partitions. */
  err = producer->metadata(topic!=NULL, topic, &metadata, 5000);
  if (err != RdKafka::ERR_NO_ERROR) {
    std::stringstream errstring;
    errstring << "Can't request second metadata: " << RdKafka::err2str(err);
    TEST_FAIL(errstring.str().c_str());
  }

  /* It should have partitions now */
  if (metadata->topics()->at(0)->partitions()->size() == 0) {
    TEST_FAIL("ISRS == 0");
  }

  delete metadata;
  delete topic;
  delete producer;
  delete tconf;
  delete conf;

  /* Wait for everything to be cleaned up since broker destroys are
   * handled in its own thread. */
  test_wait_exit(10);

  /* If we haven't failed at this point then
   * there were no threads leaked */
  return;
}
static void do_test_consumer_lag (void) {
  const int msgcnt = 10;
  std::string errstr;
  RdKafka::ErrorCode err;

  topic = Test::mk_topic_name("0061-consumer_lag", 1);

  test_produce_msgs_easy(topic.c_str(), 0, 0, msgcnt);

  /*
   * Create consumer
   */
  RdKafka::Conf *conf;
  Test::conf_init(&conf, NULL, 10);
  StatsCb stats;
  if (conf->set("event_cb", &stats, errstr) != RdKafka::Conf::CONF_OK)
    Test::Fail("set event_cb failed: " + errstr);
  Test::conf_set(conf, "group.id", topic);
  Test::conf_set(conf, "enable.auto.commit", "false");
  Test::conf_set(conf, "enable.partition.eof", "false");
  Test::conf_set(conf, "auto.offset.reset", "earliest");
  Test::conf_set(conf, "statistics.interval.ms", "100");

  RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
  if (!c)
    Test::Fail("Failed to create KafkaConsumer: " + errstr);
  delete conf;

  /* Assign partitions */
  std::vector<RdKafka::TopicPartition*> parts;
  parts.push_back(RdKafka::TopicPartition::create(topic, 0));
  if ((err = c->assign(parts)))
    Test::Fail("assign failed: " + RdKafka::err2str(err));
  RdKafka::TopicPartition::destroy(parts);

  /* Start consuming */
  Test::Say("Consuming topic " + topic + "\n");
  int cnt = 0;
  while (cnt < msgcnt) {
    RdKafka::Message *msg = c->consume(tmout_multip(1000));

    switch (msg->err()) {
    case RdKafka::ERR__TIMED_OUT:
      break;
    case RdKafka::ERR__PARTITION_EOF:
      Test::Fail(tostr() << "Consume error after " << cnt << "/" << msgcnt
                         << " messages: " << msg->errstr());
      break;

    case RdKafka::ERR_NO_ERROR:
      /* Proper message. Update calculated lag for later
       * checking in stats callback */
      stats.calc_lag = msgcnt - (msg->offset()+1);
      cnt++;
      Test::Say(2, tostr() << "Received message #" << cnt << "/" << msgcnt
                           << " at offset " << msg->offset()
                           << " (calc lag " << stats.calc_lag << ")\n");
      /* Slow down message "processing" to make sure we get
       * at least one stats callback per message. */
      if (cnt < msgcnt)
        rd_sleep(1);
      break;

    default:
      Test::Fail("Consume error: " + msg->errstr());
      break;
    }

    delete msg;
  }

  Test::Say(tostr() << "Done, lag was valid " << stats.lag_valid
                    << " times\n");
  if (stats.lag_valid == 0)
    Test::Fail("No valid consumer_lag in statistics seen");

  c->close();
  delete c;
}
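/*
 * The test registers a StatsCb whose definition is outside this excerpt.
 * A rough sketch of what it must do: on EVENT_STATS, pull "consumer_lag"
 * out of the statistics JSON and compare it with calc_lag. The crude
 * string scan below stands in for real JSON handling and is an assumption,
 * not the test's actual parser.
 */
class StatsCb : public RdKafka::EventCb {
 public:
  int64_t calc_lag;  /* expected lag, updated by the consume loop */
  int lag_valid;     /* how many times the reported lag matched */

  StatsCb() : calc_lag(-1), lag_valid(0) {}

  void event_cb(RdKafka::Event &event) {
    if (event.type() != RdKafka::Event::EVENT_STATS)
      return;

    int64_t lag = parse_lag(event.str());
    if (lag >= 0 && lag == calc_lag)
      lag_valid++;
  }

 private:
  /* Hypothetical helper: find the first "consumer_lag":N in the JSON. */
  static int64_t parse_lag(const std::string &stats) {
    const std::string key = "\"consumer_lag\":";
    std::string::size_type pos = stats.find(key);
    if (pos == std::string::npos)
      return -1;
    return strtoll(stats.c_str() + pos + key.size(), NULL, 10);
  }
};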
void kafka_writer::set_brokers(std::string brokers) {
  std::string errstr;

  /* Report a rejected broker list instead of failing silently. */
  if (conf->set("metadata.broker.list", brokers, errstr) !=
      RdKafka::Conf::CONF_OK)
    std::cerr << "Failed to set brokers: " << errstr << std::endl;
}