void Publisher::dr_cb(RdKafka::Message& message)
{
    if (message.err() != RdKafka::ERR_NO_ERROR)
    {
        StringBuffer payloadStr;

        if (message.len() == 0)
            payloadStr.append("<no message>");
        else
            payloadStr.append(message.len(), static_cast<const char*>(message.payload()));

        DBGLOG("Kafka: Error publishing message: %d (%s); message: '%s'", message.err(), message.errstr().c_str(), payloadStr.str());
    }
}
void dr_cb (RdKafka::Message &message)
{
    if (message.err()) {
        state.producer.numErr++;
        errorString("producer_send_error", message.errstr(),
                    message.topic_name(), message.key(),
                    std::string(static_cast<const char*>(message.payload()), message.len()));
    } else {
        successString("producer_send_success", message.topic_name(),
                      (int)message.partition(), message.offset(), message.key(),
                      std::string(static_cast<const char*>(message.payload()), message.len()));
        state.producer.numAcked++;
    }
}
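// The two delivery-report callbacks above only fire when the producer's event
// queue is served. A minimal sketch of how such a callback is registered and
// driven, using the standard librdkafka C++ API (the class name
// MyDeliveryReportCb and function produce_one are illustrative, not taken
// from the snippets above):
#include <iostream>
#include <string>
#include <librdkafka/rdkafkacpp.h>

class MyDeliveryReportCb : public RdKafka::DeliveryReportCb {
public:
    void dr_cb(RdKafka::Message &message) {
        // Called once per produced message, from inside poll()/flush().
        std::cerr << "Delivered to " << message.topic_name()
                  << " [" << message.partition() << "] @ " << message.offset()
                  << ": " << message.errstr() << std::endl;
    }
};

int produce_one(const std::string &brokers, const std::string &topic) {
    std::string errstr;
    RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
    MyDeliveryReportCb drCb;
    conf->set("bootstrap.servers", brokers, errstr);
    conf->set("dr_cb", &drCb, errstr);       // register the callback object

    RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
    delete conf;
    if (!producer)
        return 1;

    std::string payload = "hello";
    producer->produce(topic, RdKafka::Topic::PARTITION_UA,
                      RdKafka::Producer::RK_MSG_COPY,
                      const_cast<char *>(payload.c_str()), payload.size(),
                      NULL, 0,               // no key
                      0, NULL);              // default timestamp, no opaque
    producer->flush(5000);                   // serves the queue; dr_cb fires here
    delete producer;
    return 0;
}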
void LogDeliverReportCb::dr_cb(RdKafka::Message &message)
{
    if (message.err() != RdKafka::ERR_NO_ERROR && !needRecover) {
        // A Kafka send error occurred; switch the logger into degraded mode
        DebugMessageWithTime("error occurred in kafka, err:", message.errstr(), " errCode:", message.err());
        LogPusherPtr logPusher = LogPusher::getLogger(MTTY_SERVICE_LOGGER);
        logPusher->setWorkMode(true);
        const char* payload = (const char*)message.payload();
#if defined(USE_ALIYUN_LOG)
        log::Message msg(message.topic_name(), "", std::string(payload, payload + message.len()));
#else
        log::Message msg(message.topic_name(), std::string(payload, payload + message.len()));
#endif
        //logPusher->startRemoteMonitor(msg);
        needRecover = true;
    } else if (message.err() == RdKafka::ERR_NO_ERROR && needRecover) {
        // Kafka has recovered from the error; resume normal work
        DebugMessageWithTime("kafka error recovered, continuing to work");
        needRecover = false;
        LogPusherPtr logPusher = LogPusher::getLogger(MTTY_SERVICE_LOGGER);
        logPusher->setWorkMode(false);
    }
}
//services
void dr_cb( rdkafka::Message &message )
{
    switch ( message.err() ) {
    case rdkafka::ERR__TIMED_OUT:
        break;

    case rdkafka::ERR_NO_ERROR: {
        //real message
        m_tym->color( stamp_color::blue );
        m_tym->time_stamp();
        std::cerr << "DELIVERY: read msg at offset " << message.offset() << "\n";
        if ( message.key() ) {
            m_tym->time_stamp();
            // key() returns a const std::string*; dereference to print the
            // key text rather than its address
            std::cerr << "key: " << *message.key() << std::endl;
        }
        m_tym->time_stamp();
        std::cerr << "message-len:" << message.len() << " "
                  << static_cast<const char *>( message.payload() ) << "\n";
        m_tym->clear_color();
        break;
    }

    case rdkafka::ERR__PARTITION_EOF: {
        //last message
        //if ( m_eof ) {
        //    m_run = false;
        //}
        break;
    }

    default: {
        std::cerr << "consume failed: " << message.errstr() << std::endl;
        m_run = false;
    }
    }
}
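// Despite its dr_cb name, the handler above inspects consumer-side conditions
// (message offset, ERR__PARTITION_EOF), so it reads like a consume callback
// (the lowercase rdkafka namespace is presumably an alias for RdKafka). A
// minimal sketch of how such a handler is normally wired with the legacy
// consumer API; MyConsumeCb and the partition/timeout values are illustrative:
#include <librdkafka/rdkafkacpp.h>

class MyConsumeCb : public RdKafka::ConsumeCb {
public:
    void consume_cb(RdKafka::Message &message, void *opaque) {
        // same switch on message.err() as in the handler above
    }
};

void consume_loop(RdKafka::Consumer *consumer, RdKafka::Topic *topic) {
    MyConsumeCb cb;
    consumer->start(topic, /*partition=*/0, RdKafka::Topic::OFFSET_BEGINNING);
    for (;;) {
        // Dispatches each fetched message (or error event) through cb.consume_cb()
        consumer->consume_callback(topic, /*partition=*/0, /*timeout_ms=*/1000,
                                   &cb, NULL);
        consumer->poll(0);
    }
}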
static void do_test_null_empty (bool api_version_request) {
  std::string topic = Test::mk_topic_name("0070_null_empty", 1);
  const int partition = 0;

  Test::Say(tostr() << "Testing with api.version.request=" << api_version_request
            << " on topic " << topic << " partition " << partition << "\n");

  RdKafka::Conf *conf;
  Test::conf_init(&conf, NULL, 0);
  Test::conf_set(conf, "api.version.request", api_version_request ? "true" : "false");
  Test::conf_set(conf, "acks", "all");

  std::string errstr;
  RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
  if (!p)
    Test::Fail("Failed to create Producer: " + errstr);
  delete conf;

  const int msgcnt = 8;
  /* Key/value pairs; the final pair is zero-initialized to NULL, NULL */
  static const char *msgs[msgcnt*2] = {
    NULL, NULL,
    "key2", NULL,
    "key3", "val3",
    NULL, "val4",
    "", NULL,
    NULL, "",
    "", ""
  };

  RdKafka::ErrorCode err;
  for (int i = 0 ; i < msgcnt * 2 ; i += 2) {
    Test::Say(3, tostr() << "Produce message #" << (i/2) << ": key=\""
              << (msgs[i] ? msgs[i] : "Null") << "\", value=\""
              << (msgs[i+1] ? msgs[i+1] : "Null") << "\"\n");
    err = p->produce(topic, partition, RdKafka::Producer::RK_MSG_COPY,
                     /* Value */
                     (void *)msgs[i+1], msgs[i+1] ? strlen(msgs[i+1]) : 0,
                     /* Key */
                     (void *)msgs[i], msgs[i] ? strlen(msgs[i]) : 0,
                     0, NULL);
    if (err != RdKafka::ERR_NO_ERROR)
      Test::Fail("Produce failed: " + RdKafka::err2str(err));
  }

  if (p->flush(tmout_multip(3*5000)) != 0)
    Test::Fail("Not all messages flushed");

  Test::Say(tostr() << "Produced " << msgcnt << " messages to " << topic << "\n");

  delete p;

  /*
   * Now consume messages from the beginning, making sure they match
   * what was produced.
   */

  /* Create consumer */
  Test::conf_init(&conf, NULL, 10);
  Test::conf_set(conf, "group.id", topic);
  Test::conf_set(conf, "api.version.request", api_version_request ? "true" : "false");
  Test::conf_set(conf, "enable.auto.commit", "false");

  RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
  if (!c)
    Test::Fail("Failed to create KafkaConsumer: " + errstr);
  delete conf;

  /* Assign the partition */
  std::vector<RdKafka::TopicPartition*> parts;
  parts.push_back(RdKafka::TopicPartition::create(topic, partition,
                                                  RdKafka::Topic::OFFSET_BEGINNING));
  err = c->assign(parts);
  if (err != RdKafka::ERR_NO_ERROR)
    Test::Fail("assign() failed: " + RdKafka::err2str(err));
  RdKafka::TopicPartition::destroy(parts);

  /* Start consuming */
  int failures = 0;
  for (int i = 0 ; i < msgcnt * 2 ; i += 2) {
    RdKafka::Message *msg = c->consume(tmout_multip(5000));
    if (msg->err())
      Test::Fail(tostr() << "consume() failed at message " << (i/2) << ": "
                 << msg->errstr());

    /* verify key */
    failures += check_equal(msgs[i], msg->key() ? msg->key()->c_str() : NULL,
                            msg->key_len(),
                            tostr() << "message #" << (i/2) << " (offset "
                            << msg->offset() << ") key");
    /* verify the key_pointer() API as well */
    failures += check_equal(msgs[i], (const char *)msg->key_pointer(),
                            msg->key_len(),
                            tostr() << "message #" << (i/2) << " (offset "
                            << msg->offset() << ") key");

    /* verify value */
    failures += check_equal(msgs[i+1], (const char *)msg->payload(), msg->len(),
                            tostr() << "message #" << (i/2) << " (offset "
                            << msg->offset() << ") value");
    delete msg;
  }

  Test::Say(tostr() << "Done consuming, closing. " << failures << " test failures\n");
  if (failures)
    Test::Fail(tostr() << "See " << failures << " previous test failure(s)");

  c->close();
  delete c;
}
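// The test above leans on a check_equal helper. A plausible sketch of what it
// must do, given how it is called (this is an illustration under those
// assumptions, not necessarily the test suite's actual implementation):
// compare an expected C string (NULL meaning a null key/value) against a
// received buffer of a given length, logging and returning 1 on mismatch so
// failures can be accumulated.
static int check_equal (const char *exp,
                        const char *actual, size_t len,
                        std::string what) {
  size_t exp_len = exp ? strlen(exp) : 0;
  int failures = 0;

  if (!exp != !actual) {
    /* one side is null, the other is not: NULL and "" must stay distinct */
    Test::Say(tostr() << what << ": expected " << (exp ? "non-null" : "null")
              << ", got " << (actual ? "non-null" : "null") << "\n");
    failures++;
  } else if (len != exp_len) {
    Test::Say(tostr() << what << ": expected length " << exp_len
              << ", got " << len << "\n");
    failures++;
  } else if (exp && memcmp(exp, actual, len)) {
    Test::Say(tostr() << what << ": content mismatch\n");
    failures++;
  }
  return failures;
}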
void dr_cb (RdKafka::Message &message) {
    std::cout << "Message delivery for (" << message.len() << " bytes): "
              << message.errstr() << std::endl;
    if (message.key())
        std::cout << "Key: " << *(message.key()) << ";" << std::endl;
}
const void* KafkaStreamedDataset::nextRow()
{
    const void* result = NULL;
    __int32 maxAttempts = 10;   //!< Maximum number of tries if local queue is full
    __int32 timeoutWait = 100;  //!< Amount of time to wait between retries
    __int32 attemptNum = 0;

    if (maxRecords <= 0 || consumedRecCount < maxRecords)
    {
        RdKafka::Message* messageObjPtr = NULL;
        bool messageConsumed = false;

        while (!messageConsumed && shouldRead && attemptNum < maxAttempts)
        {
            messageObjPtr = consumerPtr->getOneMessage(); // messageObjPtr must be deleted when we are through with it

            if (messageObjPtr)
            {
                try
                {
                    switch (messageObjPtr->err())
                    {
                        case RdKafka::ERR_NO_ERROR:
                            {
                                RtlDynamicRowBuilder rowBuilder(resultAllocator);
                                unsigned len = sizeof(__int32) + sizeof(__int64) + sizeof(size32_t) + messageObjPtr->len();
                                byte* row = rowBuilder.ensureCapacity(len, NULL);

                                // Populating this structure:
                                //  EXPORT KafkaMessage := RECORD
                                //      UNSIGNED4 partitionNum;
                                //      UNSIGNED8 offset;
                                //      STRING message;
                                //  END;

                                *(__int32*)(row) = messageObjPtr->partition();
                                *(__int64*)(row + sizeof(__int32)) = messageObjPtr->offset();
                                *(size32_t*)(row + sizeof(__int32) + sizeof(__int64)) = messageObjPtr->len();
                                memcpy(row + sizeof(__int32) + sizeof(__int64) + sizeof(size32_t), messageObjPtr->payload(), messageObjPtr->len());

                                result = rowBuilder.finalizeRowClear(len);

                                lastMsgOffset = messageObjPtr->offset();
                                ++consumedRecCount;

                                // Give opportunity for consumer to pull in any additional messages
                                consumerPtr->handle()->poll(0);

                                // Mark as loaded so we don't retry
                                messageConsumed = true;
                            }
                            break;

                        case RdKafka::ERR__TIMED_OUT:
                            // No new messages arrived and we timed out waiting
                            ++attemptNum;
                            consumerPtr->handle()->poll(timeoutWait);
                            break;

                        case RdKafka::ERR__PARTITION_EOF:
                            // We reached the end of the messages in the partition
                            if (traceLevel > 4)
                            {
                                DBGLOG("Kafka: EOF reading message from partition %d", messageObjPtr->partition());
                            }
                            shouldRead = false;
                            break;

                        case RdKafka::ERR__UNKNOWN_PARTITION:
                            // Unknown partition; don't throw an error here because
                            // in some configurations (e.g. more Thor slaves than
                            // partitions) not all consumers will have a partition
                            // to read
                            if (traceLevel > 4)
                            {
                                DBGLOG("Kafka: Unknown partition while trying to read");
                            }
                            shouldRead = false;
                            break;

                        case RdKafka::ERR__UNKNOWN_TOPIC:
                            throw MakeStringException(-1, "Kafka: Error while reading message: '%s'", messageObjPtr->errstr().c_str());
                            break;
                    }
                }
                catch (...)
                {
                    delete(messageObjPtr);
                    throw;
                }

                delete(messageObjPtr);
                messageObjPtr = NULL;
            }
        }
    }

    return result;
}
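// nextRow() above delegates the actual fetch to consumerPtr->getOneMessage().
// A minimal sketch of such a wrapper, assuming a class holding a legacy
// RdKafka::Consumer plus a started topic/partition; the class and member
// names here are hypothetical, not necessarily those of the actual plugin:
class KafkaConsumerWrapper
{
    public:
        RdKafka::Message* getOneMessage()
        {
            // Caller owns the returned Message and must delete it; a Message
            // object is returned even on error (err() distinguishes the cases)
            return consumer->consume(topic, partition, POLL_TIMEOUT_MS);
        }

        RdKafka::Handle* handle() { return consumer; }

    private:
        static const int POLL_TIMEOUT_MS = 1000;
        RdKafka::Consumer* consumer;   // created via RdKafka::Consumer::create()
        RdKafka::Topic* topic;         // created via RdKafka::Topic::create()
        __int32 partition;             // started via consumer->start(topic, partition, offset)
};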
void dr_cb (RdKafka::Message &message) {
    std::cout << "Message delivery for (" << message.len() << " bytes): "
              << message.errstr() << std::endl;
}
int main (int argc, char **argv) {
  std::string brokers = "localhost";
  std::string errstr;
  std::string topic_str;
  std::string mode;
  std::string debug;
  int32_t partition = RdKafka::Topic::PARTITION_UA;
  int64_t start_offset = RdKafka::Topic::OFFSET_BEGINNING;
  bool exit_eof = false;
  bool do_conf_dump = false;
  int opt;  /* getopt() returns int; char would break the -1 comparison
             * on platforms where char is unsigned */
  MyHashPartitionerCb hash_partitioner;

  /*
   * Create configuration objects
   */
  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
  RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);

  while ((opt = getopt(argc, argv, "PCt:p:b:z:qd:o:eX:AM:")) != -1) {
    switch (opt) {
    case 'P':
    case 'C':
      mode = opt;
      break;
    case 't':
      topic_str = optarg;
      break;
    case 'p':
      if (!strcmp(optarg, "random"))
        /* default */;
      else if (!strcmp(optarg, "hash")) {
        if (tconf->set("partitioner_cb", &hash_partitioner, errstr) !=
            RdKafka::Conf::CONF_OK) {
          std::cerr << errstr << std::endl;
          exit(1);
        }
      } else
        partition = std::atoi(optarg);
      break;
    case 'b':
      brokers = optarg;
      break;
    case 'z':
      if (conf->set("compression.codec", optarg, errstr) !=
          RdKafka::Conf::CONF_OK) {
        std::cerr << errstr << std::endl;
        exit(1);
      }
      break;
    case 'o':
      if (!strcmp(optarg, "end"))
        start_offset = RdKafka::Topic::OFFSET_END;
      else if (!strcmp(optarg, "beginning"))
        start_offset = RdKafka::Topic::OFFSET_BEGINNING;
      else if (!strcmp(optarg, "stored"))
        start_offset = RdKafka::Topic::OFFSET_STORED;
      else
        start_offset = strtoll(optarg, NULL, 10);
      break;
    case 'e':
      exit_eof = true;
      break;
    case 'd':
      debug = optarg;
      break;
    case 'M':
      if (conf->set("statistics.interval.ms", optarg, errstr) !=
          RdKafka::Conf::CONF_OK) {
        std::cerr << errstr << std::endl;
        exit(1);
      }
      break;
    case 'X':
      {
        char *name, *val;

        if (!strcmp(optarg, "dump")) {
          do_conf_dump = true;
          continue;
        }

        name = optarg;
        if (!(val = strchr(name, '='))) {
          std::cerr << "%% Expected -X property=value, not " << name << std::endl;
          exit(1);
        }

        *val = '\0';
        val++;

        /* Try "topic." prefixed properties on topic
         * conf first, and then fall through to global if
         * it didn't match a topic configuration property. */
        RdKafka::Conf::ConfResult res;
        if (!strncmp(name, "topic.", strlen("topic.")))
          res = tconf->set(name+strlen("topic."), val, errstr);
        else
          res = conf->set(name, val, errstr);

        if (res != RdKafka::Conf::CONF_OK) {
          std::cerr << errstr << std::endl;
          exit(1);
        }
      }
      break;

    default:
      goto usage;
    }
  }

  if (mode.empty() || topic_str.empty() || optind != argc) {
  usage:
    fprintf(stderr,
            "Usage: %s [-C|-P] -t <topic> "
            "[-p <partition>] [-b <host1:port1,host2:port2,..>]\n"
            "\n"
            "librdkafka version %s (0x%08x)\n"
            "\n"
            " Options:\n"
            "  -C | -P         Consumer or Producer mode\n"
            "  -t <topic>      Topic to fetch / produce\n"
            "  -p <num>        Partition (random partitioner)\n"
            "  -p <func>       Use partitioner:\n"
            "                  random (default), hash\n"
            "  -b <brokers>    Broker address (localhost:9092)\n"
            "  -z <codec>      Enable compression:\n"
            "                  none|gzip|snappy\n"
            "  -o <offset>     Start offset (consumer)\n"
            "  -e              Exit consumer when last message\n"
            "                  in partition has been received.\n"
            "  -d [facs..]     Enable debugging contexts:\n"
            "                  %s\n"
            "  -M <intervalms> Enable statistics\n"
            "  -X <prop=name>  Set arbitrary librdkafka "
            "configuration property\n"
            "                  Properties prefixed with \"topic.\" "
            "will be set on topic object.\n"
            "                  Use '-X list' to see the full list\n"
            "                  of supported properties.\n"
            "\n"
            " In Consumer mode:\n"
            "  writes fetched messages to stdout\n"
            " In Producer mode:\n"
            "  reads messages from stdin and sends to broker\n"
            "\n"
            "\n"
            "\n",
            argv[0],
            RdKafka::version_str().c_str(), RdKafka::version(),
            RdKafka::Conf::DEBUG_CONTEXTS.c_str());
    exit(1);
  }

  /*
   * Set configuration properties
   */
  conf->set("metadata.broker.list", brokers, errstr);

  if (!debug.empty()) {
    if (conf->set("debug", debug, errstr) != RdKafka::Conf::CONF_OK) {
      std::cerr << errstr << std::endl;
      exit(1);
    }
  }

  ExampleEventCb ex_event_cb;
  conf->set("event_cb", &ex_event_cb, errstr);

  if (do_conf_dump) {
    int pass;

    for (pass = 0 ; pass < 2 ; pass++) {
      std::list<std::string> *dump;
      if (pass == 0) {
        dump = conf->dump();
        std::cout << "# Global config" << std::endl;
      } else {
        dump = tconf->dump();
        std::cout << "# Topic config" << std::endl;
      }

      /* dump() returns a flat list of alternating name/value entries */
      for (std::list<std::string>::iterator it = dump->begin();
           it != dump->end(); ) {
        std::cout << *it << " = ";
        it++;
        std::cout << *it << std::endl;
        it++;
      }
      std::cout << std::endl;
    }
    exit(0);
  }

  signal(SIGINT, sigterm);
  signal(SIGTERM, sigterm);

  if (mode == "P") {
    /*
     * Producer mode
     */
    ExampleDeliveryReportCb ex_dr_cb;

    /* Set delivery report callback */
    conf->set("dr_cb", &ex_dr_cb, errstr);

    /*
     * Create producer using accumulated global configuration.
     */
    RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
    if (!producer) {
      std::cerr << "Failed to create producer: " << errstr << std::endl;
      exit(1);
    }

    std::cout << "% Created producer " << producer->name() << std::endl;

    /*
     * Create topic handle.
     */
    RdKafka::Topic *topic = RdKafka::Topic::create(producer, topic_str,
                                                   tconf, errstr);
    if (!topic) {
      std::cerr << "Failed to create topic: " << errstr << std::endl;
      exit(1);
    }

    /*
     * Read messages from stdin and produce to broker.
     */
    for (std::string line; run && std::getline(std::cin, line);) {
      if (line.empty()) {
        producer->poll(0);
        continue;
      }

      /*
       * Produce message
       */
      RdKafka::ErrorCode resp =
        producer->produce(topic, partition,
                          RdKafka::Producer::RK_MSG_COPY /* Copy payload */,
                          const_cast<char *>(line.c_str()), line.size(),
                          NULL, NULL);
      if (resp != RdKafka::ERR_NO_ERROR)
        std::cerr << "% Produce failed: " << RdKafka::err2str(resp) << std::endl;
      else
        std::cerr << "% Produced message (" << line.size() << " bytes)" << std::endl;

      producer->poll(0);
    }
    run = true;

    while (run && producer->outq_len() > 0) {
      std::cerr << "Waiting for " << producer->outq_len() << std::endl;
      producer->poll(1000);
    }

    delete topic;
    delete producer;

  } else {
    /*
     * Consumer mode
     */

    /*
     * Create consumer using accumulated global configuration.
     */
    RdKafka::Consumer *consumer = RdKafka::Consumer::create(conf, errstr);
    if (!consumer) {
      std::cerr << "Failed to create consumer: " << errstr << std::endl;
      exit(1);
    }

    std::cout << "% Created consumer " << consumer->name() << std::endl;

    /*
     * Create topic handle.
     */
    RdKafka::Topic *topic = RdKafka::Topic::create(consumer, topic_str,
                                                   tconf, errstr);
    if (!topic) {
      std::cerr << "Failed to create topic: " << errstr << std::endl;
      exit(1);
    }

    /*
     * Start consumer for topic+partition at start offset
     */
    RdKafka::ErrorCode resp = consumer->start(topic, partition, start_offset);
    if (resp != RdKafka::ERR_NO_ERROR) {
      std::cerr << "Failed to start consumer: " << RdKafka::err2str(resp) << std::endl;
      exit(1);
    }

    /*
     * Consume messages
     */
    while (run) {
      RdKafka::Message *msg = consumer->consume(topic, partition, 1000);
      switch (msg->err()) {
      case RdKafka::ERR__TIMED_OUT:
        break;

      case RdKafka::ERR_NO_ERROR:
        /* Real message */
        std::cerr << "Read msg at offset " << msg->offset() << std::endl;
        printf("%.*s\n",
               static_cast<int>(msg->len()),
               static_cast<const char *>(msg->payload()));
        break;

      case RdKafka::ERR__PARTITION_EOF:
        /* Last message */
        if (exit_eof)
          run = false;
        break;

      default:
        /* Errors */
        std::cerr << "Consume failed: " << msg->errstr() << std::endl;
        run = false;
      }
      delete msg;
      consumer->poll(0);
    }

    /*
     * Stop consumer
     */
    consumer->stop(topic, partition);
    consumer->poll(1000);

    delete topic;
    delete consumer;
  }

  /*
   * Wait for RdKafka to decommission.
   * This is not strictly needed (when checking outq_len() above), but
   * allows RdKafka to clean up all its resources before the application
   * exits so that memory profilers such as valgrind won't complain about
   * memory leaks.
   */
  RdKafka::wait_destroyed(5000);

  return 0;
}
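// A short note on building and running the example above; the file name
// rdkafka_example.cpp is an assumption, and the helper classes it references
// (MyHashPartitionerCb, ExampleEventCb, ExampleDeliveryReportCb, sigterm, run)
// must be defined in the same translation unit:
//
//   c++ -o rdkafka_example rdkafka_example.cpp -lrdkafka++ -lrdkafka -lpthread
//
//   # Producer mode: one message per stdin line
//   echo "hello" | ./rdkafka_example -P -t test -b localhost:9092
//
//   # Consumer mode: read from the beginning, exit at end of partition
//   ./rdkafka_example -C -t test -b localhost:9092 -o beginning -e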