//services
void dr_cb( RdKafka::Message &message )
{
    switch ( message.err() )
    {
        case RdKafka::ERR__TIMED_OUT:
            break;

        case RdKafka::ERR_NO_ERROR:
        {
            //real message
            m_tym->color( stamp_color::blue );
            m_tym->time_stamp();
            std::cerr << "DELIVERY: read msg at offset " << message.offset() << "\n";
            if ( message.key() )
            {
                m_tym->time_stamp();
                // key() returns a const std::string*, so dereference it for printing
                std::cerr << "key: " << *message.key() << std::endl;
            }
            m_tym->time_stamp();
            std::cerr << "message-len:" << message.len() << " "
                      << static_cast<const char *>( message.payload() ) << "\n";
            m_tym->clear_color();
            break;
        }

        case RdKafka::ERR__PARTITION_EOF:
        {
            //last message
            //if ( m_eof )
            //    m_run = false;
            break;
        }

        default:
        {
            std::cerr << "consume failed: " << message.errstr() << std::endl;
            m_run = false;
        }
    }
}
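/*
 * A minimal sketch (assumed wiring, not part of the original source) of how a
 * handler like the one above is typically driven: poll the consumer in a loop
 * and pass every returned message, including error "messages" such as
 * ERR__TIMED_OUT and ERR__PARTITION_EOF, to the handler, which switches on
 * message.err().  The consumer type, the 1000 ms timeout and the Handler
 * template parameter are illustrative assumptions.
 */
#include <librdkafka/rdkafkacpp.h>

template <typename Handler>
void consume_loop( RdKafka::KafkaConsumer *consumer, Handler &handler, volatile bool &run )
{
    while ( run )
    {
        /* consume() always returns a Message object, even on timeout or EOF */
        RdKafka::Message *msg = consumer->consume( 1000 /* timeout in ms */ );
        handler.dr_cb( *msg );
        delete msg;
    }
}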
void dr_cb (RdKafka::Message &message) {
  if (message.err()) {
    state.producer.numErr++;
    errorString("producer_send_error", message.errstr(),
                message.topic_name(), message.key(),
                std::string(static_cast<const char*>(message.payload()),
                            message.len()));
  } else {
    successString("producer_send_success", message.topic_name(),
                  (int)message.partition(), message.offset(),
                  message.key(),
                  std::string(static_cast<const char*>(message.payload()),
                              message.len()));
    state.producer.numAcked++;
  }
}
static void do_test_null_empty (bool api_version_request) {
  std::string topic = Test::mk_topic_name("0070_null_empty", 1);
  const int partition = 0;

  Test::Say(tostr() << "Testing with api.version.request=" << api_version_request
            << " on topic " << topic << " partition " << partition << "\n");

  RdKafka::Conf *conf;
  Test::conf_init(&conf, NULL, 0);
  Test::conf_set(conf, "api.version.request",
                 api_version_request ? "true" : "false");
  Test::conf_set(conf, "acks", "all");

  std::string errstr;
  RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
  if (!p)
    Test::Fail("Failed to create Producer: " + errstr);
  delete conf;

  const int msgcnt = 8;
  static const char *msgs[msgcnt*2] = {
    NULL, NULL,
    "key2", NULL,
    "key3", "val3",
    NULL, "val4",
    "", NULL,
    NULL, "",
    "", ""
  };

  RdKafka::ErrorCode err;

  for (int i = 0 ; i < msgcnt * 2 ; i += 2) {
    Test::Say(3, tostr() << "Produce message #" << (i/2) <<
              ": key=\"" << (msgs[i] ? msgs[i] : "Null") <<
              "\", value=\"" << (msgs[i+1] ? msgs[i+1] : "Null") << "\"\n");
    err = p->produce(topic, partition, RdKafka::Producer::RK_MSG_COPY,
                     /* Value */
                     (void *)msgs[i+1], msgs[i+1] ? strlen(msgs[i+1]) : 0,
                     /* Key */
                     (void *)msgs[i], msgs[i] ? strlen(msgs[i]) : 0,
                     0, NULL);
    if (err != RdKafka::ERR_NO_ERROR)
      Test::Fail("Produce failed: " + RdKafka::err2str(err));
  }

  if (p->flush(tmout_multip(3*5000)) != 0)
    Test::Fail("Not all messages flushed");

  Test::Say(tostr() << "Produced " << msgcnt << " messages to " << topic << "\n");

  delete p;

  /*
   * Now consume messages from the beginning, making sure they match
   * what was produced.
   */

  /* Create consumer */
  Test::conf_init(&conf, NULL, 10);
  Test::conf_set(conf, "group.id", topic);
  Test::conf_set(conf, "api.version.request",
                 api_version_request ? "true" : "false");
  Test::conf_set(conf, "enable.auto.commit", "false");

  RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
  if (!c)
    Test::Fail("Failed to create KafkaConsumer: " + errstr);
  delete conf;

  /* Assign the partition */
  std::vector<RdKafka::TopicPartition*> parts;
  parts.push_back(RdKafka::TopicPartition::create(topic, partition,
                                                  RdKafka::Topic::OFFSET_BEGINNING));
  err = c->assign(parts);
  if (err != RdKafka::ERR_NO_ERROR)
    Test::Fail("assign() failed: " + RdKafka::err2str(err));
  RdKafka::TopicPartition::destroy(parts);

  /* Start consuming */
  int failures = 0;
  for (int i = 0 ; i < msgcnt * 2 ; i += 2) {
    RdKafka::Message *msg = c->consume(tmout_multip(5000));
    if (msg->err())
      Test::Fail(tostr() << "consume() failed at message " << (i/2) << ": " <<
                 msg->errstr());

    /* verify key */
    failures += check_equal(msgs[i], msg->key() ? msg->key()->c_str() : NULL,
                            msg->key_len(),
                            tostr() << "message #" << (i/2) <<
                            " (offset " << msg->offset() << ") key");
    /* verify the key_pointer() API as well */
    failures += check_equal(msgs[i],
                            (const char *)msg->key_pointer(), msg->key_len(),
                            tostr() << "message #" << (i/2) <<
                            " (offset " << msg->offset() << ") key");
    /* verify value */
    failures += check_equal(msgs[i+1], (const char *)msg->payload(),
                            msg->len(),
                            tostr() << "message #" << (i/2) <<
                            " (offset " << msg->offset() << ") value");
    delete msg;
  }

  Test::Say(tostr() << "Done consuming, closing. " << failures << " test failures\n");
  if (failures)
    Test::Fail(tostr() << "See " << failures << " previous test failure(s)");

  c->close();
  delete c;
}
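/*
 * The test above relies on a check_equal() helper that is not shown here.  A
 * minimal sketch of what such a helper could look like (an assumption, not the
 * test suite's actual implementation): compare the expected C string against
 * the consumed bytes by length and content, reporting mismatches through the
 * test framework.  It treats a NULL expected value and an empty one as
 * equivalent; the real helper may distinguish them.
 */
#include <cstring>

static int check_equal (const char *exp, const char *actual, size_t len,
                        std::string what) {
  size_t exp_len = exp ? strlen(exp) : 0;

  if (exp_len != len ||
      (len > 0 && (!actual || memcmp(exp, actual, len) != 0))) {
    Test::Say(tostr() << what << ": expected \"" << (exp ? exp : "(NULL)") <<
              "\" (" << exp_len << " bytes), got " << len << " bytes\n");
    return 1;
  }

  return 0;
}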
void dr_cb (RdKafka::Message &message) {
  std::cout << "Message delivery for (" << message.len() << " bytes): " <<
    message.errstr() << std::endl;
  if (message.key())
    std::cout << "Key: " << *(message.key()) << ";" << std::endl;
}
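/*
 * How a delivery report callback like the ones above gets wired up (a minimal
 * sketch, not taken from the original sources): subclass
 * RdKafka::DeliveryReportCb, register the instance on the configuration under
 * "dr_cb", and call poll()/flush() so the callback is actually served.  The
 * broker address "localhost:9092" and the topic "test_topic" are placeholder
 * assumptions.
 */
#include <iostream>
#include <string>
#include <librdkafka/rdkafkacpp.h>

class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb {
 public:
  void dr_cb (RdKafka::Message &message) {
    std::cout << "Message delivery for (" << message.len() << " bytes): " <<
      message.errstr() << std::endl;
  }
};

int main () {
  std::string errstr;
  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
  conf->set("bootstrap.servers", "localhost:9092", errstr); /* placeholder broker */

  ExampleDeliveryReportCb ex_dr_cb;
  conf->set("dr_cb", &ex_dr_cb, errstr);                    /* register the callback */

  RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
  delete conf;
  if (!producer) {
    std::cerr << "Failed to create producer: " << errstr << std::endl;
    return 1;
  }

  std::string payload = "hello";
  producer->produce("test_topic", RdKafka::Topic::PARTITION_UA,
                    RdKafka::Producer::RK_MSG_COPY,
                    const_cast<char *>(payload.c_str()), payload.size(),
                    NULL, 0,    /* no key */
                    0, NULL);   /* default timestamp, no per-message opaque */

  producer->poll(0);            /* serve queued delivery report callbacks */
  producer->flush(10 * 1000);   /* wait for outstanding deliveries and their reports */

  delete producer;
  return 0;
}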