void event_cb (RdKafka::Event &event) { switch (event.type()) { case RdKafka::Event::EVENT_ERROR: Test::Say(tostr() << "Error: " << RdKafka::err2str(event.err()) << ": " << event.str() << "\n"); if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN) error_seen = true; break; case RdKafka::Event::EVENT_LOG: Test::Say(tostr() << "Log: " << event.str() << "\n"); break; default: break; } }
void Consumer::event_cb(RdKafka::Event &event) {
  if (traceLevel > 4) {
    switch (event.type()) {
      case RdKafka::Event::EVENT_ERROR:
        DBGLOG("Kafka: Error: %s", event.str().c_str());
        break;
      case RdKafka::Event::EVENT_STATS:
        DBGLOG("Kafka: Stats: %s", event.str().c_str());
        break;
      case RdKafka::Event::EVENT_LOG:
        DBGLOG("Kafka: Log: %s", event.str().c_str());
        break;
      default:
        break;
    }
  }
}
/**
 * @brief Event callback
 */
void event_cb (RdKafka::Event &event) {
  if (event.type() == RdKafka::Event::EVENT_LOG) {
    Test::Say(tostr() << "LOG-" << event.severity() << "-" << event.fac()
                      << ": " << event.str() << "\n");
    return;
  } else if (event.type() != RdKafka::Event::EVENT_STATS) {
    Test::Say(tostr() << "Dropping event " << event.type() << "\n");
    return;
  }

  int64_t consumer_lag = parse_json(event.str().c_str());

  Test::Say(3, tostr() << "Stats: consumer_lag is " << consumer_lag << "\n");
  if (consumer_lag == -1) {
    Test::Say(2, "Skipping old stats with invalid consumer_lag\n");
    return; /* Old stats generated before first message consumed */
  } else if (consumer_lag != calc_lag)
    Test::Fail(tostr() << "Stats consumer_lag " << consumer_lag
                       << ", expected " << calc_lag << "\n");
  else
    lag_valid++;
}
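The consumer-lag check above only runs if librdkafka actually emits EVENT_STATS events, which requires statistics to be enabled in the configuration. A minimal sketch of that setup follows; "statistics.interval.ms" and the "event_cb" setter are standard librdkafka configuration entry points, while the 1000 ms interval and the helper name make_stats_conf are arbitrary choices for illustration.

// Sketch only: enable periodic EVENT_STATS emission so an event callback
// like the one above receives statistics JSON once per second.
#include <iostream>
#include <string>
#include <librdkafka/rdkafkacpp.h>

RdKafka::Conf *make_stats_conf(RdKafka::EventCb *event_cb) {
  std::string errstr;
  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);

  /* Emit an EVENT_STATS event (statistics JSON) every second (example value). */
  if (conf->set("statistics.interval.ms", "1000", errstr) != RdKafka::Conf::CONF_OK)
    std::cerr << "statistics.interval.ms: " << errstr << std::endl;

  /* Route errors, logs and stats to the event callback. */
  if (conf->set("event_cb", event_cb, errstr) != RdKafka::Conf::CONF_OK)
    std::cerr << "event_cb: " << errstr << std::endl;

  return conf;
}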
void event_cb (RdKafka::Event &event) {
  switch (event.type()) {
    case RdKafka::Event::EVENT_ERROR:
      std::cerr << now() << ": ERROR (" << RdKafka::err2str(event.err())
                << "): " << event.str() << std::endl;
      if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN)
        run = false;
      break;

    case RdKafka::Event::EVENT_STATS:
      std::cerr << now() << ": \"STATS\": " << event.str() << std::endl;
      break;

    case RdKafka::Event::EVENT_LOG:
      std::cerr << now() << ": LOG-" << event.severity() << "-" << event.fac()
                << ": " << event.str() << std::endl;
      break;

    default:
      std::cerr << now() << ": EVENT " << event.type() << " ("
                << RdKafka::err2str(event.err()) << "): " << event.str()
                << std::endl;
      break;
  }
}
void event_cb (RdKafka::Event &event) {
  /* Prefix every event with a millisecond-resolution local timestamp. */
  struct timeval tv;
  char buf[64];

  gettimeofday(&tv, NULL);
  strftime(buf, sizeof(buf) - 1, "%Y-%m-%d %H:%M:%S", localtime(&tv.tv_sec));
  fprintf(stderr, "%s.%03d: ", buf, (int)(tv.tv_usec / 1000));

  switch (event.type()) {
    case RdKafka::Event::EVENT_ERROR:
      std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): "
                << event.str() << std::endl;
      if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN)
        run = false;
      break;

    case RdKafka::Event::EVENT_STATS:
      std::cerr << "\"STATS\": " << event.str() << std::endl;
      break;

    case RdKafka::Event::EVENT_LOG:
      fprintf(stderr, "LOG-%i-%s: %s\n", event.severity(),
              event.fac().c_str(), event.str().c_str());
      break;

    case RdKafka::Event::EVENT_THROTTLE:
      std::cerr << "THROTTLED: " << event.throttle_time() << "ms by "
                << event.broker_name() << " id " << (int)event.broker_id()
                << std::endl;
      break;

    default:
      std::cerr << "EVENT " << event.type() << " ("
                << RdKafka::err2str(event.err()) << "): " << event.str()
                << std::endl;
      break;
  }
}
void event_cb (RdKafka::Event &event) { switch (event.type()) { case RdKafka::Event::EVENT_ERROR: std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): " << event.str() << std::endl; if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN) run = false; break; case RdKafka::Event::EVENT_STATS: std::cerr << "\"STATS\": " << event.str() << std::endl; break; case RdKafka::Event::EVENT_LOG: fprintf(stderr, "LOG-%i-%s: %s\n", event.severity(), event.fac().c_str(), event.str().c_str()); break; default: std::cerr << "EVENT " << event.type() << " (" << RdKafka::err2str(event.err()) << "): " << event.str() << std::endl; break; } }
void event_cb (RdKafka::Event &event) {
  switch (event.type()) {
    case RdKafka::Event::EVENT_ERROR:
      m_tym->time_stamp();
      *m_ostr << "ERROR (" << RdKafka::err2str(event.err()) << "): "
              << event.str() << "\n";
      if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN) {
        m_run = false;
      }
      break;

    case RdKafka::Event::EVENT_STATS:
      m_tym->time_stamp();
      *m_ostr << "STATS(" << event.str() << ")\n";
      break;

    case RdKafka::Event::EVENT_LOG:
      m_tym->time_stamp();
      *m_ostr << "LOG(" << event.severity() << ") " << event.fac() << " "
              << event.str() << "\n";
      break;

    default:
      m_tym->time_stamp();
      *m_ostr << "EVENT " << event.type() << " ("
              << RdKafka::err2str(event.err()) << "): " << event.str()
              << std::endl;
      break;
  }
}
void KafkaEventCallback::event_cb (RdKafka::Event &event) {
  switch (event.type()) {
    case RdKafka::Event::EVENT_ERROR:
      LOG_ERR("Kafka error: %s", RdKafka::err2str(event.err()).c_str());
      if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN) {
        LOG_ERR("Kafka all brokers down: %s",
                RdKafka::err2str(event.err()).c_str());
        *isConnected = false;
      }
      break;

    case RdKafka::Event::EVENT_STATS:
      LOG_INFO("Kafka stats: %s", event.str().c_str());
      break;

    case RdKafka::Event::EVENT_LOG: {
      switch (event.severity()) {
        case RdKafka::Event::EVENT_SEVERITY_EMERG:
        case RdKafka::Event::EVENT_SEVERITY_ALERT:
        case RdKafka::Event::EVENT_SEVERITY_CRITICAL:
        case RdKafka::Event::EVENT_SEVERITY_ERROR:
          // rdkafka will reconnect on its own, so no connection-state change
          // is needed here; just log the error.
          LOG_ERR("Kafka LOG-%i-%s: %s", event.severity(),
                  event.fac().c_str(), event.str().c_str());
          break;
        case RdKafka::Event::EVENT_SEVERITY_WARNING:
          LOG_WARN("Kafka LOG-%i-%s: %s", event.severity(),
                   event.fac().c_str(), event.str().c_str());
          break;
        case RdKafka::Event::EVENT_SEVERITY_NOTICE:
          LOG_NOTICE("Kafka LOG-%i-%s: %s", event.severity(),
                     event.fac().c_str(), event.str().c_str());
          break;
        default:
          LOG_INFO("Kafka LOG-%i-%s: %s", event.severity(),
                   event.fac().c_str(), event.str().c_str());
          break;
      }
      break;
    }

    default:
      LOG_INFO("Kafka event type = %d (%s) %s", event.type(),
               RdKafka::err2str(event.err()).c_str(), event.str().c_str());
      break;
  }
}
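Each of the callbacks above is a member of a class derived from RdKafka::EventCb and is only invoked when the client's event queue is served (e.g. via poll()). The following is a minimal, self-contained sketch of that surrounding boilerplate; the class name ExampleEventCb and the broker address are placeholders, while Conf::create, conf->set("event_cb", ...), Producer::create and poll() are the standard librdkafka C++ API.

// Sketch only: wiring an event_cb into a client instance.
#include <iostream>
#include <string>
#include <librdkafka/rdkafkacpp.h>

class ExampleEventCb : public RdKafka::EventCb {
 public:
  void event_cb(RdKafka::Event &event) {
    switch (event.type()) {
      case RdKafka::Event::EVENT_ERROR:
        std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): "
                  << event.str() << std::endl;
        break;
      case RdKafka::Event::EVENT_LOG:
        std::cerr << "LOG-" << event.severity() << "-" << event.fac() << ": "
                  << event.str() << std::endl;
        break;
      default:
        std::cerr << "EVENT " << event.type() << ": " << event.str()
                  << std::endl;
        break;
    }
  }
};

int main() {
  std::string errstr;
  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);

  ExampleEventCb event_cb; /* must outlive the client instance */
  conf->set("bootstrap.servers", "localhost:9092", errstr); /* placeholder broker */
  conf->set("event_cb", &event_cb, errstr);

  RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
  if (!producer) {
    std::cerr << "Failed to create producer: " << errstr << std::endl;
    return 1;
  }

  /* poll() serves queued events, which is what invokes event_cb(). */
  producer->poll(1000);

  delete producer;
  delete conf;
  return 0;
}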