/** * @brief When rd_kafka_new() succeeds it takes ownership of the config object, * but when it fails the config object remains in application custody. * These tests makes sure that's the case (preferably run with valgrind) */ static void do_test_kafka_new_failures (void) { rd_kafka_conf_t *conf; rd_kafka_t *rk; char errstr[512]; conf = rd_kafka_conf_new(); rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); TEST_ASSERT(rk, "kafka_new() failed: %s", errstr); rd_kafka_destroy(rk); /* Set an erroneous configuration value that is not checked * by conf_set() but by rd_kafka_new() */ conf = rd_kafka_conf_new(); if (rd_kafka_conf_set(conf, "partition.assignment.strategy", "range,thiswillfail", errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) TEST_FAIL("%s", errstr); rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); TEST_ASSERT(!rk, "kafka_new() should have failed"); /* config object should still belong to us, * correct the erroneous config and try again. */ if (rd_kafka_conf_set(conf, "partition.assignment.strategy", NULL, errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) TEST_FAIL("%s", errstr); rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); TEST_ASSERT(rk, "kafka_new() failed: %s", errstr); rd_kafka_destroy(rk); }
int Http::kafka_consumer_::Init(const int partition, const char* topic, const char* brokers, MsgConsume msg_consume) { char err_str[512]; partition_ = partition; msg_consume_ = msg_consume; rd_kafka_conf_t *conf = rd_kafka_conf_new(); if (NULL == conf) { return -1; } rd_kafka_conf_set(conf, "batch.num.messages", "100", err_str, sizeof(err_str)); if (!(rk_ = rd_kafka_new(RD_KAFKA_CONSUMER, conf, err_str, sizeof(err_str)))) { return -1; } rd_kafka_set_log_level(rk_, 1); if (rd_kafka_brokers_add(rk_, brokers) == 0) { return -1; } rd_kafka_topic_conf_t *topic_conf = rd_kafka_topic_conf_new(); rkt_ = rd_kafka_topic_new(rk_, topic, topic_conf); if (NULL == rkt_) { return -1; } //RD_KAFKA_OFFSET_BEGINNING,从partition消息队列的开始进行consume; //RD_KAFKA_OFFSET_END:从partition中的将要produce的下一条信息开始(忽略即当前所有的消息) if (rd_kafka_consume_start(this->rkt_, partition, RD_KAFKA_OFFSET_END) == -1) { return -1; } return 1; }
/**
 * @brief Producer.__init__ for the Python binding: build the conf from
 *        the Python args, register the delivery callback and create the
 *        underlying rdkafka producer handle.
 * @returns 0 on success, -1 with a Python exception set on failure.
 */
static int Producer_init (PyObject *selfobj, PyObject *args, PyObject *kwargs) {
        Handle *self = (Handle *)selfobj;
        char errstr[256];
        rd_kafka_conf_t *conf;

        /* Refuse double-initialization of the same object. */
        if (self->rk) {
                PyErr_SetString(PyExc_RuntimeError,
                                "Producer already __init__:ialized");
                return -1;
        }

        self->type = RD_KAFKA_PRODUCER;

        conf = common_conf_setup(RD_KAFKA_PRODUCER, self, args, kwargs);
        if (!conf)
                return -1; /* Exception raised by common_conf_setup() */

        rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);

        self->rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                                errstr, sizeof(errstr));
        if (!self->rk) {
                cfl_PyErr_Format(rd_kafka_last_error(),
                                 "Failed to create producer: %s", errstr);
                /* conf is still ours since rd_kafka_new() failed. */
                rd_kafka_conf_destroy(conf);
                return -1;
        }

        /* Forward log messages to poll queue */
        if (self->logger)
                rd_kafka_set_log_queue(self->rk, NULL);

        return 0;
}
/**
 * Create a legacy Consumer from the given global Conf.
 *
 * @returns a new ConsumerImpl, or NULL with \p errstr set on failure.
 *
 * Fix: when rd_kafka_new() fails it does NOT take ownership of the
 * duplicated rk_conf, which previously leaked on that path.
 */
RdKafka::Consumer *RdKafka::Consumer::create (RdKafka::Conf *conf,
                                              std::string &errstr) {
  char errbuf[512];
  RdKafka::ConfImpl *confimpl = dynamic_cast<RdKafka::ConfImpl *>(conf);
  RdKafka::ConsumerImpl *rkc = new RdKafka::ConsumerImpl();
  rd_kafka_conf_t *rk_conf = NULL;

  if (confimpl) {
    if (!confimpl->rk_conf_) {
      errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
      delete rkc;
      return NULL;
    }

    rkc->set_common_config(confimpl);

    /* Duplicate so the application keeps its Conf object. */
    rk_conf = rd_kafka_conf_dup(confimpl->rk_conf_);
  }

  rd_kafka_t *rk;
  if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, rk_conf,
                          errbuf, sizeof(errbuf)))) {
    errstr = errbuf;
    /* rd_kafka_new() only owns rk_conf on success: destroy our dup. */
    if (rk_conf)
      rd_kafka_conf_destroy(rk_conf);
    delete rkc;
    return NULL;
  }

  rkc->rk_ = rk;

  return rkc;
}
/** * Issue #530: * "Legacy Consumer. Delete hangs if done right after RdKafka::Consumer::create. * But If I put a start and stop in between, there is no issue." */ static int legacy_consumer_early_destroy (void) { rd_kafka_t *rk; rd_kafka_topic_t *rkt; char errstr[512]; int pass; const char *topic = test_mk_topic_name(__FUNCTION__, 0); for (pass = 0 ; pass < 2 ; pass++) { TEST_SAY("%s: pass #%d\n", __FUNCTION__, pass); rk = rd_kafka_new(RD_KAFKA_CONSUMER, NULL, errstr, sizeof(errstr)); TEST_ASSERT(rk, "failed to create consumer: %s", errstr); if (pass == 1) { /* Second pass, create a topic too. */ rkt = rd_kafka_topic_new(rk, topic, NULL); TEST_ASSERT(rkt, "failed to create topic: %s", rd_kafka_err2str( rd_kafka_errno2err(errno))); rd_sleep(1); rd_kafka_topic_destroy(rkt); } rd_kafka_destroy(rk); } return 0; }
/**
 * @brief Create the global rd_kafka handle (and the topic handle, when a
 *        topic is configured) from the previously populated conf.* state.
 *        Fatal on any failure. Ownership of both conf objects passes to
 *        librdkafka, so the pointers are cleared afterwards.
 */
void kfc_rdkafka_init(rd_kafka_type_t type) {
        char errstr[512];

        if (type == RD_KAFKA_PRODUCER) {
                /* Route termination wakeups through SIGIO so blocking
                 * operations can be interrupted at shutdown. */
                char signum[16];
                snprintf(signum, sizeof(signum), "%i", SIGIO);
                rd_kafka_conf_set(conf.rk_conf, "internal.termination.signal",
                                  signum, NULL, 0);
        }

        /* Create handle */
        conf.rk = rd_kafka_new(type, conf.rk_conf, errstr, sizeof(errstr));
        if (!conf.rk)
                FATAL("Failed to create rd_kafka struct: %s", errstr);

        rd_kafka_set_logger(conf.rk, rd_kafka_log_print);
        if (conf.debug)
                rd_kafka_set_log_level(conf.rk, LOG_DEBUG);
        else if (conf.verbosity == 0)
                rd_kafka_set_log_level(conf.rk, 0);

        /* Create topic, if specified */
        if (conf.topic) {
                conf.rkt = rd_kafka_topic_new(conf.rk, conf.topic,
                                              conf.rkt_conf);
                if (!conf.rkt)
                        FATAL("Failed to create rk_kafka_topic %s: %s",
                              conf.topic,
                              rd_kafka_err2str(rd_kafka_errno2err(errno)));
        }

        conf.rk_conf = NULL;
        conf.rkt_conf = NULL;
}
/**
 * @brief Link-time ABI smoke test: reference every public librdkafka
 *        symbol so the linker proves they all exist in the built library.
 *
 * The calls sit inside `if (argc < 0)` which can never be true, so none
 * of them execute at runtime (many would be invalid with NULL args);
 * they only need to resolve at link time.
 */
int main (int argc, char **argv) {
        if (argc < 0 /* always false */) {
                /* Version and error-string utilities */
                rd_kafka_version();
                rd_kafka_version_str();
                rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR);
                rd_kafka_errno2err(EINVAL);

                /* Global configuration object API */
                rd_kafka_conf_new();
                rd_kafka_conf_destroy(NULL);
                rd_kafka_conf_dup(NULL);
                rd_kafka_conf_set(NULL, NULL, NULL, NULL, 0);
                rd_kafka_conf_set_dr_cb(NULL, NULL);
                rd_kafka_conf_set_error_cb(NULL, NULL);
                rd_kafka_conf_set_stats_cb(NULL, NULL);
                rd_kafka_conf_set_opaque(NULL, NULL);
                rd_kafka_conf_dump(NULL, NULL);
                rd_kafka_topic_conf_dump(NULL, NULL);
                rd_kafka_conf_dump_free(NULL, 0);
                rd_kafka_conf_properties_show(NULL);

                /* Topic configuration object API */
                rd_kafka_topic_conf_new();
                rd_kafka_topic_conf_dup(NULL);
                rd_kafka_topic_conf_destroy(NULL);
                rd_kafka_topic_conf_set(NULL, NULL, NULL, NULL, 0);
                rd_kafka_topic_conf_set_opaque(NULL, NULL);
                rd_kafka_topic_conf_set_partitioner_cb(NULL, NULL);
                rd_kafka_topic_partition_available(NULL, 0);
                rd_kafka_msg_partitioner_random(NULL, NULL, 0, 0, NULL, NULL);

                /* Handle and topic lifecycle */
                rd_kafka_new(0, NULL, NULL, 0);
                rd_kafka_destroy(NULL);
                rd_kafka_name(NULL);
                rd_kafka_topic_new(NULL, NULL, NULL);
                rd_kafka_topic_destroy(NULL);
                rd_kafka_topic_name(NULL);

                /* Message and consumer API */
                rd_kafka_message_destroy(NULL);
                rd_kafka_message_errstr(NULL);
                rd_kafka_consume_start(NULL, 0, 0);
                rd_kafka_consume_stop(NULL, 0);
                rd_kafka_consume(NULL, 0, 0);
                rd_kafka_consume_batch(NULL, 0, 0, NULL, 0);
                rd_kafka_consume_callback(NULL, 0, 0, NULL, NULL);
                rd_kafka_offset_store(NULL, 0, 0);

                /* Producer and misc API */
                rd_kafka_produce(NULL, 0, 0, NULL, 0, NULL, 0, NULL);
                rd_kafka_poll(NULL, 0);
                rd_kafka_brokers_add(NULL, NULL);
                rd_kafka_set_logger(NULL, NULL);
                rd_kafka_set_log_level(NULL, 0);
                rd_kafka_log_print(NULL, 0, NULL, NULL);
                rd_kafka_log_syslog(NULL, 0, NULL, NULL);
                rd_kafka_outq_len(NULL);
                rd_kafka_dump(NULL, NULL);
                rd_kafka_thread_cnt();
                rd_kafka_wait_destroyed(0);
        }

        return 0;
}
/**
 * producer_init_kafka
 *
 * Initialize the producer instance, setting up the Kafka topic and context.
 *
 * Fix: the partitioner callback was previously set on topic_conf AFTER
 * rd_kafka_topic_new() had already consumed the conf object, so it never
 * took effect (and touched a conf we no longer own). It is now set
 * before the topic is created.
 *
 * @param self   VALUE Instance of the Producer Ruby object
 * @param config HermannInstanceConfig* the instance configuration associated with this producer.
 */
void producer_init_kafka(VALUE self, HermannInstanceConfig* config) {

	TRACER("initing (%p)\n", config);

	config->quiet = !isatty(STDIN_FILENO);

	/* Kafka configuration */
	config->conf = rd_kafka_conf_new();

	/* Add our `self` to the opaque pointer for error and logging callbacks */
	rd_kafka_conf_set_opaque(config->conf, (void*)config);
	rd_kafka_conf_set_error_cb(config->conf, producer_error_callback);

	/* Topic configuration */
	config->topic_conf = rd_kafka_topic_conf_new();

	/* Set the partitioner callback BEFORE rd_kafka_topic_new(): the
	 * topic handle takes ownership of topic_conf on creation, after
	 * which configuring it is invalid. */
	rd_kafka_topic_conf_set_partitioner_cb(config->topic_conf,
	                                       producer_partitioner_callback);

	/* Set up a message delivery report callback.
	 * It will be called once for each message, either on successful
	 * delivery to broker, or upon failure to deliver to broker. */
	rd_kafka_conf_set_dr_msg_cb(config->conf, msg_delivered);

	/* Create Kafka handle */
	if (!(config->rk = rd_kafka_new(RD_KAFKA_PRODUCER,
	                                config->conf,
	                                config->errstr,
	                                sizeof(config->errstr)))) {
		/* TODO: Use proper logger */
		fprintf(stderr, "%% Failed to create new producer: %s\n", config->errstr);
		rb_raise(rb_eRuntimeError, "%% Failed to create new producer: %s\n", config->errstr);
	}

	/* Set logger */
	rd_kafka_set_logger(config->rk, logger);
	rd_kafka_set_log_level(config->rk, LOG_DEBUG);

	if (rd_kafka_brokers_add(config->rk, config->brokers) == 0) {
		/* TODO: Use proper logger */
		fprintf(stderr, "%% No valid brokers specified\n");
		rb_raise(rb_eRuntimeError, "No valid brokers specified");
		return;
	}

	/* Create topic (consumes config->topic_conf) */
	config->rkt = rd_kafka_topic_new(config->rk, config->topic,
	                                 config->topic_conf);

	/* We're now initialized */
	config->isInitialized = 1;

	TRACER("completed kafka init\n");
}
/**
 * Initialize a legacy consumer: create conf + handle, add brokers,
 * create the topic handle and start consuming from the tail of the
 * given partition.
 *
 * Fix: every failure path after an allocation now releases what was
 * created so far (conf / rk / topic_conf / rkt), instead of leaking.
 *
 * @return CONSUMER_INIT_SUCCESS or CONSUMER_INIT_FAILED.
 */
int consumer_init(const int partition, const char* topic, const char* brokers,
                  Consume_Data consume_data, wrapper_Info* producer_info)
{
	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *topic_conf;
	rd_kafka_t *rk;
	char errstr[512];

	producer_info->start_offset = RD_KAFKA_OFFSET_END;
	producer_info->partition = partition;

	if (NULL != consume_data)
		producer_info->func_consume_data = consume_data;
	else
		return CONSUMER_INIT_FAILED;

	/* Kafka configuration */
	conf = rd_kafka_conf_new();
	if (NULL == conf)
		return CONSUMER_INIT_FAILED;

	if (RD_KAFKA_CONF_OK != rd_kafka_conf_set(conf, "group.id", "one",
						  errstr, sizeof(errstr))) {
		rd_kafka_conf_destroy(conf);
		return CONSUMER_INIT_FAILED;
	}

	/* Create Kafka handle; on failure conf is still owned by us. */
	if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf,
				errstr, sizeof(errstr)))) {
		fprintf(stderr, "%% Failed to create new consumer: %s\n",
			errstr);
		rd_kafka_conf_destroy(conf);
		return CONSUMER_INIT_FAILED;
	}

	rd_kafka_set_log_level(rk, LOG_DEBUG);

	/* Add brokers */
	if (rd_kafka_brokers_add(rk, brokers) == 0) {
		fprintf(stderr, "%% No valid brokers specified\n");
		rd_kafka_destroy(rk);
		return CONSUMER_INIT_FAILED;
	}

	/* Topic configuration */
	topic_conf = rd_kafka_topic_conf_new();

	/* Create topic (consumes topic_conf on success only) */
	producer_info->rkt = rd_kafka_topic_new(rk, topic, topic_conf);
	if (NULL == producer_info->rkt) {
		rd_kafka_topic_conf_destroy(topic_conf);
		rd_kafka_destroy(rk);
		return CONSUMER_INIT_FAILED;
	}
	producer_info->rk = rk;

	/* Start consuming */
	if (rd_kafka_consume_start(producer_info->rkt, partition,
				   RD_KAFKA_OFFSET_END) == -1) {
		fprintf(stderr, "%% Failed to start consuming: %s\n",
			rd_kafka_err2str(rd_kafka_errno2err(errno)));
		rd_kafka_topic_destroy(producer_info->rkt);
		producer_info->rkt = NULL;
		rd_kafka_destroy(rk);
		producer_info->rk = NULL;
		return CONSUMER_INIT_FAILED;
	}

	return CONSUMER_INIT_SUCCESS;
}
/**
 * Lazily create the plugin's Kafka handle and topic handle from the
 * stored config templates.
 *
 * Ownership notes (order-critical):
 *  - the stored ctx->kafka_conf / ctx->conf templates are duplicated,
 *    the duplicate is handed to librdkafka, and only on success is the
 *    template destroyed and its pointer cleared — so a failed attempt
 *    can be retried later with the template intact.
 *
 * @return 0 when both handles exist, non-zero (1 or errno) on failure.
 */
static int kafka_handle(struct kafka_topic_context *ctx) /* {{{ */
{
    char errbuf[1024];
    rd_kafka_conf_t *conf;
    rd_kafka_topic_conf_t *topic_conf;

    /* Fast path: everything already created. */
    if (ctx->kafka != NULL && ctx->topic != NULL)
        return(0);

    if (ctx->kafka == NULL) {
        /* Duplicate the template so it survives a failed attempt. */
        if ((conf = rd_kafka_conf_dup(ctx->kafka_conf)) == NULL) {
            ERROR("write_kafka plugin: cannot duplicate kafka config");
            return(1);
        }

        if ((ctx->kafka = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                                       errbuf, sizeof(errbuf))) == NULL) {
            ERROR("write_kafka plugin: cannot create kafka handle.");
            return 1;
        }

        /* Handle created: the template conf is no longer needed. */
        rd_kafka_conf_destroy(ctx->kafka_conf);
        ctx->kafka_conf = NULL;

        INFO ("write_kafka plugin: created KAFKA handle : %s",
              rd_kafka_name(ctx->kafka));

#if defined(HAVE_LIBRDKAFKA_LOGGER) && !defined(HAVE_LIBRDKAFKA_LOG_CB)
        rd_kafka_set_logger(ctx->kafka, kafka_log);
#endif
    }

    if (ctx->topic == NULL ) {
        /* Same duplicate-then-commit dance for the topic conf. */
        if ((topic_conf = rd_kafka_topic_conf_dup(ctx->conf)) == NULL) {
            ERROR("write_kafka plugin: cannot duplicate kafka topic config");
            return 1;
        }

        if ((ctx->topic = rd_kafka_topic_new(ctx->kafka, ctx->topic_name,
                                             topic_conf)) == NULL) {
            ERROR("write_kafka plugin: cannot create topic : %s\n",
                  rd_kafka_err2str(rd_kafka_errno2err(errno)));
            return errno;
        }

        rd_kafka_topic_conf_destroy(ctx->conf);
        ctx->conf = NULL;

        INFO ("write_kafka plugin: handle created for topic : %s",
              rd_kafka_topic_name(ctx->topic));
    }

    return(0);
} /* }}} int kafka_handle */
/** * Enable statistics with a set interval, make sure the stats callbacks are * called within reasonable intervals. */ static void do_test_stats_timer (void) { rd_kafka_t *rk; rd_kafka_conf_t *conf; const int exp_calls = 10; char errstr[512]; struct state state; test_timing_t t_new; memset(&state, 0, sizeof(state)); state.interval = 600*1000; test_conf_init(&conf, NULL, 200); test_conf_set(conf, "statistics.interval.ms", "600"); rd_kafka_conf_set_stats_cb(conf, stats_cb); rd_kafka_conf_set_opaque(conf, &state); TIMING_START(&t_new, "rd_kafka_new()"); rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr)); TIMING_STOP(&t_new); if (!rk) TEST_FAIL("Failed to create instance: %s\n", errstr); TEST_SAY("Starting wait loop for %d expected stats_cb calls " "with an interval of %dms\n", exp_calls, state.interval/1000); while (state.calls < exp_calls) { test_timing_t t_poll; TIMING_START(&t_poll, "rd_kafka_poll()"); rd_kafka_poll(rk, 100); TIMING_STOP(&t_poll); if (TIMING_DURATION(&t_poll) > 150*1000) TEST_WARN("rd_kafka_poll(rk,100) " "took more than 50%% extra\n"); } rd_kafka_destroy(rk); if (state.calls > exp_calls) TEST_SAY("Got more calls than expected: %d > %d\n", state.calls, exp_calls); if (state.fails) TEST_FAIL("%d/%d intervals failed\n", state.fails, state.calls); else TEST_SAY("All %d intervals okay\n", state.calls); }
/**
 * @brief Run the full local Admin API test suite for the given client
 *        type: unclean-destroy cases first, then CreateTopics /
 *        DeleteTopics / mixed / config requests against both temporary
 *        queues and the main queue.
 *
 * Local-only: brokers are removed from the config so controller lookups
 * are expected to fail/time out.
 */
static void do_test_apis (rd_kafka_type_t cltype) {
        rd_kafka_t *rk;
        char errstr[512];
        rd_kafka_queue_t *mainq, *backgroundq;
        rd_kafka_conf_t *conf;

        /* Shared state used by background_event_cb. */
        mtx_init(&last_event_lock, mtx_plain);
        cnd_init(&last_event_cnd);

        do_test_unclean_destroy(cltype, 0/*tempq*/);
        do_test_unclean_destroy(cltype, 1/*mainq*/);

        test_conf_init(&conf, NULL, 0);
        /* Remove brokers, if any, since this is a local test and we
         * rely on the controller not being found. */
        test_conf_set(conf, "bootstrap.servers", "");
        test_conf_set(conf, "socket.timeout.ms", MY_SOCKET_TIMEOUT_MS_STR);
        /* For use with the background queue */
        rd_kafka_conf_set_background_event_cb(conf, background_event_cb);

        rk = rd_kafka_new(cltype, conf, errstr, sizeof(errstr));
        TEST_ASSERT(rk, "kafka_new(%d): %s", cltype, errstr);

        mainq = rd_kafka_queue_get_main(rk);
        backgroundq = rd_kafka_queue_get_background(rk);

        do_test_options(rk);

        do_test_CreateTopics("temp queue, no options", rk, NULL, 0, 0);
        do_test_CreateTopics("temp queue, no options, background_event_cb",
                             rk, backgroundq, 1, 0);
        do_test_CreateTopics("temp queue, options", rk, NULL, 0, 1);
        do_test_CreateTopics("main queue, options", rk, mainq, 0, 1);

        do_test_DeleteTopics("temp queue, no options", rk, NULL, 0);
        do_test_DeleteTopics("temp queue, options", rk, NULL, 1);
        do_test_DeleteTopics("main queue, options", rk, mainq, 1);

        do_test_mix(rk, mainq);

        do_test_configs(rk, mainq);

        /* Queues must be released before the handle is destroyed. */
        rd_kafka_queue_destroy(backgroundq);
        rd_kafka_queue_destroy(mainq);

        rd_kafka_destroy(rk);

        mtx_destroy(&last_event_lock);
        cnd_destroy(&last_event_cnd);
}
/**
 * Create a Kafka handle of the requested type and attach brokers.
 *
 * Fix: the compression codec, delivery-report callbacks and the error
 * callback were previously applied to the conf object AFTER
 * rd_kafka_new() had consumed it — use after ownership transfer, and
 * none of the settings took effect on the handle. All conf
 * configuration is now done before the handle is created. The unused
 * local `tmp` was removed.
 *
 * Exits the process on creation/broker failure (original behavior).
 */
rd_kafka_t *kafka_set_connection(rd_kafka_type_t type, const char *b,
                                 int report_level, const char *compression)
{
    rd_kafka_t *r = NULL;
    char errstr[512];
    rd_kafka_conf_t *conf = rd_kafka_conf_new();

    if (type == RD_KAFKA_PRODUCER)
    {
        /* NOTE(review): codec is only applied when compression == "none";
         * confirm whether the condition should be negated. */
        if (compression && !strcmp(compression, "none"))
        {//silently fail on error ATM...
            if (RD_KAFKA_CONF_OK != rd_kafka_conf_set(conf,
                    "compression.codec", compression,
                    errstr, sizeof errstr))
            {
                if (log_level)
                {
                    openlog("phpkafka", 0, LOG_USER);
                    syslog(LOG_INFO, "Failed to set compression to %s",
                           compression);
                }
            }
        }

        /* Set up a message delivery report callback.
         * It will be called once for each message, either on successful
         * delivery to broker, or upon failure to deliver to broker. */
        if (report_level == 1)
            rd_kafka_conf_set_dr_cb(conf, kafka_produce_cb_simple);
        else if (report_level == 2)
            rd_kafka_conf_set_dr_msg_cb(conf, kafka_produce_detailed_cb);
    }
    rd_kafka_conf_set_error_cb(conf, kafka_err_cb);

    /* rd_kafka_new() takes ownership of conf on success. */
    if (!(r = rd_kafka_new(type, conf, errstr, sizeof(errstr))))
    {
        if (log_level)
        {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_INFO,
                   "phpkafka - failed to create new producer: %s", errstr);
        }
        exit(1);
    }

    /* Add brokers */
    if (rd_kafka_brokers_add(r, b) == 0)
    {
        if (log_level)
        {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_INFO, "php kafka - No valid brokers specified");
        }
        exit(1);
    }

    if (log_level)
    {
        openlog("phpkafka", 0, LOG_USER);
        syslog(LOG_INFO, "phpkafka - using: %s", brokers);
    }

    return r;
}
/* Shared logic of Consumer_start and Producer_start.
 *
 * Creates the rdkafka handle from self->rdk_conf, adds brokers, and
 * creates the topic handle from self->rdk_topic_conf. Both conf
 * pointers are cleared once librdkafka has taken ownership of them.
 *
 * The GIL is released (Py_BEGIN/END_ALLOW_THREADS) around every
 * librdkafka call because those calls may invoke callbacks that need
 * the GIL — keeping it held would deadlock. The handle's exclusive
 * lock is taken for the whole operation; RdkHandle_start_fail() is
 * assumed to release it on the error paths (TODO confirm).
 *
 * Returns Py_None on success, NULL with a Python exception set on
 * failure. */
static PyObject *
RdkHandle_start(RdkHandle *self,
                rd_kafka_type_t rdk_type,
                const char *brokers,
                const char *topic_name)
{
    if (RdkHandle_excl_lock(self)) return NULL;
    if (self->rdk_handle) {
        set_pykafka_error("RdKafkaException", "Already started!");
        return RdkHandle_start_fail(self, RdkHandle_stop);
    }

    /* Configure and start rdk_handle */
    char errstr[512];
    Py_BEGIN_ALLOW_THREADS  /* avoid callbacks deadlocking */
        self->rdk_handle = rd_kafka_new(
            rdk_type, self->rdk_conf, errstr, sizeof(errstr));
        self->rdk_conf = NULL;  /* deallocated by rd_kafka_new() */
    Py_END_ALLOW_THREADS
    if (! self->rdk_handle) {
        set_pykafka_error("RdKafkaException", errstr);
        return RdkHandle_start_fail(self, RdkHandle_stop);
    }

    /* Set brokers */
    int brokers_added;
    Py_BEGIN_ALLOW_THREADS  /* avoid callbacks deadlocking */
        brokers_added = rd_kafka_brokers_add(self->rdk_handle, brokers);
    Py_END_ALLOW_THREADS
    if (brokers_added == 0) {
        set_pykafka_error("RdKafkaException", "adding brokers failed");
        return RdkHandle_start_fail(self, RdkHandle_stop);
    }

    /* Configure and take out a topic handle */
    Py_BEGIN_ALLOW_THREADS  /* avoid callbacks deadlocking */
        self->rdk_topic_handle = rd_kafka_topic_new(self->rdk_handle,
                                                    topic_name,
                                                    self->rdk_topic_conf);
        self->rdk_topic_conf = NULL;  /* deallocated by rd_kafka_topic_new() */
    Py_END_ALLOW_THREADS
    if (! self->rdk_topic_handle) {
        set_pykafka_error_from_code(rd_kafka_errno2err(errno), NULL);
        return RdkHandle_start_fail(self, RdkHandle_stop);
    }

    if (RdkHandle_unlock(self)) return NULL;
    Py_INCREF(Py_None);
    return Py_None;
}
/**
 * Initialize a producer wrapper: build conf, create the handle, add
 * brokers and create the topic handle.
 *
 * Fix: failure paths now release what was already created (conf on
 * conf_set/new failure, rk on brokers failure, topic_conf/rk when the
 * topic cannot be created) instead of leaking, and the topic handle is
 * NULL-checked before being stored.
 *
 * @return PRODUCER_INIT_SUCCESS or PRODUCER_INIT_FAILED.
 */
int producer_init(const int partition, const char* topic, const char* brokers,
                  Msg_Delivered func_msg_delivered, wrapper_Info* producer_info)
{
	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *topic_conf;
	rd_kafka_t *rk;
	char errstr[512];

	producer_info->partition = partition;
	strcpy(producer_info->topic, topic);

	if (NULL != func_msg_delivered)
		producer_info->func_msg_delivered = func_msg_delivered;
	else
		return PRODUCER_INIT_FAILED;

	/* Kafka configuration */
	conf = rd_kafka_conf_new();
	if (NULL == conf)
		return PRODUCER_INIT_FAILED;

	if (RD_KAFKA_CONF_OK != rd_kafka_conf_set(conf,
			"queue.buffering.max.messages", "500000", NULL, 0)) {
		rd_kafka_conf_destroy(conf);
		return PRODUCER_INIT_FAILED;
	}

	/* Set logger */
	rd_kafka_conf_set_log_cb(conf, logger);

	/* Delivery report callback */
	rd_kafka_conf_set_dr_cb(conf, func_msg_delivered);

	/* Create Kafka handle; on failure conf remains ours. */
	if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
				errstr, sizeof(errstr)))) {
		fprintf(stderr, "%% Failed to create new producer: %s\n",
			errstr);
		rd_kafka_conf_destroy(conf);
		return PRODUCER_INIT_FAILED;
	}

	/* Add brokers */
	if (rd_kafka_brokers_add(rk, brokers) == 0) {
		fprintf(stderr, "%% No valid brokers specified\n");
		rd_kafka_destroy(rk);
		return PRODUCER_INIT_FAILED;
	}

	/* Topic configuration */
	topic_conf = rd_kafka_topic_conf_new();

	/* Create topic (consumes topic_conf on success only) */
	producer_info->rkt = rd_kafka_topic_new(rk, topic, topic_conf);
	if (NULL == producer_info->rkt) {
		rd_kafka_topic_conf_destroy(topic_conf);
		rd_kafka_destroy(rk);
		return PRODUCER_INIT_FAILED;
	}
	producer_info->rk = rk;

	return PRODUCER_INIT_SUCCESS;
}
/**
 * @brief setup_kafka initialises librdkafka based on the config
 *        wrapped in kafka_t
 *
 * Fix: the conf object is now destroyed on the conf_set and
 * rd_kafka_new failure paths (rd_kafka_new only takes ownership on
 * success), instead of leaking.
 *
 * @param k kafka configuration
 * @return 0 on success, non-zero on failure
 **/
int setup_kafka(kafka_t* k)
{
    char* brokers = "localhost:9092";
    char* zookeepers = NULL;
    char* topic = "bloh";
    config* fk_conf = (config*) fuse_get_context()->private_data;

    if(fk_conf->zookeepers_n > 0) zookeepers = fk_conf->zookeepers[0];
    if(fk_conf->brokers_n > 0) brokers = fk_conf->brokers[0];
    topic = fk_conf->topic[0];

    rd_kafka_topic_conf_t *topic_conf;
    rd_kafka_conf_t *conf;
    conf = rd_kafka_conf_new();
    rd_kafka_conf_set_dr_cb(conf, msg_delivered);

    if(rd_kafka_conf_set(conf, "debug", "all",
                         errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
       rd_kafka_conf_set(conf, "batch.num.messages", "1",
                         errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
    {
        printf("%% Debug configuration failed: %s: %s\n", errstr, "all");
        rd_kafka_conf_destroy(conf);
        return(1);
    }

    if (!(k->rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                               errstr, sizeof(errstr))))
    {
        fprintf(stderr, "%% Failed to create new producer: %s\n", errstr);
        rd_kafka_conf_destroy(conf);
        return(1);
    }

    rd_kafka_set_logger(k->rk, logger);
    rd_kafka_set_log_level(k->rk, 7);

    if (zookeepers != NULL)
    {
        initialize_zookeeper(zookeepers, k);
        return 0;
    }
    else
    {
        if (rd_kafka_brokers_add(k->rk, brokers) == 0)
        {
            fprintf(stderr, "%% No valid brokers specified\n");
            return(1);
        }

        topic_conf = rd_kafka_topic_conf_new();
        k->rkt = rd_kafka_topic_new(k->rk, topic, topic_conf);
        if(k->rkt == NULL)
            printf("topic %s creation failed\n", topic);
        return k->rkt == NULL;
    }
}
/*------------------------------------------------------------------- * TextLog_Open/Close: open/close associated log file *------------------------------------------------------------------- */ rd_kafka_t* KafkaLog_Open (const char* brokers) { char errstr[256]; rd_kafka_conf_t * conf = rd_kafka_conf_new(); //conf.producer.dr_cb = msg_delivered; /* debug */ rd_kafka_t * kafka_handle = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); /*rd_kafka_set_log_level (kafka_handle, LOG_DEBUG);*/ if(NULL==kafka_handle) { perror("kafka_new producer"); FatalError("Failed to create new producer: %s\n",errstr); } return kafka_handle; }
int p_kafka_connect_to_produce(struct p_kafka_host *kafka_host) { if (kafka_host) { kafka_host->rk = rd_kafka_new(RD_KAFKA_PRODUCER, kafka_host->cfg, kafka_host->errstr, sizeof(kafka_host->errstr)); if (!kafka_host->rk) { Log(LOG_ERR, "ERROR ( %s/%s ): Failed to create new Kafka producer: %s\n", config.name, config.type, kafka_host->errstr); p_kafka_close(kafka_host, TRUE); return ERR; } if (config.debug) rd_kafka_set_log_level(kafka_host->rk, LOG_DEBUG); } else return ERR; return SUCCESS; }
/**
 * @brief Verify that an unclean rd_kafka_destroy() does not hang.
 *
 * Issues a DeleteTopics request that cannot complete (no brokers, so no
 * controller) and then destroys the handle while the request is still
 * in flight, asserting destroy finishes within 5s.
 */
static void do_test_unclean_destroy (rd_kafka_type_t cltype, int with_mainq) {
        rd_kafka_t *rk;
        char errstr[512];
        rd_kafka_conf_t *conf;
        rd_kafka_queue_t *q;
        rd_kafka_event_t *rkev;
        rd_kafka_DeleteTopic_t *topic;
        test_timing_t t_destroy;

        test_conf_init(&conf, NULL, 0);
        /* Remove brokers, if any, since this is a local test and we
         * rely on the controller not being found. */
        test_conf_set(conf, "bootstrap.servers", "");
        test_conf_set(conf, "socket.timeout.ms", "60000");

        rk = rd_kafka_new(cltype, conf, errstr, sizeof(errstr));
        TEST_ASSERT(rk, "kafka_new(%d): %s", cltype, errstr);

        TEST_SAY(_C_MAG "[ Test unclean destroy for %s using %s]\n",
                 rd_kafka_name(rk), with_mainq ? "mainq" : "tempq");

        q = with_mainq ? rd_kafka_queue_get_main(rk)
                       : rd_kafka_queue_new(rk);

        topic = rd_kafka_DeleteTopic_new("test");
        rd_kafka_DeleteTopics(rk, &topic, 1, NULL, q);
        rd_kafka_DeleteTopic_destroy(topic);

        /* We're not expecting a result yet since DeleteTopics will attempt
         * to look up the controller for socket.timeout.ms (1 minute). */
        rkev = rd_kafka_queue_poll(q, 100);
        TEST_ASSERT(!rkev, "Did not expect result: %s",
                    rd_kafka_event_name(rkev));

        rd_kafka_queue_destroy(q);

        TEST_SAY("Giving rd_kafka_destroy() 5s to finish, "
                 "despite Admin API request being processed\n");
        test_timeout_set(5);
        TIMING_START(&t_destroy, "rd_kafka_destroy()");
        rd_kafka_destroy(rk);
        TIMING_STOP(&t_destroy);

        /* Restore timeout */
        test_timeout_set(60);
}
/**
 * @brief Create a high-level consumer instance with optional group,
 *        rebalance callback and default topic conf. Fails the test on
 *        any error.
 */
rd_kafka_t *test_create_consumer (
        const char *group_id,
        void (*rebalance_cb) (rd_kafka_t *rk,
                              rd_kafka_resp_err_t err,
                              rd_kafka_topic_partition_list_t *partitions,
                              void *opaque),
        rd_kafka_topic_conf_t *default_topic_conf,
        void *opaque) {
        rd_kafka_t *rk;
        rd_kafka_conf_t *conf;
        char errstr[512];
        char session_tmo[64];

        test_conf_init(&conf, NULL, 20);

        if (group_id &&
            rd_kafka_conf_set(conf, "group.id", group_id,
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
                TEST_FAIL("Conf failed: %s\n", errstr);

        rd_snprintf(session_tmo, sizeof(session_tmo), "%d",
                    test_session_timeout_ms);
        if (rd_kafka_conf_set(conf, "session.timeout.ms", session_tmo,
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
                TEST_FAIL("Conf failed: %s\n", errstr);

        rd_kafka_conf_set_opaque(conf, opaque);

        if (rebalance_cb)
                rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);

        if (default_topic_conf)
                rd_kafka_conf_set_default_topic_conf(conf,
                                                     default_topic_conf);

        /* Create kafka instance */
        rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
        if (!rk)
                TEST_FAIL("Failed to create rdkafka instance: %s\n", errstr);

        TEST_SAY("Created    kafka instance %s\n", rd_kafka_name(rk));

        return rk;
}
/**
 * @brief Create a producer instance with the standard test conf and
 *        delivery-report callback; fails the test on error.
 */
rd_kafka_t *test_create_producer (void) {
        rd_kafka_t *rk;
        rd_kafka_conf_t *conf;
        char errstr[512];

        test_conf_init(&conf, NULL, 20);
        rd_kafka_conf_set_dr_cb(conf, test_dr_cb);

        /* Create kafka instance */
        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
        if (!rk)
                TEST_FAIL("Failed to create rdkafka instance: %s\n", errstr);

        TEST_SAY("Created    kafka instance %s\n", rd_kafka_name(rk));

        return rk;
}
int kafka_consumer_::Init(const int partition, const char* topic, const char* brokers, MsgConsume msg_consume) { char err_str[512]; partition_ = partition; msg_consume_ = msg_consume; printf("partition=%d, topic=%s, brokers=%s\n", partition, topic, brokers); rd_kafka_conf_t *conf = rd_kafka_conf_new(); if (NULL == conf) return CONSUMER_INIT_FAILED; if (RD_KAFKA_CONF_OK != rd_kafka_conf_set(conf, "group.id", "one", err_str, sizeof(err_str))) return CONSUMER_INIT_FAILED; // rd_kafka_conf_set(conf, "queued.min.messages", "1000000", NULL, 0); if (!(rk_ = rd_kafka_new(RD_KAFKA_CONSUMER, conf, err_str, sizeof(err_str)))) { printf("%% Failed to create new consumer: %s\n",err_str); return CONSUMER_INIT_FAILED; } //rd_kafka_set_log_level(rk_, LOG_DEBUG); if (rd_kafka_brokers_add(rk_, brokers) == 0) { printf("%% No valid brokers specified\n"); return CONSUMER_INIT_FAILED; } rd_kafka_topic_conf_t *topic_conf = rd_kafka_topic_conf_new(); rkt_ = rd_kafka_topic_new(rk_, topic, topic_conf); if (NULL == rkt_) { printf("topic creat failed\n"); return CONSUMER_INIT_FAILED; } printf("rkt_=%p,partition=%d\n", rkt_, partition); if (rd_kafka_consume_start(this->rkt_, partition, RD_KAFKA_OFFSET_END) == -1){ printf("Failed to start consuming:"); return CONSUMER_INIT_FAILED; } return CONSUMER_INIT_SUCCESS; }
/**
 * Per-worker init: create the Kafka producer handle and register all
 * configured brokers.
 *
 * Fix: rd_kafka_new() was called with a NULL/0 error buffer and its
 * result was never checked — on failure, rd_kafka_brokers_add() would
 * have been handed a NULL handle. Now the error string is captured,
 * the handle is checked, and the conf (still owned by us on failure)
 * is destroyed before returning an error.
 */
ngx_int_t ngx_http_kafka_init_worker(ngx_cycle_t *cycle)
{
    size_t n;
    char errstr[512];
    ngx_http_kafka_main_conf_t *main_conf;

    main_conf = ngx_http_cycle_get_module_main_conf(cycle,
                                                    ngx_http_kafka_module);

    main_conf->rkc = rd_kafka_conf_new();
    rd_kafka_conf_set_dr_cb(main_conf->rkc, kafka_callback_handler);

    main_conf->rk = rd_kafka_new(RD_KAFKA_PRODUCER, main_conf->rkc,
                                 errstr, sizeof(errstr));
    if (main_conf->rk == NULL) {
        /* rd_kafka_new() only takes ownership of the conf on success. */
        rd_kafka_conf_destroy(main_conf->rkc);
        main_conf->rkc = NULL;
        return -1;  /* == NGX_ERROR */
    }

    for (n = 0; n != main_conf->nbrokers; ++n) {
        /* Temporarily NUL-terminate the ngx_str_t for the C API. */
        ngx_str_helper(&main_conf->brokers[n], ngx_str_push);
        rd_kafka_brokers_add(main_conf->rk,
                             (const char *)main_conf->brokers[n].data);
        ngx_str_helper(&main_conf->brokers[n], ngx_str_pop);
    }

    return 0;
}
/* Create (or re-create) the module-global Kafka producer handle with
 * error and log callbacks installed; raises a Ruby StandardError if the
 * handle cannot be created. Returns nil. */
static VALUE kafka_init(VALUE self)
{
    rd_kafka_conf_t *producer_conf;
    char errstr[512];

    /* Tear down any previous handle before building a new one. */
    if (rk) {
        kafka_destroy();
    }

    producer_conf = rd_kafka_conf_new();
    rd_kafka_conf_set_error_cb(producer_conf, error_cb);
    rd_kafka_conf_set_log_cb(producer_conf, logger);

    rk = rd_kafka_new(RD_KAFKA_PRODUCER, producer_conf,
                      errstr, sizeof(errstr));
    if (!rk) {
        rb_raise(rb_eStandardError,
                 "failed to create kafka producer: %s\n", errstr);
    }

    return Qnil;
}
/**
 * Create a high-level KafkaConsumer from the given global Conf.
 *
 * Fixes:
 *  - the result of dynamic_cast was dereferenced without a NULL check
 *    (the sibling Consumer::create checks it), crashing on a
 *    non-ConfImpl / non-global Conf;
 *  - the duplicated rk_conf leaked when rd_kafka_new() failed, since
 *    ownership only transfers on success.
 *
 * @returns the consumer, or NULL with \p errstr set.
 */
RdKafka::KafkaConsumer *RdKafka::KafkaConsumer::create (RdKafka::Conf *conf,
                                                        std::string &errstr) {
  char errbuf[512];
  RdKafka::ConfImpl *confimpl = dynamic_cast<RdKafka::ConfImpl *>(conf);
  RdKafka::KafkaConsumerImpl *rkc = new RdKafka::KafkaConsumerImpl();
  rd_kafka_conf_t *rk_conf = NULL;
  size_t grlen;

  if (!confimpl || !confimpl->rk_conf_) {
    errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
    delete rkc;
    return NULL;
  }

  /* The consumer group protocol requires a configured group.id. */
  if (rd_kafka_conf_get(confimpl->rk_conf_, "group.id",
                        NULL, &grlen) != RD_KAFKA_CONF_OK ||
      grlen <= 1 /* terminating null only */) {
    errstr = "\"group.id\" must be configured";
    delete rkc;
    return NULL;
  }

  rkc->set_common_config(confimpl);

  /* Duplicate so the application keeps its Conf object. */
  rk_conf = rd_kafka_conf_dup(confimpl->rk_conf_);

  rd_kafka_t *rk;
  if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, rk_conf,
                          errbuf, sizeof(errbuf)))) {
    errstr = errbuf;
    /* rd_kafka_new() only owns rk_conf on success: destroy our dup. */
    rd_kafka_conf_destroy(rk_conf);
    delete rkc;
    return NULL;
  }

  rkc->rk_ = rk;

  /* Redirect handle queue to cgrp's queue to provide a single queue point */
  rd_kafka_poll_set_consumer(rk);

  return rkc;
}
/**
 * @brief Consumer.__init__ for the Python binding: build the conf,
 *        install rebalance/offset-commit callbacks, create the handle
 *        and redirect polling to the consumer queue.
 * @returns 0 on success, -1 with a Python exception set on failure.
 */
static int Consumer_init (PyObject *selfobj, PyObject *args, PyObject *kwargs) {
        Handle *self = (Handle *)selfobj;
        char errstr[256];
        rd_kafka_conf_t *conf;

        /* Refuse double-initialization of the same object. */
        if (self->rk) {
                PyErr_SetString(PyExc_RuntimeError,
                                "Consumer already initialized");
                return -1;
        }

        self->type = RD_KAFKA_CONSUMER;

        conf = common_conf_setup(RD_KAFKA_CONSUMER, self, args, kwargs);
        if (!conf)
                return -1; /* Exception raised by ..conf_setup() */

        rd_kafka_conf_set_rebalance_cb(conf, Consumer_rebalance_cb);
        rd_kafka_conf_set_offset_commit_cb(conf, Consumer_offset_commit_cb);

        self->rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf,
                                errstr, sizeof(errstr));
        if (!self->rk) {
                cfl_PyErr_Format(rd_kafka_last_error(),
                                 "Failed to create consumer: %s", errstr);
                /* conf is still ours since rd_kafka_new() failed. */
                rd_kafka_conf_destroy(conf);
                return -1;
        }

        /* Forward log messages to main queue which is then forwarded
         * to the consumer queue */
        if (self->logger)
                rd_kafka_set_log_queue(self->rk, NULL);

        rd_kafka_poll_set_consumer(self->rk);
        self->u.Consumer.rkqu = rd_kafka_queue_get_consumer(self->rk);
        assert(self->u.Consumer.rkqu);

        return 0;
}
//We're no longer relying on the global rk variable (not thread-safe) static void kafka_init( rd_kafka_type_t type ) { if (rk && type != rk_type) { rd_kafka_destroy(rk); rk = NULL; } if (rk == NULL) { char errstr[512]; rd_kafka_conf_t *conf = rd_kafka_conf_new(); if (!(rk = rd_kafka_new(type, conf, errstr, sizeof(errstr)))) { if (log_level) { openlog("phpkafka", 0, LOG_USER); syslog(LOG_INFO, "phpkafka - failed to create new producer: %s", errstr); } exit(1); } /* Add brokers */ if (rd_kafka_brokers_add(rk, brokers) == 0) { if (log_level) { openlog("phpkafka", 0, LOG_USER); syslog(LOG_INFO, "php kafka - No valid brokers specified"); } exit(1); } /* Set up a message delivery report callback. * It will be called once for each message, either on successful * delivery to broker, or upon failure to deliver to broker. */ if (type == RD_KAFKA_PRODUCER) rd_kafka_conf_set_dr_cb(conf, kafka_produce_cb_simple); rd_kafka_conf_set_error_cb(conf, kafka_err_cb); if (log_level) { openlog("phpkafka", 0, LOG_USER); syslog(LOG_INFO, "phpkafka - using: %s", brokers); } } }
/**
 * @brief Verify that an unclean rd_kafka_destroy() does not hang.
 *
 * Fires a CreateTopics request and destroys the handle while the
 * request is still outstanding, asserting destroy completes within 5s.
 */
static void do_test_unclean_destroy (rd_kafka_type_t cltype, int with_mainq) {
        rd_kafka_t *rk;
        char errstr[512];
        rd_kafka_conf_t *conf;
        rd_kafka_queue_t *q;
        rd_kafka_NewTopic_t *topic;
        test_timing_t t_destroy;

        test_conf_init(&conf, NULL, 0);

        rk = rd_kafka_new(cltype, conf, errstr, sizeof(errstr));
        TEST_ASSERT(rk, "kafka_new(%d): %s", cltype, errstr);

        TEST_SAY(_C_MAG "[ Test unclean destroy for %s using %s]\n",
                 rd_kafka_name(rk), with_mainq ? "mainq" : "tempq");

        q = with_mainq ? rd_kafka_queue_get_main(rk)
                       : rd_kafka_queue_new(rk);

        topic = rd_kafka_NewTopic_new(test_mk_topic_name(__FUNCTION__, 1),
                                      3, 1, NULL, 0);
        rd_kafka_CreateTopics(rk, &topic, 1, NULL, q);
        rd_kafka_NewTopic_destroy(topic);

        rd_kafka_queue_destroy(q);

        TEST_SAY("Giving rd_kafka_destroy() 5s to finish, "
                 "despite Admin API request being processed\n");
        test_timeout_set(5);
        TIMING_START(&t_destroy, "rd_kafka_destroy()");
        rd_kafka_destroy(rk);
        TIMING_STOP(&t_destroy);

        /* Restore timeout */
        test_timeout_set(60);
}
/**
 * consumer_init_kafka
 *
 * Initialize the Kafka context and instantiate a consumer.
 *
 * Raises a Ruby RuntimeError if the handle cannot be created or no
 * valid brokers are accepted.
 *
 * @param config HermannInstanceConfig* pointer to the instance configuration for this producer or consumer
 */
void consumer_init_kafka(HermannInstanceConfig* config) {

	TRACER("configuring rd_kafka\n");

	config->quiet = !isatty(STDIN_FILENO);

	/* Kafka configuration */
	config->conf = rd_kafka_conf_new();

	/* Topic configuration */
	config->topic_conf = rd_kafka_topic_conf_new();

	/* Create Kafka handle */
	config->rk = rd_kafka_new(RD_KAFKA_CONSUMER, config->conf,
	                          config->errstr, sizeof(config->errstr));
	if (!config->rk) {
		fprintf(stderr, "%% Failed to create new consumer: %s\n", config->errstr);
		rb_raise(rb_eRuntimeError, "%% Failed to create new consumer: %s\n", config->errstr);
	}

	/* Set logger */
	rd_kafka_set_logger(config->rk, logger);
	rd_kafka_set_log_level(config->rk, LOG_DEBUG);

	/* Add brokers */
	if (rd_kafka_brokers_add(config->rk, config->brokers) == 0) {
		fprintf(stderr, "%% No valid brokers specified\n");
		rb_raise(rb_eRuntimeError, "No valid brokers specified");
		return;
	}

	/* Create topic (consumes config->topic_conf) */
	config->rkt = rd_kafka_topic_new(config->rk, config->topic,
	                                 config->topic_conf);

	/* We're now initialized */
	config->isInitialized = 1;
}
/**
 * Module init: build producer conf (delivery callback + compression),
 * create the Kafka handle, add brokers and create the topic handle.
 *
 * Fix: on rd_kafka_new() failure the function previously continued and
 * passed a NULL handle to rd_kafka_brokers_add()/rd_kafka_topic_new().
 * It now logs, destroys the conf objects it still owns, and returns.
 */
static void om_kafka_init(nx_module_t *module)
{
	log_debug("Kafka module init entrypoint");
	char errstr[512];
	nx_om_kafka_conf_t* modconf;
	modconf = (nx_om_kafka_conf_t*) module->config;

	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *topic_conf;

	/* Kafka configuration */
	conf = rd_kafka_conf_new();

	/* Topic configuration */
	topic_conf = rd_kafka_topic_conf_new();

	rd_kafka_conf_set_dr_cb(conf, msg_delivered);

	if (rd_kafka_conf_set(conf, "compression.codec", modconf->compression,
			      errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
		log_error("Unable to set compression codec %s",
			  modconf->compression);
	} else {
		log_info("Kafka compression set to %s", modconf->compression);
	}

	if (!(modconf->rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
					 errstr, sizeof(errstr)))) {
		log_error("Failed to create new producer: %s\n", errstr);
		/* Both conf objects are still owned by us on failure;
		 * continuing would dereference a NULL handle below. */
		rd_kafka_conf_destroy(conf);
		rd_kafka_topic_conf_destroy(topic_conf);
		return;
	}

	if (rd_kafka_brokers_add(modconf->rk, modconf->brokerlist) == 0) {
		log_error("No valid brokers specified (%s)",
			  modconf->brokerlist);
	} else {
		log_info("Kafka brokers set to %s", modconf->brokerlist);
	}

	modconf->rkt = rd_kafka_topic_new(modconf->rk, modconf->topic,
					  topic_conf);

	/* NOTE(review): conf/topic_conf are stored after ownership passed
	 * to librdkafka — confirm they are not conf_set() later. */
	modconf->kafka_conf = conf;
	modconf->topic_conf = topic_conf;
}