/**
 * producer_init_kafka
 *
 * Initialize the producer instance, setting up the Kafka topic and context.
 *
 * Fix: the partitioner callback must be set on the topic configuration
 * *before* rd_kafka_topic_new() is called, because rd_kafka_topic_new()
 * takes ownership of (and consumes) the topic conf. The original code set
 * the callback after topic creation, so it was never applied.
 *
 * @param self VALUE Instance of the Producer Ruby object (unused here, kept
 *             for interface compatibility)
 * @param config HermannInstanceConfig* the instance configuration associated
 *               with this producer.
 */
void producer_init_kafka(VALUE self, HermannInstanceConfig* config) {

	TRACER("initing (%p)\n", config);

	config->quiet = !isatty(STDIN_FILENO);

	/* Kafka configuration */
	config->conf = rd_kafka_conf_new();

	/* Register the config struct as the opaque for error and logging
	 * callbacks (note: the opaque is `config`, not `self`). */
	rd_kafka_conf_set_opaque(config->conf, (void*)config);
	rd_kafka_conf_set_error_cb(config->conf, producer_error_callback);

	/* Topic configuration */
	config->topic_conf = rd_kafka_topic_conf_new();

	/* Set the partitioner callback while we still own topic_conf.
	 * rd_kafka_topic_new() below takes ownership of it. */
	rd_kafka_topic_conf_set_partitioner_cb(config->topic_conf,
	                                       producer_partitioner_callback);

	/* Set up a message delivery report callback.
	 * It will be called once for each message, either on successful
	 * delivery to broker, or upon failure to deliver to broker. */
	rd_kafka_conf_set_dr_msg_cb(config->conf, msg_delivered);

	/* Create Kafka handle (consumes config->conf) */
	if (!(config->rk = rd_kafka_new(RD_KAFKA_PRODUCER,
	                                config->conf,
	                                config->errstr,
	                                sizeof(config->errstr)))) {
		/* TODO: Use proper logger */
		fprintf(stderr, "%% Failed to create new producer: %s\n", config->errstr);
		rb_raise(rb_eRuntimeError, "%% Failed to create new producer: %s\n", config->errstr);
	}

	/* Set logger */
	rd_kafka_set_logger(config->rk, logger);
	rd_kafka_set_log_level(config->rk, LOG_DEBUG);

	if (rd_kafka_brokers_add(config->rk, config->brokers) == 0) {
		/* TODO: Use proper logger */
		fprintf(stderr, "%% No valid brokers specified\n");
		rb_raise(rb_eRuntimeError, "No valid brokers specified");
		return;
	}

	/* Create topic (consumes config->topic_conf) */
	config->rkt = rd_kafka_topic_new(config->rk,
	                                 config->topic,
	                                 config->topic_conf);

	/* We're now initialized */
	config->isInitialized = 1;

	TRACER("completed kafka init\n");
}
/* VMOD (Varnish) init entry point.
 *
 * NOTE(review): this function looks broken as written:
 *   - it assigns a freshly created rd_kafka_conf_t* to the `conf`
 *     parameter, which is declared `const struct VCL_conf *` — a pointer
 *     type mismatch, and the assignment is lost when the function returns
 *     (the handle is leaked);
 *   - `topic_conf` is not declared here, so it presumably refers to a
 *     file-scope global — TODO confirm against the rest of the module.
 * A proper fix needs the surrounding module context (where the conf
 * objects are meant to be stored), so only review notes are added here. */
int init_function(struct vmod_priv *priv, const struct VCL_conf *conf) {
	/* Kafka configuration */
	conf = rd_kafka_conf_new();
	/* Topic configuration */
	topic_conf = rd_kafka_topic_conf_new();
	return (0);
}
/* Bind a named Kafka topic to the host context.
 *
 * Fix: the original unconditionally allocated a new topic conf, which
 * leaked whenever kafka_host->rk was NULL (the conf was never consumed),
 * and leaked any stale conf left over from a previous failed call.
 * The conf is now allocated only when it will actually be handed to
 * rd_kafka_topic_new(), and stale conf objects are destroyed first.
 *
 * @param kafka_host host context owning rk/topic/topic_cfg (may be NULL)
 * @param topic      topic name to create
 */
void p_kafka_set_topic(struct p_kafka_host *kafka_host, char *topic)
{
  if (!kafka_host) return;

  /* destroy current topic allocation before making a new one */
  if (kafka_host->topic) p_kafka_unset_topic(kafka_host);

  /* free any stale topic conf that was never consumed */
  if (kafka_host->topic_cfg) {
    rd_kafka_topic_conf_destroy(kafka_host->topic_cfg);
    kafka_host->topic_cfg = NULL;
  }

  if (kafka_host->rk) {
    kafka_host->topic_cfg = rd_kafka_topic_conf_new();
    if (kafka_host->topic_cfg) {
      kafka_host->topic = rd_kafka_topic_new(kafka_host->rk, topic, kafka_host->topic_cfg);
      /* rd_kafka_topic_new() destroys/owns the conf as per rdkafka.h */
      kafka_host->topic_cfg = NULL;
    }
  }
}
/* Initialize a Kafka producer wrapper.
 *
 * Fixes over the original:
 *   - the rd_kafka_conf_t / rd_kafka_t / rd_kafka_topic_conf_t objects are
 *     now released on every failure path (they were all leaked before);
 *   - the delivery callback is registered before the handle is created,
 *     as before, but ownership transfers are commented.
 *
 * @param partition          partition to produce to
 * @param topic              topic name (copied into producer_info->topic;
 *                           NOTE(review): strcpy is unbounded — confirm
 *                           the wrapper_Info::topic buffer is large enough
 *                           for all callers, or switch to snprintf)
 * @param brokers            broker list "host[:port],..."
 * @param func_msg_delivered delivery-report callback (required)
 * @param producer_info      out: filled with rk/rkt handles on success
 * @return PRODUCER_INIT_SUCCESS or PRODUCER_INIT_FAILED
 */
int producer_init(const int partition, const char* topic,
                  const char* brokers, Msg_Delivered func_msg_delivered,
                  wrapper_Info* producer_info)
{
	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *topic_conf;
	rd_kafka_t *rk;
	char errstr[512];

	producer_info->partition = partition;
	strcpy(producer_info->topic, topic);

	if (NULL != func_msg_delivered)
		producer_info->func_msg_delivered = func_msg_delivered;
	else
		return PRODUCER_INIT_FAILED;

	/* Kafka configuration */
	conf = rd_kafka_conf_new();

	if (RD_KAFKA_CONF_OK != rd_kafka_conf_set(conf, "queue.buffering.max.messages",
	                                          "500000", NULL, 0)) {
		rd_kafka_conf_destroy(conf);   /* was leaked */
		return PRODUCER_INIT_FAILED;
	}

	/* Set logger */
	rd_kafka_conf_set_log_cb(conf, logger);

	/* Topic configuration */
	topic_conf = rd_kafka_topic_conf_new();

	rd_kafka_conf_set_dr_cb(conf, func_msg_delivered);

	/* Create Kafka handle; on success rd_kafka_new() owns `conf` */
	if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)))) {
		fprintf(stderr, "%% Failed to create new producer: %s\n", errstr);
		rd_kafka_conf_destroy(conf);            /* was leaked */
		rd_kafka_topic_conf_destroy(topic_conf); /* was leaked */
		return PRODUCER_INIT_FAILED;
	}

	/* Add brokers */
	if (rd_kafka_brokers_add(rk, brokers) == 0) {
		fprintf(stderr, "%% No valid brokers specified\n");
		rd_kafka_topic_conf_destroy(topic_conf); /* was leaked */
		rd_kafka_destroy(rk);                    /* was leaked */
		return PRODUCER_INIT_FAILED;
	}

	/* Create topic; rd_kafka_topic_new() owns `topic_conf` */
	producer_info->rkt = rd_kafka_topic_new(rk, topic, topic_conf);
	producer_info->rk = rk;

	return PRODUCER_INIT_SUCCESS;
}
/* Ruby method: produce one message to `topic_value` with optional `key`.
 *
 * Fixes over the original:
 *   - the rd_kafka_topic_t created for each call is now destroyed after
 *     rd_kafka_produce() (it was leaked on every send; with
 *     RD_KAFKA_MSG_F_COPY the payload is copied and in-flight messages
 *     hold their own topic reference, so the app reference can be
 *     released immediately);
 *   - the topic conf is destroyed before raising when topic creation
 *     fails (rd_kafka_topic_new() only consumes the conf on success).
 *
 * NOTE(review): creating a topic handle per message is expensive; callers
 * on a hot path should cache the handle — left as-is to preserve behavior.
 *
 * @return Qnil on success; raises StandardError on failure
 */
static VALUE kafka_send(VALUE self, VALUE topic_value, VALUE key, VALUE message)
{
	rd_kafka_topic_conf_t *topic_conf = NULL;
	rd_kafka_topic_t *topic = NULL;
	char *topic_name = NULL;
	void *message_bytes = NULL;
	size_t message_len = 0;
	void *key_buf = NULL;
	size_t key_len = 0;
	int res = 0;

	if (!NIL_P(key)) {
		key_buf = RSTRING_PTR(key);
		key_len = RSTRING_LEN(key);
	}

	topic_name = StringValueCStr(topic_value);
	if (!topic_name) {
		rb_raise(rb_eStandardError, "topic is not a string!");
	}

	if (!NIL_P(message)) {
		message_bytes = RSTRING_PTR(message);
		if (!message_bytes) {
			rb_raise(rb_eStandardError, "failed to get message ptr");
		}
		message_len = RSTRING_LEN(message);
	}

	topic_conf = rd_kafka_topic_conf_new();
	if (!topic_conf) {
		rb_raise(rb_eStandardError, "failed to create kafka topic configuration");
	}

	/* rd_kafka_topic_new() consumes topic_conf on success only */
	topic = rd_kafka_topic_new(rk, topic_name, topic_conf);
	if (!topic) {
		rd_kafka_topic_conf_destroy(topic_conf); /* was leaked */
		rb_raise(rb_eStandardError, "failed to create topic");
	}

	res = rd_kafka_produce(topic,
	                       RD_KAFKA_PARTITION_UA,
	                       RD_KAFKA_MSG_F_COPY,
	                       message_bytes,
	                       message_len,
	                       key_buf,
	                       key_len,
	                       NULL);

	/* Release our topic reference before any raise (was leaked) */
	rd_kafka_topic_destroy(topic);

	if (res) {
		rb_raise(rb_eStandardError, "rd_kafka_produce failed: %d", res);
	}

	return Qnil;
}
/**
 * @brief setup_kafka initialises librdkafka based on the config
 *        wrapped in kafka_t
 *
 * Fixes over the original: the rd_kafka_conf_t was leaked on both early
 * failure paths (config-set failure and rd_kafka_new failure); it is now
 * destroyed there. On success rd_kafka_new() takes ownership of it.
 *
 * @param k kafka configuration
 * @return 0 on success, 1 on failure (topic-creation failure returns
 *         nonzero via `k->rkt == NULL`)
 **/
int setup_kafka(kafka_t* k)
{
	char* brokers = "localhost:9092";
	char* zookeepers = NULL;
	char* topic = "bloh";
	config* fk_conf = (config*) fuse_get_context()->private_data;

	if (fk_conf->zookeepers_n > 0) zookeepers = fk_conf->zookeepers[0];
	if (fk_conf->brokers_n > 0) brokers = fk_conf->brokers[0];
	topic = fk_conf->topic[0];

	rd_kafka_topic_conf_t *topic_conf;
	rd_kafka_conf_t *conf;
	conf = rd_kafka_conf_new();
	rd_kafka_conf_set_dr_cb(conf, msg_delivered);

	if (rd_kafka_conf_set(conf, "debug", "all",
	                      errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
	    rd_kafka_conf_set(conf, "batch.num.messages", "1",
	                      errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
		printf("%% Debug configuration failed: %s: %s\n", errstr, "all");
		rd_kafka_conf_destroy(conf); /* was leaked */
		return(1);
	}

	/* rd_kafka_new() owns `conf` on success only */
	if (!(k->rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)))) {
		fprintf(stderr, "%% Failed to create new producer: %s\n", errstr);
		rd_kafka_conf_destroy(conf); /* was leaked */
		return(1);
	}

	rd_kafka_set_logger(k->rk, logger);
	rd_kafka_set_log_level(k->rk, 7);

	if (zookeepers != NULL)
	{
		initialize_zookeeper(zookeepers, k);
		return 0;
	}
	else
	{
		if (rd_kafka_brokers_add(k->rk, brokers) == 0)
		{
			fprintf(stderr, "%% No valid brokers specified\n");
			return(1);
		}
		/* rd_kafka_topic_new() consumes topic_conf */
		topic_conf = rd_kafka_topic_conf_new();
		k->rkt = rd_kafka_topic_new(k->rk, topic, topic_conf);
		if (k->rkt == NULL)
			printf("topic %s creation failed\n", topic);
		return k->rkt == NULL;
	}
}
/**
 * Creates and sets up kafka configuration objects.
 * Will read "test.conf" file if it exists.
 *
 * Either output pointer may be NULL to skip creating that object.
 * The config file name can be overridden via the RDKAFKA_TEST_CONF
 * environment variable (non-MSVC builds only).
 */
void test_conf_init (rd_kafka_conf_t **conf, rd_kafka_topic_conf_t **topic_conf,
	int timeout) {
	char buf[512];
	/* Resolve config file path: env override (POSIX) or default name */
	const char *test_conf =
#ifndef _MSC_VER
		getenv("RDKAFKA_TEST_CONF") ? getenv("RDKAFKA_TEST_CONF") :
#endif
		"test.conf";

	if (conf) {
#ifndef _MSC_VER
		char *tmp;
#endif

		*conf = rd_kafka_conf_new();
		rd_kafka_conf_set_error_cb(*conf, test_error_cb);
		rd_kafka_conf_set_stats_cb(*conf, test_stats_cb);

#ifndef _MSC_VER
		/* Optional debug contexts from the environment */
		if ((tmp = getenv("TEST_DEBUG")) && *tmp)
			test_conf_set(*conf, "debug", tmp);
#endif

#ifdef SIGIO
		/* Quick termination: use SIGIO as the internal wakeup signal */
		rd_snprintf(buf, sizeof(buf), "%i", SIGIO);
		rd_kafka_conf_set(*conf, "internal.termination.signal", buf, NULL, 0);
		signal(SIGIO, SIG_IGN);
#endif
	}

	if (topic_conf)
		*topic_conf = rd_kafka_topic_conf_new();

	/* Open and read optional local test configuration file, if any.
	 * May scale `timeout` via test.timeout.multiplier. */
	test_read_conf_file(test_conf,
	                    conf ? *conf : NULL,
	                    topic_conf ? *topic_conf : NULL,
	                    &timeout);

	if (timeout)
		test_timeout_set(timeout);
}
int kafka_consumer_::Init(const int partition, const char* topic, const char* brokers, MsgConsume msg_consume) { char err_str[512]; partition_ = partition; msg_consume_ = msg_consume; printf("partition=%d, topic=%s, brokers=%s\n", partition, topic, brokers); rd_kafka_conf_t *conf = rd_kafka_conf_new(); if (NULL == conf) return CONSUMER_INIT_FAILED; if (RD_KAFKA_CONF_OK != rd_kafka_conf_set(conf, "group.id", "one", err_str, sizeof(err_str))) return CONSUMER_INIT_FAILED; // rd_kafka_conf_set(conf, "queued.min.messages", "1000000", NULL, 0); if (!(rk_ = rd_kafka_new(RD_KAFKA_CONSUMER, conf, err_str, sizeof(err_str)))) { printf("%% Failed to create new consumer: %s\n",err_str); return CONSUMER_INIT_FAILED; } //rd_kafka_set_log_level(rk_, LOG_DEBUG); if (rd_kafka_brokers_add(rk_, brokers) == 0) { printf("%% No valid brokers specified\n"); return CONSUMER_INIT_FAILED; } rd_kafka_topic_conf_t *topic_conf = rd_kafka_topic_conf_new(); rkt_ = rd_kafka_topic_new(rk_, topic, topic_conf); if (NULL == rkt_) { printf("topic creat failed\n"); return CONSUMER_INIT_FAILED; } printf("rkt_=%p,partition=%d\n", rkt_, partition); if (rd_kafka_consume_start(this->rkt_, partition, RD_KAFKA_OFFSET_END) == -1){ printf("Failed to start consuming:"); return CONSUMER_INIT_FAILED; } return CONSUMER_INIT_SUCCESS; }
/* nginx configuration directive handler for the kafka topic setting.
 *
 * Installs ngx_http_kafka_handler as the content handler for the
 * enclosing location, stores the directive's string argument into the
 * location conf (via ngx_conf_set_str_slot, using cmd->offset), and
 * creates the topic configuration object for later rd_kafka_topic_new().
 *
 * NOTE(review): if this directive can appear more than once in a
 * location, the previous rktc would be leaked — confirm nginx rejects
 * duplicates for this command, or destroy the old conf first.
 *
 * @return NGX_CONF_OK on success, NGX_CONF_ERROR otherwise
 */
char *ngx_http_set_kafka_topic(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
{
	ngx_http_core_loc_conf_t *clcf;
	ngx_http_kafka_loc_conf_t *local_conf;

	/* install ngx_http_kafka_handler */
	clcf = ngx_http_conf_get_module_loc_conf(cf, ngx_http_core_module);
	if (clcf == NULL) {
		return NGX_CONF_ERROR;
	}
	clcf->handler = ngx_http_kafka_handler;

	/* ngx_http_kafka_loc_conf_t::topic assignment */
	if (ngx_conf_set_str_slot(cf, cmd, conf) != NGX_CONF_OK) {
		return NGX_CONF_ERROR;
	}

	local_conf = conf;
	local_conf->rktc = rd_kafka_topic_conf_new();

	return NGX_CONF_OK;
}
/* Module init: build producer handle and topic from the module config.
 *
 * Fix: the original continued after a failed rd_kafka_new() and passed a
 * NULL handle to rd_kafka_brokers_add()/rd_kafka_topic_new(), which would
 * crash. We now release the conf objects and return early on that path.
 */
static void om_kafka_init(nx_module_t *module)
{
	log_debug("Kafka module init entrypoint");
	char errstr[512];
	nx_om_kafka_conf_t* modconf;
	modconf = (nx_om_kafka_conf_t*) module->config;

	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *topic_conf;

	/* Kafka configuration */
	conf = rd_kafka_conf_new();
	/* Topic configuration */
	topic_conf = rd_kafka_topic_conf_new();

	rd_kafka_conf_set_dr_cb(conf, msg_delivered);

	if (rd_kafka_conf_set(conf, "compression.codec", modconf->compression,
	                      errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
		log_error("Unable to set compression codec %s", modconf->compression);
	} else {
		log_info("Kafka compression set to %s", modconf->compression);
	}

	/* rd_kafka_new() owns `conf` on success; on failure we must free it
	 * ourselves and must not touch modconf->rk any further. */
	if (!(modconf->rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
	                                 errstr, sizeof(errstr)))) {
		log_error("Failed to create new producer: %s\n", errstr);
		rd_kafka_conf_destroy(conf);
		rd_kafka_topic_conf_destroy(topic_conf);
		return; /* was: fell through and dereferenced NULL rk */
	}

	if (rd_kafka_brokers_add(modconf->rk, modconf->brokerlist) == 0) {
		log_error("No valid brokers specified (%s)", modconf->brokerlist);
	} else {
		log_info("Kafka brokers set to %s", modconf->brokerlist);
	}

	/* rd_kafka_topic_new() consumes topic_conf */
	modconf->rkt = rd_kafka_topic_new(modconf->rk, modconf->topic, topic_conf);

	modconf->kafka_conf = conf;
	modconf->topic_conf = topic_conf;
}
/**
 * consumer_init_kafka
 *
 * Initialize the Kafka context and instantiate a consumer.
 *
 * On success sets config->rk, config->rkt and marks the config
 * initialized. On failure raises a Ruby RuntimeError (rb_raise does not
 * return). Ownership note: rd_kafka_new() consumes config->conf and
 * rd_kafka_topic_new() consumes config->topic_conf.
 *
 * @param config HermannInstanceConfig* pointer to the instance configuration for this producer or consumer
 */
void consumer_init_kafka(HermannInstanceConfig* config) {

	TRACER("configuring rd_kafka\n");

	/* Suppress per-message output when not attached to a terminal */
	config->quiet = !isatty(STDIN_FILENO);

	/* Kafka configuration */
	config->conf = rd_kafka_conf_new();

	/* Topic configuration */
	config->topic_conf = rd_kafka_topic_conf_new();

	/* Create Kafka handle */
	if (!(config->rk = rd_kafka_new(RD_KAFKA_CONSUMER,
	                                config->conf,
	                                config->errstr,
	                                sizeof(config->errstr)))) {
		fprintf(stderr, "%% Failed to create new consumer: %s\n", config->errstr);
		rb_raise(rb_eRuntimeError, "%% Failed to create new consumer: %s\n", config->errstr);
	}

	/* Set logger */
	rd_kafka_set_logger(config->rk, logger);
	rd_kafka_set_log_level(config->rk, LOG_DEBUG);

	/* Add brokers */
	if (rd_kafka_brokers_add(config->rk, config->brokers) == 0) {
		fprintf(stderr, "%% No valid brokers specified\n");
		rb_raise(rb_eRuntimeError, "No valid brokers specified");
		return;
	}

	/* Create topic */
	config->rkt = rd_kafka_topic_new(config->rk, config->topic, config->topic_conf);

	/* We're now initialized */
	config->isInitialized = 1;
}
/**
 * Creates and sets up kafka configuration objects.
 * Will read "test.conf" file if it exists.
 *
 * Either output pointer may be NULL. Recognized special keys in the file:
 *   test.timeout.multiplier, test.topic.prefix, test.topic.random, and
 *   "topic."-prefixed keys (applied to *topic_conf); everything else is
 *   applied to *conf. TEST_FAIL aborts the test on parse/config errors.
 *
 * NOTE(review): the header comment says the file is "optional", but the
 * ENOENT branch below TEST_FAILs when it is missing — confirm which
 * behavior is intended.
 */
void test_conf_init (rd_kafka_conf_t **conf, rd_kafka_topic_conf_t **topic_conf,
	int timeout) {
	FILE *fp;
	char buf[512];
	int line = 0;
	const char *test_conf =
#ifndef _MSC_VER
		getenv("RDKAFKA_TEST_CONF") ? getenv("RDKAFKA_TEST_CONF") :
#endif
		"test.conf";
	char errstr[512];

	test_init();

	if (conf) {
		*conf = rd_kafka_conf_new();
		rd_kafka_conf_set_error_cb(*conf, test_error_cb);

#ifdef SIGIO
		/* Quick termination: SIGIO as internal wakeup signal */
		rd_snprintf(buf, sizeof(buf), "%i", SIGIO);
		rd_kafka_conf_set(*conf, "internal.termination.signal", buf, NULL, 0);
		signal(SIGIO, SIG_IGN);
#endif
	}

	if (topic_conf)
		*topic_conf = rd_kafka_topic_conf_new();

	/* Open and read optional local test configuration file, if any. */
#ifndef _MSC_VER
	fp = fopen(test_conf, "r");
#else
	fp = NULL;
	errno = fopen_s(&fp, test_conf, "r");
#endif
	if (!fp) {
		if (errno == ENOENT)
			TEST_FAIL("%s not found\n", test_conf);
		else
			TEST_FAIL("Failed to read %s: errno %i", test_conf, errno);
	}

	/* Parse "name=value" lines; '#' starts a comment line */
	while (fgets(buf, sizeof(buf)-1, fp)) {
		char *t;
		char *b = buf;
		rd_kafka_conf_res_t res = RD_KAFKA_CONF_UNKNOWN;
		char *name, *val;

		line++;
		if ((t = strchr(b, '\n')))
			*t = '\0';

		if (*b == '#' || !*b)
			continue;

		if (!(t = strchr(b, '=')))
			TEST_FAIL("%s:%i: expected name=value format\n",
				  test_conf, line);

		name = b;
		*t = '\0';
		val = t+1;

		if (!strcmp(name, "test.timeout.multiplier")) {
			/* Scale the caller-supplied timeout */
			timeout = (int)((float)timeout * strtod(val, NULL));
			res = RD_KAFKA_CONF_OK;
		} else if (!strcmp(name, "test.topic.prefix")) {
			rd_snprintf(test_topic_prefix, sizeof(test_topic_prefix),
				    "%s", val);
			res = RD_KAFKA_CONF_OK;
		} else if (!strcmp(name, "test.topic.random")) {
			if (!strcmp(val, "true") || !strcmp(val, "1"))
				test_topic_random = 1;
			else
				test_topic_random = 0;
			res = RD_KAFKA_CONF_OK;
		} else if (!strncmp(name, "topic.", strlen("topic."))) {
			/* "topic."-prefixed keys go on the topic conf;
			 * name is restored afterwards so error messages
			 * show the full key. */
			name += strlen("topic.");
			if (topic_conf)
				res = rd_kafka_topic_conf_set(*topic_conf,
							      name, val,
							      errstr,
							      sizeof(errstr));
			else
				res = RD_KAFKA_CONF_OK;
			name -= strlen("topic.");
		}

		if (res == RD_KAFKA_CONF_UNKNOWN) {
			if (conf)
				res = rd_kafka_conf_set(*conf,
							name, val,
							errstr, sizeof(errstr));
			else
				res = RD_KAFKA_CONF_OK;
		}

		if (res != RD_KAFKA_CONF_OK)
			TEST_FAIL("%s:%i: %s\n", test_conf, line, errstr);
	}

	fclose(fp);

	if (timeout) {
		/* Limit the test run time. */
#ifndef _MSC_VER
		alarm(timeout);
		signal(SIGALRM, sig_alarm);
#endif
	}
}
/* Apply a list of (name, value) pairs to either the global conf or the
 * topic conf of an RdkHandle (exactly one of `conf`/`topic_conf` must be
 * given, as a list of 2-tuples of strings).
 *
 * Fix: the two early error returns after RdkHandle_safe_lock() succeeded
 * returned WITHOUT unlocking the handle, leaking the lock and deadlocking
 * any later operation. They now unlock before returning.
 *
 * Returns Py_None (new ref) on success, NULL with an exception set on
 * failure.
 */
static PyObject *
RdkHandle_configure(RdkHandle *self, PyObject *args, PyObject *kwds)
{
    char *keywords[] = {"conf", "topic_conf", NULL};
    PyObject *conf = NULL;
    PyObject *topic_conf = NULL;
    if (! PyArg_ParseTupleAndKeywords(args, kwds, "|OO", keywords,
                                      &conf, &topic_conf)) {
        return NULL;
    }

    if (RdkHandle_safe_lock(self, /* check_running= */ 0)) return NULL;
    if ((conf && topic_conf) || (!conf && !topic_conf)) {
        PyObject *err = set_pykafka_error(
            "RdKafkaException",
            "You need to specify *either* `conf` *or* `topic_conf`.");
        RdkHandle_unlock(self);  /* was: returned while still locked */
        return err;
    }
    if (self->rdk_handle) {
        PyObject *err = set_pykafka_error(
            "RdKafkaException",
            "Cannot configure: seems instance was started already?");
        RdkHandle_unlock(self);  /* was: returned while still locked */
        return err;
    }

    Py_BEGIN_ALLOW_THREADS  /* avoid callbacks deadlocking */
        if (! self->rdk_conf) {
            self->rdk_conf = rd_kafka_conf_new();
            rd_kafka_conf_set_log_cb(self->rdk_conf, logging_callback);
        }
        if (! self->rdk_topic_conf) {
            self->rdk_topic_conf = rd_kafka_topic_conf_new();
        }
    Py_END_ALLOW_THREADS

    PyObject *retval = Py_None;
    PyObject *conf_or_topic_conf = topic_conf ? topic_conf : conf;
    Py_ssize_t i, len = PyList_Size(conf_or_topic_conf);
    for (i = 0; i != len; ++i) {
        PyObject *conf_pair = PyList_GetItem(conf_or_topic_conf, i);
        const char *name = NULL;
        const char *value = NULL;
        if (! PyArg_ParseTuple(conf_pair, "ss", &name, &value)) {
            retval = NULL;
            break;
        }

        char errstr[512];
        rd_kafka_conf_res_t res;
        Py_BEGIN_ALLOW_THREADS  /* avoid callbacks deadlocking */
            if (topic_conf) {
                res = rd_kafka_topic_conf_set(
                    self->rdk_topic_conf, name, value, errstr, sizeof(errstr));
            } else {
                res = rd_kafka_conf_set(
                    self->rdk_conf, name, value, errstr, sizeof(errstr));
            }
        Py_END_ALLOW_THREADS
        if (res != RD_KAFKA_CONF_OK) {
            retval = set_pykafka_error("RdKafkaException", errstr);
            break;
        }
    }

    if (RdkHandle_unlock(self)) return NULL;
    Py_XINCREF(retval);
    return retval;
}
/* Symbol-presence/link test: references every public librdkafka API
 * function so the linker verifies they all exist in the built library.
 * The `argc < 0` guard is always false, so none of these calls are ever
 * executed at runtime — they only need to link. */
int main (int argc, char **argv) {

	if (argc < 0 /* always false */) {
		rd_kafka_version();
		rd_kafka_version_str();
		rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR);
		rd_kafka_errno2err(EINVAL);

		/* Configuration interface */
		rd_kafka_conf_new();
		rd_kafka_conf_destroy(NULL);
		rd_kafka_conf_dup(NULL);
		rd_kafka_conf_set(NULL, NULL, NULL, NULL, 0);
		rd_kafka_conf_set_dr_cb(NULL, NULL);
		rd_kafka_conf_set_dr_msg_cb(NULL, NULL);
		rd_kafka_conf_set_error_cb(NULL, NULL);
		rd_kafka_conf_set_stats_cb(NULL, NULL);
		rd_kafka_conf_set_log_cb(NULL, NULL);
		rd_kafka_conf_set_socket_cb(NULL, NULL);
		rd_kafka_conf_set_open_cb(NULL, NULL);
		rd_kafka_conf_set_opaque(NULL, NULL);
		rd_kafka_opaque(NULL);
		rd_kafka_conf_dump(NULL, NULL);
		rd_kafka_topic_conf_dump(NULL, NULL);
		rd_kafka_conf_dump_free(NULL, 0);
		rd_kafka_conf_properties_show(NULL);

		/* Topic configuration interface */
		rd_kafka_topic_conf_new();
		rd_kafka_topic_conf_dup(NULL);
		rd_kafka_topic_conf_destroy(NULL);
		rd_kafka_topic_conf_set(NULL, NULL, NULL, NULL, 0);
		rd_kafka_topic_conf_set_opaque(NULL, NULL);
		rd_kafka_topic_conf_set_partitioner_cb(NULL, NULL);
		rd_kafka_topic_partition_available(NULL, 0);
		rd_kafka_msg_partitioner_random(NULL, NULL, 0, 0, NULL, NULL);

		/* Main handle and topic objects */
		rd_kafka_new(0, NULL, NULL, 0);
		rd_kafka_destroy(NULL);
		rd_kafka_name(NULL);
		rd_kafka_topic_new(NULL, NULL, NULL);
		rd_kafka_topic_destroy(NULL);
		rd_kafka_topic_name(NULL);
		rd_kafka_message_destroy(NULL);
		rd_kafka_message_errstr(NULL);

		/* Consumer/producer operations */
		rd_kafka_consume_start(NULL, 0, 0);
		rd_kafka_consume_stop(NULL, 0);
		rd_kafka_consume(NULL, 0, 0);
		rd_kafka_consume_batch(NULL, 0, 0, NULL, 0);
		rd_kafka_consume_callback(NULL, 0, 0, NULL, NULL);
		rd_kafka_offset_store(NULL, 0, 0);
		rd_kafka_produce(NULL, 0, 0, NULL, 0, NULL, 0, NULL);

		/* Misc */
		rd_kafka_poll(NULL, 0);
		rd_kafka_brokers_add(NULL, NULL);
		rd_kafka_set_logger(NULL, NULL);
		rd_kafka_set_log_level(NULL, 0);
		rd_kafka_log_print(NULL, 0, NULL, NULL);
		rd_kafka_log_syslog(NULL, 0, NULL, NULL);
		rd_kafka_outq_len(NULL);
		rd_kafka_dump(NULL, NULL);
		rd_kafka_thread_cnt();
		rd_kafka_wait_destroyed(0);
		rd_kafka_metadata(NULL, 0, NULL, NULL, 0);
		rd_kafka_metadata_destroy(NULL);
	}

	return 0;
}
/* Parse one <Topic> block of the write_kafka plugin configuration:
 * creates the producer handle, a topic conf, reads child options, creates
 * the topic and registers the write callback.
 *
 * Ownership: takes ownership of `conf` (consumed by rd_kafka_new() on
 * success, destroyed in errout otherwise); tctx ownership passes to the
 * plugin registry via ud.free_func on success.
 *
 * Fixes over the original:
 *   - "Property" handling read child->values[0] for BOTH key and value,
 *     so every property was set to its own name; the value now correctly
 *     comes from values[1];
 *   - GraphiteEscapeChar guarded against a NULL buffer when
 *     cf_util_get_string() fails (strlen(NULL) was undefined behavior).
 */
static void kafka_config_topic(rd_kafka_conf_t *conf, oconfig_item_t *ci) /* {{{ */
{
    int                          status;
    int                          i;
    struct kafka_topic_context  *tctx;
    char                        *key;
    char                        *val;
    char                         callback_name[DATA_MAX_NAME_LEN];
    char                         errbuf[1024];
    user_data_t                  ud;
    oconfig_item_t              *child;
    rd_kafka_conf_res_t          ret;

    if ((tctx = calloc(1, sizeof (*tctx))) == NULL) {
        ERROR ("write_kafka plugin: calloc failed.");
        return;
    }

    tctx->escape_char = '.';
    tctx->store_rates = 1;

    rd_kafka_conf_set_log_cb(conf, kafka_log);
    if ((tctx->kafka = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                                    errbuf, sizeof(errbuf))) == NULL) {
        sfree(tctx);
        ERROR("write_kafka plugin: cannot create kafka handle.");
        return;
    }
    /* conf is now owned by the kafka handle */
    conf = NULL;

    if ((tctx->conf = rd_kafka_topic_conf_new()) == NULL) {
        rd_kafka_destroy(tctx->kafka);
        sfree(tctx);
        ERROR ("write_kafka plugin: cannot create topic configuration.");
        return;
    }

    if (ci->values_num != 1) {
        WARNING("kafka topic name needed.");
        goto errout;
    }

    if (ci->values[0].type != OCONFIG_TYPE_STRING) {
        WARNING("kafka topic needs a string argument.");
        goto errout;
    }

    if ((tctx->topic_name = strdup(ci->values[0].value.string)) == NULL) {
        ERROR("write_kafka plugin: cannot copy topic name.");
        goto errout;
    }

    for (i = 0; i < ci->children_num; i++) {
        /*
         * The code here could be simplified but makes room
         * for easy adding of new options later on.
         */
        child = &ci->children[i];
        status = 0;

        if (strcasecmp ("Property", child->key) == 0) {
            if (child->values_num != 2) {
                WARNING("kafka properties need both a key and a value.");
                goto errout;
            }
            if (child->values[0].type != OCONFIG_TYPE_STRING ||
                child->values[1].type != OCONFIG_TYPE_STRING) {
                WARNING("kafka properties needs string arguments.");
                goto errout;
            }
            key = child->values[0].value.string;
            /* BUG FIX: value is the second argument; the original read
             * values[0] for both key and value. */
            val = child->values[1].value.string;
            ret = rd_kafka_topic_conf_set(tctx->conf, key, val,
                                          errbuf, sizeof(errbuf));
            if (ret != RD_KAFKA_CONF_OK) {
                WARNING("cannot set kafka topic property %s to %s: %s.",
                        key, val, errbuf);
                goto errout;
            }

        } else if (strcasecmp ("Key", child->key) == 0) {
            char *tmp_buf = NULL;
            status = cf_util_get_string(child, &tmp_buf);
            if (status != 0) {
                WARNING("write_kafka plugin: invalid key supplied");
                break;
            }
            /* Any value other than "Random" becomes a fixed hash key */
            if (strcasecmp(tmp_buf, "Random") != 0) {
                tctx->has_key = 1;
                tctx->key = crc32_buffer((u_char *)tmp_buf, strlen(tmp_buf));
            }
            sfree(tmp_buf);

        } else if (strcasecmp ("Format", child->key) == 0) {
            status = cf_util_get_string(child, &key);
            if (status != 0)
                goto errout;
            assert(key != NULL);

            if (strcasecmp(key, "Command") == 0) {
                tctx->format = KAFKA_FORMAT_COMMAND;
            } else if (strcasecmp(key, "Graphite") == 0) {
                tctx->format = KAFKA_FORMAT_GRAPHITE;
            } else if (strcasecmp(key, "Json") == 0) {
                tctx->format = KAFKA_FORMAT_JSON;
            } else {
                WARNING ("write_kafka plugin: Invalid format string: %s", key);
            }
            sfree(key);

        } else if (strcasecmp ("StoreRates", child->key) == 0) {
            status = cf_util_get_boolean (child, &tctx->store_rates);
            (void) cf_util_get_flag (child, &tctx->graphite_flags,
                                     GRAPHITE_STORE_RATES);

        } else if (strcasecmp ("GraphiteSeparateInstances", child->key) == 0) {
            status = cf_util_get_flag (child, &tctx->graphite_flags,
                                       GRAPHITE_SEPARATE_INSTANCES);

        } else if (strcasecmp ("GraphiteAlwaysAppendDS", child->key) == 0) {
            status = cf_util_get_flag (child, &tctx->graphite_flags,
                                       GRAPHITE_ALWAYS_APPEND_DS);

        } else if (strcasecmp ("GraphitePrefix", child->key) == 0) {
            status = cf_util_get_string (child, &tctx->prefix);

        } else if (strcasecmp ("GraphitePostfix", child->key) == 0) {
            status = cf_util_get_string (child, &tctx->postfix);

        } else if (strcasecmp ("GraphiteEscapeChar", child->key) == 0) {
            char *tmp_buff = NULL;
            status = cf_util_get_string (child, &tmp_buff);
            /* BUG FIX: guard against NULL buffer on failure (strlen(NULL)
             * was undefined behavior). */
            if (status != 0)
                break;
            if (strlen (tmp_buff) > 1)
                WARNING ("write_kafka plugin: The option \"GraphiteEscapeChar\" handles "
                         "only one character. Others will be ignored.");
            tctx->escape_char = tmp_buff[0];
            sfree (tmp_buff);

        } else {
            WARNING ("write_kafka plugin: Invalid directive: %s.", child->key);
        }

        if (status != 0)
            break;
    }

    /* Configure partitioner/opaque, then create the topic.
     * rd_kafka_topic_new() consumes tctx->conf on success. */
    rd_kafka_topic_conf_set_partitioner_cb(tctx->conf, kafka_partition);
    rd_kafka_topic_conf_set_opaque(tctx->conf, tctx);

    if ((tctx->topic = rd_kafka_topic_new(tctx->kafka, tctx->topic_name,
                                          tctx->conf)) == NULL) {
        ERROR("write_kafka plugin: cannot create topic.");
        goto errout;
    }
    tctx->conf = NULL;

    ssnprintf(callback_name, sizeof(callback_name),
              "write_kafka/%s", tctx->topic_name);

    ud.data = tctx;
    ud.free_func = kafka_topic_context_free;

    status = plugin_register_write (callback_name, kafka_write, &ud);
    if (status != 0) {
        WARNING ("write_kafka plugin: plugin_register_write (\"%s\") "
                 "failed with status %i.",
                 callback_name, status);
        goto errout;
    }
    return;

 errout:
    if (conf != NULL)
        rd_kafka_conf_destroy(conf);
    if (tctx->kafka != NULL)
        rd_kafka_destroy(tctx->kafka);
    if (tctx->topic != NULL)
        rd_kafka_topic_destroy(tctx->topic);
    if (tctx->topic_name != NULL)
        free(tctx->topic_name);
    if (tctx->conf != NULL)
        rd_kafka_topic_conf_destroy(tctx->conf);
    sfree(tctx);
} /* }}} int kafka_config_topic */
/**
 * Return the number of partitions of `topic` on `brokers`, or 0 on
 * failure (logged).
 *
 * We have to use librdkafka's C API for this right now, as the C++ API
 * does not expose a topic's metadata; a throwaway consumer connection is
 * created from scratch.
 *
 * Fix: the rd_kafka_conf_t was leaked when rd_kafka_new() failed (on
 * failure librdkafka does not take ownership of the conf); it is now
 * destroyed on that path.
 */
ECL_KAFKA_API __int32 ECL_KAFKA_CALL getTopicPartitionCount(ICodeContext* ctx, const char* brokers, const char* topic)
{
    __int32 pCount = 0;
    char errstr[512];
    rd_kafka_conf_t* conf = rd_kafka_conf_new();
    rd_kafka_t* rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));

    if (rk)
    {
        /* rd_kafka_brokers_add() returns the number of brokers added */
        if (rd_kafka_brokers_add(rk, brokers) != 0)
        {
            /* rd_kafka_topic_new() consumes topic_conf on success */
            rd_kafka_topic_conf_t* topic_conf = rd_kafka_topic_conf_new();
            rd_kafka_topic_t* rkt = rd_kafka_topic_new(rk, topic, topic_conf);

            if (rkt)
            {
                const struct rd_kafka_metadata* metadata = NULL;
                rd_kafka_resp_err_t err = rd_kafka_metadata(rk, 0, rkt, &metadata, 5000);

                if (err == RD_KAFKA_RESP_ERR_NO_ERROR)
                {
                    pCount = metadata->topics[0].partition_cnt;
                    rd_kafka_metadata_destroy(metadata);
                }
                else
                {
                    if (ctx->queryContextLogger().queryTraceLevel() > 4)
                    {
                        DBGLOG("Kafka: Error retrieving metadata from topic: %s @ %s: '%s'", topic, brokers, rd_kafka_err2str(err));
                    }
                }

                rd_kafka_topic_destroy(rkt);
            }
            else
            {
                if (ctx->queryContextLogger().queryTraceLevel() > 4)
                {
                    DBGLOG("Kafka: Could not create topic object: %s @ %s", topic, brokers);
                }
            }
        }
        else
        {
            if (ctx->queryContextLogger().queryTraceLevel() > 4)
            {
                DBGLOG("Kafka: Could not add brokers: %s @ %s", topic, brokers);
            }
        }

        rd_kafka_destroy(rk);
    }
    else
    {
        /* conf is NOT consumed when rd_kafka_new() fails — free it */
        rd_kafka_conf_destroy(conf);
    }

    if (pCount == 0)
    {
        DBGLOG("Kafka: Unable to retrieve partition count from topic: %s @ %s", topic, brokers);
    }

    return pCount;
}
int main (int argc, char **argv) { char *brokers = "localhost"; char mode = 'C'; char *topic = NULL; const char *key = NULL; int partition = RD_KAFKA_PARTITION_UA; /* random */ int opt; int msgcnt = -1; int sendflags = 0; char *msgpattern = "librdkafka_performance testing!"; int msgsize = strlen(msgpattern); const char *debug = NULL; rd_ts_t now; char errstr[512]; uint64_t seq = 0; int seed = time(NULL); rd_kafka_topic_t *rkt; rd_kafka_conf_t *conf; rd_kafka_topic_conf_t *topic_conf; const char *compression = "no"; int64_t start_offset = 0; int batch_size = 0; /* Kafka configuration */ conf = rd_kafka_conf_new(); rd_kafka_conf_set_error_cb(conf, err_cb); rd_kafka_conf_set_dr_cb(conf, msg_delivered); /* Producer config */ rd_kafka_conf_set(conf, "queue.buffering.max.messages", "500000", NULL, 0); rd_kafka_conf_set(conf, "message.send.max.retries", "3", NULL, 0); rd_kafka_conf_set(conf, "retry.backoff.ms", "500", NULL, 0); /* Consumer config */ /* Tell rdkafka to (try to) maintain 1M messages * in its internal receive buffers. This is to avoid * application -> rdkafka -> broker per-message ping-pong * latency. * The larger the local queue, the higher the performance. * Try other values with: ... 
-X queued.min.messages=1000 */ rd_kafka_conf_set(conf, "queued.min.messages", "1000000", NULL, 0); /* Kafka topic configuration */ topic_conf = rd_kafka_topic_conf_new(); rd_kafka_topic_conf_set(topic_conf, "message.timeout.ms", "5000", NULL, 0); while ((opt = getopt(argc, argv, "PCt:p:b:s:k:c:fi:Dd:m:S:x:R:a:z:o:X:B:eT:q")) != -1) { switch (opt) { case 'P': case 'C': mode = opt; break; case 't': topic = optarg; break; case 'p': partition = atoi(optarg); break; case 'b': brokers = optarg; break; case 's': msgsize = atoi(optarg); break; case 'k': key = optarg; break; case 'c': msgcnt = atoi(optarg); break; case 'D': sendflags |= RD_KAFKA_MSG_F_FREE; break; case 'i': dispintvl = atoi(optarg); break; case 'm': msgpattern = optarg; break; case 'S': seq = strtoull(optarg, NULL, 10); do_seq = 1; break; case 'x': exit_after = atoi(optarg); break; case 'R': seed = atoi(optarg); break; case 'a': if (rd_kafka_topic_conf_set(topic_conf, "request.required.acks", optarg, errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { fprintf(stderr, "%% %s\n", errstr); exit(1); } break; case 'B': batch_size = atoi(optarg); break; case 'z': if (rd_kafka_conf_set(conf, "compression.codec", optarg, errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { fprintf(stderr, "%% %s\n", errstr); exit(1); } compression = optarg; break; case 'o': start_offset = strtoll(optarg, NULL, 10); break; case 'e': exit_eof = 1; break; case 'd': debug = optarg; break; case 'X': { char *name, *val; rd_kafka_conf_res_t res; if (!strcmp(optarg, "list") || !strcmp(optarg, "help")) { rd_kafka_conf_properties_show(stdout); exit(0); } name = optarg; if (!(val = strchr(name, '='))) { fprintf(stderr, "%% Expected " "-X property=value, not %s\n", name); exit(1); } *val = '\0'; val++; res = RD_KAFKA_CONF_UNKNOWN; /* Try "topic." prefixed properties on topic * conf first, and then fall through to global if * it didnt match a topic configuration property. 
*/ if (!strncmp(name, "topic.", strlen("topic."))) res = rd_kafka_topic_conf_set(topic_conf, name+ strlen("topic"), val, errstr, sizeof(errstr)); if (res == RD_KAFKA_CONF_UNKNOWN) res = rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)); if (res != RD_KAFKA_CONF_OK) { fprintf(stderr, "%% %s\n", errstr); exit(1); } } break; case 'T': if (rd_kafka_conf_set(conf, "statistics.interval.ms", optarg, errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { fprintf(stderr, "%% %s\n", errstr); exit(1); } rd_kafka_conf_set_stats_cb(conf, stats_cb); break; case 'q': quiet = 1; break; default: goto usage; } } if (!topic || optind != argc) { usage: fprintf(stderr, "Usage: %s [-C|-P] -t <topic> " "[-p <partition>] [-b <broker,broker..>] [options..]\n" "\n" " Options:\n" " -C | -P Consumer or Producer mode\n" " -t <topic> Topic to fetch / produce\n" " -p <num> Partition (defaults to random)\n" " -b <brokers> Broker address list (host[:port],..)\n" " -s <size> Message size (producer)\n" " -k <key> Message key (producer)\n" " -c <cnt> Messages to transmit/receive\n" " -D Copy/Duplicate data buffer (producer)\n" " -i <ms> Display interval\n" " -m <msg> Message payload pattern\n" " -S <start> Send a sequence number starting at " "<start> as payload\n" " -R <seed> Random seed value (defaults to time)\n" " -a <acks> Required acks (producer): " "-1, 0, 1, >1\n" " -B <size> Consume batch size (# of msgs)\n" " -z <codec> Enable compression:\n" " none|gzip|snappy\n" " -o <offset> Start offset (consumer)\n" " -d [facs..] 
Enable debugging contexts:\n" " %s\n" " -X <prop=name> Set arbitrary librdkafka " "configuration property\n" " Properties prefixed with \"topic.\" " "will be set on topic object.\n" " Use '-X list' to see the full list\n" " of supported properties.\n" " -T <intvl> Enable statistics from librdkafka at " "specified interval (ms)\n" " -q Be more quiet\n" "\n" " In Consumer mode:\n" " consumes messages and prints thruput\n" " If -B <..> is supplied the batch consumer\n" " mode is used, else the callback mode is used.\n" "\n" " In Producer mode:\n" " writes messages of size -s <..> and prints thruput\n" "\n", argv[0], RD_KAFKA_DEBUG_CONTEXTS); exit(1); } dispintvl *= 1000; /* us */ printf("%% Using random seed %i\n", seed); srand(seed); signal(SIGINT, stop); signal(SIGUSR1, sig_usr1); if (debug && rd_kafka_conf_set(conf, "debug", debug, errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { printf("%% Debug configuration failed: %s: %s\n", errstr, debug); exit(1); } /* Socket hangups are gracefully handled in librdkafka on socket error * without the use of signals, so SIGPIPE should be ignored by the * calling program. */ signal(SIGPIPE, SIG_IGN); if (msgcnt != -1) forever = 0; if (mode == 'P') { /* * Producer */ char *sbuf; char *pbuf; int outq; int i; int keylen = key ? 
strlen(key) : 0; off_t rof = 0; size_t plen = strlen(msgpattern); if (do_seq) { if (msgsize < strlen("18446744073709551615: ")+1) msgsize = strlen("18446744073709551615: ")+1; /* Force duplication of payload */ sendflags |= RD_KAFKA_MSG_F_FREE; } sbuf = malloc(msgsize); /* Copy payload content to new buffer */ while (rof < msgsize) { size_t xlen = RD_MIN(msgsize-rof, plen); memcpy(sbuf+rof, msgpattern, xlen); rof += xlen; } if (msgcnt == -1) printf("%% Sending messages of size %i bytes\n", msgsize); else printf("%% Sending %i messages of size %i bytes\n", msgcnt, msgsize); /* Create Kafka handle */ if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)))) { fprintf(stderr, "%% Failed to create Kafka producer: %s\n", errstr); exit(1); } if (debug) rd_kafka_set_log_level(rk, 7); /* Add broker(s) */ if (rd_kafka_brokers_add(rk, brokers) < 1) { fprintf(stderr, "%% No valid brokers specified\n"); exit(1); } /* Explicitly create topic to avoid per-msg lookups. */ rkt = rd_kafka_topic_new(rk, topic, topic_conf); cnt.t_start = rd_clock(); while (run && (msgcnt == -1 || cnt.msgs < msgcnt)) { /* Send/Produce message. */ if (do_seq) { snprintf(sbuf, msgsize-1, "%"PRIu64": ", seq); seq++; } if (sendflags & RD_KAFKA_MSG_F_FREE) { /* Duplicate memory */ pbuf = malloc(msgsize); memcpy(pbuf, sbuf, msgsize); } else pbuf = sbuf; cnt.tx++; while (run && rd_kafka_produce(rkt, partition, sendflags, pbuf, msgsize, key, keylen, NULL) == -1) { if (!quiet || errno != ENOBUFS) printf("produce error: %s%s\n", strerror(errno), errno == ENOBUFS ? 
" (backpressure)":""); cnt.tx_err++; if (errno != ENOBUFS) { run = 0; break; } now = rd_clock(); if (cnt.t_last + dispintvl <= now) { printf("%% Backpressure %i " "(tx %"PRIu64", " "txerr %"PRIu64")\n", rd_kafka_outq_len(rk), cnt.tx, cnt.tx_err); cnt.t_last = now; } /* Poll to handle delivery reports */ rd_kafka_poll(rk, 10); } msgs_wait_cnt++; cnt.msgs++; cnt.bytes += msgsize; print_stats(mode, 0, compression); /* Must poll to handle delivery reports */ rd_kafka_poll(rk, 0); } forever = 0; printf("All messages produced, " "now waiting for %li deliveries\n", msgs_wait_cnt); rd_kafka_dump(stdout, rk); /* Wait for messages to be delivered */ i = 0; while (run && rd_kafka_poll(rk, 1000) != -1) { if (!(i++ % (dispintvl/1000))) printf("%% Waiting for %li, " "%i messages in outq " "to be sent. Abort with Ctrl-c\n", msgs_wait_cnt, rd_kafka_outq_len(rk)); } outq = rd_kafka_outq_len(rk); printf("%% %i messages in outq\n", outq); cnt.msgs -= outq; cnt.bytes -= msgsize * outq; cnt.t_end = t_end; if (cnt.tx_err > 0) printf("%% %"PRIu64" backpressures for %"PRIu64 " produce calls: %.3f%% backpressure rate\n", cnt.tx_err, cnt.tx, ((double)cnt.tx_err / (double)cnt.tx) * 100.0); rd_kafka_dump(stdout, rk); /* Destroy the handle */ rd_kafka_destroy(rk); } else if (mode == 'C') { /* * Consumer */ rd_kafka_message_t **rkmessages = NULL; #if 0 /* Future API */ /* The offset storage file is optional but its presence * avoids starting all over from offset 0 again when * the program restarts. * ZooKeeper functionality will be implemented in future * versions and then the offset will be stored there instead. */ conf.consumer.offset_file = "."; /* current directory */ /* Indicate to rdkafka that the application is responsible * for storing the offset. This allows the application to * successfully handle a message before storing the offset. * If this flag is not set rdkafka will store the offset * just prior to returning the message from rd_kafka_consume(). 
*/ conf.flags |= RD_KAFKA_CONF_F_APP_OFFSET_STORE; #endif /* Create Kafka handle */ if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr)))) { fprintf(stderr, "%% Failed to create Kafka producer: %s\n", errstr); exit(1); } if (debug) rd_kafka_set_log_level(rk, 7); /* Add broker(s) */ if (rd_kafka_brokers_add(rk, brokers) < 1) { fprintf(stderr, "%% No valid brokers specified\n"); exit(1); } /* Create topic to consume from */ rkt = rd_kafka_topic_new(rk, topic, topic_conf); /* Batch consumer */ if (batch_size) rkmessages = malloc(sizeof(*rkmessages) * batch_size); /* Start consuming */ if (rd_kafka_consume_start(rkt, partition, start_offset) == -1){ fprintf(stderr, "%% Failed to start consuming: %s\n", strerror(errno)); exit(1); } cnt.t_start = rd_clock(); while (run && (msgcnt == -1 || msgcnt > cnt.msgs)) { /* Consume messages. * A message may either be a real message, or * an error signaling (if rkmessage->err is set). */ uint64_t latency; int r; latency = rd_clock(); if (batch_size) { int i; /* Batch fetch mode */ r = rd_kafka_consume_batch(rkt, partition, 1000, rkmessages, batch_size); if (r != -1) { for (i = 0 ; i < r ; i++) { msg_consume(rkmessages[i],NULL); rd_kafka_message_destroy( rkmessages[i]); } } } else { /* Callback mode */ r = rd_kafka_consume_callback(rkt, partition, 1000/*timeout*/, msg_consume, NULL); } cnt.t_latency += rd_clock() - latency; if (r == -1) fprintf(stderr, "%% Error: %s\n", strerror(errno)); print_stats(mode, 0, compression); /* Poll to handle stats callbacks */ rd_kafka_poll(rk, 0); } cnt.t_end = rd_clock(); /* Stop consuming */ rd_kafka_consume_stop(rkt, partition); /* Destroy topic */ rd_kafka_topic_destroy(rkt); if (batch_size) free(rkmessages); /* Destroy the handle */ rd_kafka_destroy(rk); } print_stats(mode, 1, compression); if (cnt.t_latency && cnt.msgs) printf("%% Average application fetch latency: %"PRIu64"us\n", cnt.t_latency / cnt.msgs); /* Let background threads clean up and terminate cleanly. 
*/ rd_kafka_wait_destroyed(2000); return 0; }
/*
 * kafka_consume_main
 *
 * Entry point for a Kafka consumer running as a PostgreSQL background worker.
 *
 * Looks up the saved consumer state keyed by the Oid packed into `arg`,
 * connects to the configured brokers, and consumes message batches from every
 * partition assigned to this process (partition % parallelism ==
 * partition_group), COPYing each batch into the target relation until a
 * SIGTERM is received.
 */
void
kafka_consume_main(Datum arg)
{
	char err_msg[512];
	rd_kafka_topic_conf_t *topic_conf;
	rd_kafka_t *kafka;
	rd_kafka_topic_t *topic;
	rd_kafka_message_t **messages;
	const struct rd_kafka_metadata *meta;
	struct rd_kafka_metadata_topic topic_meta;
	rd_kafka_resp_err_t err;
	bool found;
	Oid id = (Oid) arg;
	ListCell *lc;
	KafkaConsumerProc *proc = hash_search(consumer_procs, &id, HASH_FIND, &found);
	KafkaConsumer consumer;
	CopyStmt *copy;
	int valid_brokers = 0;
	int i;
	int my_partitions = 0;

	if (!found)
		elog(ERROR, "kafka consumer %d not found", id);

	pqsignal(SIGTERM, kafka_consume_main_sigterm);
#define BACKTRACE_SEGFAULTS
#ifdef BACKTRACE_SEGFAULTS
	pqsignal(SIGSEGV, debug_segfault);
#endif

	/* we're now ready to receive signals */
	BackgroundWorkerUnblockSignals();

	/* give this proc access to the database */
	BackgroundWorkerInitializeConnection(NameStr(proc->dbname), NULL);

	/* load saved consumer state */
	StartTransactionCommand();
	load_consumer_state(proc->consumer_id, &consumer);
	copy = get_copy_statement(&consumer);

	topic_conf = rd_kafka_topic_conf_new();
	kafka = rd_kafka_new(RD_KAFKA_CONSUMER, NULL, err_msg, sizeof(err_msg));

	/* FIX: rd_kafka_new() can fail; previously a NULL handle was used blindly */
	if (kafka == NULL)
		elog(ERROR, "failed to create kafka consumer: %s", err_msg);

	rd_kafka_set_logger(kafka, logger);

	/*
	 * Add all brokers currently in pipeline_kafka_brokers
	 */
	if (consumer.brokers == NIL)
		elog(ERROR, "no valid brokers were found");

	foreach(lc, consumer.brokers)
		valid_brokers += rd_kafka_brokers_add(kafka, lfirst(lc));

	if (!valid_brokers)
		elog(ERROR, "no valid brokers were found");

	/*
	 * Set up our topic to read from
	 */
	topic = rd_kafka_topic_new(kafka, consumer.topic, topic_conf);
	err = rd_kafka_metadata(kafka, false, topic, &meta, CONSUMER_TIMEOUT);

	if (err != RD_KAFKA_RESP_ERR_NO_ERROR)
		elog(ERROR, "failed to acquire metadata: %s", rd_kafka_err2str(err));

	Assert(meta->topic_cnt == 1);
	/* struct copy; topic_meta.partitions still points into meta */
	topic_meta = meta->topics[0];

	load_consumer_offsets(&consumer, &topic_meta, proc->offset);
	CommitTransactionCommand();

	/*
	 * Begin consuming all partitions that this process is responsible for
	 */
	for (i = 0; i < topic_meta.partition_cnt; i++)
	{
		int partition = topic_meta.partitions[i].id;

		/* NOTE(review): looks like this should be `<` if offsets[] has
		 * num_partitions entries — confirm against load_consumer_offsets */
		Assert(partition <= consumer.num_partitions);
		if (partition % consumer.parallelism != proc->partition_group)
			continue;

		elog(LOG, "[kafka consumer] %s <- %s consuming partition %d from offset %ld",
				consumer.rel->relname, consumer.topic, partition, consumer.offsets[partition]);

		if (rd_kafka_consume_start(topic, partition, consumer.offsets[partition]) == -1)
			elog(ERROR, "failed to start consuming: %s", rd_kafka_err2str(rd_kafka_errno2err(errno)));

		my_partitions++;
	}

	/* FIX: metadata was never released; topic_meta is not used past this point */
	rd_kafka_metadata_destroy(meta);

	/*
	 * No point doing anything if we don't have any partitions assigned to us
	 */
	if (my_partitions == 0)
	{
		elog(LOG, "[kafka consumer] %s <- %s consumer %d doesn't have any partitions to read from",
				consumer.rel->relname, consumer.topic, MyProcPid);
		goto done;
	}

	/* FIX: this is an array of message *pointers*; sizeof(rd_kafka_message_t)
	 * over-allocated by the size of a full message struct per slot */
	messages = palloc0(sizeof(rd_kafka_message_t *) * consumer.batch_size);

	/*
	 * Consume messages until we are terminated
	 */
	while (!got_sigterm)
	{
		ssize_t num_consumed;
		int i;
		int messages_buffered = 0;
		int partition;
		StringInfoData buf;
		bool xact = false;

		for (partition = 0; partition < consumer.num_partitions; partition++)
		{
			if (partition % consumer.parallelism != proc->partition_group)
				continue;

			num_consumed = rd_kafka_consume_batch(topic, partition,
					CONSUMER_TIMEOUT, messages, consumer.batch_size);

			if (num_consumed <= 0)
				continue;

			/* open one transaction lazily, shared by all partitions in this pass */
			if (!xact)
			{
				StartTransactionCommand();
				xact = true;
			}

			initStringInfo(&buf);
			for (i = 0; i < num_consumed; i++)
			{
				if (messages[i]->payload != NULL)
				{
					appendBinaryStringInfo(&buf, messages[i]->payload, messages[i]->len);
					/* COPY input is newline-delimited */
					if (buf.len > 0 && buf.data[buf.len - 1] != '\n')
						appendStringInfoChar(&buf, '\n');
					messages_buffered++;
				}
				consumer.offsets[partition] = messages[i]->offset;
				rd_kafka_message_destroy(messages[i]);
			}
		}

		/* nothing consumed anywhere: back off briefly */
		if (!xact)
		{
			pg_usleep(1 * 1000);
			continue;
		}

		/* we don't want to die in the event of any errors */
		PG_TRY();
		{
			if (messages_buffered)
				execute_copy(copy, &buf);
		}
		PG_CATCH();
		{
			/* NOTE: num_consumed here is only the last partition's count */
			elog(LOG, "[kafka consumer] %s <- %s failed to process batch, dropped %d message%s:",
					consumer.rel->relname, consumer.topic, (int) num_consumed, (num_consumed == 1 ? "" : "s"));
			EmitErrorReport();
			FlushErrorState();

			AbortCurrentTransaction();
			xact = false;
		}
		PG_END_TRY();

		if (!xact)
			StartTransactionCommand();

		if (messages_buffered)
			save_consumer_state(&consumer, proc->partition_group);

		CommitTransactionCommand();
	}

done:
	hash_search(consumer_procs, &id, HASH_REMOVE, NULL);

	rd_kafka_topic_destroy(topic);
	rd_kafka_destroy(kafka);
	rd_kafka_wait_destroyed(CONSUMER_TIMEOUT);
}
/*
 * Test 0004: configuration handling.
 *
 * Exercises librdkafka's global and topic configuration APIs:
 *  - setting properties of every value class (int, string, S2F, S2I, bool)
 *  - dump/verify of original and dup()ed config objects
 *  - creating producer handles/topics from both originals and copies
 *  - incremental set/get semantics of S2F (flag-list) properties
 *  - canonicalization of values and property aliases
 * Returns 0 on success; failures abort via TEST_FAIL/TEST_ASSERT.
 */
int main_0004_conf (int argc, char **argv) {
	rd_kafka_t *rk;
	rd_kafka_topic_t *rkt;
	rd_kafka_conf_t *ignore_conf, *conf, *conf2;
	rd_kafka_topic_conf_t *ignore_topic_conf, *tconf, *tconf2;
	char errstr[512];
	const char **arr_orig, **arr_dup;
	size_t cnt_orig, cnt_dup;
	int i;
	const char *topic;
	/* Global properties to set, as name/value pairs (NULL-terminated) */
	static const char *gconfs[] = {
		"message.max.bytes", "12345", /* int property */
		"client.id", "my id", /* string property */
		"debug", "topic,metadata", /* S2F property */
		"topic.blacklist", "__.*", /* #778 */
		"auto.offset.reset", "earliest", /* Global->Topic fallthru */
#if WITH_ZLIB
		"compression.codec", "gzip", /* S2I property */
#endif
		NULL
	};
	/* Topic properties to set, same pair layout */
	static const char *tconfs[] = {
		"request.required.acks", "-1", /* int */
		"auto.commit.enable", "false", /* bool */
		"auto.offset.reset", "error", /* S2I */
		"offset.store.path", "my/path", /* string */
		NULL
	};

	/* Only needed for the test framework's side effects; the conf objects
	 * themselves are discarded immediately. */
	test_conf_init(&ignore_conf, &ignore_topic_conf, 10);
	rd_kafka_conf_destroy(ignore_conf);
	rd_kafka_topic_conf_destroy(ignore_topic_conf);

	topic = test_mk_topic_name("0004", 0);

	/* Set up a global config object */
	conf = rd_kafka_conf_new();
	rd_kafka_conf_set_dr_cb(conf, dr_cb);
	rd_kafka_conf_set_error_cb(conf, error_cb);
	for (i = 0 ; gconfs[i] ; i += 2) {
		if (rd_kafka_conf_set(conf, gconfs[i], gconfs[i+1], errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
			TEST_FAIL("%s\n", errstr);
	}

	/* Set up a topic config object */
	tconf = rd_kafka_topic_conf_new();
	rd_kafka_topic_conf_set_partitioner_cb(tconf, partitioner);
	/* opaque is only compared for identity later, hence the magic pointer */
	rd_kafka_topic_conf_set_opaque(tconf, (void *)0xbeef);
	for (i = 0 ; tconfs[i] ; i += 2) {
		if (rd_kafka_topic_conf_set(tconf, tconfs[i], tconfs[i+1], errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
			TEST_FAIL("%s\n", errstr);
	}

	/* Verify global config */
	arr_orig = rd_kafka_conf_dump(conf, &cnt_orig);
	conf_verify(__LINE__, arr_orig, cnt_orig, gconfs);

	/* Verify copied global config */
	conf2 = rd_kafka_conf_dup(conf);
	arr_dup = rd_kafka_conf_dump(conf2, &cnt_dup);
	conf_verify(__LINE__, arr_dup, cnt_dup, gconfs);
	conf_cmp("global", arr_orig, cnt_orig, arr_dup, cnt_dup);
	rd_kafka_conf_dump_free(arr_orig, cnt_orig);
	rd_kafka_conf_dump_free(arr_dup, cnt_dup);

	/* Verify topic config */
	arr_orig = rd_kafka_topic_conf_dump(tconf, &cnt_orig);
	conf_verify(__LINE__, arr_orig, cnt_orig, tconfs);

	/* Verify copied topic config */
	tconf2 = rd_kafka_topic_conf_dup(tconf);
	arr_dup = rd_kafka_topic_conf_dump(tconf2, &cnt_dup);
	conf_verify(__LINE__, arr_dup, cnt_dup, tconfs);
	conf_cmp("topic", arr_orig, cnt_orig, arr_dup, cnt_dup);
	rd_kafka_conf_dump_free(arr_orig, cnt_orig);
	rd_kafka_conf_dump_free(arr_dup, cnt_dup);

	/*
	 * Create kafka instances using original and copied confs
	 * (rd_kafka_new/rd_kafka_topic_new take ownership of the conf objects).
	 */

	/* original */
	rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

	rkt = rd_kafka_topic_new(rk, topic, tconf);
	if (!rkt)
		TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));

	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);

	/* copied */
	rk = test_create_handle(RD_KAFKA_PRODUCER, conf2);

	rkt = rd_kafka_topic_new(rk, topic, tconf2);
	if (!rkt)
		TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));

	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);

	/* Incremental S2F property.
	 * Pairs of (value to set, expected get() result).
	 * NOTE: The order of fields returned in get() is hardcoded here. */
	{
		static const char *s2fs[] = {
			"generic,broker,queue,cgrp",
			"generic,broker,queue,cgrp",

			"-broker,+queue,topic",
			"generic,topic,queue,cgrp",

			"-all,security,-fetch,+metadata",
			"metadata,security",

			NULL
		};

		TEST_SAY("Incremental S2F tests\n");
		conf = rd_kafka_conf_new();

		for (i = 0 ; s2fs[i] ; i += 2) {
			const char *val;

			TEST_SAY(" Set: %s\n", s2fs[i]);
			test_conf_set(conf, "debug", s2fs[i]);
			val = test_conf_get(conf, "debug");
			TEST_SAY(" Now: %s\n", val);

			if (strcmp(val, s2fs[i+1]))
				TEST_FAIL_LATER("\n" "Expected: %s\n" " Got: %s", s2fs[i+1], val);
		}

		rd_kafka_conf_destroy(conf);
	}

	/* Canonical int values, aliases, s2i-verified strings.
	 * exp == NULL means the set is expected to be rejected. */
	{
		static const struct {
			const char *prop;
			const char *val;
			const char *exp;
			int is_global;
		} props[] = {
			{ "request.required.acks", "0", "0" },
			{ "request.required.acks", "-1", "-1" },
			{ "request.required.acks", "1", "1" },
			{ "acks", "3", "3" }, /* alias test */
			{ "request.required.acks", "393", "393" },
			{ "request.required.acks", "bad", NULL },
			{ "request.required.acks", "all", "-1" },
			{ "request.required.acks", "all", "-1", 1/*fallthru*/ },
			{ "acks", "0", "0" }, /* alias test */
#if WITH_SASL
			{ "sasl.mechanisms", "GSSAPI", "GSSAPI", 1 },
			{ "sasl.mechanisms", "PLAIN", "PLAIN", 1  },
			{ "sasl.mechanisms", "GSSAPI,PLAIN", NULL, 1  },
			{ "sasl.mechanisms", "", NULL, 1  },
#endif
			{ NULL }
		};

		TEST_SAY("Canonical tests\n");
		tconf = rd_kafka_topic_conf_new();
		conf = rd_kafka_conf_new();

		for (i = 0 ; props[i].prop ; i++) {
			char dest[64];
			size_t destsz;
			rd_kafka_conf_res_t res;

			TEST_SAY(" Set: %s=%s expect %s (%s)\n", props[i].prop, props[i].val, props[i].exp, props[i].is_global ? "global":"topic");

			/* Set value */
			if (props[i].is_global)
				res = rd_kafka_conf_set(conf, props[i].prop, props[i].val, errstr, sizeof(errstr));
			else
				res = rd_kafka_topic_conf_set(tconf, props[i].prop, props[i].val, errstr, sizeof(errstr));
			if ((res == RD_KAFKA_CONF_OK ? 1:0) != (props[i].exp ? 1:0))
				TEST_FAIL("Expected %s, got %s", props[i].exp ? "success" : "failure", (res == RD_KAFKA_CONF_OK ? "OK" : (res == RD_KAFKA_CONF_INVALID ? "INVALID" : "UNKNOWN")));

			if (!props[i].exp)
				continue;

			/* Get value and compare to expected result */
			destsz = sizeof(dest);
			if (props[i].is_global)
				res = rd_kafka_conf_get(conf, props[i].prop, dest, &destsz);
			else
				res = rd_kafka_topic_conf_get(tconf, props[i].prop, dest, &destsz);
			TEST_ASSERT(res == RD_KAFKA_CONF_OK, ".._conf_get(%s) returned %d", props[i].prop, res);

			TEST_ASSERT(!strcmp(props[i].exp, dest), "Expected \"%s\", got \"%s\"", props[i].exp, dest);
		}
		rd_kafka_topic_conf_destroy(tconf);
		rd_kafka_conf_destroy(conf);
	}

	return 0;
}
/*
 * Configuration self-test (standalone variant of the 0004 conf test):
 * sets representative global/topic properties, verifies them via
 * dump/conf_verify on both the originals and dup()ed copies, then creates
 * producer handles and topics from each conf. Returns 0 on success;
 * failures abort through TEST_FAIL.
 */
int main (int argc, char **argv) {
	rd_kafka_t *rk;
	rd_kafka_topic_t *rkt;
	rd_kafka_conf_t *ignore_conf, *conf, *conf2;
	rd_kafka_topic_conf_t *ignore_topic_conf, *tconf, *tconf2;
	char errstr[512];
	const char **arr_orig, **arr_dup;
	size_t cnt_orig, cnt_dup;
	int i;
	const char *topic;
	/* Global properties as name/value pairs (NULL-terminated) */
	static const char *gconfs[] = {
		"message.max.bytes", "12345", /* int property */
		"client.id", "my id", /* string property */
		"debug", "topic,metadata", /* S2F property */
		"compression.codec", "gzip", /* S2I property */
		NULL
	};
	/* Topic properties as name/value pairs (NULL-terminated) */
	static const char *tconfs[] = {
		"request.required.acks", "-1", /* int */
		"auto.commit.enable", "false", /* bool */
		"auto.offset.reset", "error", /* S2I */
		"offset.store.path", "my/path", /* string */
		NULL
	};

	/* Framework init side effects only; the conf objects are discarded */
	test_conf_init(&ignore_conf, &ignore_topic_conf, 10);
	rd_kafka_conf_destroy(ignore_conf);
	rd_kafka_topic_conf_destroy(ignore_topic_conf);

	topic = test_mk_topic_name("generic", 0);

	/* Set up a global config object */
	conf = rd_kafka_conf_new();
	rd_kafka_conf_set_dr_cb(conf, dr_cb);
	rd_kafka_conf_set_error_cb(conf, error_cb);
	for (i = 0 ; gconfs[i] ; i += 2) {
		if (rd_kafka_conf_set(conf, gconfs[i], gconfs[i+1], errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
			TEST_FAIL("%s\n", errstr);
	}

	/* Set up a topic config object */
	tconf = rd_kafka_topic_conf_new();
	rd_kafka_topic_conf_set_partitioner_cb(tconf, partitioner);
	/* opaque only checked for identity; value is a marker */
	rd_kafka_topic_conf_set_opaque(tconf, (void *)0xbeef);
	for (i = 0 ; tconfs[i] ; i += 2) {
		if (rd_kafka_topic_conf_set(tconf, tconfs[i], tconfs[i+1], errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
			TEST_FAIL("%s\n", errstr);
	}

	/* Verify global config */
	arr_orig = rd_kafka_conf_dump(conf, &cnt_orig);
	conf_verify(__LINE__, arr_orig, cnt_orig, gconfs);

	/* Verify copied global config */
	conf2 = rd_kafka_conf_dup(conf);
	arr_dup = rd_kafka_conf_dump(conf2, &cnt_dup);
	conf_verify(__LINE__, arr_dup, cnt_dup, gconfs);
	conf_cmp("global", arr_orig, cnt_orig, arr_dup, cnt_dup);
	rd_kafka_conf_dump_free(arr_orig, cnt_orig);
	rd_kafka_conf_dump_free(arr_dup, cnt_dup);

	/* Verify topic config */
	arr_orig = rd_kafka_topic_conf_dump(tconf, &cnt_orig);
	conf_verify(__LINE__, arr_orig, cnt_orig, tconfs);

	/* Verify copied topic config */
	tconf2 = rd_kafka_topic_conf_dup(tconf);
	arr_dup = rd_kafka_topic_conf_dump(tconf2, &cnt_dup);
	conf_verify(__LINE__, arr_dup, cnt_dup, tconfs);
	conf_cmp("topic", arr_orig, cnt_orig, arr_dup, cnt_dup);
	rd_kafka_conf_dump_free(arr_orig, cnt_orig);
	rd_kafka_conf_dump_free(arr_dup, cnt_dup);

	/*
	 * Create kafka instances using original and copied confs
	 * (rd_kafka_new/rd_kafka_topic_new take ownership of the confs).
	 */

	/* original */
	rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
	if (!rk)
		TEST_FAIL("Failed to create rdkafka instance: %s\n", errstr);

	rkt = rd_kafka_topic_new(rk, topic, tconf);
	if (!rkt)
		TEST_FAIL("Failed to create topic: %s\n", strerror(errno));

	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);

	/* copied */
	rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf2, errstr, sizeof(errstr));
	if (!rk)
		TEST_FAIL("Failed to create rdkafka instance: %s\n", errstr);

	rkt = rd_kafka_topic_new(rk, topic, tconf2);
	if (!rkt)
		TEST_FAIL("Failed to create topic: %s\n", strerror(errno));

	rd_kafka_topic_destroy(rkt);
	rd_kafka_destroy(rk);

	/* Wait for everything to be cleaned up since broker destroys are
	 * handled in its own thread. */
	test_wait_exit(2);

	/* If we havent failed at this point then
	 * there were no threads leaked */
	return 0;
}
/*
 * kafka_produce_batch
 *
 * Produce `msg_cnt` messages to `topic` on an existing connection `r`,
 * preferring rd_kafka_produce_batch() and falling back to per-message
 * rd_kafka_produce() calls if the batch array cannot be allocated.
 *
 * @param r        existing Kafka handle (must not be NULL)
 * @param topic    topic name to produce to
 * @param msg      array of msg_cnt message payloads
 * @param msg_len  array of msg_cnt payload lengths
 * @param msg_cnt  number of messages
 * @param report   non-zero to wait for delivery reports (uses pcb as opaque)
 * @param timeout  message.timeout.ms to configure on the topic
 * @return number of messages that failed (0 on full success),
 *         -2 when r is NULL, -3 on topic configuration/creation failure
 */
int kafka_produce_batch(rd_kafka_t *r, char *topic, char **msg, int *msg_len, int msg_cnt, int report, long timeout)
{
    char errstr[512];
    rd_kafka_topic_t *rkt;
    struct produce_cb_params pcb = {msg_cnt, 0, 0, 0, 0, NULL};
    void *opaque;
    int partition = RD_KAFKA_PARTITION_UA;
    int i, err_cnt = 0;
    rd_kafka_topic_conf_t *topic_conf;

    /* pass the callback-params struct to the delivery callback only when
     * the caller asked for a delivery report */
    if (report)
        opaque = &pcb;
    else
        opaque = NULL;

    if (r == NULL)
    {
        if (log_level)
        {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_ERR, "phpkafka - no connection to produce to topic: %s", topic);
        }
        return -2;
    }

    /* Topic configuration */
    topic_conf = rd_kafka_topic_conf_new();
    char timeoutStr[64];
    snprintf(timeoutStr, 64, "%lu", timeout);
    if (rd_kafka_topic_conf_set(topic_conf, "message.timeout.ms", timeoutStr, errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
    {
        if (log_level)
        {
            openlog("phpkafka", 0, LOG_USER);
            syslog(
                LOG_ERR,
                "Failed to configure topic param 'message.timeout.ms' to %lu before producing; config err was: %s",
                timeout,
                errstr
            );
        }
        rd_kafka_topic_conf_destroy(topic_conf);
        return -3;
    }

    /* Create topic (rd_kafka_topic_new takes ownership of topic_conf on success) */
    rkt = rd_kafka_topic_new(r, topic, topic_conf);

    /* FIX: a NULL rkt was previously passed on to rd_kafka_produce*() */
    if (rkt == NULL)
    {
        if (log_level)
        {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_ERR, "phpkafka - failed to create topic %s: %s",
                topic, rd_kafka_err2str(rd_kafka_errno2err(errno)));
        }
        return -3;
    }

    /* FIX: calloc takes (nmemb, size) — the arguments were swapped */
    rd_kafka_message_t *messages = calloc(msg_cnt, sizeof *messages);
    if (messages == NULL)
    {
        /* fallback to individual produce calls */
        for (i = 0; i < msg_cnt; ++i)
        {
            if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, msg[i], msg_len[i], NULL, 0, opaque) == -1)
            {
                /* FIX: failures on this path were never counted */
                err_cnt += 1;
                if (log_level)
                {
                    openlog("phpkafka", 0, LOG_USER);
                    syslog(LOG_INFO,
                        "phpkafka - %% Failed to produce to topic %s "
                        "partition %i: %s",
                        rd_kafka_topic_name(rkt), partition,
                        rd_kafka_err2str(
                            rd_kafka_errno2err(errno)));
                }
            }
        }
    }
    else
    {
        for (i = 0; i < msg_cnt; ++i)
        {
            messages[i].payload = msg[i];
            messages[i].len = msg_len[i];
        }
        i = rd_kafka_produce_batch(rkt, partition, RD_KAFKA_MSG_F_COPY, messages, msg_cnt);
        if (i < msg_cnt)
        {
            if (log_level)
            {
                openlog("phpkafka", 0, LOG_USER);
                syslog(LOG_WARNING, "Failed to queue full message batch, %d of %d were put in queue", i, msg_cnt);
            }
        }
        err_cnt = msg_cnt - i;
        free(messages);
        messages = NULL;
    }

    /* Poll to handle delivery reports */
    rd_kafka_poll(r, 0);

    /* Wait for messages to be delivered */
    while (report && pcb.msg_count && rd_kafka_outq_len(r) > 0)
        rd_kafka_poll(r, 10);

    rd_kafka_topic_destroy(rkt);
    if (report)
        err_cnt = pcb.err_count;
    return err_cnt;
}
/*
 * kafka_produce_report
 *
 * Produce a single message to `topic` on connection `r` with offset
 * reporting enabled, then block until the delivery report arrives.
 *
 * @param r        existing Kafka handle (must not be NULL)
 * @param topic    topic name to produce to
 * @param msg      message payload
 * @param msg_len  payload length in bytes
 * @param timeout  message.timeout.ms to configure on the topic
 * @return 0 on success, -1 on topic/produce failure, -2 when r is NULL,
 *         -3 on topic configuration failure
 */
int kafka_produce_report(rd_kafka_t *r, const char *topic, char *msg, int msg_len, long timeout)
{
    char errstr[512];
    rd_kafka_topic_t *rkt = NULL;
    int partition = RD_KAFKA_PARTITION_UA;
    rd_kafka_topic_conf_t *conf = NULL;
    struct produce_cb_params pcb = {1, 0, 0, 0, 0, NULL};

    if (r == NULL)
    {
        if (log_level)
        {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_ERR, "No connection provided to produce to topic %s", topic);
        }
        return -2;
    }

    /* Topic configuration: enable per-message offset reporting */
    conf = rd_kafka_topic_conf_new();
    rd_kafka_topic_conf_set(conf,"produce.offset.report", "true", errstr, sizeof errstr );
    char timeoutStr[64];
    snprintf(timeoutStr, 64, "%lu", timeout);
    if (rd_kafka_topic_conf_set(conf, "message.timeout.ms", timeoutStr, errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
    {
        if (log_level)
        {
            openlog("phpkafka", 0, LOG_USER);
            syslog(
                LOG_ERR,
                "Failed to configure topic param 'message.timeout.ms' to %lu before producing; config err was: %s",
                timeout,
                errstr
            );
        }
        rd_kafka_topic_conf_destroy(conf);
        return -3;
    }

    //callback already set in kafka_set_connection
    rkt = rd_kafka_topic_new(r, topic, conf);
    if (!rkt)
    {
        if (log_level)
        {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_ERR, "Failed to open topic %s", topic);
        }
        rd_kafka_topic_conf_destroy(conf);
        return -1;
    }

    //begin producing:
    if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, msg, msg_len,NULL, 0,&pcb) == -1)
    {
        if (log_level)
        {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_ERR, "Failed to produce message: %s", rd_kafka_err2str(rd_kafka_errno2err(errno)));
        }
        /* FIX: was polling the global `rk` handle instead of the `r`
         * parameter, so this producer's delivery callback never ran here */
        rd_kafka_poll(r, 0);
        rd_kafka_topic_destroy(rkt);
        return -1;
    }
    /* FIX: same global-vs-parameter handle bug as above */
    rd_kafka_poll(r, 0);

    /* Wait until the delivery callback clears pcb.msg_count or the
     * out-queue drains */
    while(pcb.msg_count && rd_kafka_outq_len(r) > 0)
        rd_kafka_poll(r, 10);

    rd_kafka_topic_destroy(rkt);
    return 0;
}
/*
 * kafka_consume
 *
 * Consume up to `item_count` messages from `topic`/`partition` starting at
 * `offset` ("end", "beginning", "stored", or a numeric offset), appending
 * each payload into the PHP array `return_value` indexed by message offset.
 *
 * @return 0 on success; -1 on bad offset; -2 when r is NULL;
 *         -3 when the topic cannot be created; -4 when consuming fails to start
 */
int kafka_consume(rd_kafka_t *r, zval* return_value, char* topic, char* offset, int item_count, int partition)
{
    int64_t start_offset = 0;
    int read_counter = 0,
        run = 1;

    //nothing to consume?
    if (item_count == 0)
        return 0;
    if (strlen(offset) != 0)
    {
        if (!strcmp(offset, "end"))
            start_offset = RD_KAFKA_OFFSET_END;
        else if (!strcmp(offset, "beginning"))
            start_offset = RD_KAFKA_OFFSET_BEGINNING;
        else if (!strcmp(offset, "stored"))
            start_offset = RD_KAFKA_OFFSET_STORED;
        else
        {
            start_offset = strtoll(offset, NULL, 10);
            /* NOTE(review): this also rejects an explicit offset of 0 —
             * presumably intentional (0 doubles as strtoll's parse failure) */
            if (start_offset < 1)
                return -1;
        }
    }
    rd_kafka_topic_t *rkt;

    if (r == NULL)
    {
        if (log_level)
        {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_ERR, "phpkafka - no connection to consume from topic: %s", topic);
        }
        return -2;
    }

    rd_kafka_topic_conf_t *topic_conf;

    /* Topic configuration */
    topic_conf = rd_kafka_topic_conf_new();

    /* Create topic */
    rkt = rd_kafka_topic_new(r, topic, topic_conf);
    if (rkt == NULL)
    {
        if (log_level)
        {
            openlog("phpkafka", 0, LOG_USER);
            syslog(
                LOG_ERR,
                "Failed to consume from topic %s: %s",
                topic,
                rd_kafka_err2str(
                    rd_kafka_errno2err(errno)
                )
            );
        }
        return -3;
    }

    if (log_level)
    {
        openlog("phpkafka", 0, LOG_USER);
        syslog(LOG_INFO, "phpkafka - start_offset: %"PRId64" and offset passed: %s", start_offset, offset);
    }

    /* Start consuming */
    if (rd_kafka_consume_start(rkt, partition, start_offset) == -1)
    {
        if (log_level)
        {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_INFO, "phpkafka - %% Failed to start consuming: %s",
                rd_kafka_err2str(rd_kafka_errno2err(errno)));
        }
        /* FIX: the topic handle was leaked on this error path */
        rd_kafka_topic_destroy(rkt);
        return -4;
    }

    /**
     * Keep reading until run == 0, or read_counter == item_count
     */
    for (read_counter=0;read_counter!=item_count;++read_counter)
    {
        if (run == 0)
            break;
        if (log_level)
        {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_INFO,
                "Consuming, count at %d (of %d - run: %d)",
                read_counter,
                item_count,
                run
            );
        }
        rd_kafka_message_t *rkmessage = NULL,
                           *rkmessage_return = NULL;
        /* Consume single message.
         * See rdkafka_performance.c for high speed
         * consuming of messages. */
        rkmessage = rd_kafka_consume(rkt, partition, 1000);
        //timeout ONLY if error didn't cause run to be 0
        if (!rkmessage)
        {
            //break on timeout, makes second call redundant
            if (errno == ETIMEDOUT)
            {
                if (log_level)
                {
                    openlog("phpkafka", 0, LOG_USER);
                    syslog(LOG_INFO,
                        "Consumer timed out, count at %d (of %d) stop consuming after %d messages",
                        read_counter,
                        item_count,
                        read_counter +1
                    );
                }
                break;
            }
            continue;
        }
        rkmessage_return = msg_consume(rkmessage, &run);
        if (rkmessage_return != NULL)
        {
            if ((int) rkmessage_return->len > 0)
            {
                //ensure there is a payload
                /* FIX: buffer must hold len bytes plus the NUL terminator
                 * that sprintf writes; it was one byte short (stack overflow) */
                char payload[(int) rkmessage_return->len + 1];
                sprintf(payload, "%.*s",
                    (int) rkmessage_return->len,
                    (char *) rkmessage_return->payload);
                add_index_string(return_value, (int) rkmessage_return->offset, payload, 1);
            }
            else
            {
                //add empty value
                char payload[1] = "";//empty string
                add_index_string(return_value, (int) rkmessage_return->offset, payload, 1);
            }
        }
        /* Return message to rdkafka */
        rd_kafka_message_destroy(rkmessage);
    }

    /* Stop consuming */
    rd_kafka_consume_stop(rkt, partition);
    rd_kafka_topic_destroy(rkt);

    return 0;
}
int main (int argc, char **argv) { char *conf_path = ZKS_CONF_PATH_DEFAULT; char *pidfile_path = ZKS_PIDFILE_PATH_DEFAULT; char errstr[512]; char c; /* Default configuration */ conf.run = 1; conf.log_level = 6; conf.daemonize = 1; conf.zmq_socket_type = ZMQ_PULL; conf.kafka_partition = RD_KAFKA_PARTITION_UA; /* Random partitioning */ conf.rk_conf = rd_kafka_conf_new(); conf.rkt_conf = rd_kafka_topic_conf_new(); conf.flags |= CONF_F_LOG_KAFKA_MSG_ERROR; /* Parse command line arguments */ while ((c = getopt(argc, argv, "c:p:dDh")) != -1) { switch (c) { case 'c': conf_path = optarg; break; case 'p': pidfile_path = optarg; break; case 'd': conf.log_level = 7; break; case 'D': conf.daemonize = 0; break; case 'h': default: usage(argv[0]); break; } } /* Read config file */ if (ezd_conf_file_read(conf_path, conf_set, errstr, sizeof(errstr), NULL) == -1) { fprintf(stderr, "%s\n", errstr); exit(1); } /* Go to background if configured to do so */ if (conf.daemonize) { if (ezd_daemon(10, errstr, sizeof(errstr)) == -1) { fprintf(stderr, "%s\n", errstr); exit(1); } /* We're now in the child process */ } /* Check and create pidfile */ if (ezd_pidfile_open(pidfile_path, errstr, sizeof(errstr)) == -1) { fprintf(stderr, "%s\n", errstr); exit(1); } /* Set up logging output to syslog */ openlog("zmq2kafka-streamer", LOG_PID | (!conf.daemonize ? LOG_PERROR : 0), LOG_DAEMON); /* Start ZeroMQ interface */ if (zif_start(errstr, sizeof(errstr)) == -1) { fprintf(stderr, "%s\n", errstr); exit(1); } /* Start Kafka interface */ if (kif_start(errstr, sizeof(errstr)) == -1) { fprintf(stderr, "%s\n", errstr); exit(1); } /* Finalize daemonization */ if (conf.daemonize) ezd_daemon_started(); /* Termination signal setup */ signal(SIGINT, term); signal(SIGTERM, term); /* Main loop */ while (conf.run) { /* Poll Kafka for delivery report callbacks. */ rd_kafka_poll(conf.rk, 1000); } /* Termination */ zif_stop(); kif_stop(); ezd_pidfile_close(); exit(0); }
int main (int argc, char **argv) { rd_kafka_topic_t *rkt; char *brokers = "localhost:9092"; char mode = 'C'; char *topic = NULL; int partition = RD_KAFKA_PARTITION_UA; int opt; rd_kafka_conf_t *conf; rd_kafka_topic_conf_t *topic_conf; char errstr[512]; const char *debug = NULL; int64_t start_offset = 0; int report_offsets = 0; int do_conf_dump = 0; quiet = !isatty(STDIN_FILENO); /* Kafka configuration */ conf = rd_kafka_conf_new(); /* Topic configuration */ topic_conf = rd_kafka_topic_conf_new(); while ((opt = getopt(argc, argv, "PCLt:p:b:z:qd:o:eX:A")) != -1) { switch (opt) { case 'P': case 'C': case 'L': mode = opt; break; case 't': topic = optarg; break; case 'p': partition = atoi(optarg); break; case 'b': brokers = optarg; break; case 'z': if (rd_kafka_conf_set(conf, "compression.codec", optarg, errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { fprintf(stderr, "%% %s\n", errstr); exit(1); } break; case 'o': if (!strcmp(optarg, "end")) start_offset = RD_KAFKA_OFFSET_END; else if (!strcmp(optarg, "beginning")) start_offset = RD_KAFKA_OFFSET_BEGINNING; else if (!strcmp(optarg, "stored")) start_offset = RD_KAFKA_OFFSET_STORED; else if (!strcmp(optarg, "report")) report_offsets = 1; else start_offset = strtoll(optarg, NULL, 10); break; case 'e': exit_eof = 1; break; case 'd': debug = optarg; break; case 'q': quiet = 1; break; case 'A': output = OUTPUT_RAW; break; case 'X': { char *name, *val; rd_kafka_conf_res_t res; if (!strcmp(optarg, "list") || !strcmp(optarg, "help")) { rd_kafka_conf_properties_show(stdout); exit(0); } if (!strcmp(optarg, "dump")) { do_conf_dump = 1; continue; } name = optarg; if (!(val = strchr(name, '='))) { fprintf(stderr, "%% Expected " "-X property=value, not %s\n", name); exit(1); } *val = '\0'; val++; res = RD_KAFKA_CONF_UNKNOWN; /* Try "topic." prefixed properties on topic * conf first, and then fall through to global if * it didnt match a topic configuration property. 
*/ if (!strncmp(name, "topic.", strlen("topic."))) res = rd_kafka_topic_conf_set(topic_conf, name+ strlen("topic."), val, errstr, sizeof(errstr)); if (res == RD_KAFKA_CONF_UNKNOWN) res = rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)); if (res != RD_KAFKA_CONF_OK) { fprintf(stderr, "%% %s\n", errstr); exit(1); } } break; default: goto usage; } } if (do_conf_dump) { const char **arr; size_t cnt; int pass; for (pass = 0 ; pass < 2 ; pass++) { int i; if (pass == 0) { arr = rd_kafka_conf_dump(conf, &cnt); printf("# Global config\n"); } else { printf("# Topic config\n"); arr = rd_kafka_topic_conf_dump(topic_conf, &cnt); } for (i = 0 ; i < cnt ; i += 2) printf("%s = %s\n", arr[i], arr[i+1]); printf("\n"); rd_kafka_conf_dump_free(arr, cnt); } exit(0); } if (optind != argc || (mode != 'L' && !topic)) { usage: fprintf(stderr, "Usage: %s -C|-P|-L -t <topic> " "[-p <partition>] [-b <host1:port1,host2:port2,..>]\n" "\n" "librdkafka version %s (0x%08x)\n" "\n" " Options:\n" " -C | -P Consumer or Producer mode\n" " -L Metadata list mode\n" " -t <topic> Topic to fetch / produce\n" " -p <num> Partition (random partitioner)\n" " -b <brokers> Broker address (localhost:9092)\n" " -z <codec> Enable compression:\n" " none|gzip|snappy\n" " -o <offset> Start offset (consumer)\n" " -o report Report message offsets (producer)\n" " -e Exit consumer when last message\n" " in partition has been received.\n" " -d [facs..] 
Enable debugging contexts:\n" " -q Be quiet\n" " -A Raw payload output (consumer)\n" " %s\n" " -X <prop=name> Set arbitrary librdkafka " "configuration property\n" " Properties prefixed with \"topic.\" " "will be set on topic object.\n" " Use '-X list' to see the full list\n" " of supported properties.\n" "\n" " In Consumer mode:\n" " writes fetched messages to stdout\n" " In Producer mode:\n" " reads messages from stdin and sends to broker\n" " In List mode:\n" " queries broker for metadata information, " "topic is optional.\n" "\n" "\n" "\n", argv[0], rd_kafka_version_str(), rd_kafka_version(), RD_KAFKA_DEBUG_CONTEXTS); exit(1); } signal(SIGINT, stop); signal(SIGUSR1, sig_usr1); if (debug && rd_kafka_conf_set(conf, "debug", debug, errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { fprintf(stderr, "%% Debug configuration failed: %s: %s\n", errstr, debug); exit(1); } if (mode == 'P') { /* * Producer */ char buf[2048]; int sendcnt = 0; /* Set up a message delivery report callback. * It will be called once for each message, either on successful * delivery to broker, or upon failure to deliver to broker. */ /* If offset reporting (-o report) is enabled, use the * richer dr_msg_cb instead. 
*/ if (report_offsets) { rd_kafka_topic_conf_set(topic_conf, "produce.offset.report", "true", errstr, sizeof(errstr)); rd_kafka_conf_set_dr_msg_cb(conf, msg_delivered2); } else rd_kafka_conf_set_dr_cb(conf, msg_delivered); /* Create Kafka handle */ if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)))) { fprintf(stderr, "%% Failed to create new producer: %s\n", errstr); exit(1); } /* Set logger */ rd_kafka_set_logger(rk, logger); rd_kafka_set_log_level(rk, LOG_DEBUG); /* Add brokers */ if (rd_kafka_brokers_add(rk, brokers) == 0) { fprintf(stderr, "%% No valid brokers specified\n"); exit(1); } /* Create topic */ rkt = rd_kafka_topic_new(rk, topic, topic_conf); if (!quiet) fprintf(stderr, "%% Type stuff and hit enter to send\n"); while (run && fgets(buf, sizeof(buf), stdin)) { size_t len = strlen(buf); if (buf[len-1] == '\n') buf[--len] = '\0'; /* Send/Produce message. */ if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, /* Payload and length */ buf, len, /* Optional key and its length */ NULL, 0, /* Message opaque, provided in * delivery report callback as * msg_opaque. 
*/ NULL) == -1) { fprintf(stderr, "%% Failed to produce to topic %s " "partition %i: %s\n", rd_kafka_topic_name(rkt), partition, rd_kafka_err2str( rd_kafka_errno2err(errno))); /* Poll to handle delivery reports */ rd_kafka_poll(rk, 0); continue; } if (!quiet) fprintf(stderr, "%% Sent %zd bytes to topic " "%s partition %i\n", len, rd_kafka_topic_name(rkt), partition); sendcnt++; /* Poll to handle delivery reports */ rd_kafka_poll(rk, 0); } /* Poll to handle delivery reports */ rd_kafka_poll(rk, 0); /* Wait for messages to be delivered */ while (run && rd_kafka_outq_len(rk) > 0) rd_kafka_poll(rk, 100); /* Destroy the handle */ rd_kafka_destroy(rk); } else if (mode == 'C') { /* * Consumer */ /* Create Kafka handle */ if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr)))) { fprintf(stderr, "%% Failed to create new consumer: %s\n", errstr); exit(1); } /* Set logger */ rd_kafka_set_logger(rk, logger); rd_kafka_set_log_level(rk, LOG_DEBUG); /* Add brokers */ if (rd_kafka_brokers_add(rk, brokers) == 0) { fprintf(stderr, "%% No valid brokers specified\n"); exit(1); } /* Create topic */ rkt = rd_kafka_topic_new(rk, topic, topic_conf); /* Start consuming */ if (rd_kafka_consume_start(rkt, partition, start_offset) == -1){ fprintf(stderr, "%% Failed to start consuming: %s\n", rd_kafka_err2str(rd_kafka_errno2err(errno))); exit(1); } while (run) { rd_kafka_message_t *rkmessage; /* Consume single message. * See rdkafka_performance.c for high speed * consuming of messages. 
*/ rkmessage = rd_kafka_consume(rkt, partition, 1000); if (!rkmessage) /* timeout */ continue; msg_consume(rkmessage, NULL); /* Return message to rdkafka */ rd_kafka_message_destroy(rkmessage); } /* Stop consuming */ rd_kafka_consume_stop(rkt, partition); rd_kafka_topic_destroy(rkt); rd_kafka_destroy(rk); } else if (mode == 'L') { rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; /* Create Kafka handle */ if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)))) { fprintf(stderr, "%% Failed to create new producer: %s\n", errstr); exit(1); } /* Set logger */ rd_kafka_set_logger(rk, logger); rd_kafka_set_log_level(rk, LOG_DEBUG); /* Add brokers */ if (rd_kafka_brokers_add(rk, brokers) == 0) { fprintf(stderr, "%% No valid brokers specified\n"); exit(1); } /* Create topic */ if (topic) rkt = rd_kafka_topic_new(rk, topic, topic_conf); else rkt = NULL; while (run) { const struct rd_kafka_metadata *metadata; /* Fetch metadata */ err = rd_kafka_metadata(rk, rkt ? 0 : 1, rkt, &metadata, 5000); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { fprintf(stderr, "%% Failed to acquire metadata: %s\n", rd_kafka_err2str(err)); run = 0; break; } metadata_print(topic, metadata); rd_kafka_metadata_destroy(metadata); run = 0; } /* Destroy the handle */ rd_kafka_destroy(rk); /* Exit right away, dont wait for background cleanup, we haven't * done anything important anyway. */ exit(err ? 2 : 0); } /* Let background threads clean up and terminate cleanly. */ rd_kafka_wait_destroyed(2000); return 0; }
/**
 * @brief Get all partitions for topic and their beginning offsets, useful
 * if we're consuming messages without knowing the actual partition beforehand
 * @param r rd_kafka_t* consumer connection handle (required)
 * @param partitions long** should be pointer to NULL, will be allocated here;
 *        on success the caller owns (and must free) the array
 * @param topic const char* topic name
 * @return int (0 == meta error, -2: no connection, -1: allocation error,
 *         all others indicate success (nr of elems in array))
 */
int kafka_partition_offsets(rd_kafka_t *r, long **partitions, const char *topic)
{
    rd_kafka_topic_t *rkt = NULL;
    rd_kafka_topic_conf_t *conf = NULL;
    rd_kafka_queue_t *rkqu = NULL;
    struct consume_cb_params cb_params = {0, NULL, NULL, 0, 0, 0};
    int i = 0;
    //make life easier, 1 level of indirection...
    long *values = *partitions;
    //connect as consumer if required
    if (r == NULL)
    {
        if (log_level)
        {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_ERR, "phpkafka - no connection to get offsets of topic: %s", topic);
        }
        return -2;
    }
    /* Topic configuration */
    conf = rd_kafka_topic_conf_new();
    /* Create topic */
    rkt = rd_kafka_topic_new(r, topic, conf);
    /* BUG FIX: was rd_kafka_queue_new(rk) — used a file-scope handle
     * instead of the connection passed in as 'r'. */
    rkqu = rd_kafka_queue_new(r);
    const struct rd_kafka_metadata *meta = NULL;
    if (RD_KAFKA_RESP_ERR_NO_ERROR == rd_kafka_metadata(r, 0, rkt, &meta, 5))
    {
        /* FIX: use a temporary for realloc so the old buffer is not lost
         * on failure (original overwrote 'values' directly). */
        long *tmp = realloc(values, meta->topics->partition_cnt * sizeof *values);
        if (tmp == NULL)
        {
            /* release old buffer (if any) and leave caller with NULL
             * instead of a dangling/ambiguous pointer */
            free(values);
            *partitions = NULL;
            //free metadata and local resources, return error
            rd_kafka_metadata_destroy(meta);
            rd_kafka_queue_destroy(rkqu);
            rd_kafka_topic_destroy(rkt);
            return -1;
        }
        values = tmp;
        //we need eop to reach 0, if there are 4 partitions, start at 3 (0, 1, 2, 3)
        cb_params.eop = meta->topics->partition_cnt - 1;
        cb_params.partition_offset = values;
        for (i = 0; i < meta->topics->partition_cnt; ++i)
        {
            //initialize: set to -2 for callback
            values[i] = -2;
            if (rd_kafka_consume_start_queue(rkt, meta->topics->partitions[i].id, RD_KAFKA_OFFSET_BEGINNING, rkqu))
            {
                if (log_level)
                {
                    openlog("phpkafka", 0, LOG_USER);
                    syslog(LOG_ERR, "Failed to start consuming topic %s [%"PRId32"]",
                        topic, meta->topics->partitions[i].id
                    );
                }
                continue;
            }
        }
        //either eop reached 0, or the read errors >= nr of partitions
        //either way, we've consumed a message from each partition, and therefore, we're done
        while (cb_params.eop && cb_params.error_count < meta->topics->partition_cnt)
            rd_kafka_consume_callback_queue(rkqu, 100, offset_queue_consume, &cb_params);
        //stop consuming for all partitions
        for (i = 0; i < meta->topics->partition_cnt; ++i)
            rd_kafka_consume_stop(rkt, meta->topics[0].partitions[i].id);
        //do we need this poll here?
        while (rd_kafka_outq_len(r) > 0)
            rd_kafka_poll(r, 5);
        //let's be sure to pass along the correct values here...
        *partitions = values;
        i = meta->topics->partition_cnt;
    }
    if (meta)
        rd_kafka_metadata_destroy(meta);
    /* FIX: queue was leaked when rd_kafka_metadata() failed; destroy it
     * unconditionally here (moved out of the success branch). */
    rd_kafka_queue_destroy(rkqu);
    rd_kafka_topic_destroy(rkt);
    return i;
}
/**
 * High-level (consumer-group) rdkafka example: subscribe mode (default),
 * describe-group mode (-D), or committed/position offset query mode (-O).
 *
 * Relies on file-scope globals defined elsewhere in this file: rk, run,
 * quiet, exit_eof, output, logger, rebalance_cb, describe_groups,
 * msg_consume, stop, sig_usr1.
 */
int main (int argc, char **argv) {
	char mode = 'C';                 /* 'C' consume, 'D' describe, 'O' offsets */
	char *brokers = "localhost:9092";
	int opt;
	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *topic_conf;
	char errstr[512];
	const char *debug = NULL;
	int do_conf_dump = 0;
	char tmp[16];
	rd_kafka_resp_err_t err;
	char *group = NULL;
	rd_kafka_topic_partition_list_t *topics;
	int i;

	quiet = !isatty(STDIN_FILENO);

	/* Kafka configuration */
	conf = rd_kafka_conf_new();

	/* Set logger */
	rd_kafka_conf_set_log_cb(conf, logger);

	/* Quick termination: use SIGIO as internal termination signal */
	snprintf(tmp, sizeof(tmp), "%i", SIGIO);
	rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0);

	/* Topic configuration */
	topic_conf = rd_kafka_topic_conf_new();

	while ((opt = getopt(argc, argv, "g:b:qd:eX:As:DO")) != -1) {
		switch (opt) {
		case 'b':
			brokers = optarg;
			break;
		case 'g':
			group = optarg;
			break;
		case 'e':
			exit_eof = 1;
			break;
		case 'd':
			debug = optarg;
			break;
		case 'q':
			quiet = 1;
			break;
		case 'A':
			output = OUTPUT_RAW;
			break;
		case 'X':
		{
			/* -X prop=value: arbitrary librdkafka property;
			 * "list"/"help" prints properties, "dump" defers a
			 * config dump until after parsing. */
			char *name, *val;
			rd_kafka_conf_res_t res;

			if (!strcmp(optarg, "list") ||
			    !strcmp(optarg, "help")) {
				rd_kafka_conf_properties_show(stdout);
				exit(0);
			}

			if (!strcmp(optarg, "dump")) {
				do_conf_dump = 1;
				continue;
			}

			name = optarg;
			if (!(val = strchr(name, '='))) {
				fprintf(stderr, "%% Expected "
					"-X property=value, not %s\n", name);
				exit(1);
			}

			*val = '\0';
			val++;

			res = RD_KAFKA_CONF_UNKNOWN;
			/* Try "topic." prefixed properties on topic
			 * conf first, and then fall through to global if
			 * it didnt match a topic configuration property. */
			if (!strncmp(name, "topic.", strlen("topic.")))
				res = rd_kafka_topic_conf_set(topic_conf,
							      name+
							      strlen("topic."),
							      val,
							      errstr,
							      sizeof(errstr));

			if (res == RD_KAFKA_CONF_UNKNOWN)
				res = rd_kafka_conf_set(conf, name, val,
							errstr,
							sizeof(errstr));

			if (res != RD_KAFKA_CONF_OK) {
				fprintf(stderr, "%% %s\n", errstr);
				exit(1);
			}
		}
		break;

		case 'D':
		case 'O':
			mode = opt;
			break;

		default:
			goto usage;
		}
	}

	if (do_conf_dump) {
		/* Print all effective global and topic properties, then exit. */
		const char **arr;
		size_t cnt;
		int pass;

		for (pass = 0 ; pass < 2 ; pass++) {
			if (pass == 0) {
				arr = rd_kafka_conf_dump(conf, &cnt);
				printf("# Global config\n");
			} else {
				printf("# Topic config\n");
				arr = rd_kafka_topic_conf_dump(topic_conf,
							       &cnt);
			}

			for (i = 0 ; i < (int)cnt ; i += 2)
				printf("%s = %s\n", arr[i], arr[i+1]);

			printf("\n");

			rd_kafka_conf_dump_free(arr, cnt);
		}

		exit(0);
	}

	/* Consume ('C') and offset-query ('O') modes need topic arguments. */
	if (strchr("OC", mode) && optind == argc) {
	usage:
		fprintf(stderr,
			"Usage: %s [options] <topic[:part]> <topic[:part]>..\n"
			"\n"
			"librdkafka version %s (0x%08x)\n"
			"\n"
			" Options:\n"
			" -g <group> Consumer group (%s)\n"
			" -b <brokers> Broker address (%s)\n"
			" -e Exit consumer when last message\n"
			" in partition has been received.\n"
			" -D Describe group.\n"
			" -O Get commmitted offset(s)\n"
			" -d [facs..] Enable debugging contexts:\n"
			" %s\n"
			" -q Be quiet\n"
			" -A Raw payload output (consumer)\n"
			" -X <prop=name> Set arbitrary librdkafka "
			"configuration property\n"
			" Properties prefixed with \"topic.\" "
			"will be set on topic object.\n"
			" Use '-X list' to see the full list\n"
			" of supported properties.\n"
			"\n"
			"\n",
			argv[0],
			rd_kafka_version_str(), rd_kafka_version(),
			group, brokers,
			RD_KAFKA_DEBUG_CONTEXTS);
		exit(1);
	}

	signal(SIGINT, stop);
	signal(SIGUSR1, sig_usr1);

	if (debug &&
	    rd_kafka_conf_set(conf, "debug", debug, errstr,
			      sizeof(errstr)) != RD_KAFKA_CONF_OK) {
		fprintf(stderr, "%% Debug configuration failed: %s: %s\n",
			errstr, debug);
		exit(1);
	}

	/*
	 * Client/Consumer group
	 */
	if (strchr("CO", mode)) {
		/* Consumer groups require a group id */
		if (!group)
			group = "rdkafka_consumer_example";
		if (rd_kafka_conf_set(conf, "group.id", group,
				      errstr, sizeof(errstr)) !=
		    RD_KAFKA_CONF_OK) {
			fprintf(stderr, "%% %s\n", errstr);
			exit(1);
		}

		/* Consumer groups always use broker based offset storage */
		if (rd_kafka_topic_conf_set(topic_conf, "offset.store.method",
					    "broker",
					    errstr, sizeof(errstr)) !=
		    RD_KAFKA_CONF_OK) {
			fprintf(stderr, "%% %s\n", errstr);
			exit(1);
		}

		/* Set default topic config for pattern-matched topics. */
		rd_kafka_conf_set_default_topic_conf(conf, topic_conf);

		/* Callback called on partition assignment changes */
		rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);
	}

	/* Create Kafka handle */
	if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf,
				errstr, sizeof(errstr)))) {
		fprintf(stderr,
			"%% Failed to create new consumer: %s\n",
			errstr);
		exit(1);
	}

	rd_kafka_set_log_level(rk, LOG_DEBUG);

	/* Add brokers */
	if (rd_kafka_brokers_add(rk, brokers) == 0) {
		fprintf(stderr, "%% No valid brokers specified\n");
		exit(1);
	}

	if (mode == 'D') {
		int r;
		/* Describe groups */
		r = describe_groups(rk, group);

		rd_kafka_destroy(rk);
		exit(r == -1 ? 1 : 0);
	}

	/* Redirect rd_kafka_poll() to consumer_poll() */
	rd_kafka_poll_set_consumer(rk);

	/* Build subscription list from remaining "topic[:part]" arguments. */
	topics = rd_kafka_topic_partition_list_new(argc - optind);
	for (i = optind ; i < argc ; i++) {
		/* Parse "topic[:part] */
		char *topic = argv[i];
		char *t;
		int32_t partition = -1;

		if ((t = strstr(topic, ":"))) {
			*t = '\0';
			partition = atoi(t+1);
		}

		rd_kafka_topic_partition_list_add(topics, topic, partition);
	}

	if (mode == 'O') {
		/* Offset query: print current position per partition. */
		err = rd_kafka_position(rk, topics, 5000);
		if (err) {
			fprintf(stderr, "%% Failed to fetch offsets: %s\n",
				rd_kafka_err2str(err));
			exit(1);
		}

		for (i = 0 ; i < topics->cnt ; i++) {
			rd_kafka_topic_partition_t *p = &topics->elems[i];
			printf("Topic \"%s\" partition %"PRId32,
			       p->topic, p->partition);
			if (p->err)
				printf(" error %s",
				       rd_kafka_err2str(p->err));
			else {
				printf(" offset %"PRId64"",
				       p->offset);

				if (p->metadata_size)
					printf(" (%d bytes of metadata)",
					       (int)p->metadata_size);
			}
			printf("\n");
		}

		goto done;
	}

	if ((err = rd_kafka_subscribe(rk, topics))) {
		fprintf(stderr, "%% Failed to start consuming topics: %s\n",
			rd_kafka_err2str(err));
		exit(1);
	}

	/* Consume until interrupted (run cleared by signal handler). */
	while (run) {
		rd_kafka_message_t *rkmessage;

		rkmessage = rd_kafka_consumer_poll(rk, 1000);
		if (rkmessage) {
			msg_consume(rkmessage, NULL);
			rd_kafka_message_destroy(rkmessage);
		}
	}

done:
	err = rd_kafka_consumer_close(rk);
	if (err)
		fprintf(stderr, "%% Failed to close consumer: %s\n",
			rd_kafka_err2str(err));
	else
		fprintf(stderr, "%% Consumer closed\n");

	rd_kafka_topic_partition_list_destroy(topics);

	/* Destroy handle */
	rd_kafka_destroy(rk);

	/* Let background threads clean up and terminate cleanly. */
	run = 5;
	while (run-- > 0 && rd_kafka_wait_destroyed(1000) == -1)
		printf("Waiting for librdkafka to decommission\n");
	/* NOTE(review): rd_kafka_dump on a destroyed handle looks like a
	 * use-after-free; presumably intended only for the case where
	 * wait_destroyed kept failing (handle still alive) — confirm. */
	if (run <= 0)
		rd_kafka_dump(stdout, rk);

	return 0;
}
/**
 * Consume up to item_count messages from all partitions of a topic via a
 * single consume queue, appending payloads to the PHP return_value array.
 *
 * @param rk           open consumer connection (required)
 * @param return_value PHP array to fill (required)
 * @param topic        topic name (required)
 * @param offset       "end"/"beginning"/"stored" or a numeric start offset
 * @param item_count   max number of messages to read
 *
 * No return value; failures are logged via syslog (when log_level is set)
 * and the function returns early.
 */
void kafka_consume_all(rd_kafka_t *rk, zval *return_value, const char *topic, const char *offset, int item_count)
{
    char errstr[512];
    rd_kafka_topic_t *rkt;
    rd_kafka_topic_conf_t *conf;
    const struct rd_kafka_metadata *meta = NULL;
    rd_kafka_queue_t *rkqu = NULL;
    int p, i = 0;
    int32_t partition = 0;
    int64_t start;
    struct consume_cb_params cb_params = {item_count, return_value, NULL, 0, 0, 0};
    //check for NULL pointers, all arguments are required!
    if (rk == NULL || return_value == NULL || topic == NULL || offset == NULL || strlen(offset) == 0)
        return;
    if (!strcmp(offset, "end"))
        start = RD_KAFKA_OFFSET_END;
    else if (!strcmp(offset, "beginning"))
        start = RD_KAFKA_OFFSET_BEGINNING;
    else if (!strcmp(offset, "stored"))
        start = RD_KAFKA_OFFSET_STORED;
    else
        start = strtoll(offset, NULL, 10);
    /* Topic configuration */
    conf = rd_kafka_topic_conf_new();
    /* Disable autocommit, queue_consume sets offsets automatically */
    if (rd_kafka_topic_conf_set(conf, "auto.commit.enable", "false", errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
    {
        if (log_level)
        {
            openlog("phpkafka", 0, LOG_USER);
            syslog(
                LOG_WARNING,
                "failed to turn autocommit off consuming %d messages (start offset %"PRId64") from topic %s: %s",
                item_count, start, topic, errstr
            );
        }
        //fall back to letting the callback commit offsets itself
        cb_params.auto_commit = 1;
    }
    /* Create topic */
    rkt = rd_kafka_topic_new(rk, topic, conf);
    if (!rkt)
    {
        if (log_level)
        {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_INFO, "phpkafka - Failed to read %s from %"PRId64" (%s)", topic, start, offset);
        }
        return;
    }
    rkqu = rd_kafka_queue_new(rk);
    if (RD_KAFKA_RESP_ERR_NO_ERROR == rd_kafka_metadata(rk, 0, rkt, &meta, 5))
    {
        p = meta->topics->partition_cnt;
        cb_params.partition_ends = calloc(sizeof *cb_params.partition_ends, p);
        if (cb_params.partition_ends == NULL)
        {
            if (log_level)
            {
                openlog("phpkafka", 0, LOG_USER);
                syslog(LOG_INFO, "phpkafka - Failed to read %s from %"PRId64" (%s)", topic, start, offset);
            }
            rd_kafka_metadata_destroy(meta);
            meta = NULL;
            rd_kafka_queue_destroy(rkqu);
            rd_kafka_topic_destroy(rkt);
            return;
        }
        cb_params.eop = p;
        //start a queue-based consumer on every partition
        for (i = 0; i < p; ++i)
        {
            partition = meta->topics[0].partitions[i].id;
            if (rd_kafka_consume_start_queue(rkt, partition, start, rkqu))
            {
                if (log_level)
                {
                    openlog("phpkafka", 0, LOG_USER);
                    syslog(LOG_ERR, "Failed to start consuming topic %s [%"PRId32"]: %s",
                        topic, partition, offset
                    );
                }
                continue;
            }
        }
        //drain the queue until item_count is reached or all partitions hit EOF
        while (cb_params.read_count && cb_params.eop)
            rd_kafka_consume_callback_queue(rkqu, 200, queue_consume, &cb_params);
        free(cb_params.partition_ends);
        cb_params.partition_ends = NULL;
        for (i = 0; i < p; ++i)
        {
            partition = meta->topics[0].partitions[i].id;
            rd_kafka_consume_stop(rkt, partition);
        }
        rd_kafka_metadata_destroy(meta);
        meta = NULL;
        rd_kafka_queue_destroy(rkqu);
        while (rd_kafka_outq_len(rk) > 0)
            rd_kafka_poll(rk, 50);
        rd_kafka_topic_destroy(rkt);
    }
    else
    {
        /* FIX: metadata failure originally leaked both the queue and the
         * topic handle — release them here. */
        rd_kafka_queue_destroy(rkqu);
        rd_kafka_topic_destroy(rkt);
    }
    if (meta)
        rd_kafka_metadata_destroy(meta);
}
/**
 * Produce a single message to a topic using an unassigned (random) partition.
 *
 * @param r       open producer connection (required)
 * @param topic   topic name
 * @param msg     payload bytes (copied by librdkafka)
 * @param msg_len payload length in bytes
 * @param report  non-zero to wait for the delivery report via pcb
 * @param timeout message.timeout.ms value for the topic (milliseconds)
 * @return 0 on success, -1 produce failure, -2 no connection,
 *         -3 topic configuration failure
 */
int kafka_produce(rd_kafka_t *r, char* topic, char* msg, int msg_len, int report, long timeout)
{
    char errstr[512];
    rd_kafka_topic_t *rkt;
    struct produce_cb_params pcb = {1, 0, 0, 0, 0, NULL};
    void *opaque;
    int partition = RD_KAFKA_PARTITION_UA;

    //decide whether to pass callback params or not...
    if (report)
        opaque = &pcb;
    else
        opaque = NULL;

    rd_kafka_topic_conf_t *topic_conf;

    if (r == NULL)
    {
        if (log_level)
        {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_ERR, "phpkafka - no connection to produce to topic: %s", topic);
        }
        return -2;
    }

    /* Topic configuration */
    topic_conf = rd_kafka_topic_conf_new();
    char timeoutStr[64];
    /* FIX: timeout is a signed long — was formatted with %lu (UB for
     * negative values / format mismatch); use %ld and sizeof buffer. */
    snprintf(timeoutStr, sizeof(timeoutStr), "%ld", timeout);
    if (rd_kafka_topic_conf_set(topic_conf, "message.timeout.ms", timeoutStr, errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
    {
        if (log_level)
        {
            openlog("phpkafka", 0, LOG_USER);
            syslog(
                LOG_ERR,
                "Failed to configure topic param 'message.timeout.ms' to %ld before producing; config err was: %s",
                timeout, errstr
            );
        }
        rd_kafka_topic_conf_destroy(topic_conf);
        return -3;
    }

    /* Create topic (takes ownership of topic_conf) */
    rkt = rd_kafka_topic_new(r, topic, topic_conf);

    if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
                         /* Payload and length */
                         msg, msg_len,
                         /* Optional key and its length */
                         NULL, 0,
                         /* Message opaque, provided in
                          * delivery report callback as
                          * msg_opaque. */
                         opaque) == -1)
    {
        if (log_level)
        {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_INFO, "phpkafka - %% Failed to produce to topic %s "
                "partition %i: %s",
                rd_kafka_topic_name(rkt), partition,
                rd_kafka_err2str(
                    rd_kafka_errno2err(errno)));
        }
        rd_kafka_topic_destroy(rkt);
        return -1;
    }

    /* Poll to handle delivery reports */
    rd_kafka_poll(r, 0);

    /* Wait for messages to be delivered (only when a report was requested) */
    while (report && pcb.msg_count && rd_kafka_outq_len(r) > 0)
        rd_kafka_poll(r, 10);

    rd_kafka_topic_destroy(rkt);
    return 0;
}
/* transmitter worker */ void *kaf_worker(void *thread_id) { char buf[MAX_BUF], *b; int rc=-1, nr, len, l, count=0,kr; /* kafka connection setup */ char errstr[512]; rd_kafka_t *k; rd_kafka_topic_t *t; rd_kafka_conf_t *conf; rd_kafka_topic_conf_t *topic_conf; int partition = RD_KAFKA_PARTITION_UA; char *key = NULL; int keylen = key ? strlen(key) : 0; /* set up global options */ conf = rd_kafka_conf_new(); rd_kafka_conf_set_error_cb(conf, err_cb); //rd_kafka_conf_set_throttle_cb(conf, throttle_cb); rd_kafka_conf_set_stats_cb(conf, stats_cb); kr = rd_kafka_conf_set(conf, "statistics.interval.ms", "60000", errstr, sizeof(errstr)); if (kr != RD_KAFKA_CONF_OK) { fprintf(stderr,"error: rd_kafka_conf_set: statistics.interval.ms 60000 => %s\n", errstr); goto done; } char **opt=NULL; while( (opt=(char **)utarray_next(CF.rdkafka_options,opt))) { char *eq = strchr(*opt,'='); if (eq == NULL) { fprintf(stderr,"error: specify rdkafka params as key=value\n"); goto done; } char *k = strdup(*opt), *v; k[eq-*opt] = '\0'; v = &k[eq-*opt + 1]; if (CF.verbose) fprintf(stderr,"setting %s %s\n", k, v); kr = rd_kafka_conf_set(conf, k, v, errstr, sizeof(errstr)); if (kr != RD_KAFKA_CONF_OK) { fprintf(stderr,"error: rd_kafka_conf_set: %s %s => %s\n", k, v, errstr); goto done; } free(k); } /* set up topic options */ topic_conf = rd_kafka_topic_conf_new(); opt=NULL; while( (opt=(char **)utarray_next(CF.rdkafka_topic_options,opt))) { char *eq = strchr(*opt,'='); if (eq == NULL) { fprintf(stderr,"error: specify rdkafka topic params as key=value\n"); goto done; } char *k = strdup(*opt), *v; k[eq-*opt] = '\0'; v = &k[eq-*opt + 1]; if (CF.verbose) fprintf(stderr,"setting %s %s\n", k, v); kr = rd_kafka_topic_conf_set(topic_conf, k, v, errstr, sizeof(errstr)); if (kr != RD_KAFKA_CONF_OK) { fprintf(stderr,"error: rd_kafka_conf_set: %s %s => %s\n", k, v, errstr); goto done; } free(k); } k = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); if (k == NULL) { fprintf(stderr, "rd_kafka_new: 
%s\n", errstr); goto done; } if (rd_kafka_brokers_add(k, CF.broker) < 1) { fprintf(stderr, "invalid broker\n"); goto done; } t = rd_kafka_topic_new(k, CF.topic, topic_conf); while (CF.shutdown == 0) { len = nn_recv(CF.egress_socket_pull, buf, MAX_BUF, 0); if (len < 0) { fprintf(stderr,"nn_recv: %s\n", nn_strerror(errno)); goto done; } rc = rd_kafka_produce(t, partition, RD_KAFKA_MSG_F_COPY, buf, len, key, keylen, NULL); if (rc == -1) { fprintf(stderr,"rd_kafka_produce: %s %s\n", rd_kafka_err2str( rd_kafka_errno2err(errno)), ((errno == ENOBUFS) ? "(backpressure)" : "")); goto done; } // cause rdkafka to invoke optional callbacks (msg delivery reports or error) if ((++count % 1000) == 0) rd_kafka_poll(k, 0); if (thread_id == 0) { /* only emit these stats from the first worker thread (not thread safe) */ ts_add(CF.kaf_bytes_ts, CF.now, &len); ts_add(CF.kaf_msgs_ts, CF.now, NULL); } } rc = 0; done: CF.shutdown = 1; return NULL; }