/**
 * producer_connect
 *
 * Connect to the Kafka cluster by issuing a metadata request and record
 * the outcome on the instance config.
 *
 * @param self    VALUE the Ruby producer instance
 * @param timeout VALUE Ruby integer, metadata request timeout in milliseconds
 *
 * @return Qtrue if the metadata request succeeded, Qfalse otherwise
 */
static VALUE producer_connect(VALUE self, VALUE timeout) {
	HermannInstanceConfig *producerConfig;
	rd_kafka_resp_err_t err;
	VALUE result = Qfalse;
	int timeout_ms = rb_num2int(timeout);
	struct rd_kafka_metadata *data = NULL;

	Data_Get_Struct(self, HermannInstanceConfig, producerConfig);

	if (!producerConfig->isInitialized) {
		producer_init_kafka(self, producerConfig);
	}

	err = rd_kafka_metadata(producerConfig->rk,
							0,
							producerConfig->rkt,
							&data,
							timeout_ms);
	TRACER("err: %s (%i)\n", rd_kafka_err2str(err), err);

	if (RD_KAFKA_RESP_ERR_NO_ERROR == err) {
		TRACER("brokers: %i, topics: %i\n",
				data->broker_cnt,
				data->topic_cnt);
		producerConfig->isConnected = 1;
		result = Qtrue;
	}
	else {
		producerConfig->isErrored = err;
	}

	/* `data` is only populated on success; rd_kafka_metadata_destroy() is
	 * not documented as NULL-safe, so guard the call on the error path. */
	if (NULL != data) {
		rd_kafka_metadata_destroy(data);
	}

	return result;
}
/**
 * producer_metadata
 *
 * Issue a metadata request, optionally scoped to a single topic, and return
 * the result as a Ruby Hash.
 *
 * @param self     VALUE the Ruby producer instance
 * @param topicStr VALUE Ruby String naming the topic to query, or nil for
 *                 metadata on all topics
 * @param timeout  VALUE Ruby integer, request timeout in milliseconds
 *
 * @return VALUE a Ruby Hash built by producer_metadata_make_hash()
 * @raise  RuntimeError if the metadata request fails (typically a timeout)
 */
static VALUE producer_metadata(VALUE self, VALUE topicStr, VALUE timeout) {
	HermannInstanceConfig *producerConfig;
	rd_kafka_resp_err_t err;
	hermann_metadata_ctx_t md_context;
	VALUE result;

	Data_Get_Struct(self, HermannInstanceConfig, producerConfig);

	if (!producerConfig->isInitialized) {
		producer_init_kafka(self, producerConfig);
	}

	md_context.rk = producerConfig->rk;
	md_context.timeout_ms = rb_num2int(timeout);

	if (!NIL_P(topicStr)) {
		Check_Type(topicStr, T_STRING);
		md_context.topic = rd_kafka_topic_new(producerConfig->rk,
											  StringValuePtr(topicStr),
											  NULL);
	}
	else {
		md_context.topic = NULL;
	}

	err = producer_metadata_request(&md_context);

	/* The topic handle is only needed for the request itself. Release it
	 * on every path, and do so *before* rb_raise() longjmps out of this
	 * frame — otherwise the handle is leaked on failure. */
	if (NULL != md_context.topic) {
		rd_kafka_topic_destroy(md_context.topic);
	}

	if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
		/* annoyingly, this is always a timeout error -- the rest rdkafka
		 * just jams onto STDERR */
		rb_raise(rb_eRuntimeError, "%s", rd_kafka_err2str(err));
	}

	result = producer_metadata_make_hash(md_context.data);
	rd_kafka_metadata_destroy(md_context.data);
	return result;
}
/**
 * producer_connect
 *
 * Connect to the Kafka cluster by performing a metadata request through the
 * shared hermann metadata context, recording success or the librdkafka error
 * code on the instance config.
 *
 * @param self    VALUE the Ruby producer instance
 * @param timeout VALUE Ruby integer, metadata request timeout in milliseconds
 *
 * @return Qtrue when the metadata request succeeded, Qfalse otherwise
 */
static VALUE producer_connect(VALUE self, VALUE timeout) {
	HermannInstanceConfig *config;
	hermann_metadata_ctx_t ctx;
	rd_kafka_resp_err_t err;
	VALUE connected = Qfalse;

	Data_Get_Struct(self, HermannInstanceConfig, config);

	if (!config->isInitialized) {
		producer_init_kafka(self, config);
	}

	/* Query all topics (no topic handle) through the shared context. */
	ctx.rk = config->rk;
	ctx.topic = NULL;
	ctx.data = NULL;
	ctx.timeout_ms = rb_num2int(timeout);

	err = producer_metadata_request(&ctx);
	TRACER("err: %s (%i)\n", rd_kafka_err2str(err), err);

	if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
		config->isErrored = err;
	}
	else {
		TRACER("brokers: %i, topics: %i\n",
				ctx.data->broker_cnt,
				ctx.data->topic_cnt);
		config->isConnected = 1;
		connected = Qtrue;
	}

	if (NULL != ctx.data) {
		rd_kafka_metadata_destroy(ctx.data);
	}

	return connected;
}
/**
 * producer_push_single
 *
 * Enqueue a single message for asynchronous delivery via librdkafka.
 *
 * @param self VALUE the Ruby producer instance
 * @param message VALUE the ruby String containing the outgoing message.
 * @param topic VALUE the ruby String containing the topic to use for the
 *              outgoing message.
 * @param result VALUE the Hermann::Result object to be fulfilled when the
 *               push completes, or nil
 *
 * @return VALUE self
 * @raise  ArgumentError when topic is nil or empty
 * @raise  RuntimeError when the topic handle cannot be created
 * @raise  NoMemoryError when the delivery context cannot be allocated
 */
static VALUE producer_push_single(VALUE self, VALUE message, VALUE topic, VALUE result) {
	HermannInstanceConfig *producerConfig;
	/* Context pointer, pointing to `result`, for the librdkafka delivery
	 * callback. Ownership transfers to the callback only on a successful
	 * enqueue; on every failure path it must be freed here. */
	hermann_push_ctx_t *delivery_ctx = NULL;
	rd_kafka_topic_t *rkt = NULL;

	TRACER("self: %p, message: %p, result: %p)\n", self, message, result);

	Data_Get_Struct(self, HermannInstanceConfig, producerConfig);

	TRACER("producerConfig: %p\n", producerConfig);

	/* Validate before allocating anything so rb_raise() cannot leak the
	 * delivery context (rb_raise longjmps out of this frame). */
	if ((Qnil == topic) || (0 == RSTRING_LEN(topic))) {
		rb_raise(rb_eArgError, "Topic cannot be empty");
		return self;
	}

	if (!producerConfig->isInitialized) {
		producer_init_kafka(self, producerConfig);
	}

	TRACER("kafka initialized\n");

	rkt = rd_kafka_topic_new(producerConfig->rk,
							 RSTRING_PTR(topic),
							 NULL);
	if (NULL == rkt) {
		rb_raise(rb_eRuntimeError, "Could not construct a topic structure");
		return self;
	}

	delivery_ctx = malloc(sizeof *delivery_ctx);
	if (NULL == delivery_ctx) {
		rd_kafka_topic_destroy(rkt);
		rb_raise(rb_eNoMemError, "Could not allocate delivery context");
		return self;
	}
	delivery_ctx->producer = producerConfig;
	delivery_ctx->result = NULL;

	/* Only pass result through if it's non-nil */
	if (Qnil != result) {
		delivery_ctx->result = result;
		TRACER("setting result: %p\n", result);
	}

	/* RSTRING_LEN is a long; cast to match the format specifier. */
	TRACER("rd_kafka_produce() message of %li bytes\n",
			(long)RSTRING_LEN(message));

	/* Send/Produce message. */
	if (-1 == rd_kafka_produce(rkt,
							   producerConfig->partition,
							   RD_KAFKA_MSG_F_COPY,
							   RSTRING_PTR(message),
							   RSTRING_LEN(message),
							   NULL,
							   0,
							   delivery_ctx)) {
		/* Report the topic we actually produced to (the local `rkt`), not
		 * producerConfig->rkt, which may be NULL or a different topic. */
		fprintf(stderr, "%% Failed to produce to topic %s partition %i: %s\n",
				rd_kafka_topic_name(rkt),
				producerConfig->partition,
				rd_kafka_err2str(rd_kafka_errno2err(errno)));
		/* The delivery callback never fires for a failed enqueue, so the
		 * context must be reclaimed here or it leaks. */
		free(delivery_ctx);
		/* TODO: raise a Ruby exception here, requires a test though */
	}

	rd_kafka_topic_destroy(rkt);

	TRACER("returning\n");
	return self;
}