/**
 * producer_tick
 *
 * This function is responsible for ticking the librdkafka reactor so we can
 * get feedback from the librdkafka threads back into the Ruby environment
 *
 * @param self VALUE the Ruby producer instance
 * @param timeout VALUE A Ruby FixNum of how many ms we should wait on librdkafka
 */
static VALUE producer_tick(VALUE self, VALUE timeout)
{
	hermann_conf_t *conf = NULL;
	long timeout_ms = 0;
	int events = 0;

	if (Qnil != timeout) {
		timeout_ms = rb_num2int(timeout);
	}
	else {
		rb_raise(rb_eArgError, "Cannot call `tick` with a nil timeout!\n");
	}

	Data_Get_Struct(self, hermann_conf_t, conf);

	/*
	 * if the producerConfig is not initialized then we never properly called
	 * producer_push_single, so why are we ticking?
	 */
	if (!conf->isInitialized) {
		rb_raise(rb_eRuntimeError, "Cannot call `tick` without having ever sent a message\n");
	}

	events = rd_kafka_poll(conf->rk, timeout_ms);

	if (conf->isErrored) {
		/* conf->error is runtime data; pass it as an argument, never as the
		 * format string itself (a '%' in the message would be interpreted) */
		rb_raise(rb_eStandardError, "%s", conf->error);
	}

	return rb_int_new(events);
}
/**
 * producer_connect
 *
 * Verify connectivity to the Kafka cluster by issuing a metadata request,
 * initializing the underlying librdkafka handle first if needed.
 *
 * @param self VALUE the Ruby producer instance
 * @param timeout VALUE Ruby Fixnum of milliseconds to wait for metadata
 * @return Qtrue on success, Qfalse otherwise (error code stored in isErrored)
 */
static VALUE producer_connect(VALUE self, VALUE timeout)
{
	HermannInstanceConfig *producerConfig;
	rd_kafka_resp_err_t err;
	VALUE result = Qfalse;
	int timeout_ms = rb_num2int(timeout);
	struct rd_kafka_metadata *data = NULL;

	Data_Get_Struct(self, HermannInstanceConfig, producerConfig);

	if (!producerConfig->isInitialized) {
		producer_init_kafka(self, producerConfig);
	}

	err = rd_kafka_metadata(producerConfig->rk,
							0,
							producerConfig->rkt,
							&data,
							timeout_ms);
	TRACER("err: %s (%i)\n", rd_kafka_err2str(err), err);

	if (RD_KAFKA_RESP_ERR_NO_ERROR == err) {
		TRACER("brokers: %i, topics: %i\n",
				data->broker_cnt,
				data->topic_cnt);
		producerConfig->isConnected = 1;
		result = Qtrue;
	}
	else {
		producerConfig->isErrored = err;
	}

	/* On failure rd_kafka_metadata() leaves *data untouched (still NULL);
	 * only destroy metadata we actually received */
	if (NULL != data) {
		rd_kafka_metadata_destroy(data);
	}

	return result;
}
/**
 * producer_metadata
 *
 * Fetch cluster metadata (optionally scoped to a single topic) and return it
 * as a Ruby Hash.
 *
 * @param self VALUE the Ruby producer instance
 * @param topicStr VALUE Ruby String naming a topic, or nil for all topics
 * @param timeout VALUE Ruby Fixnum of milliseconds to wait for the request
 * @return a Ruby Hash of metadata; raises RuntimeError on request failure
 */
static VALUE producer_metadata(VALUE self, VALUE topicStr, VALUE timeout)
{
	HermannInstanceConfig *producerConfig;
	rd_kafka_resp_err_t err;
	hermann_metadata_ctx_t md_context;
	VALUE result;

	Data_Get_Struct(self, HermannInstanceConfig, producerConfig);

	if (!producerConfig->isInitialized) {
		producer_init_kafka(self, producerConfig);
	}

	md_context.rk = producerConfig->rk;
	md_context.timeout_ms = rb_num2int(timeout);

	if ( !NIL_P(topicStr) ) {
		Check_Type(topicStr, T_STRING);
		md_context.topic = rd_kafka_topic_new(producerConfig->rk,
												StringValuePtr(topicStr),
												NULL);
	} else {
		md_context.topic = NULL;
	}

	err = producer_metadata_request(&md_context);

	/* release the topic handle we created above on every path, otherwise it
	 * leaks (both on success and when we raise below) */
	if ( md_context.topic != NULL ) {
		rd_kafka_topic_destroy(md_context.topic);
	}

	if ( err != RD_KAFKA_RESP_ERR_NO_ERROR ) {
		// annoyingly, this is always a timeout error -- the rest rdkafka just jams onto STDERR
		rb_raise( rb_eRuntimeError, "%s", rd_kafka_err2str(err) );
	}

	result = producer_metadata_make_hash(md_context.data);
	rd_kafka_metadata_destroy(md_context.data);
	return result;
}
/*
 * Serialize `data` (a String) as a msgpack ext value tagged with `type`.
 * The type id must fit a signed 8-bit integer, per the msgpack spec.
 */
static VALUE Packer_write_ext(VALUE self, VALUE type, VALUE data)
{
    PACKER(self, pk);

    int id = rb_num2int(type);
    if(id > 127 || id < -128) {
        rb_raise(rb_eRangeError, "integer %d too big to convert to `signed char'", id);
    }

    StringValue(data);
    msgpack_packer_write_ext(pk, id, data);

    return self;
}
/*
 * Register a serializer for a Ruby class under an ext type id.
 *
 * Accepts either:
 *   register_type(0x7f, Time) {|obj| block... }        (2 args + block)
 *   register_type(0x7f, Time, :to_msgpack_ext)          (3 args)
 */
static VALUE Packer_register_type(int argc, VALUE* argv, VALUE self)
{
    PACKER(self, pk);

    VALUE serializer;
    VALUE serializer_arg;

    if(argc == 2) {
        /* register_type(0x7f, Time) {|obj| block... } */
        rb_need_block();
#ifdef HAVE_RB_BLOCK_LAMBDA
        serializer = rb_block_lambda();
#else
        /* MRI 1.8 */
        serializer = rb_block_proc();
#endif
        serializer_arg = serializer;
    } else if(argc == 3) {
        /* register_type(0x7f, Time, :to_msgpack_ext) */
        serializer_arg = argv[2];
        serializer = rb_funcall(serializer_arg, rb_intern("to_proc"), 0);
    } else {
        rb_raise(rb_eArgError, "wrong number of arguments (%d for 2..3)", argc);
    }

    int id = rb_num2int(argv[0]);
    if(id > 127 || id < -128) {
        rb_raise(rb_eRangeError, "integer %d too big to convert to `signed char'", id);
    }

    VALUE klass = argv[1];
    if(rb_type(klass) != T_CLASS) {
        rb_raise(rb_eArgError, "expected Class but found %s.", rb_obj_classname(klass));
    }

    msgpack_packer_ext_registry_put(&pk->ext_registry, klass, id, serializer, serializer_arg);

    return Qnil;
}
/*
 * Register a deserializer for an ext type id.
 *
 * Accepts either:
 *   register_type(0x7f) {|data| block... }                (1 arg + block)
 *   register_type(0x7f, Time, :from_msgpack_ext)          (3 args)
 */
static VALUE Unpacker_register_type(int argc, VALUE* argv, VALUE self)
{
    UNPACKER(self, uk);

    VALUE deserializer;
    VALUE deserializer_arg;
    VALUE klass;

    if(argc == 1) {
        /* register_type(0x7f) {|data| block... } */
        rb_need_block();
#ifdef HAVE_RB_BLOCK_LAMBDA
        deserializer = rb_block_lambda();
#else
        /* MRI 1.8 */
        deserializer = rb_block_proc();
#endif
        deserializer_arg = deserializer;
        klass = Qnil;
    } else if(argc == 3) {
        /* register_type(0x7f, Time, :from_msgpack_ext) */
        klass = argv[1];
        deserializer_arg = argv[2];
        deserializer = rb_obj_method(klass, deserializer_arg);
    } else {
        rb_raise(rb_eArgError, "wrong number of arguments (%d for 1 or 3)", argc);
    }

    int id = rb_num2int(argv[0]);
    if(id > 127 || id < -128) {
        rb_raise(rb_eRangeError, "integer %d too big to convert to `signed char'", id);
    }

    msgpack_unpacker_ext_registry_put(&uk->ext_registry, klass, id, deserializer, deserializer_arg);

    return Qnil;
}
/**
 * producer_connect
 *
 * Verify connectivity to the Kafka cluster by issuing a metadata request,
 * initializing the underlying librdkafka handle first if needed.
 *
 * @param self VALUE the Ruby producer instance
 * @param timeout VALUE Ruby Fixnum of milliseconds to wait for metadata
 * @return Qtrue on success, Qfalse otherwise (error code stored in isErrored)
 */
static VALUE producer_connect(VALUE self, VALUE timeout)
{
	HermannInstanceConfig *producerConfig;
	hermann_metadata_ctx_t ctx;
	rd_kafka_resp_err_t err;
	VALUE connected = Qfalse;

	Data_Get_Struct(self, HermannInstanceConfig, producerConfig);

	if (!producerConfig->isInitialized) {
		producer_init_kafka(self, producerConfig);
	}

	/* request metadata for all topics (NULL topic) */
	ctx.rk = producerConfig->rk;
	ctx.topic = NULL;
	ctx.data = NULL;
	ctx.timeout_ms = rb_num2int(timeout);

	err = producer_metadata_request(&ctx);
	TRACER("err: %s (%i)\n", rd_kafka_err2str(err), err);

	if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
		producerConfig->isErrored = err;
	}
	else {
		TRACER("brokers: %i, topics: %i\n",
				ctx.data->broker_cnt,
				ctx.data->topic_cnt);
		producerConfig->isConnected = 1;
		connected = Qtrue;
	}

	/* metadata is only populated on success */
	if (ctx.data) {
		rd_kafka_metadata_destroy(ctx.data);
	}

	return connected;
}
/* Spec helper: exercise rb_num2int() and hand the result back as a Ruby
 * Integer via LONG2NUM. */
static VALUE numeric_spec_rb_num2int(VALUE self, VALUE num)
{
    long converted = rb_num2int(num);
    return LONG2NUM(converted);
}