//get topics + partition count
void kafka_get_topics(rd_kafka_t *r, zval *return_value)
{
    int i;
    const struct rd_kafka_metadata *meta = NULL;
    if (r == NULL)
    {
        if (log_level)
        {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_ERR, "phpkafka - no connection to get topics");
        }
        return;
    }
    if (RD_KAFKA_RESP_ERR_NO_ERROR == rd_kafka_metadata(r, 1, NULL, &meta, 200))
    {
        for (i = 0; i < meta->topic_cnt; ++i)
        {
            add_assoc_long(
                return_value,
                meta->topics[i].topic,
                (long) meta->topics[i].partition_cnt
            );
        }
    }
    if (meta)
        rd_kafka_metadata_destroy(meta);
}
static int kafka_partition_count(rd_kafka_t *r, const char *topic)
{
    rd_kafka_topic_t *rkt;
    rd_kafka_topic_conf_t *conf;
    int i; //C89 compliant declaration

    //connect as consumer if required
    if (r == NULL)
    {
        if (log_level)
        {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_ERR, "phpkafka - no connection to get partition count for topic: %s", topic);
        }
        return -1;
    }
    /* Topic configuration */
    conf = rd_kafka_topic_conf_new();
    /* Create topic */
    rkt = rd_kafka_topic_new(r, topic, conf);
    //the metadata API requires a const struct rd_kafka_metadata ** out-parameter
    const struct rd_kafka_metadata *meta = NULL;
    if (RD_KAFKA_RESP_ERR_NO_ERROR == rd_kafka_metadata(r, 0, rkt, &meta, 200))
        i = (int) meta->topics->partition_cnt;
    else
        i = 0;
    if (meta)
        rd_kafka_metadata_destroy(meta);
    rd_kafka_topic_destroy(rkt);
    return i;
}
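For reference, the query/free pattern the two helpers above rely on can be reduced to a minimal, self-contained sketch; the broker address and the 1-second timeout here are illustrative assumptions, not values taken from the sources above.

/* Minimal sketch (not from the sources above): connect, fetch metadata for
 * all topics, print partition counts, free everything. */
#include <stdio.h>
#include <librdkafka/rdkafka.h>

static int list_partition_counts(const char *brokers)
{
    char errstr[512];
    rd_kafka_conf_t *conf = rd_kafka_conf_new();
    rd_kafka_t *rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
    const struct rd_kafka_metadata *meta = NULL;
    int i;

    if (!rk) {
        fprintf(stderr, "rd_kafka_new failed: %s\n", errstr);
        return -1;
    }
    if (rd_kafka_brokers_add(rk, brokers) == 0) {
        fprintf(stderr, "no valid brokers in \"%s\"\n", brokers);
        rd_kafka_destroy(rk);
        return -1;
    }
    /* all_topics=1, no topic handle, 1s timeout */
    if (rd_kafka_metadata(rk, 1, NULL, &meta, 1000) == RD_KAFKA_RESP_ERR_NO_ERROR) {
        for (i = 0; i < meta->topic_cnt; ++i)
            printf("%s: %d partitions\n",
                   meta->topics[i].topic, meta->topics[i].partition_cnt);
        rd_kafka_metadata_destroy(meta);
    }
    rd_kafka_destroy(rk);
    return 0;
}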
static VALUE producer_connect(VALUE self, VALUE timeout) {
    HermannInstanceConfig *producerConfig;
    rd_kafka_resp_err_t err;
    VALUE result = Qfalse;
    int timeout_ms = rb_num2int(timeout);
    const struct rd_kafka_metadata *data = NULL; /* must be const to match rd_kafka_metadata() */

    Data_Get_Struct(self, HermannInstanceConfig, producerConfig);

    if (!producerConfig->isInitialized) {
        producer_init_kafka(self, producerConfig);
    }

    err = rd_kafka_metadata(producerConfig->rk, 0, producerConfig->rkt,
                            &data, timeout_ms);
    TRACER("err: %s (%i)\n", rd_kafka_err2str(err), err);

    if (RD_KAFKA_RESP_ERR_NO_ERROR == err) {
        TRACER("brokers: %i, topics: %i\n", data->broker_cnt, data->topic_cnt);
        producerConfig->isConnected = 1;
        result = Qtrue;
    }
    else {
        producerConfig->isErrored = err;
    }

    /* Only destroy metadata that was actually returned */
    if (data)
        rd_kafka_metadata_destroy(data);

    return result;
}
int main_0035_api_version (int argc, char **argv) {
    rd_kafka_t *rk;
    rd_kafka_conf_t *conf;
    const struct rd_kafka_metadata *metadata;
    rd_kafka_resp_err_t err;
    test_timing_t t_meta;

    test_conf_init(&conf, NULL, 30);
    test_conf_set(conf, "socket.timeout.ms", "12000");
    rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

    TEST_SAY("Querying for metadata\n");
    TIMING_START(&t_meta, "metadata()");
    err = rd_kafka_metadata(rk, 0, NULL, &metadata, 10*1000);
    TIMING_STOP(&t_meta);
    if (err)
        TEST_FAIL("metadata() failed: %s", rd_kafka_err2str(err));

    if (TIMING_DURATION(&t_meta) / 1000 > 11*1000)
        TEST_FAIL("metadata() took too long: %.3fms",
                  (float)TIMING_DURATION(&t_meta) / 1000.0f);

    rd_kafka_metadata_destroy(metadata);

    TEST_SAY("Metadata succeeded\n");

    rd_kafka_destroy(rk);

    return 0;
}
static VALUE producer_metadata(VALUE self, VALUE topicStr, VALUE timeout) {
    HermannInstanceConfig *producerConfig;
    rd_kafka_resp_err_t err;
    hermann_metadata_ctx_t md_context;
    VALUE result;

    Data_Get_Struct(self, HermannInstanceConfig, producerConfig);

    if (!producerConfig->isInitialized) {
        producer_init_kafka(self, producerConfig);
    }

    md_context.rk = producerConfig->rk;
    md_context.timeout_ms = rb_num2int(timeout);

    if (!NIL_P(topicStr)) {
        Check_Type(topicStr, T_STRING);
        md_context.topic = rd_kafka_topic_new(producerConfig->rk,
                                              StringValuePtr(topicStr), NULL);
    }
    else {
        md_context.topic = NULL;
    }

    err = producer_metadata_request(&md_context);

    if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
        /* Annoyingly, this is almost always reported as a timeout error;
         * the remaining detail rdkafka just dumps onto STDERR. */
        rb_raise(rb_eRuntimeError, "%s", rd_kafka_err2str(err));
    }
    else {
        result = producer_metadata_make_hash(md_context.data);
        rd_kafka_metadata_destroy(md_context.data);
        return result;
    }
}
void rd_kafka_op_destroy (rd_kafka_op_t *rko) {

    /* Decrease refcount on rkbuf to eventually rd_free the shared buffer */
    if (rko->rko_rkbuf)
        rd_kafka_buf_handle_op(rko, RD_KAFKA_RESP_ERR__DESTROY);
    else if (rko->rko_payload && (rko->rko_flags & RD_KAFKA_OP_F_FREE)) {
        if (rko->rko_free_cb)
            rko->rko_free_cb(rko->rko_payload);
        else
            rd_free(rko->rko_payload);
    }

    if (rko->rko_rkt)
        rd_kafka_topic_destroy0(rd_kafka_topic_a2s(rko->rko_rkt));

    if (rko->rko_rktp)
        rd_kafka_toppar_destroy(rko->rko_rktp);

    if (rko->rko_metadata)
        rd_kafka_metadata_destroy(rko->rko_metadata);

    if (rko->rko_replyq)
        rd_kafka_q_destroy(rko->rko_replyq);

    if (rd_atomic32_sub(&rd_kafka_op_cnt, 1) < 0)
        rd_kafka_assert(NULL, !*"rd_kafka_op_cnt < 0");

    rd_free(rko);
}
static VALUE producer_connect(VALUE self, VALUE timeout) {
    HermannInstanceConfig *producerConfig;
    rd_kafka_resp_err_t err;
    VALUE result = Qfalse;
    hermann_metadata_ctx_t md_context;

    Data_Get_Struct(self, HermannInstanceConfig, producerConfig);

    if (!producerConfig->isInitialized) {
        producer_init_kafka(self, producerConfig);
    }

    md_context.rk = producerConfig->rk;
    md_context.topic = NULL;
    md_context.data = NULL;
    md_context.timeout_ms = rb_num2int(timeout);

    err = producer_metadata_request(&md_context);
    TRACER("err: %s (%i)\n", rd_kafka_err2str(err), err);

    if (RD_KAFKA_RESP_ERR_NO_ERROR == err) {
        TRACER("brokers: %i, topics: %i\n",
               md_context.data->broker_cnt, md_context.data->topic_cnt);
        producerConfig->isConnected = 1;
        result = Qtrue;
    }
    else {
        producerConfig->isErrored = err;
    }

    if (md_context.data)
        rd_kafka_metadata_destroy(md_context.data);

    return result;
}
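The two Hermann snippets above depend on a small context struct and a request wrapper whose definitions are not shown here. The following is a hypothetical reconstruction inferred purely from the call sites; the field names and the wrapper body are assumptions, not Hermann's actual code.

/* Hypothetical reconstruction -- inferred from the call sites above,
 * not taken from Hermann's sources. */
typedef struct {
    rd_kafka_t *rk;                        /* connected librdkafka handle */
    rd_kafka_topic_t *topic;               /* optional: limit metadata to one topic */
    const struct rd_kafka_metadata *data;  /* filled in on success */
    int timeout_ms;
} hermann_metadata_ctx_t;

static rd_kafka_resp_err_t producer_metadata_request(hermann_metadata_ctx_t *ctx)
{
    /* Request all topics only when no specific topic handle was supplied */
    return rd_kafka_metadata(ctx->rk, ctx->topic ? 0 : 1,
                             ctx->topic, &ctx->data, ctx->timeout_ms);
}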
int main (int argc, char **argv) {
    rd_kafka_topic_t *rkt;
    char *brokers = "localhost:9092";
    char mode = 'C';
    char *topic = NULL;
    int partition = RD_KAFKA_PARTITION_UA;
    int opt;
    rd_kafka_conf_t *conf;
    rd_kafka_topic_conf_t *topic_conf;
    char errstr[512];
    const char *debug = NULL;
    int64_t start_offset = 0;
    int report_offsets = 0;
    int do_conf_dump = 0;

    quiet = !isatty(STDIN_FILENO);

    /* Kafka configuration */
    conf = rd_kafka_conf_new();

    /* Topic configuration */
    topic_conf = rd_kafka_topic_conf_new();

    while ((opt = getopt(argc, argv, "PCLt:p:b:z:qd:o:eX:A")) != -1) {
        switch (opt) {
        case 'P':
        case 'C':
        case 'L':
            mode = opt;
            break;
        case 't':
            topic = optarg;
            break;
        case 'p':
            partition = atoi(optarg);
            break;
        case 'b':
            brokers = optarg;
            break;
        case 'z':
            if (rd_kafka_conf_set(conf, "compression.codec", optarg,
                                  errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "%% %s\n", errstr);
                exit(1);
            }
            break;
        case 'o':
            if (!strcmp(optarg, "end"))
                start_offset = RD_KAFKA_OFFSET_END;
            else if (!strcmp(optarg, "beginning"))
                start_offset = RD_KAFKA_OFFSET_BEGINNING;
            else if (!strcmp(optarg, "stored"))
                start_offset = RD_KAFKA_OFFSET_STORED;
            else if (!strcmp(optarg, "report"))
                report_offsets = 1;
            else
                start_offset = strtoll(optarg, NULL, 10);
            break;
        case 'e':
            exit_eof = 1;
            break;
        case 'd':
            debug = optarg;
            break;
        case 'q':
            quiet = 1;
            break;
        case 'A':
            output = OUTPUT_RAW;
            break;
        case 'X':
        {
            char *name, *val;
            rd_kafka_conf_res_t res;

            if (!strcmp(optarg, "list") || !strcmp(optarg, "help")) {
                rd_kafka_conf_properties_show(stdout);
                exit(0);
            }

            if (!strcmp(optarg, "dump")) {
                do_conf_dump = 1;
                continue;
            }

            name = optarg;
            if (!(val = strchr(name, '='))) {
                fprintf(stderr, "%% Expected "
                        "-X property=value, not %s\n", name);
                exit(1);
            }

            *val = '\0';
            val++;

            res = RD_KAFKA_CONF_UNKNOWN;
            /* Try "topic." prefixed properties on topic
             * conf first, and then fall through to global if
             * it didn't match a topic configuration property. */
            if (!strncmp(name, "topic.", strlen("topic.")))
                res = rd_kafka_topic_conf_set(topic_conf,
                                              name + strlen("topic."),
                                              val, errstr, sizeof(errstr));

            if (res == RD_KAFKA_CONF_UNKNOWN)
                res = rd_kafka_conf_set(conf, name, val,
                                        errstr, sizeof(errstr));

            if (res != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "%% %s\n", errstr);
                exit(1);
            }
        }
        break;

        default:
            goto usage;
        }
    }

    if (do_conf_dump) {
        const char **arr;
        size_t cnt;
        int pass;

        for (pass = 0; pass < 2; pass++) {
            int i;

            if (pass == 0) {
                arr = rd_kafka_conf_dump(conf, &cnt);
                printf("# Global config\n");
            } else {
                printf("# Topic config\n");
                arr = rd_kafka_topic_conf_dump(topic_conf, &cnt);
            }

            for (i = 0; i < cnt; i += 2)
                printf("%s = %s\n", arr[i], arr[i+1]);

            printf("\n");

            rd_kafka_conf_dump_free(arr, cnt);
        }

        exit(0);
    }

    if (optind != argc || (mode != 'L' && !topic)) {
    usage:
        fprintf(stderr,
                "Usage: %s -C|-P|-L -t <topic> "
                "[-p <partition>] [-b <host1:port1,host2:port2,..>]\n"
                "\n"
                "librdkafka version %s (0x%08x)\n"
                "\n"
                " Options:\n"
                "  -C | -P         Consumer or Producer mode\n"
                "  -L              Metadata list mode\n"
                "  -t <topic>      Topic to fetch / produce\n"
                "  -p <num>        Partition (random partitioner)\n"
                "  -b <brokers>    Broker address (localhost:9092)\n"
                "  -z <codec>      Enable compression:\n"
                "                  none|gzip|snappy\n"
                "  -o <offset>     Start offset (consumer)\n"
                "  -o report       Report message offsets (producer)\n"
                "  -e              Exit consumer when last message\n"
                "                  in partition has been received.\n"
                "  -d [facs..]     Enable debugging contexts:\n"
                "  -q              Be quiet\n"
                "  -A              Raw payload output (consumer)\n"
                "                  %s\n"
                "  -X <prop=name>  Set arbitrary librdkafka "
                "configuration property\n"
                "                  Properties prefixed with \"topic.\" "
                "will be set on topic object.\n"
                "                  Use '-X list' to see the full list\n"
                "                  of supported properties.\n"
                "\n"
                " In Consumer mode:\n"
                "  writes fetched messages to stdout\n"
                " In Producer mode:\n"
                "  reads messages from stdin and sends to broker\n"
                " In List mode:\n"
                "  queries broker for metadata information, "
                "topic is optional.\n"
                "\n"
                "\n"
                "\n",
                argv[0],
                rd_kafka_version_str(), rd_kafka_version(),
                RD_KAFKA_DEBUG_CONTEXTS);
        exit(1);
    }

    signal(SIGINT, stop);
    signal(SIGUSR1, sig_usr1);

    if (debug &&
        rd_kafka_conf_set(conf, "debug", debug, errstr, sizeof(errstr)) !=
        RD_KAFKA_CONF_OK) {
        fprintf(stderr, "%% Debug configuration failed: %s: %s\n",
                errstr, debug);
        exit(1);
    }

    if (mode == 'P') {
        /*
         * Producer
         */
        char buf[2048];
        int sendcnt = 0;

        /* Set up a message delivery report callback.
         * It will be called once for each message, either on successful
         * delivery to broker, or upon failure to deliver to broker. */

        /* If offset reporting (-o report) is enabled, use the
         * richer dr_msg_cb instead. */
        if (report_offsets) {
            rd_kafka_topic_conf_set(topic_conf,
                                    "produce.offset.report",
                                    "true", errstr, sizeof(errstr));
            rd_kafka_conf_set_dr_msg_cb(conf, msg_delivered2);
        } else
            rd_kafka_conf_set_dr_cb(conf, msg_delivered);

        /* Create Kafka handle */
        if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                                errstr, sizeof(errstr)))) {
            fprintf(stderr, "%% Failed to create new producer: %s\n",
                    errstr);
            exit(1);
        }

        /* Set logger */
        rd_kafka_set_logger(rk, logger);
        rd_kafka_set_log_level(rk, LOG_DEBUG);

        /* Add brokers */
        if (rd_kafka_brokers_add(rk, brokers) == 0) {
            fprintf(stderr, "%% No valid brokers specified\n");
            exit(1);
        }

        /* Create topic */
        rkt = rd_kafka_topic_new(rk, topic, topic_conf);

        if (!quiet)
            fprintf(stderr, "%% Type stuff and hit enter to send\n");

        while (run && fgets(buf, sizeof(buf), stdin)) {
            size_t len = strlen(buf);
            if (buf[len-1] == '\n')
                buf[--len] = '\0';

            /* Send/Produce message. */
            if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
                                 /* Payload and length */
                                 buf, len,
                                 /* Optional key and its length */
                                 NULL, 0,
                                 /* Message opaque, provided in
                                  * delivery report callback as
                                  * msg_opaque. */
                                 NULL) == -1) {
                fprintf(stderr,
                        "%% Failed to produce to topic %s "
                        "partition %i: %s\n",
                        rd_kafka_topic_name(rkt), partition,
                        rd_kafka_err2str(rd_kafka_errno2err(errno)));
                /* Poll to handle delivery reports */
                rd_kafka_poll(rk, 0);
                continue;
            }

            if (!quiet)
                fprintf(stderr, "%% Sent %zd bytes to topic "
                        "%s partition %i\n",
                        len, rd_kafka_topic_name(rkt), partition);
            sendcnt++;
            /* Poll to handle delivery reports */
            rd_kafka_poll(rk, 0);
        }

        /* Poll to handle delivery reports */
        rd_kafka_poll(rk, 0);

        /* Wait for messages to be delivered */
        while (run && rd_kafka_outq_len(rk) > 0)
            rd_kafka_poll(rk, 100);

        /* Destroy the handle */
        rd_kafka_destroy(rk);

    } else if (mode == 'C') {
        /*
         * Consumer
         */

        /* Create Kafka handle */
        if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf,
                                errstr, sizeof(errstr)))) {
            fprintf(stderr, "%% Failed to create new consumer: %s\n",
                    errstr);
            exit(1);
        }

        /* Set logger */
        rd_kafka_set_logger(rk, logger);
        rd_kafka_set_log_level(rk, LOG_DEBUG);

        /* Add brokers */
        if (rd_kafka_brokers_add(rk, brokers) == 0) {
            fprintf(stderr, "%% No valid brokers specified\n");
            exit(1);
        }

        /* Create topic */
        rkt = rd_kafka_topic_new(rk, topic, topic_conf);

        /* Start consuming */
        if (rd_kafka_consume_start(rkt, partition, start_offset) == -1) {
            fprintf(stderr, "%% Failed to start consuming: %s\n",
                    rd_kafka_err2str(rd_kafka_errno2err(errno)));
            exit(1);
        }

        while (run) {
            rd_kafka_message_t *rkmessage;

            /* Consume single message.
             * See rdkafka_performance.c for high speed
             * consuming of messages. */
            rkmessage = rd_kafka_consume(rkt, partition, 1000);
            if (!rkmessage) /* timeout */
                continue;

            msg_consume(rkmessage, NULL);

            /* Return message to rdkafka */
            rd_kafka_message_destroy(rkmessage);
        }

        /* Stop consuming */
        rd_kafka_consume_stop(rkt, partition);

        rd_kafka_topic_destroy(rkt);

        rd_kafka_destroy(rk);

    } else if (mode == 'L') {
        rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;

        /* Create Kafka handle */
        if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                                errstr, sizeof(errstr)))) {
            fprintf(stderr, "%% Failed to create new producer: %s\n",
                    errstr);
            exit(1);
        }

        /* Set logger */
        rd_kafka_set_logger(rk, logger);
        rd_kafka_set_log_level(rk, LOG_DEBUG);

        /* Add brokers */
        if (rd_kafka_brokers_add(rk, brokers) == 0) {
            fprintf(stderr, "%% No valid brokers specified\n");
            exit(1);
        }

        /* Create topic */
        if (topic)
            rkt = rd_kafka_topic_new(rk, topic, topic_conf);
        else
            rkt = NULL;

        while (run) {
            const struct rd_kafka_metadata *metadata;

            /* Fetch metadata */
            err = rd_kafka_metadata(rk, rkt ? 0 : 1, rkt,
                                    &metadata, 5000);
            if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
                fprintf(stderr, "%% Failed to acquire metadata: %s\n",
                        rd_kafka_err2str(err));
                run = 0;
                break;
            }

            metadata_print(topic, metadata);

            rd_kafka_metadata_destroy(metadata);
            run = 0;
        }

        /* Destroy the handle */
        rd_kafka_destroy(rk);

        /* Exit right away, don't wait for background cleanup, we haven't
         * done anything important anyway. */
        exit(err ? 2 : 0);
    }

    /* Let background threads clean up and terminate cleanly. */
    rd_kafka_wait_destroyed(2000);

    return 0;
}
void kafka_consume_all(rd_kafka_t *rk, zval *return_value,
    const char *topic, const char *offset, int item_count)
{
    char errstr[512];
    rd_kafka_topic_t *rkt;
    rd_kafka_topic_conf_t *conf;
    const struct rd_kafka_metadata *meta = NULL;
    rd_kafka_queue_t *rkqu = NULL;
    int current, p, i = 0;
    int32_t partition = 0;
    int64_t start;
    struct consume_cb_params cb_params = {item_count, return_value, NULL, 0, 0, 0};

    //check for NULL pointers, all arguments are required!
    if (rk == NULL || return_value == NULL || topic == NULL ||
            offset == NULL || strlen(offset) == 0)
        return;

    if (!strcmp(offset, "end"))
        start = RD_KAFKA_OFFSET_END;
    else if (!strcmp(offset, "beginning"))
        start = RD_KAFKA_OFFSET_BEGINNING;
    else if (!strcmp(offset, "stored"))
        start = RD_KAFKA_OFFSET_STORED;
    else
        start = strtoll(offset, NULL, 10);

    /* Topic configuration */
    conf = rd_kafka_topic_conf_new();

    /* Disable autocommit, queue_consume sets offsets automatically */
    if (rd_kafka_topic_conf_set(conf, "auto.commit.enable", "false",
            errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
    {
        if (log_level)
        {
            openlog("phpkafka", 0, LOG_USER);
            syslog(
                LOG_WARNING,
                "failed to turn autocommit off consuming %d messages (start offset %"PRId64") from topic %s: %s",
                item_count, start, topic, errstr
            );
        }
        cb_params.auto_commit = 1;
    }

    /* Create topic */
    rkt = rd_kafka_topic_new(rk, topic, conf);
    if (!rkt)
    {
        if (log_level)
        {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_INFO, "phpkafka - Failed to read %s from %"PRId64" (%s)",
                topic, start, offset);
        }
        return;
    }

    rkqu = rd_kafka_queue_new(rk);
    if (RD_KAFKA_RESP_ERR_NO_ERROR == rd_kafka_metadata(rk, 0, rkt, &meta, 5))
    {
        p = meta->topics->partition_cnt;
        cb_params.partition_ends = calloc(p, sizeof *cb_params.partition_ends);
        if (cb_params.partition_ends == NULL)
        {
            if (log_level)
            {
                openlog("phpkafka", 0, LOG_USER);
                syslog(LOG_INFO, "phpkafka - Failed to read %s from %"PRId64" (%s)",
                    topic, start, offset);
            }
            rd_kafka_metadata_destroy(meta);
            meta = NULL;
            rd_kafka_queue_destroy(rkqu);
            rd_kafka_topic_destroy(rkt);
            return;
        }
        cb_params.eop = p;
        for (i = 0; i < p; ++i)
        {
            partition = meta->topics[0].partitions[i].id;
            if (rd_kafka_consume_start_queue(rkt, partition, start, rkqu))
            {
                if (log_level)
                {
                    openlog("phpkafka", 0, LOG_USER);
                    syslog(LOG_ERR,
                        "Failed to start consuming topic %s [%"PRId32"]: %s",
                        topic, partition, offset
                    );
                }
                continue;
            }
        }
        while (cb_params.read_count && cb_params.eop)
            rd_kafka_consume_callback_queue(rkqu, 200, queue_consume, &cb_params);
        free(cb_params.partition_ends);
        cb_params.partition_ends = NULL;
        for (i = 0; i < p; ++i)
        {
            partition = meta->topics[0].partitions[i].id;
            rd_kafka_consume_stop(rkt, partition);
        }
        rd_kafka_metadata_destroy(meta);
        meta = NULL;
        rd_kafka_queue_destroy(rkqu);
        while (rd_kafka_outq_len(rk) > 0)
            rd_kafka_poll(rk, 50);
        rd_kafka_topic_destroy(rkt);
    }
    if (meta)
        rd_kafka_metadata_destroy(meta);
}
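Stripped of the PHP binding and logging, kafka_consume_all() follows the legacy queue-based consumer pattern sketched below; on_msg, the stop condition, and consume_n_messages are illustrative placeholders and not phpkafka code.

/* Sketch of the per-partition queue consumer flow used above. Assumes the
 * topic handle and partition count were already obtained via rd_kafka_metadata(). */
static void on_msg(rd_kafka_message_t *msg, void *opaque)
{
    int *remaining = opaque;
    if (!msg->err && *remaining > 0)
        --*remaining; /* real code would copy out msg->payload here */
}

static void consume_n_messages(rd_kafka_t *rk, rd_kafka_topic_t *rkt,
                               int partition_cnt, int n)
{
    rd_kafka_queue_t *rkqu = rd_kafka_queue_new(rk);
    int32_t p;

    /* Route every partition into one shared queue */
    for (p = 0; p < partition_cnt; ++p)
        rd_kafka_consume_start_queue(rkt, p, RD_KAFKA_OFFSET_BEGINNING, rkqu);

    /* Serve callbacks until n messages have been seen */
    while (n > 0)
        rd_kafka_consume_callback_queue(rkqu, 200, on_msg, &n);

    for (p = 0; p < partition_cnt; ++p)
        rd_kafka_consume_stop(rkt, p);
    rd_kafka_queue_destroy(rkqu);
}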
/**
 * @brief Get all partitions for topic and their beginning offsets, useful
 *        if we're consuming messages without knowing the actual partition beforehand
 * @param r rd_kafka_t* consumer connection
 * @param partitions long** should point to NULL, will be allocated here
 * @param topic const char* topic name
 * @return int 0 == metadata error, -2 == no connection, -1 == allocation error,
 *         all other values indicate success (number of elements in the array)
 */
int kafka_partition_offsets(rd_kafka_t *r, long **partitions, const char *topic)
{
    rd_kafka_topic_t *rkt = NULL;
    rd_kafka_topic_conf_t *conf = NULL;
    rd_kafka_queue_t *rkqu = NULL;
    struct consume_cb_params cb_params = {0, NULL, NULL, 0, 0, 0};
    int i = 0;
    //make life easier, 1 level of indirection...
    long *values = *partitions;

    //connect as consumer if required
    if (r == NULL)
    {
        if (log_level)
        {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_ERR, "phpkafka - no connection to get offsets of topic: %s", topic);
        }
        return -2;
    }

    /* Topic configuration */
    conf = rd_kafka_topic_conf_new();

    /* Create topic */
    rkt = rd_kafka_topic_new(r, topic, conf);
    rkqu = rd_kafka_queue_new(r);
    const struct rd_kafka_metadata *meta = NULL;
    if (RD_KAFKA_RESP_ERR_NO_ERROR == rd_kafka_metadata(r, 0, rkt, &meta, 5))
    {
        values = realloc(values, meta->topics->partition_cnt * sizeof *values);
        if (values == NULL)
        {
            *partitions = values; //realloc failed: values is NULL, any previous block is lost
            //free metadata, queue and topic, return error
            rd_kafka_metadata_destroy(meta);
            rd_kafka_queue_destroy(rkqu);
            rd_kafka_topic_destroy(rkt);
            return -1;
        }
        //we need eop to reach 0, if there are 4 partitions, start at 3 (0, 1, 2, 3)
        cb_params.eop = meta->topics->partition_cnt - 1;
        cb_params.partition_offset = values;
        for (i = 0; i < meta->topics->partition_cnt; ++i)
        {
            //initialize: set to -2 for the callback
            values[i] = -2;
            if (rd_kafka_consume_start_queue(rkt, meta->topics->partitions[i].id, RD_KAFKA_OFFSET_BEGINNING, rkqu))
            {
                if (log_level)
                {
                    openlog("phpkafka", 0, LOG_USER);
                    syslog(LOG_ERR,
                        "Failed to start consuming topic %s [%"PRId32"]",
                        topic, meta->topics->partitions[i].id
                    );
                }
                continue;
            }
        }
        //either eop reached 0, or the read errors >= nr of partitions
        //either way, we've consumed a message from each partition, and therefore, we're done
        while (cb_params.eop && cb_params.error_count < meta->topics->partition_cnt)
            rd_kafka_consume_callback_queue(rkqu, 100, offset_queue_consume, &cb_params);
        //stop consuming for all partitions
        for (i = 0; i < meta->topics->partition_cnt; ++i)
            rd_kafka_consume_stop(rkt, meta->topics[0].partitions[i].id);
        rd_kafka_queue_destroy(rkqu);
        //do we need this poll here?
        while (rd_kafka_outq_len(r) > 0)
            rd_kafka_poll(r, 5);
        //let's be sure to pass along the correct values here...
        *partitions = values;
        i = meta->topics->partition_cnt;
    }
    else
        rd_kafka_queue_destroy(rkqu);
    if (meta)
        rd_kafka_metadata_destroy(meta);
    rd_kafka_topic_destroy(rkt);
    return i;
}
/**
 * @brief Test that Metadata requests are retried properly when
 *        timing out due to high broker rtt.
 */
static void do_test_low_socket_timeout (const char *topic) {
    rd_kafka_t *rk;
    rd_kafka_conf_t *conf;
    rd_kafka_topic_t *rkt;
    rd_kafka_resp_err_t err;
    const struct rd_kafka_metadata *md;
    int res;

    mtx_init(&ctrl.lock, mtx_plain);
    cnd_init(&ctrl.cnd);

    TEST_SAY("Test Metadata request retries on timeout\n");

    test_conf_init(&conf, NULL, 60);
    test_conf_set(conf, "socket.timeout.ms", "1000");
    test_conf_set(conf, "socket.max.fails", "12345");
    test_conf_set(conf, "retry.backoff.ms", "5000");
    /* Avoid api version requests (with their own timeout) to get in
     * the way of our test */
    test_conf_set(conf, "api.version.request", "false");
    test_socket_enable(conf);
    test_curr->connect_cb = connect_cb;
    test_curr->is_fatal_cb = is_fatal_cb;

    rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
    rkt = test_create_producer_topic(rk, topic, NULL);

    TEST_SAY("Waiting for sockem connect..\n");
    mtx_lock(&ctrl.lock);
    while (!ctrl.skm)
        cnd_wait(&ctrl.cnd, &ctrl.lock);
    mtx_unlock(&ctrl.lock);

    TEST_SAY("Connected, fire off an undelayed metadata() to "
             "make sure connection is up\n");

    err = rd_kafka_metadata(rk, 0, rkt, &md, tmout_multip(2000));
    TEST_ASSERT(!err, "metadata(undelayed) failed: %s",
                rd_kafka_err2str(err));
    rd_kafka_metadata_destroy(md);

    if (thrd_create(&ctrl.thrd, ctrl_thrd_main, NULL) != thrd_success)
        TEST_FAIL("Failed to create sockem ctrl thread");

    set_delay(0, 3000); /* Takes effect immediately */

    /* After two retries, remove the delay, the third retry
     * should kick in and work. */
    set_delay(((1000 /*socket.timeout.ms*/ +
                5000 /*retry.backoff.ms*/) * 2) - 2000, 0);

    TEST_SAY("Calling metadata() again which should succeed after "
             "3 internal retries\n");

    /* Metadata should be returned after the third retry */
    err = rd_kafka_metadata(rk, 0, rkt, &md,
                            ((1000 /*socket.timeout.ms*/ +
                              5000 /*retry.backoff.ms*/) * 2) + 5000);
    TEST_SAY("metadata() returned %s\n", rd_kafka_err2str(err));
    TEST_ASSERT(!err, "metadata(undelayed) failed: %s",
                rd_kafka_err2str(err));
    rd_kafka_metadata_destroy(md);

    rd_kafka_topic_destroy(rkt);
    rd_kafka_destroy(rk);

    /* Join controller thread */
    mtx_lock(&ctrl.lock);
    ctrl.term = 1;
    mtx_unlock(&ctrl.lock);
    thrd_join(ctrl.thrd, &res);

    cnd_destroy(&ctrl.cnd);
    mtx_destroy(&ctrl.lock);
}
int main (int argc, char **argv) {

    if (argc < 0 /* always false */) {
        rd_kafka_version();
        rd_kafka_version_str();
        rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR);
        rd_kafka_errno2err(EINVAL);
        rd_kafka_conf_new();
        rd_kafka_conf_destroy(NULL);
        rd_kafka_conf_dup(NULL);
        rd_kafka_conf_set(NULL, NULL, NULL, NULL, 0);
        rd_kafka_conf_set_dr_cb(NULL, NULL);
        rd_kafka_conf_set_dr_msg_cb(NULL, NULL);
        rd_kafka_conf_set_error_cb(NULL, NULL);
        rd_kafka_conf_set_stats_cb(NULL, NULL);
        rd_kafka_conf_set_log_cb(NULL, NULL);
        rd_kafka_conf_set_socket_cb(NULL, NULL);
        rd_kafka_conf_set_open_cb(NULL, NULL);
        rd_kafka_conf_set_opaque(NULL, NULL);
        rd_kafka_opaque(NULL);
        rd_kafka_conf_dump(NULL, NULL);
        rd_kafka_topic_conf_dump(NULL, NULL);
        rd_kafka_conf_dump_free(NULL, 0);
        rd_kafka_conf_properties_show(NULL);
        rd_kafka_topic_conf_new();
        rd_kafka_topic_conf_dup(NULL);
        rd_kafka_topic_conf_destroy(NULL);
        rd_kafka_topic_conf_set(NULL, NULL, NULL, NULL, 0);
        rd_kafka_topic_conf_set_opaque(NULL, NULL);
        rd_kafka_topic_conf_set_partitioner_cb(NULL, NULL);
        rd_kafka_topic_partition_available(NULL, 0);
        rd_kafka_msg_partitioner_random(NULL, NULL, 0, 0, NULL, NULL);
        rd_kafka_new(0, NULL, NULL, 0);
        rd_kafka_destroy(NULL);
        rd_kafka_name(NULL);
        rd_kafka_topic_new(NULL, NULL, NULL);
        rd_kafka_topic_destroy(NULL);
        rd_kafka_topic_name(NULL);
        rd_kafka_message_destroy(NULL);
        rd_kafka_message_errstr(NULL);
        rd_kafka_consume_start(NULL, 0, 0);
        rd_kafka_consume_stop(NULL, 0);
        rd_kafka_consume(NULL, 0, 0);
        rd_kafka_consume_batch(NULL, 0, 0, NULL, 0);
        rd_kafka_consume_callback(NULL, 0, 0, NULL, NULL);
        rd_kafka_offset_store(NULL, 0, 0);
        rd_kafka_produce(NULL, 0, 0, NULL, 0, NULL, 0, NULL);
        rd_kafka_poll(NULL, 0);
        rd_kafka_brokers_add(NULL, NULL);
        rd_kafka_set_logger(NULL, NULL);
        rd_kafka_set_log_level(NULL, 0);
        rd_kafka_log_print(NULL, 0, NULL, NULL);
        rd_kafka_log_syslog(NULL, 0, NULL, NULL);
        rd_kafka_outq_len(NULL);
        rd_kafka_dump(NULL, NULL);
        rd_kafka_thread_cnt();
        rd_kafka_wait_destroyed(0);
        rd_kafka_metadata(NULL, 0, NULL, NULL, 0);
        rd_kafka_metadata_destroy(NULL);
    }

    return 0;
}
ECL_KAFKA_API __int32 ECL_KAFKA_CALL getTopicPartitionCount(ICodeContext* ctx, const char* brokers, const char* topic)
{
    // We have to use librdkafka's C API for this right now, as the C++ API
    // does not expose a topic's metadata. In addition, there is no easy
    // link between the exposed C++ objects and the structs used by the
    // C API, so we are basically creating a brand-new connection from
    // scratch.
    __int32 pCount = 0;
    char errstr[512];
    rd_kafka_conf_t* conf = rd_kafka_conf_new();
    rd_kafka_t* rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));

    if (rk)
    {
        if (rd_kafka_brokers_add(rk, brokers) != 0)
        {
            rd_kafka_topic_conf_t* topic_conf = rd_kafka_topic_conf_new();
            rd_kafka_topic_t* rkt = rd_kafka_topic_new(rk, topic, topic_conf);

            if (rkt)
            {
                const struct rd_kafka_metadata* metadata = NULL;
                rd_kafka_resp_err_t err = rd_kafka_metadata(rk, 0, rkt, &metadata, 5000);

                if (err == RD_KAFKA_RESP_ERR_NO_ERROR)
                {
                    pCount = metadata->topics[0].partition_cnt;
                    rd_kafka_metadata_destroy(metadata);
                }
                else
                {
                    if (ctx->queryContextLogger().queryTraceLevel() > 4)
                    {
                        DBGLOG("Kafka: Error retrieving metadata from topic: %s @ %s: '%s'", topic, brokers, rd_kafka_err2str(err));
                    }
                }

                rd_kafka_topic_destroy(rkt);
            }
            else
            {
                if (ctx->queryContextLogger().queryTraceLevel() > 4)
                {
                    DBGLOG("Kafka: Could not create topic object: %s @ %s", topic, brokers);
                }
            }
        }
        else
        {
            if (ctx->queryContextLogger().queryTraceLevel() > 4)
            {
                DBGLOG("Kafka: Could not add brokers: %s @ %s", topic, brokers);
            }
        }

        rd_kafka_destroy(rk);
    }

    if (pCount == 0)
    {
        DBGLOG("Kafka: Unable to retrieve partition count from topic: %s @ %s", topic, brokers);
    }

    return pCount;
}
int main_0002_unkpart (int argc, char **argv) {
    int partition = 99; /* non-existent */
    int r;
    rd_kafka_t *rk;
    rd_kafka_topic_t *rkt;
    rd_kafka_conf_t *conf;
    rd_kafka_topic_conf_t *topic_conf;
    char msg[128];
    int msgcnt = 10;
    int i;
    int fails = 0;
    const struct rd_kafka_metadata *metadata;

    test_conf_init(&conf, &topic_conf, 10);

    /* Set delivery report callback */
    rd_kafka_conf_set_dr_cb(conf, dr_cb);

    /* Create kafka instance */
    rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

    rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0002", 0), topic_conf);
    if (!rkt)
        TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));

    /* Request metadata so that we know the cluster is up before producing
     * messages, otherwise erroneous partitions will not fail immediately. */
    if ((r = rd_kafka_metadata(rk, 0, rkt, &metadata,
                               tmout_multip(15000))) !=
        RD_KAFKA_RESP_ERR_NO_ERROR)
        TEST_FAIL("Failed to acquire metadata: %s\n", rd_kafka_err2str(r));

    rd_kafka_metadata_destroy(metadata);

    /* Produce a message */
    for (i = 0; i < msgcnt; i++) {
        int *msgidp = malloc(sizeof(*msgidp));
        *msgidp = i;
        rd_snprintf(msg, sizeof(msg), "%s test message #%i", argv[0], i);
        r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
                             msg, strlen(msg), NULL, 0, msgidp);
        if (r == -1) {
            if (errno == ESRCH)
                TEST_SAY("Failed to produce message #%i: "
                         "unknown partition: good!\n", i);
            else
                TEST_FAIL("Failed to produce message #%i: %s\n",
                          i, rd_kafka_err2str(
                                  rd_kafka_errno2err(errno)));
            free(msgidp);
        } else {
            if (i > 5) {
                fails++;
                TEST_SAY("Message #%i produced: "
                         "should've failed\n", i);
            }
            msgs_wait |= (1 << i);
        }

        /* After half the messages: sleep to allow the metadata
         * to be fetched from broker and update the actual partition
         * count: this will make subsequent produce() calls fail
         * immediately. */
        if (i == 5)
            rd_sleep(2);
    }

    /* Wait for messages to time out */
    rd_kafka_flush(rk, -1);

    if (msgs_wait != 0)
        TEST_FAIL("Still waiting for messages: 0x%x\n", msgs_wait);

    if (fails > 0)
        TEST_FAIL("See previous error(s)\n");

    /* Destroy topic */
    rd_kafka_topic_destroy(rkt);

    /* Destroy rdkafka instance */
    TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
    rd_kafka_destroy(rk);

    return 0;
}
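The error path this test relies on can be summarized in a few lines. The sketch below is illustrative, not test code; the helper name is made up, and it assumes <errno.h>, <stdio.h> and the librdkafka header are included.

/* Sketch: once metadata is known, producing to a partition that does not
 * exist fails immediately with errno set to ESRCH (unknown partition). */
static int produce_copy(rd_kafka_topic_t *rkt, int32_t partition,
                        const char *payload, size_t len)
{
    if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
                         (void *)payload, len, NULL, 0, NULL) == -1) {
        rd_kafka_resp_err_t err = rd_kafka_errno2err(errno);
        fprintf(stderr, "produce failed: %s\n", rd_kafka_err2str(err));
        return -1;
    }
    return 0;
}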
int main_0006_symbols (int argc, char **argv) {

    if (argc < 0 /* always false */) {
        rd_kafka_version();
        rd_kafka_version_str();
        rd_kafka_get_debug_contexts();
        rd_kafka_get_err_descs(NULL, NULL);
        rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR);
        rd_kafka_err2name(RD_KAFKA_RESP_ERR_NO_ERROR);
        rd_kafka_errno2err(EINVAL);
        rd_kafka_errno();
        rd_kafka_last_error();
        rd_kafka_conf_new();
        rd_kafka_conf_destroy(NULL);
        rd_kafka_conf_dup(NULL);
        rd_kafka_conf_set(NULL, NULL, NULL, NULL, 0);
        rd_kafka_conf_set_dr_cb(NULL, NULL);
        rd_kafka_conf_set_dr_msg_cb(NULL, NULL);
        rd_kafka_conf_set_error_cb(NULL, NULL);
        rd_kafka_conf_set_stats_cb(NULL, NULL);
        rd_kafka_conf_set_log_cb(NULL, NULL);
        rd_kafka_conf_set_socket_cb(NULL, NULL);
        rd_kafka_conf_set_rebalance_cb(NULL, NULL);
        rd_kafka_conf_set_offset_commit_cb(NULL, NULL);
        rd_kafka_conf_set_throttle_cb(NULL, NULL);
        rd_kafka_conf_set_default_topic_conf(NULL, NULL);
        rd_kafka_conf_get(NULL, NULL, NULL, NULL);
#ifndef _MSC_VER
        rd_kafka_conf_set_open_cb(NULL, NULL);
#endif
        rd_kafka_conf_set_opaque(NULL, NULL);
        rd_kafka_opaque(NULL);
        rd_kafka_conf_dump(NULL, NULL);
        rd_kafka_topic_conf_dump(NULL, NULL);
        rd_kafka_conf_dump_free(NULL, 0);
        rd_kafka_conf_properties_show(NULL);
        rd_kafka_topic_conf_new();
        rd_kafka_topic_conf_dup(NULL);
        rd_kafka_topic_conf_destroy(NULL);
        rd_kafka_topic_conf_set(NULL, NULL, NULL, NULL, 0);
        rd_kafka_topic_conf_set_opaque(NULL, NULL);
        rd_kafka_topic_conf_get(NULL, NULL, NULL, NULL);
        rd_kafka_topic_conf_set_partitioner_cb(NULL, NULL);
        rd_kafka_topic_partition_available(NULL, 0);
        rd_kafka_topic_opaque(NULL);
        rd_kafka_msg_partitioner_random(NULL, NULL, 0, 0, NULL, NULL);
        rd_kafka_msg_partitioner_consistent(NULL, NULL, 0, 0, NULL, NULL);
        rd_kafka_msg_partitioner_consistent_random(NULL, NULL, 0, 0, NULL, NULL);
        rd_kafka_new(0, NULL, NULL, 0);
        rd_kafka_destroy(NULL);
        rd_kafka_name(NULL);
        rd_kafka_memberid(NULL);
        rd_kafka_topic_new(NULL, NULL, NULL);
        rd_kafka_topic_destroy(NULL);
        rd_kafka_topic_name(NULL);
        rd_kafka_message_destroy(NULL);
        rd_kafka_message_errstr(NULL);
        rd_kafka_message_timestamp(NULL, NULL);
        rd_kafka_consume_start(NULL, 0, 0);
        rd_kafka_consume_stop(NULL, 0);
        rd_kafka_consume(NULL, 0, 0);
        rd_kafka_consume_batch(NULL, 0, 0, NULL, 0);
        rd_kafka_consume_callback(NULL, 0, 0, NULL, NULL);
        rd_kafka_offset_store(NULL, 0, 0);
        rd_kafka_produce(NULL, 0, 0, NULL, 0, NULL, 0, NULL);
        rd_kafka_produce_batch(NULL, 0, 0, NULL, 0);
        rd_kafka_poll(NULL, 0);
        rd_kafka_brokers_add(NULL, NULL);
        /* DEPRECATED: rd_kafka_set_logger(NULL, NULL); */
        rd_kafka_set_log_level(NULL, 0);
        rd_kafka_log_print(NULL, 0, NULL, NULL);
#ifndef _MSC_VER
        rd_kafka_log_syslog(NULL, 0, NULL, NULL);
#endif
        rd_kafka_outq_len(NULL);
        rd_kafka_dump(NULL, NULL);
        rd_kafka_thread_cnt();
        rd_kafka_wait_destroyed(0);
        rd_kafka_metadata(NULL, 0, NULL, NULL, 0);
        rd_kafka_metadata_destroy(NULL);
        rd_kafka_queue_destroy(NULL);
        rd_kafka_consume_start_queue(NULL, 0, 0, NULL);
        rd_kafka_consume_queue(NULL, 0);
        rd_kafka_consume_batch_queue(NULL, 0, NULL, 0);
        rd_kafka_consume_callback_queue(NULL, 0, NULL, NULL);
        rd_kafka_seek(NULL, 0, 0, 0);
        rd_kafka_yield(NULL);
        rd_kafka_mem_free(NULL, NULL);
        rd_kafka_list_groups(NULL, NULL, NULL, 0);
        rd_kafka_group_list_destroy(NULL);

        /* KafkaConsumer API */
        rd_kafka_subscribe(NULL, NULL);
        rd_kafka_unsubscribe(NULL);
        rd_kafka_subscription(NULL, NULL);
        rd_kafka_consumer_poll(NULL, 0);
        rd_kafka_consumer_close(NULL);
        rd_kafka_assign(NULL, NULL);
        rd_kafka_assignment(NULL, NULL);
        rd_kafka_commit(NULL, NULL, 0);
        rd_kafka_commit_message(NULL, NULL, 0);
        rd_kafka_committed(NULL, NULL, 0);
        rd_kafka_position(NULL, NULL);

        /* TopicPartition */
        rd_kafka_topic_partition_list_new(0);
        rd_kafka_topic_partition_list_destroy(NULL);
        rd_kafka_topic_partition_list_add(NULL, NULL, 0);
        rd_kafka_topic_partition_list_add_range(NULL, NULL, 0, 0);
        rd_kafka_topic_partition_list_del(NULL, NULL, 0);
        rd_kafka_topic_partition_list_del_by_idx(NULL, 0);
        rd_kafka_topic_partition_list_copy(NULL);
        rd_kafka_topic_partition_list_set_offset(NULL, NULL, 0, 0);
        rd_kafka_topic_partition_list_find(NULL, NULL, 0);
        rd_kafka_query_watermark_offsets(NULL, NULL, 0, NULL, NULL, 0);
        rd_kafka_get_watermark_offsets(NULL, NULL, 0, NULL, NULL);
    }

    return 0;
}
int main (int argc, char **argv) {
    int partition = 99; /* non-existent */
    int r;
    rd_kafka_t *rk;
    rd_kafka_topic_t *rkt;
    rd_kafka_conf_t *conf;
    rd_kafka_topic_conf_t *topic_conf;
    char errstr[512];
    char msg[128];
    int msgcnt = 10;
    int i;
    const struct rd_kafka_metadata *metadata;

    test_conf_init(&conf, &topic_conf, 10);

    /* Set delivery report callback */
    rd_kafka_conf_set_dr_cb(conf, dr_cb);

    /* Create kafka instance */
    rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
    if (!rk)
        TEST_FAIL("Failed to create rdkafka instance: %s\n", errstr);

    TEST_SAY("Created kafka instance %s\n", rd_kafka_name(rk));

    rkt = rd_kafka_topic_new(rk, test_mk_topic_name("generic", 0), topic_conf);
    if (!rkt)
        TEST_FAIL("Failed to create topic: %s\n", strerror(errno));

    /* Request metadata so that we know the cluster is up before producing
     * messages, otherwise erroneous partitions will not fail immediately. */
    if ((r = rd_kafka_metadata(rk, 0, rkt, &metadata, 2000)) !=
        RD_KAFKA_RESP_ERR_NO_ERROR)
        TEST_FAIL("Failed to acquire metadata: %s\n", rd_kafka_err2str(r));

    rd_kafka_metadata_destroy(metadata);

    /* Produce a message */
    for (i = 0; i < msgcnt; i++) {
        int *msgidp = malloc(sizeof(*msgidp));
        *msgidp = i;
        snprintf(msg, sizeof(msg), "%s test message #%i", argv[0], i);
        r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
                             msg, strlen(msg), NULL, 0, msgidp);
        if (r == -1) {
            if (errno == ESRCH)
                TEST_SAY("Failed to produce message #%i: "
                         "unknown partition: good!\n", i);
            else
                TEST_FAIL("Failed to produce message #%i: %s\n",
                          i, strerror(errno));
        } else {
            if (i > 5)
                TEST_FAIL("Message #%i produced: "
                          "should've failed\n", i);
            msgs_wait |= (1 << i);
        }

        /* After half the messages: sleep to allow the metadata
         * to be fetched from broker and update the actual partition
         * count: this will make subsequent produce() calls fail
         * immediately. */
        if (i == 5)
            sleep(2);
    }

    /* Wait for messages to time out */
    while (rd_kafka_outq_len(rk) > 0)
        rd_kafka_poll(rk, 50);

    if (msgs_wait != 0)
        TEST_FAIL("Still waiting for messages: 0x%x\n", msgs_wait);

    /* Destroy topic */
    rd_kafka_topic_destroy(rkt);

    /* Destroy rdkafka instance */
    TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
    rd_kafka_destroy(rk);

    /* Wait for everything to be cleaned up since broker destroys are
     * handled in its own thread. */
    test_wait_exit(10);

    /* If we haven't failed at this point then
     * there were no threads leaked */
    return 0;
}