/**
 * Hermann::Lib::Consumer.consume
 *
 * @param VALUE self  the Ruby object for this consumer
 * @param VALUE topic the Ruby string representing a topic to consume
 */
static VALUE consumer_consume(VALUE self, VALUE topic) {
    HermannInstanceConfig *consumerConfig;

    TRACER("starting consume\n");

    Data_Get_Struct(self, HermannInstanceConfig, consumerConfig);

    if ((NULL == consumerConfig->topic) ||
        (0 == strlen(consumerConfig->topic))) {
        fprintf(stderr, "Topic is null!\n");
        rb_raise(rb_eRuntimeError, "Topic cannot be empty");
        return self;
    }

    if (!consumerConfig->isInitialized) {
        consumer_init_kafka(consumerConfig);
    }

    /* Start consuming */
    if (rd_kafka_consume_start(consumerConfig->rkt,
                               consumerConfig->partition,
                               consumerConfig->start_offset) == -1) {
        fprintf(stderr, "%% Failed to start consuming: %s\n",
                rd_kafka_err2str(rd_kafka_errno2err(errno)));
        /* Pass the error string as an argument, not as the format string,
         * so '%' sequences in it are not interpreted by rb_raise(). */
        rb_raise(rb_eRuntimeError, "%s",
                 rd_kafka_err2str(rd_kafka_errno2err(errno)));
        return Qnil;
    }

    consumer_consume_loop(consumerConfig);

    /* Stop consuming */
    rd_kafka_consume_stop(consumerConfig->rkt, consumerConfig->partition);

    return Qnil;
}
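consumer_consume() above hands off to consumer_consume_loop(), whose body is not part of this excerpt. A minimal sketch of such a loop, assuming a global `run` flag and the same HermannInstanceConfig fields used above; msg_consume() is a hypothetical per-message handler, not Hermann's actual implementation:

/* Sketch of a consume loop (assumptions: global `run` flag,
 * hypothetical msg_consume() handler). */
static void consumer_consume_loop(HermannInstanceConfig *consumerConfig) {
    while (run) {
        /* Block up to 1000 ms waiting for a single message. */
        rd_kafka_message_t *msg =
            rd_kafka_consume(consumerConfig->rkt,
                             consumerConfig->partition,
                             1000);
        if (!msg)
            continue; /* timed out; poll again */
        msg_consume(msg, consumerConfig); /* hypothetical handler */
        rd_kafka_message_destroy(msg);
    }
}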
int producer_push_data(const char *buf, const int buf_len,
                       const wrapper_Info *producer_info) {
    int produce_ret;

    if (NULL == buf)
        return 0;
    if (0 == buf_len || buf_len > MAX_BUF_LEN)
        return -2;

    /* Pick the next partition round-robin; `part`, `max_part` and `lock`
     * are shared across threads, so update under the mutex. */
    int tpart = 0;
    pthread_mutex_lock(&lock);
    tpart = part;
    part = (part + 1) % max_part;
    pthread_mutex_unlock(&lock);

    /* Send/Produce message. */
    produce_ret = rd_kafka_produce(producer_info->rkt, tpart,
                                   RD_KAFKA_MSG_F_COPY,
                                   /* Payload and length */
                                   (void *)buf, (size_t)buf_len,
                                   /* Optional key and its length */
                                   NULL, 0,
                                   /* Message opaque, provided in
                                    * delivery report callback as
                                    * msg_opaque. */
                                   NULL);

    if (produce_ret == -1) {
        LOG(ERROR) << "errno :" << errno;
        fprintf(stderr,
                "%% Failed to produce to topic %s partition %i: %s\n",
                rd_kafka_topic_name(producer_info->rkt),
                producer_info->partition,
                rd_kafka_err2str(rd_kafka_errno2err(errno)));
        LOG(ERROR) << rd_kafka_err2str(rd_kafka_errno2err(errno));

        /* Poll to handle delivery reports */
        rd_kafka_poll(producer_info->rk, 0);
        return PUSH_DATA_FAILED;
    }

    /* Poll to handle delivery reports */
    rd_kafka_poll(producer_info->rk, 0);
    return PUSH_DATA_SUCCESS;
}
rd_kafka_topic_t *test_create_producer_topic(rd_kafka_t *rk,
                                             const char *topic, ...) {
    rd_kafka_topic_t *rkt;
    rd_kafka_topic_conf_t *topic_conf;
    char errstr[512];
    va_list ap;
    const char *name, *val;

    test_conf_init(NULL, &topic_conf, 20);

    va_start(ap, topic);
    while ((name = va_arg(ap, const char *)) &&
           (val = va_arg(ap, const char *))) {
        if (rd_kafka_topic_conf_set(topic_conf, name, val,
                                    errstr, sizeof(errstr)) !=
            RD_KAFKA_CONF_OK)
            TEST_FAIL("Conf failed: %s\n", errstr);
    }
    va_end(ap);

    /* Make sure all replicas are in-sync after producing
     * so that the consume test won't fail. */
    rd_kafka_topic_conf_set(topic_conf, "request.required.acks", "-1",
                            errstr, sizeof(errstr));

    rkt = rd_kafka_topic_new(rk, topic, topic_conf);
    if (!rkt)
        TEST_FAIL("Failed to create topic: %s\n",
                  rd_kafka_err2str(rd_kafka_errno2err(errno)));

    return rkt;
}
void kfc_rdkafka_init(rd_kafka_type_t type) {
    char errstr[512];

    if (type == RD_KAFKA_PRODUCER) {
        char tmp[16];
        snprintf(tmp, sizeof(tmp), "%i", SIGIO);
        rd_kafka_conf_set(conf.rk_conf, "internal.termination.signal",
                          tmp, NULL, 0);
    }

    /* Create handle */
    if (!(conf.rk = rd_kafka_new(type, conf.rk_conf,
                                 errstr, sizeof(errstr))))
        FATAL("Failed to create rd_kafka struct: %s", errstr);

    rd_kafka_set_logger(conf.rk, rd_kafka_log_print);
    if (conf.debug)
        rd_kafka_set_log_level(conf.rk, LOG_DEBUG);
    else if (conf.verbosity == 0)
        rd_kafka_set_log_level(conf.rk, 0);

    /* Create topic, if specified */
    if (conf.topic &&
        !(conf.rkt = rd_kafka_topic_new(conf.rk, conf.topic,
                                        conf.rkt_conf)))
        FATAL("Failed to create rd_kafka_topic %s: %s", conf.topic,
              rd_kafka_err2str(rd_kafka_errno2err(errno)));

    /* Ownership of both configs was transferred to rd_kafka_new() and
     * rd_kafka_topic_new() above. */
    conf.rk_conf = NULL;
    conf.rkt_conf = NULL;
}
static rd_kafka_resp_err_t kafka_error() {
#if RD_KAFKA_VERSION >= 0x000b00ff
    return rd_kafka_last_error();
#else
    return rd_kafka_errno2err(errno);
#endif
}
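A call-site sketch for the shim above, assuming an existing topic handle `rkt` and a payload buffer: on produce failure the error is reported through kafka_error(), so the code reads identically on librdkafka versions before and after the rd_kafka_last_error() API:

/* Sketch: uniform error retrieval via the version shim. */
if (rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY,
                     payload, payload_len, NULL, 0, NULL) == -1)
    fprintf(stderr, "produce failed: %s\n",
            rd_kafka_err2str(kafka_error()));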
/**
 * Issue #530:
 * "Legacy Consumer. Delete hangs if done right after RdKafka::Consumer::create.
 *  But If I put a start and stop in between, there is no issue."
 */
static int legacy_consumer_early_destroy(void) {
    rd_kafka_t *rk;
    rd_kafka_topic_t *rkt;
    int pass;
    const char *topic = test_mk_topic_name(__FUNCTION__, 0);

    for (pass = 0; pass < 2; pass++) {
        TEST_SAY("%s: pass #%d\n", __FUNCTION__, pass);

        rk = test_create_handle(RD_KAFKA_CONSUMER, NULL);

        if (pass == 1) {
            /* Second pass, create a topic too. */
            rkt = rd_kafka_topic_new(rk, topic, NULL);
            TEST_ASSERT(rkt, "failed to create topic: %s",
                        rd_kafka_err2str(rd_kafka_errno2err(errno)));
            rd_sleep(1);
            rd_kafka_topic_destroy(rkt);
        }

        rd_kafka_destroy(rk);
    }

    return 0;
}
int main(int argc, char **argv) {
    /* Linkage check: the branch is never taken, so nothing executes at
     * runtime; the calls below only have to resolve at link time. */
    if (argc < 0 /* always false */) {
        rd_kafka_version();
        rd_kafka_version_str();
        rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR);
        rd_kafka_errno2err(EINVAL);
        rd_kafka_conf_new();
        rd_kafka_conf_destroy(NULL);
        rd_kafka_conf_dup(NULL);
        rd_kafka_conf_set(NULL, NULL, NULL, NULL, 0);
        rd_kafka_conf_set_dr_cb(NULL, NULL);
        rd_kafka_conf_set_error_cb(NULL, NULL);
        rd_kafka_conf_set_stats_cb(NULL, NULL);
        rd_kafka_conf_set_opaque(NULL, NULL);
        rd_kafka_conf_dump(NULL, NULL);
        rd_kafka_topic_conf_dump(NULL, NULL);
        rd_kafka_conf_dump_free(NULL, 0);
        rd_kafka_conf_properties_show(NULL);
        rd_kafka_topic_conf_new();
        rd_kafka_topic_conf_dup(NULL);
        rd_kafka_topic_conf_destroy(NULL);
        rd_kafka_topic_conf_set(NULL, NULL, NULL, NULL, 0);
        rd_kafka_topic_conf_set_opaque(NULL, NULL);
        rd_kafka_topic_conf_set_partitioner_cb(NULL, NULL);
        rd_kafka_topic_partition_available(NULL, 0);
        rd_kafka_msg_partitioner_random(NULL, NULL, 0, 0, NULL, NULL);
        rd_kafka_new(0, NULL, NULL, 0);
        rd_kafka_destroy(NULL);
        rd_kafka_name(NULL);
        rd_kafka_topic_new(NULL, NULL, NULL);
        rd_kafka_topic_destroy(NULL);
        rd_kafka_topic_name(NULL);
        rd_kafka_message_destroy(NULL);
        rd_kafka_message_errstr(NULL);
        rd_kafka_consume_start(NULL, 0, 0);
        rd_kafka_consume_stop(NULL, 0);
        rd_kafka_consume(NULL, 0, 0);
        rd_kafka_consume_batch(NULL, 0, 0, NULL, 0);
        rd_kafka_consume_callback(NULL, 0, 0, NULL, NULL);
        rd_kafka_offset_store(NULL, 0, 0);
        rd_kafka_produce(NULL, 0, 0, NULL, 0, NULL, 0, NULL);
        rd_kafka_poll(NULL, 0);
        rd_kafka_brokers_add(NULL, NULL);
        rd_kafka_set_logger(NULL, NULL);
        rd_kafka_set_log_level(NULL, 0);
        rd_kafka_log_print(NULL, 0, NULL, NULL);
        rd_kafka_log_syslog(NULL, 0, NULL, NULL);
        rd_kafka_outq_len(NULL);
        rd_kafka_dump(NULL, NULL);
        rd_kafka_thread_cnt();
        rd_kafka_wait_destroyed(0);
    }
    return 0;
}
int consumer_init(const int partition, const char *topic,
                  const char *brokers, Consume_Data consume_data,
                  wrapper_Info *producer_info) {
    rd_kafka_conf_t *conf;
    rd_kafka_topic_conf_t *topic_conf;
    rd_kafka_t *rk;
    char errstr[512];

    producer_info->start_offset = RD_KAFKA_OFFSET_END;
    producer_info->partition = partition;

    if (NULL != consume_data)
        producer_info->func_consume_data = consume_data;
    else
        return CONSUMER_INIT_FAILED;

    /* Kafka configuration */
    conf = rd_kafka_conf_new();
    if (NULL == conf)
        return CONSUMER_INIT_FAILED;

    if (RD_KAFKA_CONF_OK != rd_kafka_conf_set(conf, "group.id", "one",
                                              errstr, sizeof(errstr)))
        return CONSUMER_INIT_FAILED;

    /* Create Kafka handle */
    if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf,
                            errstr, sizeof(errstr)))) {
        fprintf(stderr, "%% Failed to create new consumer: %s\n", errstr);
        return CONSUMER_INIT_FAILED;
    }

    rd_kafka_set_log_level(rk, LOG_DEBUG);

    /* Add brokers */
    if (rd_kafka_brokers_add(rk, brokers) == 0) {
        fprintf(stderr, "%% No valid brokers specified\n");
        return CONSUMER_INIT_FAILED;
    }

    /* Topic configuration */
    topic_conf = rd_kafka_topic_conf_new();

    /* Create topic */
    producer_info->rkt = rd_kafka_topic_new(rk, topic, topic_conf);
    producer_info->rk = rk;

    /* Start consuming */
    if (rd_kafka_consume_start(producer_info->rkt, partition,
                               RD_KAFKA_OFFSET_END) == -1) {
        fprintf(stderr, "%% Failed to start consuming: %s\n",
                rd_kafka_err2str(rd_kafka_errno2err(errno)));
        return CONSUMER_INIT_FAILED;
    }

    return CONSUMER_INIT_SUCCESS;
}
void test_consumer_stop(const char *what, rd_kafka_topic_t *rkt,
                        int32_t partition) {
    TEST_SAY("%s: consumer_stop: %s [%"PRId32"]\n",
             what, rd_kafka_topic_name(rkt), partition);

    if (rd_kafka_consume_stop(rkt, partition) == -1)
        TEST_FAIL("%s: consume_stop failed: %s\n", what,
                  rd_kafka_err2str(rd_kafka_errno2err(errno)));
}
void test_consumer_start(const char *what, rd_kafka_topic_t *rkt,
                         int32_t partition, int64_t start_offset) {
    TEST_SAY("%s: consumer_start: %s [%"PRId32"] at offset %"PRId64"\n",
             what, rd_kafka_topic_name(rkt), partition, start_offset);

    if (rd_kafka_consume_start(rkt, partition, start_offset) == -1)
        TEST_FAIL("%s: consume_start failed: %s\n", what,
                  rd_kafka_err2str(rd_kafka_errno2err(errno)));
}
static int kafka_handle(struct kafka_topic_context *ctx) /* {{{ */
{
    char errbuf[1024];
    rd_kafka_conf_t *conf;
    rd_kafka_topic_conf_t *topic_conf;

    if (ctx->kafka != NULL && ctx->topic != NULL)
        return(0);

    if (ctx->kafka == NULL) {
        if ((conf = rd_kafka_conf_dup(ctx->kafka_conf)) == NULL) {
            ERROR("write_kafka plugin: cannot duplicate kafka config");
            return(1);
        }

        if ((ctx->kafka = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                                       errbuf, sizeof(errbuf))) == NULL) {
            ERROR("write_kafka plugin: cannot create kafka handle.");
            return 1;
        }

        rd_kafka_conf_destroy(ctx->kafka_conf);
        ctx->kafka_conf = NULL;

        INFO("write_kafka plugin: created KAFKA handle : %s",
             rd_kafka_name(ctx->kafka));

#if defined(HAVE_LIBRDKAFKA_LOGGER) && !defined(HAVE_LIBRDKAFKA_LOG_CB)
        rd_kafka_set_logger(ctx->kafka, kafka_log);
#endif
    }

    if (ctx->topic == NULL) {
        if ((topic_conf = rd_kafka_topic_conf_dup(ctx->conf)) == NULL) {
            ERROR("write_kafka plugin: cannot duplicate kafka topic config");
            return 1;
        }

        if ((ctx->topic = rd_kafka_topic_new(ctx->kafka, ctx->topic_name,
                                             topic_conf)) == NULL) {
            ERROR("write_kafka plugin: cannot create topic : %s\n",
                  rd_kafka_err2str(rd_kafka_errno2err(errno)));
            return errno;
        }

        rd_kafka_topic_conf_destroy(ctx->conf);
        ctx->conf = NULL;

        INFO("write_kafka plugin: handle created for topic : %s",
             rd_kafka_topic_name(ctx->topic));
    }

    return(0);
} /* }}} int kafka_handle */
int main_0001_multiobj(int argc, char **argv) {
    int partition = RD_KAFKA_PARTITION_UA; /* random */
    int i;
    const int NUM_ITER = 10;
    const char *topic = NULL;

    TEST_SAY("Creating and destroying %i kafka instances\n", NUM_ITER);

    /* Create, use and destroy NUM_ITER kafka instances. */
    for (i = 0; i < NUM_ITER; i++) {
        rd_kafka_t *rk;
        rd_kafka_topic_t *rkt;
        rd_kafka_conf_t *conf;
        rd_kafka_topic_conf_t *topic_conf;
        char msg[128];
        test_timing_t t_destroy;

        test_conf_init(&conf, &topic_conf, 30);

        if (!topic)
            topic = test_mk_topic_name("0001", 0);

        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

        rkt = rd_kafka_topic_new(rk, topic, topic_conf);
        if (!rkt)
            TEST_FAIL("Failed to create topic for "
                      "rdkafka instance #%i: %s\n",
                      i, rd_kafka_err2str(rd_kafka_errno2err(errno)));

        rd_snprintf(msg, sizeof(msg),
                    "%s test message for iteration #%i", argv[0], i);

        /* Produce a message */
        rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
                         msg, strlen(msg), NULL, 0, NULL);

        /* Wait for it to be sent (and possibly acked) */
        rd_kafka_flush(rk, -1);

        /* Destroy topic */
        rd_kafka_topic_destroy(rkt);

        /* Destroy rdkafka instance */
        TIMING_START(&t_destroy, "rd_kafka_destroy()");
        rd_kafka_destroy(rk);
        TIMING_STOP(&t_destroy);
    }

    return 0;
}
void test_produce_msgs(rd_kafka_t *rk, rd_kafka_topic_t *rkt,
                       uint64_t testid, int32_t partition,
                       int msg_base, int cnt,
                       const char *payload, size_t size) {
    int msg_id;
    test_timing_t t_all;
    int remains = 0;

    TEST_SAY("Produce to %s [%"PRId32"]: messages #%d..%d\n",
             rd_kafka_topic_name(rkt), partition, msg_base, msg_base + cnt);

    TIMING_START(&t_all, "PRODUCE");

    for (msg_id = msg_base; msg_id < msg_base + cnt; msg_id++) {
        char key[128];
        char buf[128];
        const char *use_payload;
        size_t use_size;

        if (payload) {
            use_payload = payload;
            use_size = size;
        } else {
            test_msg_fmt(key, sizeof(key), testid, partition, msg_id);
            rd_snprintf(buf, sizeof(buf), "data: %s", key);
            use_payload = buf;
            use_size = strlen(buf);
        }

        remains++;

        if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
                             (void *)use_payload, use_size,
                             key, strlen(key), &remains) == -1)
            TEST_FAIL("Failed to produce message %i "
                      "to partition %i: %s",
                      msg_id, (int)partition,
                      rd_kafka_err2str(rd_kafka_errno2err(errno)));
    }

    /* Wait for messages to be delivered */
    while (remains > 0 && rd_kafka_outq_len(rk) > 0)
        rd_kafka_poll(rk, 10);

    TIMING_STOP(&t_all);
}
rd_kafka_topic_t *test_create_consumer_topic(rd_kafka_t *rk,
                                             const char *topic) {
    rd_kafka_topic_t *rkt;
    rd_kafka_topic_conf_t *topic_conf;

    test_conf_init(NULL, &topic_conf, 20);

    rkt = rd_kafka_topic_new(rk, topic, topic_conf);
    if (!rkt)
        TEST_FAIL("Failed to create topic: %s\n",
                  rd_kafka_err2str(rd_kafka_errno2err(errno)));

    return rkt;
}
/* Shared logic of Consumer_start and Producer_start */
static PyObject *
RdkHandle_start(RdkHandle *self,
                rd_kafka_type_t rdk_type,
                const char *brokers,
                const char *topic_name)
{
    if (RdkHandle_excl_lock(self)) return NULL;
    if (self->rdk_handle) {
        set_pykafka_error("RdKafkaException", "Already started!");
        return RdkHandle_start_fail(self, RdkHandle_stop);
    }

    /* Configure and start rdk_handle */
    char errstr[512];
    Py_BEGIN_ALLOW_THREADS  /* avoid callbacks deadlocking */
        self->rdk_handle = rd_kafka_new(
            rdk_type, self->rdk_conf, errstr, sizeof(errstr));
        self->rdk_conf = NULL;  /* deallocated by rd_kafka_new() */
    Py_END_ALLOW_THREADS
    if (! self->rdk_handle) {
        set_pykafka_error("RdKafkaException", errstr);
        return RdkHandle_start_fail(self, RdkHandle_stop);
    }

    /* Set brokers */
    int brokers_added;
    Py_BEGIN_ALLOW_THREADS  /* avoid callbacks deadlocking */
        brokers_added = rd_kafka_brokers_add(self->rdk_handle, brokers);
    Py_END_ALLOW_THREADS
    if (brokers_added == 0) {
        set_pykafka_error("RdKafkaException", "adding brokers failed");
        return RdkHandle_start_fail(self, RdkHandle_stop);
    }

    /* Configure and take out a topic handle */
    Py_BEGIN_ALLOW_THREADS  /* avoid callbacks deadlocking */
        self->rdk_topic_handle =
            rd_kafka_topic_new(self->rdk_handle,
                               topic_name,
                               self->rdk_topic_conf);
        self->rdk_topic_conf = NULL;  /* deallocated by rd_kafka_topic_new() */
    Py_END_ALLOW_THREADS
    if (! self->rdk_topic_handle) {
        set_pykafka_error_from_code(rd_kafka_errno2err(errno), NULL);
        return RdkHandle_start_fail(self, RdkHandle_stop);
    }

    if (RdkHandle_unlock(self)) return NULL;
    Py_INCREF(Py_None);
    return Py_None;
}
static void *consumer_recv_msg(void *ptr) {
    rd_kafka_message_t *ret;
    HermannInstanceConfig *consumerConfig = (HermannInstanceConfig *)ptr;

    ret = rd_kafka_consume(consumerConfig->rkt,
                           consumerConfig->partition,
                           CONSUMER_RECVMSG_TIMEOUT_MS);

    if (ret == NULL) {
        if (errno != ETIMEDOUT)
            fprintf(stderr, "%% Error: %s\n",
                    rd_kafka_err2str(rd_kafka_errno2err(errno)));
    }

    return (void *)ret;
}
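consumer_recv_msg() has the pthread entry-point shape (void* in, void* out), so it can run one blocking consume on a worker thread while the caller's thread stays free. A hedged usage sketch, assuming the same HermannInstanceConfig as above; error handling is omitted:

/* Sketch: run one blocking consume on a worker thread, then join to
 * collect the resulting message (may be NULL on timeout). */
pthread_t thread;
void *result = NULL;
pthread_create(&thread, NULL, consumer_recv_msg, consumerConfig);
pthread_join(thread, &result);
rd_kafka_message_t *msg = (rd_kafka_message_t *)result;
if (msg) {
    /* ... handle message ... */
    rd_kafka_message_destroy(msg);
}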
/* Returns 0 on success. On failure, sets mapper->error and returns
 * nonzero. */
int table_metadata_update_topic(table_mapper_t mapper,
                                table_metadata_t table,
                                const char *table_name) {
    const char *prev_table_name = table->table_name;

    if (table->topic) {
        if (strcmp(table_name, prev_table_name)) {
            logf("Registering new table (was \"%s\", now \"%s\") "
                 "for relid %" PRIu32 "\n",
                 prev_table_name, table_name, table->relid);

            free(table->table_name);
            rd_kafka_topic_destroy(table->topic);
        } else
            return 0; /* table name didn't change, nothing to do */
    }

    table->table_name = strdup(table_name);

    const char *topic_name;
    /* The buffer is declared at function scope (not inside the branch)
     * because topic_name may point into it until rd_kafka_topic_new()
     * below copies the name. Both branches set topic_name to a pointer
     * we don't need to free. */
    char prefixed_name[TABLE_MAPPER_MAX_TOPIC_LEN];
    if (mapper->topic_prefix != NULL) {
        int size = snprintf(prefixed_name, TABLE_MAPPER_MAX_TOPIC_LEN,
                            "%s%c%s",
                            mapper->topic_prefix,
                            TABLE_MAPPER_TOPIC_PREFIX_DELIMITER,
                            table_name);
        if (size >= TABLE_MAPPER_MAX_TOPIC_LEN) {
            mapper_error(mapper,
                         "prefixed topic name is too long (max %d bytes): "
                         "prefix %s, table name %s",
                         TABLE_MAPPER_MAX_TOPIC_LEN,
                         mapper->topic_prefix, table_name);
            return -1;
        }

        topic_name = prefixed_name;
    } else {
        /* topic_name aliases table_name, which we don't own */
        topic_name = table_name;
    }

    logf("Opening Kafka topic \"%s\" for table \"%s\"\n",
         topic_name, table_name);

    table->topic = rd_kafka_topic_new(mapper->kafka, topic_name,
                                      rd_kafka_topic_conf_dup(mapper->topic_conf));
    if (!table->topic) {
        mapper_error(mapper, "Cannot open Kafka topic %s: %s", topic_name,
                     rd_kafka_err2str(rd_kafka_errno2err(errno)));
        return -1;
    }

    return 0;
}
/**
 * Produces \p cnt messages and returns immediately.
 * Does not wait for delivery.
 * \p msgcounterp is incremented for each produced message and passed
 * as \p msg_opaque, which is later used in test_dr_cb to decrement
 * the counter on delivery.
 */
void test_produce_msgs_nowait(rd_kafka_t *rk, rd_kafka_topic_t *rkt,
                              uint64_t testid, int32_t partition,
                              int msg_base, int cnt,
                              const char *payload, size_t size,
                              int *msgcounterp) {
    int msg_id;
    test_timing_t t_all;

    TEST_SAY("Produce to %s [%"PRId32"]: messages #%d..%d\n",
             rd_kafka_topic_name(rkt), partition, msg_base, msg_base + cnt);

    TIMING_START(&t_all, "PRODUCE");

    for (msg_id = msg_base; msg_id < msg_base + cnt; msg_id++) {
        char key[128];
        char buf[128];
        const char *use_payload;
        size_t use_size;

        if (payload) {
            use_payload = payload;
            use_size = size;
        } else {
            test_msg_fmt(key, sizeof(key), testid, partition, msg_id);
            rd_snprintf(buf, sizeof(buf), "%s: data", key);
            use_payload = buf;
            use_size = strlen(buf);
        }

        if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
                             (void *)use_payload, use_size,
                             key, strlen(key), msgcounterp) == -1)
            TEST_FAIL("Failed to produce message %i "
                      "to partition %i: %s",
                      msg_id, (int)partition,
                      rd_kafka_err2str(rd_kafka_errno2err(errno)));

        (*msgcounterp)++;
    }

    TIMING_STOP(&t_all);
}
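The docstring above says the counter passed as msg_opaque is decremented in test_dr_cb on delivery; that callback is not shown here. A minimal sketch of such a legacy delivery-report callback (registered via rd_kafka_conf_set_dr_cb); the name and failure handling are assumptions, not the test harness's actual code:

/* Sketch of a delivery-report callback: msg_opaque is the int counter
 * passed to rd_kafka_produce() above; decrement it per completed message. */
static void test_dr_cb_sketch(rd_kafka_t *rk, void *payload, size_t len,
                              rd_kafka_resp_err_t err,
                              void *opaque, void *msg_opaque) {
    int *remains = (int *)msg_opaque;
    if (err)
        fprintf(stderr, "%% Delivery failed: %s\n", rd_kafka_err2str(err));
    (*remains)--;
}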
/**
 * Produces a single message, retries on queue congestion, and
 * exits hard on error.
 */
static int produce_message(void *buf, size_t len,
                           const void *key, size_t key_len,
                           int msgflags) {
    /* Produce message: keep trying until it succeeds. */
    int ret = 0;

    do {
        rd_kafka_resp_err_t err;

        if (!conf.run) {
            INFO(LOG_ERR,
                 "Program terminated while producing message of %zd bytes",
                 len);
            ret = -1;
            break;
        }

        if (rd_kafka_produce(conf.rkt, conf.partition, msgflags,
                             buf, len, key, key_len, NULL) == 0) {
            stats.tx++;
            break;
        }

        err = rd_kafka_errno2err(errno);

        if (err != RD_KAFKA_RESP_ERR__QUEUE_FULL) {
            INFO(LOG_ERR, "Failed to produce message (%zd bytes): %s",
                 len, rd_kafka_err2str(err));
            ret = -1;
            break;
        }

        stats.tx_err_q++;

        /* Internal queue full, sleep to allow
         * messages to be produced/time out
         * before trying again. */
        usleep(5);
    } while (1);

    return ret;
}
int p_kafka_produce_data_to_part(struct p_kafka_host *kafka_host,
                                 void *data, u_int32_t data_len, int part) {
    int ret = SUCCESS;

    kafkap_ret_err_cb = FALSE;

    if (kafka_host && kafka_host->rk && kafka_host->topic) {
        ret = rd_kafka_produce(kafka_host->topic, part,
                               RD_KAFKA_MSG_F_COPY, data, data_len,
                               kafka_host->key, kafka_host->key_len, NULL);

        if (ret == ERR) {
            Log(LOG_ERR,
                "ERROR ( %s/%s ): Failed to produce to topic %s "
                "partition %i: %s\n",
                config.name, config.type,
                rd_kafka_topic_name(kafka_host->topic), part,
                rd_kafka_err2str(rd_kafka_errno2err(errno)));
            p_kafka_close(kafka_host, TRUE);
        }
    } else
        return ERR;

    rd_kafka_poll(kafka_host->rk, 0);

    return ret;
}
/* Destroy all internal state of the consumer */
static PyObject *
Consumer_stop(RdkHandle *self)
{
    if (RdkHandle_safe_lock(self, /* check_running= */ 0)) return NULL;

    int errored = 0;
    if (self->rdk_topic_handle && self->partition_ids) {
        Py_ssize_t i, len = PyList_Size(self->partition_ids);
        for (i = 0; i != len; ++i) {
            /* Error handling here is a bit poor; we cannot bail out
               directly if we want to clean up as much as we can. */
            long part_id = PyLong_AsLong(
                PyList_GetItem(self->partition_ids, i));
            if (part_id == -1) {
                errored += 1;
                PyObject *log_res = PyObject_CallMethod(
                    logger, "exception", "s", "In Consumer_stop:");
                Py_XDECREF(log_res);
                continue;
            }

            int res;
            Py_BEGIN_ALLOW_THREADS  /* avoid callbacks deadlocking */
                res = rd_kafka_consume_stop(self->rdk_topic_handle, part_id);
            Py_END_ALLOW_THREADS
            if (res == -1) {
                set_pykafka_error_from_code(rd_kafka_errno2err(errno), NULL);
                errored += 1;
                PyObject *log_res = PyObject_CallMethod(
                    logger, "exception", "sl",
                    "Error in rd_kafka_consume_stop, part_id=%s", part_id);
                Py_XDECREF(log_res);
                continue;
            }
        }
    }
    /* (remainder of Consumer_stop not included in this excerpt) */
/**
 * Create topic_t object with va-arg list as key-value config pairs
 * terminated by NULL.
 */
rd_kafka_topic_t *test_create_topic(rd_kafka_t *rk,
                                    const char *topic, ...) {
    rd_kafka_topic_t *rkt;
    rd_kafka_topic_conf_t *topic_conf;
    va_list ap;
    const char *name, *val;

    test_conf_init(NULL, &topic_conf, 0);

    va_start(ap, topic);
    while ((name = va_arg(ap, const char *)) &&
           (val = va_arg(ap, const char *)))
        test_topic_conf_set(topic_conf, name, val);
    va_end(ap);

    rkt = rd_kafka_topic_new(rk, topic, topic_conf);
    if (!rkt)
        TEST_FAIL("Failed to create topic: %s\n",
                  rd_kafka_err2str(rd_kafka_errno2err(errno)));

    return rkt;
}
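A usage sketch for the helper above; the key/value list must end with NULL. Both properties shown are standard librdkafka topic properties, but the topic name is made up:

/* Create a topic handle with two topic-level properties set. */
rd_kafka_topic_t *rkt = test_create_topic(rk, "test_0042",
                                          "request.required.acks", "-1",
                                          "message.timeout.ms", "30000",
                                          NULL);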
VCL_STRING
vmod_send_msg(const struct vrt_ctx *ctx, VCL_STRING broker,
              VCL_STRING topic_name, VCL_STRING name)
{
    char *p;
    unsigned u, v;
    char errstr[512];
    char *brokers = (char *)broker;
    char *topic = (char *)topic_name;
    rd_kafka_topic_t *rkt;
    int partition = RD_KAFKA_PARTITION_UA;

    u = WS_Reserve(ctx->ws, 0); /* Reserve some work space */
    p = ctx->ws->f;             /* Front of workspace area */
    v = snprintf(p, u, "%s", name);
    v++;
    if (v > u) {
        /* No space, reset and leave */
        WS_Release(ctx->ws, 0);
        return (NULL);
    }

    /* Update work space with what we've used */
    WS_Release(ctx->ws, v);

    /*
     * Producer
     */
    char *buf = name;
    int sendcnt = 0;

    /* Create Kafka handle */
    if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                            errstr, sizeof(errstr)))) {
        fprintf(stderr, "%% Failed to create new producer: %s\n", errstr);
        exit(1);
    }

    /* Add brokers */
    if (rd_kafka_brokers_add(rk, brokers) == 0) {
        fprintf(stderr, "%% No valid brokers specified\n");
        exit(1);
    }

    /* Create topic */
    rkt = rd_kafka_topic_new(rk, topic, topic_conf);

    size_t len = strlen(buf);
    if (buf[len - 1] == '\n')
        buf[--len] = '\0';

    /* Send/Produce message. */
    if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
                         /* Payload and length */
                         buf, len,
                         /* Optional key and its length */
                         NULL, 0,
                         /* Message opaque, provided in
                          * delivery report callback as
                          * msg_opaque. */
                         NULL) == -1) {
        fprintf(stderr,
                "%% Failed to produce to topic %s partition %i: %s\n",
                rd_kafka_topic_name(rkt), partition,
                rd_kafka_err2str(rd_kafka_errno2err(errno)));
        /* Poll to handle delivery reports */
        rd_kafka_poll(rk, 0);
        exit(2);
    }

    if (!quiet)
        fprintf(stderr, "%% Sent %zd bytes to topic %s partition %i\n",
                len, rd_kafka_topic_name(rkt), partition);
    sendcnt++;

    /* Poll to handle delivery reports */
    rd_kafka_poll(rk, 0);

    /* Wait for messages to be delivered */
    while (rd_kafka_outq_len(rk) > 0)
        rd_kafka_poll(rk, 100);

    /* Destroy topic */
    rd_kafka_topic_destroy(rkt);

    /* Destroy the handle */
    rd_kafka_destroy(rk);

    return (p);
}
/*
 * kafka_consume_main
 *
 * Main function for Kafka consumers running as background workers
 */
void kafka_consume_main(Datum arg)
{
    char err_msg[512];
    rd_kafka_topic_conf_t *topic_conf;
    rd_kafka_t *kafka;
    rd_kafka_topic_t *topic;
    rd_kafka_message_t **messages;
    const struct rd_kafka_metadata *meta;
    struct rd_kafka_metadata_topic topic_meta;
    rd_kafka_resp_err_t err;
    bool found;
    Oid id = (Oid) arg;
    ListCell *lc;
    KafkaConsumerProc *proc = hash_search(consumer_procs, &id,
                                          HASH_FIND, &found);
    KafkaConsumer consumer;
    CopyStmt *copy;
    int valid_brokers = 0;
    int i;
    int my_partitions = 0;

    if (!found)
        elog(ERROR, "kafka consumer %d not found", id);

    pqsignal(SIGTERM, kafka_consume_main_sigterm);
#define BACKTRACE_SEGFAULTS
#ifdef BACKTRACE_SEGFAULTS
    pqsignal(SIGSEGV, debug_segfault);
#endif

    /* we're now ready to receive signals */
    BackgroundWorkerUnblockSignals();

    /* give this proc access to the database */
    BackgroundWorkerInitializeConnection(NameStr(proc->dbname), NULL);

    /* load saved consumer state */
    StartTransactionCommand();
    load_consumer_state(proc->consumer_id, &consumer);
    copy = get_copy_statement(&consumer);

    topic_conf = rd_kafka_topic_conf_new();
    kafka = rd_kafka_new(RD_KAFKA_CONSUMER, NULL, err_msg, sizeof(err_msg));
    rd_kafka_set_logger(kafka, logger);

    /*
     * Add all brokers currently in pipeline_kafka_brokers
     */
    if (consumer.brokers == NIL)
        elog(ERROR, "no valid brokers were found");

    foreach(lc, consumer.brokers)
        valid_brokers += rd_kafka_brokers_add(kafka, lfirst(lc));

    if (!valid_brokers)
        elog(ERROR, "no valid brokers were found");

    /*
     * Set up our topic to read from
     */
    topic = rd_kafka_topic_new(kafka, consumer.topic, topic_conf);
    err = rd_kafka_metadata(kafka, false, topic, &meta, CONSUMER_TIMEOUT);

    if (err != RD_KAFKA_RESP_ERR_NO_ERROR)
        elog(ERROR, "failed to acquire metadata: %s", rd_kafka_err2str(err));

    Assert(meta->topic_cnt == 1);
    topic_meta = meta->topics[0];

    load_consumer_offsets(&consumer, &topic_meta, proc->offset);
    CommitTransactionCommand();

    /*
     * Begin consuming all partitions that this process is responsible for
     */
    for (i = 0; i < topic_meta.partition_cnt; i++) {
        int partition = topic_meta.partitions[i].id;

        Assert(partition <= consumer.num_partitions);
        if (partition % consumer.parallelism != proc->partition_group)
            continue;

        elog(LOG, "[kafka consumer] %s <- %s consuming partition %d from offset %ld",
             consumer.rel->relname, consumer.topic, partition,
             consumer.offsets[partition]);

        if (rd_kafka_consume_start(topic, partition,
                                   consumer.offsets[partition]) == -1)
            elog(ERROR, "failed to start consuming: %s",
                 rd_kafka_err2str(rd_kafka_errno2err(errno)));

        my_partitions++;
    }

    /*
     * No point doing anything if we don't have any partitions assigned to us
     */
    if (my_partitions == 0) {
        elog(LOG, "[kafka consumer] %s <- %s consumer %d doesn't have any partitions to read from",
             consumer.rel->relname, consumer.topic, MyProcPid);
        goto done;
    }

    /* messages is an array of message pointers, so allocate pointer-sized
     * elements (the original allocated full struct sizes, which over-sized
     * the buffer harmlessly) */
    messages = palloc0(sizeof(rd_kafka_message_t *) * consumer.batch_size);

    /*
     * Consume messages until we are terminated
     */
    while (!got_sigterm) {
        ssize_t num_consumed;
        int i;
        int messages_buffered = 0;
        int partition;
        StringInfoData buf;
        bool xact = false;

        for (partition = 0; partition < consumer.num_partitions; partition++) {
            if (partition % consumer.parallelism != proc->partition_group)
                continue;

            num_consumed = rd_kafka_consume_batch(topic, partition,
                                                  CONSUMER_TIMEOUT, messages,
                                                  consumer.batch_size);
            if (num_consumed <= 0)
                continue;

            if (!xact) {
                StartTransactionCommand();
                xact = true;
            }

            initStringInfo(&buf);
            for (i = 0; i < num_consumed; i++) {
                if (messages[i]->payload != NULL) {
                    appendBinaryStringInfo(&buf, messages[i]->payload,
                                           messages[i]->len);
                    if (buf.len > 0 && buf.data[buf.len - 1] != '\n')
                        appendStringInfoChar(&buf, '\n');
                    messages_buffered++;
                }
                consumer.offsets[partition] = messages[i]->offset;
                rd_kafka_message_destroy(messages[i]);
            }
        }

        if (!xact) {
            pg_usleep(1 * 1000);
            continue;
        }

        /* we don't want to die in the event of any errors */
        PG_TRY();
        {
            if (messages_buffered)
                execute_copy(copy, &buf);
        }
        PG_CATCH();
        {
            elog(LOG, "[kafka consumer] %s <- %s failed to process batch, dropped %d message%s:",
                 consumer.rel->relname, consumer.topic,
                 (int) num_consumed, (num_consumed == 1 ? "" : "s"));
            EmitErrorReport();
            FlushErrorState();

            AbortCurrentTransaction();
            xact = false;
        }
        PG_END_TRY();

        if (!xact)
            StartTransactionCommand();

        if (messages_buffered)
            save_consumer_state(&consumer, proc->partition_group);

        CommitTransactionCommand();
    }

done:
    hash_search(consumer_procs, &id, HASH_REMOVE, NULL);

    rd_kafka_topic_destroy(topic);
    rd_kafka_destroy(kafka);
    rd_kafka_wait_destroyed(CONSUMER_TIMEOUT);
}
int main(int argc, char **argv)
{
    rd_kafka_topic_t *rkt;
    char *brokers = "localhost:9092";
    char mode = 'C';
    char *topic = NULL;
    int partition = RD_KAFKA_PARTITION_UA;
    int opt;
    rd_kafka_conf_t *conf;
    rd_kafka_topic_conf_t *topic_conf;
    char errstr[512];
    const char *debug = NULL;
    int64_t start_offset = 0;
    int report_offsets = 0;
    int do_conf_dump = 0;

    quiet = !isatty(STDIN_FILENO);

    /* Kafka configuration */
    conf = rd_kafka_conf_new();

    /* Topic configuration */
    topic_conf = rd_kafka_topic_conf_new();

    while ((opt = getopt(argc, argv, "PCLt:p:b:z:qd:o:eX:A")) != -1) {
        switch (opt) {
        case 'P':
        case 'C':
        case 'L':
            mode = opt;
            break;
        case 't':
            topic = optarg;
            break;
        case 'p':
            partition = atoi(optarg);
            break;
        case 'b':
            brokers = optarg;
            break;
        case 'z':
            if (rd_kafka_conf_set(conf, "compression.codec", optarg,
                                  errstr, sizeof(errstr)) !=
                RD_KAFKA_CONF_OK) {
                fprintf(stderr, "%% %s\n", errstr);
                exit(1);
            }
            break;
        case 'o':
            if (!strcmp(optarg, "end"))
                start_offset = RD_KAFKA_OFFSET_END;
            else if (!strcmp(optarg, "beginning"))
                start_offset = RD_KAFKA_OFFSET_BEGINNING;
            else if (!strcmp(optarg, "stored"))
                start_offset = RD_KAFKA_OFFSET_STORED;
            else if (!strcmp(optarg, "report"))
                report_offsets = 1;
            else
                start_offset = strtoll(optarg, NULL, 10);
            break;
        case 'e':
            exit_eof = 1;
            break;
        case 'd':
            debug = optarg;
            break;
        case 'q':
            quiet = 1;
            break;
        case 'A':
            output = OUTPUT_RAW;
            break;
        case 'X':
        {
            char *name, *val;
            rd_kafka_conf_res_t res;

            if (!strcmp(optarg, "list") || !strcmp(optarg, "help")) {
                rd_kafka_conf_properties_show(stdout);
                exit(0);
            }

            if (!strcmp(optarg, "dump")) {
                do_conf_dump = 1;
                continue;
            }

            name = optarg;
            if (!(val = strchr(name, '='))) {
                fprintf(stderr,
                        "%% Expected -X property=value, not %s\n", name);
                exit(1);
            }

            *val = '\0';
            val++;

            res = RD_KAFKA_CONF_UNKNOWN;
            /* Try "topic." prefixed properties on topic
             * conf first, and then fall through to global if
             * it didn't match a topic configuration property. */
            if (!strncmp(name, "topic.", strlen("topic.")))
                res = rd_kafka_topic_conf_set(topic_conf,
                                              name + strlen("topic."),
                                              val, errstr, sizeof(errstr));

            if (res == RD_KAFKA_CONF_UNKNOWN)
                res = rd_kafka_conf_set(conf, name, val,
                                        errstr, sizeof(errstr));

            if (res != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "%% %s\n", errstr);
                exit(1);
            }
        }
        break;

        default:
            goto usage;
        }
    }

    if (do_conf_dump) {
        const char **arr;
        size_t cnt;
        int pass;

        for (pass = 0; pass < 2; pass++) {
            int i;

            if (pass == 0) {
                arr = rd_kafka_conf_dump(conf, &cnt);
                printf("# Global config\n");
            } else {
                printf("# Topic config\n");
                arr = rd_kafka_topic_conf_dump(topic_conf, &cnt);
            }

            for (i = 0; i < cnt; i += 2)
                printf("%s = %s\n", arr[i], arr[i + 1]);

            printf("\n");

            rd_kafka_conf_dump_free(arr, cnt);
        }

        exit(0);
    }

    if (optind != argc || (mode != 'L' && !topic)) {
    usage:
        fprintf(stderr,
                "Usage: %s -C|-P|-L -t <topic> "
                "[-p <partition>] [-b <host1:port1,host2:port2,..>]\n"
                "\n"
                "librdkafka version %s (0x%08x)\n"
                "\n"
                " Options:\n"
                "  -C | -P         Consumer or Producer mode\n"
                "  -L              Metadata list mode\n"
                "  -t <topic>      Topic to fetch / produce\n"
                "  -p <num>        Partition (random partitioner)\n"
                "  -b <brokers>    Broker address (localhost:9092)\n"
                "  -z <codec>      Enable compression:\n"
                "                  none|gzip|snappy\n"
                "  -o <offset>     Start offset (consumer)\n"
                "  -o report       Report message offsets (producer)\n"
                "  -e              Exit consumer when last message\n"
                "                  in partition has been received.\n"
                "  -d [facs..]     Enable debugging contexts:\n"
                "  -q              Be quiet\n"
                "  -A              Raw payload output (consumer)\n"
                "                  %s\n"
                "  -X <prop=name>  Set arbitrary librdkafka "
                "configuration property\n"
                "                  Properties prefixed with \"topic.\" "
                "will be set on topic object.\n"
                "                  Use '-X list' to see the full list\n"
                "                  of supported properties.\n"
                "\n"
                " In Consumer mode:\n"
                "  writes fetched messages to stdout\n"
                " In Producer mode:\n"
                "  reads messages from stdin and sends to broker\n"
                " In List mode:\n"
                "  queries broker for metadata information, "
                "topic is optional.\n"
                "\n",
                argv[0],
                rd_kafka_version_str(), rd_kafka_version(),
                RD_KAFKA_DEBUG_CONTEXTS);
        exit(1);
    }

    signal(SIGINT, stop);
    signal(SIGUSR1, sig_usr1);

    if (debug &&
        rd_kafka_conf_set(conf, "debug", debug,
                          errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
        fprintf(stderr, "%% Debug configuration failed: %s: %s\n",
                errstr, debug);
        exit(1);
    }

    if (mode == 'P') {
        /*
         * Producer
         */
        char buf[2048];
        int sendcnt = 0;

        /* Set up a message delivery report callback.
         * It will be called once for each message, either on successful
         * delivery to broker, or upon failure to deliver to broker. */

        /* If offset reporting (-o report) is enabled, use the
         * richer dr_msg_cb instead. */
        if (report_offsets) {
            rd_kafka_topic_conf_set(topic_conf,
                                    "produce.offset.report", "true",
                                    errstr, sizeof(errstr));
            rd_kafka_conf_set_dr_msg_cb(conf, msg_delivered2);
        } else
            rd_kafka_conf_set_dr_cb(conf, msg_delivered);

        /* Create Kafka handle */
        if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                                errstr, sizeof(errstr)))) {
            fprintf(stderr,
                    "%% Failed to create new producer: %s\n", errstr);
            exit(1);
        }

        /* Set logger */
        rd_kafka_set_logger(rk, logger);
        rd_kafka_set_log_level(rk, LOG_DEBUG);

        /* Add brokers */
        if (rd_kafka_brokers_add(rk, brokers) == 0) {
            fprintf(stderr, "%% No valid brokers specified\n");
            exit(1);
        }

        /* Create topic */
        rkt = rd_kafka_topic_new(rk, topic, topic_conf);

        if (!quiet)
            fprintf(stderr, "%% Type stuff and hit enter to send\n");

        while (run && fgets(buf, sizeof(buf), stdin)) {
            size_t len = strlen(buf);
            if (buf[len - 1] == '\n')
                buf[--len] = '\0';

            /* Send/Produce message. */
            if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
                                 /* Payload and length */
                                 buf, len,
                                 /* Optional key and its length */
                                 NULL, 0,
                                 /* Message opaque, provided in
                                  * delivery report callback as
                                  * msg_opaque. */
                                 NULL) == -1) {
                fprintf(stderr,
                        "%% Failed to produce to topic %s "
                        "partition %i: %s\n",
                        rd_kafka_topic_name(rkt), partition,
                        rd_kafka_err2str(rd_kafka_errno2err(errno)));
                /* Poll to handle delivery reports */
                rd_kafka_poll(rk, 0);
                continue;
            }

            if (!quiet)
                fprintf(stderr,
                        "%% Sent %zd bytes to topic %s partition %i\n",
                        len, rd_kafka_topic_name(rkt), partition);
            sendcnt++;

            /* Poll to handle delivery reports */
            rd_kafka_poll(rk, 0);
        }

        /* Poll to handle delivery reports */
        rd_kafka_poll(rk, 0);

        /* Wait for messages to be delivered */
        while (run && rd_kafka_outq_len(rk) > 0)
            rd_kafka_poll(rk, 100);

        /* Destroy the handle */
        rd_kafka_destroy(rk);

    } else if (mode == 'C') {
        /*
         * Consumer
         */

        /* Create Kafka handle */
        if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf,
                                errstr, sizeof(errstr)))) {
            fprintf(stderr,
                    "%% Failed to create new consumer: %s\n", errstr);
            exit(1);
        }

        /* Set logger */
        rd_kafka_set_logger(rk, logger);
        rd_kafka_set_log_level(rk, LOG_DEBUG);

        /* Add brokers */
        if (rd_kafka_brokers_add(rk, brokers) == 0) {
            fprintf(stderr, "%% No valid brokers specified\n");
            exit(1);
        }

        /* Create topic */
        rkt = rd_kafka_topic_new(rk, topic, topic_conf);

        /* Start consuming */
        if (rd_kafka_consume_start(rkt, partition, start_offset) == -1) {
            fprintf(stderr, "%% Failed to start consuming: %s\n",
                    rd_kafka_err2str(rd_kafka_errno2err(errno)));
            exit(1);
        }

        while (run) {
            rd_kafka_message_t *rkmessage;

            /* Consume single message.
             * See rdkafka_performance.c for high speed
             * consuming of messages. */
            rkmessage = rd_kafka_consume(rkt, partition, 1000);
            if (!rkmessage) /* timeout */
                continue;

            msg_consume(rkmessage, NULL);

            /* Return message to rdkafka */
            rd_kafka_message_destroy(rkmessage);
        }

        /* Stop consuming */
        rd_kafka_consume_stop(rkt, partition);

        rd_kafka_topic_destroy(rkt);

        rd_kafka_destroy(rk);

    } else if (mode == 'L') {
        rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;

        /* Create Kafka handle */
        if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                                errstr, sizeof(errstr)))) {
            fprintf(stderr,
                    "%% Failed to create new producer: %s\n", errstr);
            exit(1);
        }

        /* Set logger */
        rd_kafka_set_logger(rk, logger);
        rd_kafka_set_log_level(rk, LOG_DEBUG);

        /* Add brokers */
        if (rd_kafka_brokers_add(rk, brokers) == 0) {
            fprintf(stderr, "%% No valid brokers specified\n");
            exit(1);
        }

        /* Create topic */
        if (topic)
            rkt = rd_kafka_topic_new(rk, topic, topic_conf);
        else
            rkt = NULL;

        while (run) {
            const struct rd_kafka_metadata *metadata;

            /* Fetch metadata */
            err = rd_kafka_metadata(rk, rkt ? 0 : 1, rkt,
                                    &metadata, 5000);
            if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
                fprintf(stderr,
                        "%% Failed to acquire metadata: %s\n",
                        rd_kafka_err2str(err));
                run = 0;
                break;
            }

            metadata_print(topic, metadata);

            rd_kafka_metadata_destroy(metadata);
            run = 0;
        }

        /* Destroy the handle */
        rd_kafka_destroy(rk);

        /* Exit right away, don't wait for background cleanup, we haven't
         * done anything important anyway. */
        exit(err ? 2 : 0);
    }

    /* Let background threads clean up and terminate cleanly. */
    rd_kafka_wait_destroyed(2000);

    return 0;
}
static PyObject *
Producer_produce(RdkHandle *self, PyObject *message)
{
    if (RdkHandle_safe_lock(self, /* check_running= */ 1)) return NULL;

    PyObject *value = NULL;
    PyObject *partition_key = NULL;
    PyObject *partition_id = NULL;

    /* Keep message alive until the delivery-callback runs.  Needed both
     * because we may want to put the message on a report queue when the
     * callback runs, and because we'll tell rd_kafka_produce() not to copy
     * the payload and it can safely use the raw Message bytes directly */
    Py_INCREF(message);

    /* Get pointers to raw Message contents */
    value = PyObject_GetAttrString(message, "value");
    if (! value) goto failed;
    partition_key = PyObject_GetAttrString(message, "partition_key");
    if (! partition_key) goto failed;
    partition_id = PyObject_GetAttrString(message, "partition_id");
    if (! partition_id) goto failed;

    char *v = NULL;
    Py_ssize_t v_len = 0;
    if (value != Py_None) {
        v = PyBytes_AsString(value);
        if (! v) goto failed;
        v_len = PyBytes_GET_SIZE(value);
    }

    char *pk = NULL;
    Py_ssize_t pk_len = 0;
    if (partition_key != Py_None) {
        pk = PyBytes_AsString(partition_key);
        if (! pk) goto failed;
        pk_len = PyBytes_GET_SIZE(partition_key);
    }

    int32_t p_id = PyLong_AsLong(partition_id);
    if (p_id == -1 && PyErr_Occurred()) goto failed;

    int res = 0;
    Py_BEGIN_ALLOW_THREADS
        res = rd_kafka_produce(self->rdk_topic_handle,
                               p_id,
                               0,  /* ie don't copy and don't dealloc v */
                               v, v_len,
                               pk, pk_len,
                               (void *)message);
    Py_END_ALLOW_THREADS

    if (res == -1) {
        rd_kafka_resp_err_t err = rd_kafka_errno2err(errno);
        if (err == RD_KAFKA_RESP_ERR__QUEUE_FULL) {
            set_pykafka_error("ProducerQueueFullError", "");
            goto failed;
        } else {
            /* Any other errors should go through the report queue,
             * because that's where pykafka.Producer would put them */
            PyObject *put_func =
                (PyObject *)rd_kafka_opaque(self->rdk_handle);
            if (-1 == Producer_delivery_report_put(put_func, message, err))
                goto failed;
        }
        Py_DECREF(message);  /* There won't be a delivery-callback */
    }

    Py_DECREF(value);
    Py_DECREF(partition_key);
    Py_DECREF(partition_id);

    if (RdkHandle_unlock(self)) return NULL;
    Py_INCREF(Py_None);
    return Py_None;

failed:
    Py_XDECREF(value);
    Py_XDECREF(partition_key);
    Py_XDECREF(partition_id);
    RdkHandle_unlock(self);
    return NULL;
}
int kafka_produce(rd_kafka_t *r, char *topic, char *msg, int msg_len,
                  int report, long timeout)
{
    char errstr[512];
    rd_kafka_topic_t *rkt;
    struct produce_cb_params pcb = {1, 0, 0, 0, 0, NULL};
    void *opaque;
    int partition = RD_KAFKA_PARTITION_UA;

    /* Decide whether to pass callback params or not... */
    if (report)
        opaque = &pcb;
    else
        opaque = NULL;

    rd_kafka_topic_conf_t *topic_conf;

    if (r == NULL) {
        if (log_level) {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_ERR,
                   "phpkafka - no connection to produce to topic: %s",
                   topic);
        }
        return -2;
    }

    /* Topic configuration */
    topic_conf = rd_kafka_topic_conf_new();
    char timeoutStr[64];
    snprintf(timeoutStr, 64, "%lu", timeout);
    if (rd_kafka_topic_conf_set(topic_conf, "message.timeout.ms",
                                timeoutStr, errstr,
                                sizeof(errstr)) != RD_KAFKA_CONF_OK) {
        if (log_level) {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_ERR,
                   "Failed to configure topic param 'message.timeout.ms' "
                   "to %lu before producing; config err was: %s",
                   timeout, errstr);
        }
        rd_kafka_topic_conf_destroy(topic_conf);
        return -3;
    }

    /* Create topic */
    rkt = rd_kafka_topic_new(r, topic, topic_conf);

    if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
                         /* Payload and length */
                         msg, msg_len,
                         /* Optional key and its length */
                         NULL, 0,
                         /* Message opaque, provided in
                          * delivery report callback as
                          * msg_opaque. */
                         opaque) == -1) {
        if (log_level) {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_INFO,
                   "phpkafka - %% Failed to produce to topic %s "
                   "partition %i: %s",
                   rd_kafka_topic_name(rkt), partition,
                   rd_kafka_err2str(rd_kafka_errno2err(errno)));
        }
        rd_kafka_topic_destroy(rkt);
        return -1;
    }

    /* Poll to handle delivery reports */
    rd_kafka_poll(r, 0);

    /* Wait for messages to be delivered */
    while (report && pcb.msg_count && rd_kafka_outq_len(r) > 0)
        rd_kafka_poll(r, 10);

    rd_kafka_topic_destroy(rkt);
    return 0;
}
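A hedged usage sketch for kafka_produce() above, assuming `r` is a connected handle created elsewhere in the extension (rd_kafka_new() plus rd_kafka_brokers_add()); the final argument is the per-message timeout fed into message.timeout.ms:

/* Fire-and-forget produce (report=0) with a 5 s message timeout. */
const char *payload = "hello";
int rc = kafka_produce(r, "test_topic", (char *)payload,
                       (int)strlen(payload), 0, 5000L);
if (rc != 0)
    syslog(LOG_ERR, "phpkafka - produce failed with code %d", rc);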
static void consume_messages(uint64_t testid, const char *topic,
                             int32_t partition, int msg_base,
                             int batch_cnt, int msgcnt)
{
    rd_kafka_t *rk;
    rd_kafka_topic_t *rkt;
    rd_kafka_conf_t *conf;
    rd_kafka_topic_conf_t *topic_conf;
    int i;

    test_conf_init(&conf, &topic_conf, 20);

    /* Create kafka instance */
    rk = test_create_handle(RD_KAFKA_CONSUMER, conf);

    TEST_SAY("Created kafka instance %s\n", rd_kafka_name(rk));

    rkt = rd_kafka_topic_new(rk, topic, topic_conf);
    if (!rkt)
        TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));

    TEST_SAY("Consuming %i messages from partition %i\n",
             batch_cnt, partition);

    /* Consume messages */
    if (rd_kafka_consume_start(rkt, partition,
                               RD_KAFKA_OFFSET_TAIL(batch_cnt)) == -1)
        TEST_FAIL("consume_start(%i, -%i) failed: %s",
                  (int)partition, batch_cnt,
                  rd_kafka_err2str(rd_kafka_errno2err(errno)));

    for (i = 0; i < batch_cnt; ) {
        rd_kafka_message_t *rkmessage;

        rkmessage = rd_kafka_consume(rkt, partition, tmout_multip(5000));
        if (!rkmessage)
            TEST_FAIL("Failed to consume message %i/%i from "
                      "partition %i: %s",
                      i, batch_cnt, (int)partition,
                      rd_kafka_err2str(rd_kafka_errno2err(errno)));
        if (rkmessage->err) {
            if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
                rd_kafka_message_destroy(rkmessage);
                continue;
            }
            TEST_FAIL("Consume message %i/%i from partition %i "
                      "has error: %s",
                      i, batch_cnt, (int)partition,
                      rd_kafka_err2str(rkmessage->err));
        }

        verify_consumed_msg(testid, partition, msg_base + i, rkmessage);

        rd_kafka_message_destroy(rkmessage);
        i++;
    }

    rd_kafka_consume_stop(rkt, partition);

    /* Destroy topic */
    rd_kafka_topic_destroy(rkt);

    /* Destroy rdkafka instance */
    TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
    rd_kafka_destroy(rk);
}
static void consume_messages_with_queues(uint64_t testid, const char *topic,
                                         int partition_cnt, int msgcnt)
{
    rd_kafka_t *rk;
    rd_kafka_topic_t *rkt;
    rd_kafka_conf_t *conf;
    rd_kafka_topic_conf_t *topic_conf;
    rd_kafka_queue_t *rkqu;
    int i;
    int32_t partition;
    int batch_cnt = msgcnt / partition_cnt;

    test_conf_init(&conf, &topic_conf, 20);

    /* Create kafka instance */
    rk = test_create_handle(RD_KAFKA_CONSUMER, conf);

    /* Create queue */
    rkqu = rd_kafka_queue_new(rk);

    rkt = rd_kafka_topic_new(rk, topic, topic_conf);
    if (!rkt)
        TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));

    TEST_SAY("Consuming %i messages from one queue serving %i partitions\n",
             msgcnt, partition_cnt);

    /* Start consuming each partition */
    for (partition = 0; partition < partition_cnt; partition++) {
        /* Consume messages */
        TEST_SAY("Start consuming partition %i at offset -%i\n",
                 partition, batch_cnt);
        if (rd_kafka_consume_start_queue(rkt, partition,
                                         RD_KAFKA_OFFSET_TAIL(batch_cnt),
                                         rkqu) == -1)
            TEST_FAIL("consume_start_queue(%i) failed: %s",
                      (int)partition,
                      rd_kafka_err2str(rd_kafka_errno2err(errno)));
    }

    /* Consume messages from queue */
    for (i = 0; i < msgcnt; ) {
        rd_kafka_message_t *rkmessage;

        rkmessage = rd_kafka_consume_queue(rkqu, tmout_multip(5000));
        if (!rkmessage)
            TEST_FAIL("Failed to consume message %i/%i from queue: %s",
                      i, msgcnt,
                      rd_kafka_err2str(rd_kafka_errno2err(errno)));
        if (rkmessage->err) {
            if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
                TEST_SAY("Topic %s [%"PRId32"] reached "
                         "EOF at offset %"PRId64"\n",
                         rd_kafka_topic_name(rkmessage->rkt),
                         rkmessage->partition, rkmessage->offset);
                rd_kafka_message_destroy(rkmessage);
                continue;
            }
            TEST_FAIL("Consume message %i/%i from queue "
                      "has error (offset %"PRId64", partition %"PRId32"): %s",
                      i, msgcnt,
                      rkmessage->offset, rkmessage->partition,
                      rd_kafka_err2str(rkmessage->err));
        }

        verify_consumed_msg(testid, -1, -1, rkmessage);

        rd_kafka_message_destroy(rkmessage);
        i++;
    }

    /* Stop consuming each partition */
    for (partition = 0; partition < partition_cnt; partition++)
        rd_kafka_consume_stop(rkt, partition);

    /* Destroy queue */
    rd_kafka_queue_destroy(rkqu);

    /* Destroy topic */
    rd_kafka_topic_destroy(rkt);

    /* Destroy rdkafka instance */
    TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
    rd_kafka_destroy(rk);
}
/**
 * Produces 'msgcnt' messages split over 'partition_cnt' partitions.
 */
static void produce_messages(uint64_t testid, const char *topic,
                             int partition_cnt, int msgcnt)
{
    int r;
    rd_kafka_t *rk;
    rd_kafka_topic_t *rkt;
    rd_kafka_conf_t *conf;
    rd_kafka_topic_conf_t *topic_conf;
    char errstr[512];
    char msg[128];
    int failcnt = 0;
    int i;
    rd_kafka_message_t *rkmessages;
    int32_t partition;
    int msgid = 0;

    test_conf_init(&conf, &topic_conf, 20);

    rd_kafka_conf_set_dr_cb(conf, dr_cb);

    /* Make sure all replicas are in-sync after producing
     * so that the consume test won't fail. */
    rd_kafka_topic_conf_set(topic_conf, "request.required.acks", "-1",
                            errstr, sizeof(errstr));

    /* Create kafka instance */
    rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

    rkt = rd_kafka_topic_new(rk, topic, topic_conf);
    if (!rkt)
        TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));

    /* Create messages. */
    prod_msg_remains = msgcnt;
    rkmessages = calloc(msgcnt / partition_cnt, sizeof(*rkmessages));
    for (partition = 0; partition < partition_cnt; partition++) {
        int batch_cnt = msgcnt / partition_cnt;

        for (i = 0; i < batch_cnt; i++) {
            rd_snprintf(msg, sizeof(msg),
                        "testid=%"PRIu64", partition=%i, msg=%i",
                        testid, (int)partition, msgid);
            rkmessages[i].payload = rd_strdup(msg);
            rkmessages[i].len = strlen(msg);
            msgid++;
        }

        TEST_SAY("Start produce to partition %i: msgs #%d..%d\n",
                 (int)partition, msgid - batch_cnt, msgid);

        /* Produce batch for this partition */
        r = rd_kafka_produce_batch(rkt, partition, RD_KAFKA_MSG_F_FREE,
                                   rkmessages, batch_cnt);
        if (r == -1)
            TEST_FAIL("Failed to produce batch for partition %i: %s",
                      (int)partition,
                      rd_kafka_err2str(rd_kafka_errno2err(errno)));

        /* Scan through messages to check for errors. */
        for (i = 0; i < batch_cnt; i++) {
            if (rkmessages[i].err) {
                failcnt++;
                if (failcnt < 100)
                    TEST_SAY("Message #%i failed: %s\n",
                             i, rd_kafka_err2str(rkmessages[i].err));
            }
        }

        /* All messages should've been produced. */
        if (r < batch_cnt) {
            TEST_SAY("Not all messages were accepted "
                     "by produce_batch(): %i < %i\n", r, batch_cnt);

            if (batch_cnt - r != failcnt)
                TEST_SAY("Discrepancy between failed messages (%i) "
                         "and return value %i (%i - %i)\n",
                         failcnt, batch_cnt - r, batch_cnt, r);

            TEST_FAIL("%i/%i messages failed\n", batch_cnt - r, batch_cnt);
        }

        TEST_SAY("Produced %i messages to partition %i, "
                 "waiting for deliveries\n", r, partition);
    }

    free(rkmessages);

    /* Wait for messages to be delivered */
    while (rd_kafka_outq_len(rk) > 0)
        rd_kafka_poll(rk, 100);

    if (fails)
        TEST_FAIL("%i failures, see previous errors", fails);

    if (prod_msg_remains != 0)
        TEST_FAIL("Still waiting for %i messages to be produced",
                  prod_msg_remains);

    /* Destroy topic */
    rd_kafka_topic_destroy(rkt);

    /* Destroy rdkafka instance */
    TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
    rd_kafka_destroy(rk);
}