static rd_kafka_message_t *msg_consume(rd_kafka_message_t *rkmessage, void *opaque) { if (rkmessage->err) { if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { openlog("phpkafka", 0, LOG_USER); syslog(LOG_INFO, "phpkafka - %% Consumer reached end of %s [%"PRId32"] " "message queue at offset %"PRId64"\n", rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, rkmessage->offset); if (exit_eof) run = 0; return NULL; } openlog("phpkafka", 0, LOG_USER); syslog(LOG_INFO, "phpkafka - %% Consume error for topic \"%s\" [%"PRId32"] " "offset %"PRId64": %s\n", rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, rkmessage->offset, rd_kafka_message_errstr(rkmessage)); return NULL; } //php_printf("%.*s\n", (int)rkmessage->len, (char *)rkmessage->payload); return rkmessage; }
static void msg_consume (rd_kafka_message_t *rkmessage, void *opaque) { if (rkmessage->err) { if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { printf("%% Consumer reached end of %s [%"PRId32"] " "message queue at offset %"PRId64"\n", rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, rkmessage->offset); if (exit_eof) run = 0; return; } printf("%% Consume error for topic \"%s\" [%"PRId32"] " "offset %"PRId64": %s\n", rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, rkmessage->offset, rd_kafka_message_errstr(rkmessage)); return; } printf("%% Message (offset %"PRId64", %zd bytes):\n", rkmessage->offset, rkmessage->len); if (rkmessage->key_len) hexdump(stdout, "Message Key", rkmessage->key, rkmessage->key_len); hexdump(stdout, "Message Payload", rkmessage->payload, rkmessage->len); }
/** * msg_consume * * Callback on message receipt. * * @param rkmessage rd_kafka_message_t* the message * @param cfg HermannInstanceConfig* the consumer instance configuration */ static void msg_consume(rd_kafka_message_t *rkmessage, HermannInstanceConfig *cfg) { if (rkmessage->err) { if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { if (cfg->exit_eof) { fprintf(stderr, "%% Consumer reached end of %s [%"PRId32"] " "message queue at offset %"PRId64"\n", rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, rkmessage->offset); cfg->run = 0; } return; } fprintf(stderr, "%% Consume error for topic \"%s\" [%"PRId32"] " "offset %"PRId64": %s\n", rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, rkmessage->offset, rd_kafka_message_errstr(rkmessage)); return; } if (DEBUG && rkmessage->key_len) { if (output == OUTPUT_HEXDUMP) { hexdump(stdout, "Message Key", rkmessage->key, rkmessage->key_len); } else { printf("Key: %.*s\n", (int)rkmessage->key_len, (char *)rkmessage->key); } } if (output == OUTPUT_HEXDUMP) { if (DEBUG) { hexdump(stdout, "Message Payload", rkmessage->payload, rkmessage->len); } } else { if (DEBUG) { printf("%.*s\n", (int)rkmessage->len, (char *)rkmessage->payload); } } // Yield the data to the Consumer's block if (rb_block_given_p()) { VALUE value = rb_str_new((char *)rkmessage->payload, rkmessage->len); rb_yield(value); } else { if (DEBUG) { fprintf(stderr, "No block given\n"); // todo: should this be an error? } } }
/** * Handle and print a consumed message. * Internally crafted messages are also used to propagate state from * librdkafka to the application. The application needs to check * the `rkmessage->err` field for this purpose. */ static void msg_consume (rd_kafka_message_t *rkmessage, void *opaque) { if (rkmessage->err) { if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { fprintf(stderr, "%% Consumer reached end of %s [%"PRId32"] " "message queue at offset %"PRId64"\n", rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, rkmessage->offset); if (exit_eof && --wait_eof == 0) run = 0; return; } if (rkmessage->rkt) fprintf(stderr, "%% Consume error for " "topic \"%s\" [%"PRId32"] " "offset %"PRId64": %s\n", rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, rkmessage->offset, rd_kafka_message_errstr(rkmessage)); else fprintf(stderr, "%% Consumer error: %s: %s\n", rd_kafka_err2str(rkmessage->err), rd_kafka_message_errstr(rkmessage)); if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION || rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) run = 0; return; } if (!quiet) fprintf(stdout, "%% Message (topic %s [%"PRId32"], " "offset %"PRId64", %zd bytes):\n", rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, rkmessage->offset, rkmessage->len); if (rkmessage->key_len) { if (output == OUTPUT_HEXDUMP) hexdump(stdout, "Message Key", rkmessage->key, rkmessage->key_len); else printf("Key: %.*s\n", (int)rkmessage->key_len, (char *)rkmessage->key); } if (output == OUTPUT_HEXDUMP) hexdump(stdout, "Message Payload", rkmessage->payload, rkmessage->len); else printf("%.*s\n", (int)rkmessage->len, (char *)rkmessage->payload); }
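For orientation, a minimal sketch (not from the original sources) of the legacy consume loop that typically drives a handler like the one above; the rk/rkt handles, partition, and the volatile run flag are assumed to exist elsewhere:

/* Hedged sketch: drive msg_consume() with the legacy consumer API. */
if (rd_kafka_consume_start(rkt, partition, RD_KAFKA_OFFSET_BEGINNING) == -1)
        fprintf(stderr, "%% consume_start failed: %s\n",
                rd_kafka_err2str(rd_kafka_errno2err(errno)));
while (run) {
        rd_kafka_message_t *rkmessage = rd_kafka_consume(rkt, partition, 1000);
        if (!rkmessage)
                continue; /* timeout: poll again */
        msg_consume(rkmessage, NULL);
        rd_kafka_message_destroy(rkmessage);
}
rd_kafka_consume_stop(rkt, partition);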
int test_consumer_poll (const char *what, rd_kafka_t *rk, uint64_t testid, int exp_eof_cnt, int exp_msg_base, int exp_cnt) { int eof_cnt = 0; int cnt = 0; test_timing_t t_cons; TEST_SAY("%s: consume %d messages\n", what, exp_cnt); TIMING_START(&t_cons, "CONSUME"); while ((exp_eof_cnt == -1 || eof_cnt < exp_eof_cnt) && (cnt < exp_cnt)) { rd_kafka_message_t *rkmessage; rkmessage = rd_kafka_consumer_poll(rk, 10*1000); if (!rkmessage) /* Shouldn't take this long to get a msg */ TEST_FAIL("%s: consumer_poll() timeout\n", what); if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { TEST_SAY("%s [%"PRId32"] reached EOF at " "offset %"PRId64"\n", rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, rkmessage->offset); eof_cnt++; } else if (rkmessage->err) { TEST_SAY("%s [%"PRId32"] error (offset %"PRId64"): %s", rkmessage->rkt ? rd_kafka_topic_name(rkmessage->rkt) : "(no-topic)", rkmessage->partition, rkmessage->offset, rd_kafka_message_errstr(rkmessage)); } else { if (test_level > 2) TEST_SAY("%s [%"PRId32"] " "message at offset %"PRId64"\n", rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, rkmessage->offset); test_verify_rkmessage(rkmessage, testid, -1, -1); cnt++; } rd_kafka_message_destroy(rkmessage); } TIMING_STOP(&t_cons); TEST_SAY("%s: consumed %d/%d messages (%d/%d EOFs)\n", what, cnt, exp_cnt, eof_cnt, exp_eof_cnt); return cnt; }
void test_consumer_seek (const char *what, rd_kafka_topic_t *rkt, int32_t partition, int64_t offset) { rd_kafka_resp_err_t err; TEST_SAY("%s: consumer_seek: %s [%"PRId32"] to offset %"PRId64"\n", what, rd_kafka_topic_name(rkt), partition, offset); if ((err = rd_kafka_seek(rkt, partition, offset, 2000))) TEST_FAIL("%s: consume_seek(%s, %"PRId32", %"PRId64") " "failed: %s\n", what, rd_kafka_topic_name(rkt), partition, offset, rd_kafka_err2str(err)); }
/** * Handle delivery reports */ static void handle_drs (rd_kafka_event_t *rkev) { const rd_kafka_message_t *rkmessage; while ((rkmessage = rd_kafka_event_message_next(rkev))) { int msgid = *(int *)rkmessage->_private; free(rkmessage->_private); TEST_SAYL(3,"Got rkmessage %s [%"PRId32"] @ %"PRId64": %s\n", rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, rkmessage->offset, rd_kafka_err2str(rkmessage->err)); if (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR) TEST_FAIL("Message delivery failed: %s\n", rd_kafka_err2str(rkmessage->err)); if (msgid != msgid_next) { fails++; TEST_FAIL("Delivered msg %i, expected %i\n", msgid, msgid_next); return; } msgid_next = msgid+1; } }
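A hedged sketch of an event loop that could feed handle_drs(); it assumes the handle was created with delivery-report events enabled via rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_DR), and the timeout value is arbitrary:

/* Sketch: poll the main event queue and dispatch DR events. */
rd_kafka_queue_t *mainq = rd_kafka_queue_get_main(rk);
rd_kafka_event_t *rkev;
while ((rkev = rd_kafka_queue_poll(mainq, 1000)) != NULL) {
        if (rd_kafka_event_type(rkev) == RD_KAFKA_EVENT_DR)
                handle_drs(rkev);
        rd_kafka_event_destroy(rkev);
}
rd_kafka_queue_destroy(mainq);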
int test_consumer_poll (const char *what, rd_kafka_t *rk, uint64_t testid, int exp_eof_cnt, int exp_msg_base, int exp_cnt) { int eof_cnt = 0; int cnt = 0; while (eof_cnt < exp_eof_cnt) { rd_kafka_message_t *rkmessage; rkmessage = rd_kafka_consumer_poll(rk, 10*1000); if (!rkmessage) /* Shouldn't take this long to get a msg */ TEST_FAIL("%s: consumer_poll() timeout\n", what); if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { TEST_SAY("%s [%"PRId32"] reached EOF at " "offset %"PRId64"\n", rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, rkmessage->offset); eof_cnt++; } else if (rkmessage->err) { TEST_SAY("%s [%"PRId32"] error (offset %"PRId64"): %s", rkmessage->rkt ? rd_kafka_topic_name(rkmessage->rkt) : "(no-topic)", rkmessage->partition, rkmessage->offset, rd_kafka_message_errstr(rkmessage)); } else { if (test_level > 2) TEST_SAY("%s [%"PRId32"] " "message at offset %"PRId64"\n", rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, rkmessage->offset); cnt++; } rd_kafka_message_destroy(rkmessage); } return cnt; }
int main (int argc, char **argv) { if (argc < 0 /* always false */) { rd_kafka_version(); rd_kafka_version_str(); rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR); rd_kafka_errno2err(EINVAL); rd_kafka_conf_new(); rd_kafka_conf_destroy(NULL); rd_kafka_conf_dup(NULL); rd_kafka_conf_set(NULL, NULL, NULL, NULL, 0); rd_kafka_conf_set_dr_cb(NULL, NULL); rd_kafka_conf_set_error_cb(NULL, NULL); rd_kafka_conf_set_stats_cb(NULL, NULL); rd_kafka_conf_set_opaque(NULL, NULL); rd_kafka_conf_dump(NULL, NULL); rd_kafka_topic_conf_dump(NULL, NULL); rd_kafka_conf_dump_free(NULL, 0); rd_kafka_conf_properties_show(NULL); rd_kafka_topic_conf_new(); rd_kafka_topic_conf_dup(NULL); rd_kafka_topic_conf_destroy(NULL); rd_kafka_topic_conf_set(NULL, NULL, NULL, NULL, 0); rd_kafka_topic_conf_set_opaque(NULL, NULL); rd_kafka_topic_conf_set_partitioner_cb(NULL, NULL); rd_kafka_topic_partition_available(NULL, 0); rd_kafka_msg_partitioner_random(NULL, NULL, 0, 0, NULL, NULL); rd_kafka_new(0, NULL, NULL, 0); rd_kafka_destroy(NULL); rd_kafka_name(NULL); rd_kafka_topic_new(NULL, NULL, NULL); rd_kafka_topic_destroy(NULL); rd_kafka_topic_name(NULL); rd_kafka_message_destroy(NULL); rd_kafka_message_errstr(NULL); rd_kafka_consume_start(NULL, 0, 0); rd_kafka_consume_stop(NULL, 0); rd_kafka_consume(NULL, 0, 0); rd_kafka_consume_batch(NULL, 0, 0, NULL, 0); rd_kafka_consume_callback(NULL, 0, 0, NULL, NULL); rd_kafka_offset_store(NULL, 0, 0); rd_kafka_produce(NULL, 0, 0, NULL, 0, NULL, 0, NULL); rd_kafka_poll(NULL, 0); rd_kafka_brokers_add(NULL, NULL); rd_kafka_set_logger(NULL, NULL); rd_kafka_set_log_level(NULL, 0); rd_kafka_log_print(NULL, 0, NULL, NULL); rd_kafka_log_syslog(NULL, 0, NULL, NULL); rd_kafka_outq_len(NULL); rd_kafka_dump(NULL, NULL); rd_kafka_thread_cnt(); rd_kafka_wait_destroyed(0); } return 0; }
void test_consumer_stop (const char *what, rd_kafka_topic_t *rkt, int32_t partition) { TEST_SAY("%s: consumer_stop: %s [%"PRId32"]\n", what, rd_kafka_topic_name(rkt), partition); if (rd_kafka_consume_stop(rkt, partition) == -1) TEST_FAIL("%s: consume_stop failed: %s\n", what, rd_kafka_err2str(rd_kafka_errno2err(errno))); }
static void msg_consume (rd_kafka_message_t *rkmessage, void *opaque) { if (rkmessage->err) { if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { printf("%% Consumer reached end of %s [%"PRId32"] " "message queue at offset %"PRId64"\n", rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, rkmessage->offset); if (exit_eof) run = 0; return; } printf("%% Consume error for topic \"%s\" [%"PRId32"] " "offset %"PRId64": %s\n", rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, rkmessage->offset, rd_kafka_message_errstr(rkmessage)); msgs_failed++; return; } cnt.msgs++; cnt.bytes += rkmessage->len; if (!(cnt.msgs % 1000000)) printf("@%"PRId64": %.*s\n", rkmessage->offset, (int)rkmessage->len, (char *)rkmessage->payload); #if 0 /* Future API */ /* We store offset when we're done processing * the current message. */ rd_kafka_offset_store(rkmessage->rkt, rkmessage->partition, rd_kafka_offset_next(rkmessage)); #endif }
void test_consumer_start (const char *what, rd_kafka_topic_t *rkt, int32_t partition, int64_t start_offset) { TEST_SAY("%s: consumer_start: %s [%"PRId32"] at offset %"PRId64"\n", what, rd_kafka_topic_name(rkt), partition, start_offset); if (rd_kafka_consume_start(rkt, partition, start_offset) == -1) TEST_FAIL("%s: consume_start failed: %s\n", what, rd_kafka_err2str(rd_kafka_errno2err(errno))); }
static int kafka_handle(struct kafka_topic_context *ctx) /* {{{ */ { char errbuf[1024]; rd_kafka_conf_t *conf; rd_kafka_topic_conf_t *topic_conf; if (ctx->kafka != NULL && ctx->topic != NULL) return(0); if (ctx->kafka == NULL) { if ((conf = rd_kafka_conf_dup(ctx->kafka_conf)) == NULL) { ERROR("write_kafka plugin: cannot duplicate kafka config"); return(1); } if ((ctx->kafka = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errbuf, sizeof(errbuf))) == NULL) { ERROR("write_kafka plugin: cannot create kafka handle."); return 1; } rd_kafka_conf_destroy(ctx->kafka_conf); ctx->kafka_conf = NULL; INFO ("write_kafka plugin: created KAFKA handle : %s", rd_kafka_name(ctx->kafka)); #if defined(HAVE_LIBRDKAFKA_LOGGER) && !defined(HAVE_LIBRDKAFKA_LOG_CB) rd_kafka_set_logger(ctx->kafka, kafka_log); #endif } if (ctx->topic == NULL ) { if ((topic_conf = rd_kafka_topic_conf_dup(ctx->conf)) == NULL) { ERROR("write_kafka plugin: cannot duplicate kafka topic config"); return 1; } if ((ctx->topic = rd_kafka_topic_new(ctx->kafka, ctx->topic_name, topic_conf)) == NULL) { ERROR("write_kafka plugin: cannot create topic : %s\n", rd_kafka_err2str(rd_kafka_errno2err(errno))); return errno; } rd_kafka_topic_conf_destroy(ctx->conf); ctx->conf = NULL; INFO ("write_kafka plugin: handle created for topic : %s", rd_kafka_topic_name(ctx->topic)); } return(0); } /* }}} int kafka_handle */
/* Returns 0 on success. On failure, sets mapper->error and returns nonzero. */ int table_metadata_update_schema(table_mapper_t mapper, table_metadata_t table, int is_key, const char* schema_json, size_t schema_len) { int prev_schema_id = is_key ? table->key_schema_id : table->row_schema_id; int schema_id = TABLE_MAPPER_SCHEMA_ID_MISSING; int err; if (mapper->registry) { err = schema_registry_request(mapper->registry, rd_kafka_topic_name(table->topic), is_key, schema_json, schema_len, &schema_id); if (err) { mapper_error(mapper, "Failed to register %s schema: %s", is_key ? "key" : "row", mapper->registry->error); return err; } table_metadata_set_schema_id(table, is_key, schema_id); } avro_schema_t schema; /* If running with a schema registry, we can use the registry to detect * if the schema we just saw is the same as the one we remembered * previously (since the registry guarantees to return the same id for * identical schemas). If the registry returns the same id as before, we * can skip parsing the new schema and just keep the previous one. * * However, if we're running without a registry, it's not so easy to detect * whether or not the schema changed, so in that case we just always parse * the new schema. (We could store the previous schema JSON and strcmp() * it with the new JSON, but that probably wouldn't save much over just * parsing the JSON, given this isn't a hot code path.) */ if (prev_schema_id == TABLE_MAPPER_SCHEMA_ID_MISSING || prev_schema_id != schema_id) { if (schema_json) { err = avro_schema_from_json_length(schema_json, schema_len, &schema); if (err) { mapper_error(mapper, "Could not parse %s schema: %s", is_key ? "key" : "row", avro_strerror()); return err; } } else { schema = NULL; } table_metadata_set_schema(table, is_key, schema); if (schema) avro_schema_decref(schema); } return 0; }
void test_produce_msgs (rd_kafka_t *rk, rd_kafka_topic_t *rkt, uint64_t testid, int32_t partition, int msg_base, int cnt, const char *payload, size_t size) { int msg_id; test_timing_t t_all; int remains = 0; TEST_SAY("Produce to %s [%"PRId32"]: messages #%d..%d\n", rd_kafka_topic_name(rkt), partition, msg_base, msg_base+cnt); TIMING_START(&t_all, "PRODUCE"); for (msg_id = msg_base ; msg_id < msg_base + cnt ; msg_id++) { char key[128]; char buf[128]; const char *use_payload; size_t use_size; if (payload) { use_payload = payload; use_size = size; } else { test_msg_fmt(key, sizeof(key), testid, partition, msg_id); rd_snprintf(buf, sizeof(buf), "data: %s", key); use_payload = buf; use_size = strlen(buf); } remains++; if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, (void *)use_payload, use_size, key, strlen(key), &remains) == -1) TEST_FAIL("Failed to produce message %i " "to partition %i: %s", msg_id, (int)partition, rd_kafka_err2str(rd_kafka_errno2err(errno))); } /* Wait for messages to be delivered */ while (remains > 0 && rd_kafka_outq_len(rk) > 0) rd_kafka_poll(rk, 10); TIMING_STOP(&t_all); }
static void offset_queue_consume(rd_kafka_message_t *message, void *opaque) { struct consume_cb_params *params = opaque; if (params->eop == 0) return; if (message->err) { params->error_count += 1; if (params->auto_commit == 0) rd_kafka_offset_store( message->rkt, message->partition, message->offset == 0 ? 0 : message->offset -1 ); if (message->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { if (params->partition_offset[message->partition] == -2) {//no previous message read from this partition //set offset value to last possible value (-1 or last existing) //reduce eop count params->eop -= 1; params->read_count += 1; params->partition_offset[message->partition] = message->offset -1; } if (log_level) { openlog("phpkafka", 0, LOG_USER); syslog(LOG_INFO, "phpkafka - %% Consumer reached end of %s [%"PRId32"] " "message queue at offset %"PRId64"\n", rd_kafka_topic_name(message->rkt), message->partition, message->offset); } } return; } if (params->partition_offset[message->partition] == -1) params->eop -= 1; //we have an offset, save it params->partition_offset[message->partition] = message->offset; //tally read_count params->read_count += 1; if (params->auto_commit == 0) rd_kafka_offset_store( message->rkt, message->partition, message->offset == 0 ? 0 : message->offset -1 ); }
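The manual rd_kafka_offset_store() calls above only take effect when automatic offset commits are disabled; a minimal configuration sketch, assuming the legacy consumer's topic-level property:

/* Sketch: disable auto-commit so explicit rd_kafka_offset_store()
 * calls control consumer progress for this topic. */
char errstr[512];
rd_kafka_topic_conf_t *tconf = rd_kafka_topic_conf_new();
if (rd_kafka_topic_conf_set(tconf, "auto.commit.enable", "false",
                            errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
        fprintf(stderr, "%% %s\n", errstr);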
static rd_kafka_message_t *msg_consume(rd_kafka_message_t *rkmessage, void *opaque) { int *run = opaque; if (rkmessage->err) { *run = 0; if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { if (log_level) { openlog("phpkafka", 0, LOG_USER); syslog(LOG_INFO, "phpkafka - %% Consumer reached end of %s [%"PRId32"] " "message queue at offset %"PRId64"\n", rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, rkmessage->offset); } return NULL; } if (log_level) { openlog("phpkafka", 0, LOG_USER); syslog(LOG_INFO, "phpkafka - %% Consume error for topic \"%s\" [%"PRId32"] " "offset %"PRId64": %s\n", rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, rkmessage->offset, rd_kafka_message_errstr(rkmessage) ); } return NULL; } return rkmessage; }
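A hedged sketch of how a caller might drive the message-returning wrapper above; handle_payload() is a hypothetical application function:

/* Sketch: the wrapper returns the message only when it carries a payload,
 * so the caller can branch on NULL without re-checking rkmessage->err. */
int run = 1;
while (run) {
        rd_kafka_message_t *m = rd_kafka_consume(rkt, partition, 1000);
        if (!m)
                continue; /* timeout */
        if (msg_consume(m, &run) != NULL)
                handle_payload(m->payload, m->len); /* hypothetical */
        rd_kafka_message_destroy(m);
}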
int producer_push_data(const char* buf, const int buf_len, const wrapper_Info* producer_info) { int produce_ret; if (NULL == buf) return 0; if (0 == buf_len || buf_len > MAX_BUF_LEN) return -2; int tpart = 0; pthread_mutex_lock(&lock); tpart = part; part = (part + 1) % max_part; pthread_mutex_unlock(&lock); //printf("118producer->topic:%s, producer->partition:%d\n", producer_info->topic, producer_info->partition); /* Send/Produce message. */ produce_ret = rd_kafka_produce(producer_info->rkt, tpart, RD_KAFKA_MSG_F_COPY, /* Payload and length */ (void*)buf, (size_t)buf_len, /* Optional key and its length */ NULL, 0, /* Message opaque, provided in * delivery report callback as * msg_opaque. */ NULL); //printf("129produce_ret = %d\n", produce_ret); if (produce_ret == -1) { LOG(ERROR) << "errno :" << errno; fprintf(stderr, "%% Failed to produce to topic %s " "partition %i: %s\n", rd_kafka_topic_name(producer_info->rkt), producer_info->partition, rd_kafka_err2str( rd_kafka_errno2err(errno))); LOG(ERROR) << rd_kafka_err2str( rd_kafka_errno2err(errno)); //printf("%s", stderr); /* Poll to handle delivery reports */ rd_kafka_poll(producer_info->rk, 0); return PUSH_DATA_FAILED; } //fprintf(stderr, "136%% Sent %d bytes to topic %s partition %i\n",buf_len, rd_kafka_topic_name(producer_info->rkt), producer_info->partition); /* Poll to handle delivery reports */ rd_kafka_poll(producer_info->rk, 0); return PUSH_DATA_SUCCESS; }
/** * Produces \p cnt messages and returns immediately. * Does not wait for delivery. * \p msgcounterp is incremented for each produced message and passed * as \p msg_opaque which is later used in test_dr_cb to decrement * the counter on delivery. */ void test_produce_msgs_nowait (rd_kafka_t *rk, rd_kafka_topic_t *rkt, uint64_t testid, int32_t partition, int msg_base, int cnt, const char *payload, size_t size, int *msgcounterp) { int msg_id; test_timing_t t_all; TEST_SAY("Produce to %s [%"PRId32"]: messages #%d..%d\n", rd_kafka_topic_name(rkt), partition, msg_base, msg_base+cnt); TIMING_START(&t_all, "PRODUCE"); for (msg_id = msg_base ; msg_id < msg_base + cnt ; msg_id++) { char key[128]; char buf[128]; const char *use_payload; size_t use_size; if (payload) { use_payload = payload; use_size = size; } else { test_msg_fmt(key, sizeof(key), testid, partition, msg_id); rd_snprintf(buf, sizeof(buf), "%s: data", key); use_payload = buf; use_size = strlen(buf); } if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, (void *)use_payload, use_size, key, strlen(key), msgcounterp) == -1) TEST_FAIL("Failed to produce message %i " "to partition %i: %s", msg_id, (int)partition, rd_kafka_err2str(rd_kafka_errno2err(errno))); (*msgcounterp)++; } TIMING_STOP(&t_all); }
rd_kafka_resp_err_t rd_kafka_commit_message (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, int async) { rd_kafka_topic_partition_list_t *offsets; rd_kafka_topic_partition_t *rktpar; rd_kafka_resp_err_t err; if (rkmessage->err) return RD_KAFKA_RESP_ERR__INVALID_ARG; offsets = rd_kafka_topic_partition_list_new(1); rktpar = rd_kafka_topic_partition_list_add( offsets, rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition); rktpar->offset = rkmessage->offset+1; err = rd_kafka_commit(rk, offsets, async); rd_kafka_topic_partition_list_destroy(offsets); return err; }
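A hedged usage sketch for rd_kafka_commit_message() with the high-level consumer; process_message() is a hypothetical application callback:

/* Sketch: process-then-commit so the committed offset (message offset + 1)
 * never runs ahead of processing. async=0 requests a synchronous commit. */
rd_kafka_message_t *rkmessage = rd_kafka_consumer_poll(rk, 1000);
if (rkmessage) {
        if (!rkmessage->err) {
                process_message(rkmessage); /* hypothetical */
                rd_kafka_commit_message(rk, rkmessage, 0);
        }
        rd_kafka_message_destroy(rkmessage);
}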
int p_kafka_produce_data_to_part(struct p_kafka_host *kafka_host, void *data, u_int32_t data_len, int part) { int ret = SUCCESS; kafkap_ret_err_cb = FALSE; if (kafka_host && kafka_host->rk && kafka_host->topic) { ret = rd_kafka_produce(kafka_host->topic, part, RD_KAFKA_MSG_F_COPY, data, data_len, kafka_host->key, kafka_host->key_len, NULL); if (ret == ERR) { Log(LOG_ERR, "ERROR ( %s/%s ): Failed to produce to topic %s partition %i: %s\n", config.name, config.type, rd_kafka_topic_name(kafka_host->topic), part, rd_kafka_err2str(rd_kafka_errno2err(errno))); p_kafka_close(kafka_host, TRUE); } } else return ERR; rd_kafka_poll(kafka_host->rk, 0); return ret; }
int p_kafka_manage_consumer(struct p_kafka_host *kafka_host, int is_start) { int ret = SUCCESS; kafkap_ret_err_cb = FALSE; if (kafka_host && kafka_host->rk && kafka_host->topic && !validate_truefalse(is_start)) { if (is_start) { ret = rd_kafka_consume_start(kafka_host->topic, kafka_host->partition, RD_KAFKA_OFFSET_END); if (ret == ERR) { Log(LOG_ERR, "ERROR ( %s/%s ): Failed to start consuming topic %s partition %i: %s\n", config.name, config.type, rd_kafka_topic_name(kafka_host->topic), kafka_host->partition, rd_kafka_err2str(rd_kafka_errno2err(errno))); p_kafka_close(kafka_host, TRUE); } } else { rd_kafka_consume_stop(kafka_host->topic, kafka_host->partition); p_kafka_close(kafka_host, FALSE); } } else return ERR; return ret; }
VCL_STRING vmod_send_msg(const struct vrt_ctx *ctx, VCL_STRING broker, VCL_STRING topic_name, VCL_STRING name) { char *p; unsigned u, v; char errstr[512]; char *brokers = (char*)broker; char *topic = (char*)topic_name; rd_kafka_topic_t *rkt; int partition = RD_KAFKA_PARTITION_UA; u = WS_Reserve(ctx->ws, 0); /* Reserve some work space */ p = ctx->ws->f; /* Front of workspace area */ v = snprintf(p, u, "%s", name); v++; if (v > u) { /* No space, reset and leave */ WS_Release(ctx->ws, 0); return (NULL); } /* Update work space with what we've used */ WS_Release(ctx->ws, v); /* * Producer */ //char buf[2048]; //char *buf= "test run of the Test msg on topic fred"; char *buf= name; int sendcnt = 0; /* Set up a message delivery report callback. * It will be called once for each message, either on successful * delivery to broker, or upon failure to deliver to broker. */ // rd_kafka_conf_set_dr_cb(conf, msg_delivered); /* Create Kafka handle */ if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)))) { fprintf(stderr, "%% Failed to create new producer: %s\n", errstr); exit(1); } /* Add brokers */ if (rd_kafka_brokers_add(rk, brokers) == 0) { fprintf(stderr, "%% No valid brokers specified\n"); exit(1); } /* Create topic */ rkt = rd_kafka_topic_new(rk, topic, topic_conf); size_t len = strlen(buf); if (buf[len-1] == '\n') buf[--len] = '\0'; /* Send/Produce message. */ if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, /* Payload and length */ buf, len, /* Optional key and its length */ NULL, 0, /* Message opaque, provided in * delivery report callback as * msg_opaque. */ NULL) == -1) { fprintf(stderr, "%% Failed to produce to topic %s " "partition %i: %s\n", rd_kafka_topic_name(rkt), partition, rd_kafka_err2str( rd_kafka_errno2err(errno))); /* Poll to handle delivery reports */ rd_kafka_poll(rk, 0); exit(2); } if (!quiet) fprintf(stderr, "%% Sent %zd bytes to topic " "%s partition %i\n", len, rd_kafka_topic_name(rkt), partition); sendcnt++; /* Poll to handle delivery reports */ rd_kafka_poll(rk, 0); /* Poll to handle delivery reports */ rd_kafka_poll(rk, 0); /* Wait for messages to be delivered */ while ( rd_kafka_outq_len(rk) > 0) rd_kafka_poll(rk, 100); /* Destroy topic */ rd_kafka_topic_destroy(rkt); /* Destroy the handle */ rd_kafka_destroy(rk); return (p); }
int main (int argc, char **argv) { rd_kafka_topic_t *rkt; char *brokers = "localhost:9092"; char mode = 'C'; char *topic = NULL; int partition = RD_KAFKA_PARTITION_UA; int opt; rd_kafka_conf_t *conf; rd_kafka_topic_conf_t *topic_conf; char errstr[512]; const char *debug = NULL; int64_t start_offset = 0; int report_offsets = 0; int do_conf_dump = 0; quiet = !isatty(STDIN_FILENO); /* Kafka configuration */ conf = rd_kafka_conf_new(); /* Topic configuration */ topic_conf = rd_kafka_topic_conf_new(); while ((opt = getopt(argc, argv, "PCLt:p:b:z:qd:o:eX:A")) != -1) { switch (opt) { case 'P': case 'C': case 'L': mode = opt; break; case 't': topic = optarg; break; case 'p': partition = atoi(optarg); break; case 'b': brokers = optarg; break; case 'z': if (rd_kafka_conf_set(conf, "compression.codec", optarg, errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { fprintf(stderr, "%% %s\n", errstr); exit(1); } break; case 'o': if (!strcmp(optarg, "end")) start_offset = RD_KAFKA_OFFSET_END; else if (!strcmp(optarg, "beginning")) start_offset = RD_KAFKA_OFFSET_BEGINNING; else if (!strcmp(optarg, "stored")) start_offset = RD_KAFKA_OFFSET_STORED; else if (!strcmp(optarg, "report")) report_offsets = 1; else start_offset = strtoll(optarg, NULL, 10); break; case 'e': exit_eof = 1; break; case 'd': debug = optarg; break; case 'q': quiet = 1; break; case 'A': output = OUTPUT_RAW; break; case 'X': { char *name, *val; rd_kafka_conf_res_t res; if (!strcmp(optarg, "list") || !strcmp(optarg, "help")) { rd_kafka_conf_properties_show(stdout); exit(0); } if (!strcmp(optarg, "dump")) { do_conf_dump = 1; continue; } name = optarg; if (!(val = strchr(name, '='))) { fprintf(stderr, "%% Expected " "-X property=value, not %s\n", name); exit(1); } *val = '\0'; val++; res = RD_KAFKA_CONF_UNKNOWN; /* Try "topic." prefixed properties on topic * conf first, and then fall through to global if * it didn't match a topic configuration property. */ if (!strncmp(name, "topic.", strlen("topic."))) res = rd_kafka_topic_conf_set(topic_conf, name+ strlen("topic."), val, errstr, sizeof(errstr)); if (res == RD_KAFKA_CONF_UNKNOWN) res = rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)); if (res != RD_KAFKA_CONF_OK) { fprintf(stderr, "%% %s\n", errstr); exit(1); } } break; default: goto usage; } } if (do_conf_dump) { const char **arr; size_t cnt; int pass; for (pass = 0 ; pass < 2 ; pass++) { int i; if (pass == 0) { arr = rd_kafka_conf_dump(conf, &cnt); printf("# Global config\n"); } else { printf("# Topic config\n"); arr = rd_kafka_topic_conf_dump(topic_conf, &cnt); } for (i = 0 ; i < cnt ; i += 2) printf("%s = %s\n", arr[i], arr[i+1]); printf("\n"); rd_kafka_conf_dump_free(arr, cnt); } exit(0); } if (optind != argc || (mode != 'L' && !topic)) { usage: fprintf(stderr, "Usage: %s -C|-P|-L -t <topic> " "[-p <partition>] [-b <host1:port1,host2:port2,..>]\n" "\n" "librdkafka version %s (0x%08x)\n" "\n" " Options:\n" " -C | -P Consumer or Producer mode\n" " -L Metadata list mode\n" " -t <topic> Topic to fetch / produce\n" " -p <num> Partition (random partitioner)\n" " -b <brokers> Broker address (localhost:9092)\n" " -z <codec> Enable compression:\n" " none|gzip|snappy\n" " -o <offset> Start offset (consumer)\n" " -o report Report message offsets (producer)\n" " -e Exit consumer when last message\n" " in partition has been received.\n" " -d [facs..] Enable debugging contexts:\n" " -q Be quiet\n" " -A Raw payload output (consumer)\n" " %s\n" " -X <prop=name> Set arbitrary librdkafka " "configuration property\n" " Properties prefixed with \"topic.\" " "will be set on topic object.\n" " Use '-X list' to see the full list\n" " of supported properties.\n" "\n" " In Consumer mode:\n" " writes fetched messages to stdout\n" " In Producer mode:\n" " reads messages from stdin and sends to broker\n" " In List mode:\n" " queries broker for metadata information, " "topic is optional.\n" "\n" "\n" "\n", argv[0], rd_kafka_version_str(), rd_kafka_version(), RD_KAFKA_DEBUG_CONTEXTS); exit(1); } signal(SIGINT, stop); signal(SIGUSR1, sig_usr1); if (debug && rd_kafka_conf_set(conf, "debug", debug, errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { fprintf(stderr, "%% Debug configuration failed: %s: %s\n", errstr, debug); exit(1); } if (mode == 'P') { /* * Producer */ char buf[2048]; int sendcnt = 0; /* Set up a message delivery report callback. * It will be called once for each message, either on successful * delivery to broker, or upon failure to deliver to broker. */ /* If offset reporting (-o report) is enabled, use the * richer dr_msg_cb instead. */ if (report_offsets) { rd_kafka_topic_conf_set(topic_conf, "produce.offset.report", "true", errstr, sizeof(errstr)); rd_kafka_conf_set_dr_msg_cb(conf, msg_delivered2); } else rd_kafka_conf_set_dr_cb(conf, msg_delivered); /* Create Kafka handle */ if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)))) { fprintf(stderr, "%% Failed to create new producer: %s\n", errstr); exit(1); } /* Set logger */ rd_kafka_set_logger(rk, logger); rd_kafka_set_log_level(rk, LOG_DEBUG); /* Add brokers */ if (rd_kafka_brokers_add(rk, brokers) == 0) { fprintf(stderr, "%% No valid brokers specified\n"); exit(1); } /* Create topic */ rkt = rd_kafka_topic_new(rk, topic, topic_conf); if (!quiet) fprintf(stderr, "%% Type stuff and hit enter to send\n"); while (run && fgets(buf, sizeof(buf), stdin)) { size_t len = strlen(buf); if (buf[len-1] == '\n') buf[--len] = '\0'; /* Send/Produce message. */ if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, /* Payload and length */ buf, len, /* Optional key and its length */ NULL, 0, /* Message opaque, provided in * delivery report callback as * msg_opaque. */ NULL) == -1) { fprintf(stderr, "%% Failed to produce to topic %s " "partition %i: %s\n", rd_kafka_topic_name(rkt), partition, rd_kafka_err2str( rd_kafka_errno2err(errno))); /* Poll to handle delivery reports */ rd_kafka_poll(rk, 0); continue; } if (!quiet) fprintf(stderr, "%% Sent %zd bytes to topic " "%s partition %i\n", len, rd_kafka_topic_name(rkt), partition); sendcnt++; /* Poll to handle delivery reports */ rd_kafka_poll(rk, 0); } /* Poll to handle delivery reports */ rd_kafka_poll(rk, 0); /* Wait for messages to be delivered */ while (run && rd_kafka_outq_len(rk) > 0) rd_kafka_poll(rk, 100); /* Destroy the handle */ rd_kafka_destroy(rk); } else if (mode == 'C') { /* * Consumer */ /* Create Kafka handle */ if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr)))) { fprintf(stderr, "%% Failed to create new consumer: %s\n", errstr); exit(1); } /* Set logger */ rd_kafka_set_logger(rk, logger); rd_kafka_set_log_level(rk, LOG_DEBUG); /* Add brokers */ if (rd_kafka_brokers_add(rk, brokers) == 0) { fprintf(stderr, "%% No valid brokers specified\n"); exit(1); } /* Create topic */ rkt = rd_kafka_topic_new(rk, topic, topic_conf); /* Start consuming */ if (rd_kafka_consume_start(rkt, partition, start_offset) == -1){ fprintf(stderr, "%% Failed to start consuming: %s\n", rd_kafka_err2str(rd_kafka_errno2err(errno))); exit(1); } while (run) { rd_kafka_message_t *rkmessage; /* Consume single message. * See rdkafka_performance.c for high speed * consuming of messages. */ rkmessage = rd_kafka_consume(rkt, partition, 1000); if (!rkmessage) /* timeout */ continue; msg_consume(rkmessage, NULL); /* Return message to rdkafka */ rd_kafka_message_destroy(rkmessage); } /* Stop consuming */ rd_kafka_consume_stop(rkt, partition); rd_kafka_topic_destroy(rkt); rd_kafka_destroy(rk); } else if (mode == 'L') { rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; /* Create Kafka handle */ if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)))) { fprintf(stderr, "%% Failed to create new producer: %s\n", errstr); exit(1); } /* Set logger */ rd_kafka_set_logger(rk, logger); rd_kafka_set_log_level(rk, LOG_DEBUG); /* Add brokers */ if (rd_kafka_brokers_add(rk, brokers) == 0) { fprintf(stderr, "%% No valid brokers specified\n"); exit(1); } /* Create topic */ if (topic) rkt = rd_kafka_topic_new(rk, topic, topic_conf); else rkt = NULL; while (run) { const struct rd_kafka_metadata *metadata; /* Fetch metadata */ err = rd_kafka_metadata(rk, rkt ? 0 : 1, rkt, &metadata, 5000); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { fprintf(stderr, "%% Failed to acquire metadata: %s\n", rd_kafka_err2str(err)); run = 0; break; } metadata_print(topic, metadata); rd_kafka_metadata_destroy(metadata); run = 0; } /* Destroy the handle */ rd_kafka_destroy(rk); /* Exit right away, don't wait for background cleanup, we haven't * done anything important anyway. */ exit(err ? 2 : 0); } /* Let background threads clean up and terminate cleanly. */ rd_kafka_wait_destroyed(2000); return 0; }
static void msg_consume (rd_kafka_message_t *rkmessage, void *opaque) { if (rkmessage->err) { if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { fprintf(stderr, "%% Consumer reached end of %s [%"PRId32"] " "message queue at offset %"PRId64"\n", rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, rkmessage->offset); if (exit_eof) run = 0; return; } fprintf(stderr, "%% Consume error for topic \"%s\" [%"PRId32"] " "offset %"PRId64": %s\n", rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, rkmessage->offset, rd_kafka_message_errstr(rkmessage)); if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION || rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) run = 0; return; } if (!quiet) { rd_kafka_timestamp_type_t tstype; int64_t timestamp; fprintf(stdout, "%% Message (offset %"PRId64", %zd bytes):\n", rkmessage->offset, rkmessage->len); timestamp = rd_kafka_message_timestamp(rkmessage, &tstype); if (tstype != RD_KAFKA_TIMESTAMP_NOT_AVAILABLE) { const char *tsname = "?"; if (tstype == RD_KAFKA_TIMESTAMP_CREATE_TIME) tsname = "create time"; else if (tstype == RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME) tsname = "log append time"; fprintf(stdout, "%% Message timestamp: %s %"PRId64 " (%ds ago)\n", tsname, timestamp, !timestamp ? 0 : (int)time(NULL) - (int)(timestamp/1000)); } } if (rkmessage->key_len) { if (output == OUTPUT_HEXDUMP) hexdump(stdout, "Message Key", rkmessage->key, rkmessage->key_len); else printf("Key: %.*s\n", (int)rkmessage->key_len, (char *)rkmessage->key); } if (output == OUTPUT_HEXDUMP) hexdump(stdout, "Message Payload", rkmessage->payload, rkmessage->len); else printf("%.*s\n", (int)rkmessage->len, (char *)rkmessage->payload); }
/** * Returns offset of the last message consumed */ int64_t test_consume_msgs (const char *what, rd_kafka_topic_t *rkt, uint64_t testid, int32_t partition, int64_t offset, int exp_msg_base, int exp_cnt, int parse_fmt) { int cnt = 0; int msg_next = exp_msg_base; int fails = 0; int64_t offset_last = -1; test_timing_t t_first, t_all; TEST_SAY("%s: consume_msgs: %s [%"PRId32"]: expect msg #%d..%d " "at offset %"PRId64"\n", what, rd_kafka_topic_name(rkt), partition, exp_msg_base, exp_cnt, offset); if (offset != TEST_NO_SEEK) { rd_kafka_resp_err_t err; test_timing_t t_seek; TIMING_START(&t_seek, "SEEK"); if ((err = rd_kafka_seek(rkt, partition, offset, 5000))) TEST_FAIL("%s: consume_msgs: %s [%"PRId32"]: " "seek to %"PRId64" failed: %s\n", what, rd_kafka_topic_name(rkt), partition, offset, rd_kafka_err2str(err)); TIMING_STOP(&t_seek); TEST_SAY("%s: seeked to offset %"PRId64"\n", what, offset); } TIMING_START(&t_first, "FIRST MSG"); TIMING_START(&t_all, "ALL MSGS"); while (cnt < exp_cnt) { rd_kafka_message_t *rkmessage; int msg_id; rkmessage = rd_kafka_consume(rkt, partition, 5000); if (!rkmessage) TEST_FAIL("%s: consume_msgs: %s [%"PRId32"]: " "expected msg #%d (%d/%d): timed out\n", what, rd_kafka_topic_name(rkt), partition, msg_next, cnt, exp_cnt); if (rkmessage->err) TEST_FAIL("%s: consume_msgs: %s [%"PRId32"]: " "expected msg #%d (%d/%d): got error: %s\n", what, rd_kafka_topic_name(rkt), partition, msg_next, cnt, exp_cnt, rd_kafka_err2str(rkmessage->err)); if (cnt == 0) TIMING_STOP(&t_first); if (parse_fmt) test_msg_parse(testid, rkmessage->key, rkmessage->key_len, partition, &msg_id); else msg_id = 0; if (test_level >= 3) TEST_SAY("%s: consume_msgs: %s [%"PRId32"]: " "got msg #%d at offset %"PRId64 " (expect #%d at offset %"PRId64")\n", what, rd_kafka_topic_name(rkt), partition, msg_id, rkmessage->offset, msg_next, offset >= 0 ? offset + cnt : -1); if (parse_fmt && msg_id != msg_next) { TEST_SAY("%s: consume_msgs: %s [%"PRId32"]: " "expected msg #%d (%d/%d): got msg #%d\n", what, rd_kafka_topic_name(rkt), partition, msg_next, cnt, exp_cnt, msg_id); fails++; } cnt++; msg_next++; offset_last = rkmessage->offset; rd_kafka_message_destroy(rkmessage); } TIMING_STOP(&t_all); if (fails) TEST_FAIL("%s: consume_msgs: %s [%"PRId32"]: %d failures\n", what, rd_kafka_topic_name(rkt), partition, fails); TEST_SAY("%s: consume_msgs: %s [%"PRId32"]: " "%d/%d messages consumed successfully\n", what, rd_kafka_topic_name(rkt), partition, cnt, exp_cnt); return offset_last; }
int kafka_produce_batch(rd_kafka_t *r, char *topic, char **msg, int *msg_len, int msg_cnt, int report, long timeout) { char errstr[512]; rd_kafka_topic_t *rkt; struct produce_cb_params pcb = {msg_cnt, 0, 0, 0, 0, NULL}; void *opaque; int partition = RD_KAFKA_PARTITION_UA; int i, err_cnt = 0; if (report) opaque = &pcb; else opaque = NULL; rd_kafka_topic_conf_t *topic_conf; if (r == NULL) { if (log_level) { openlog("phpkafka", 0, LOG_USER); syslog(LOG_ERR, "phpkafka - no connection to produce to topic: %s", topic); } return -2; } /* Topic configuration */ topic_conf = rd_kafka_topic_conf_new(); char timeoutStr[64]; snprintf(timeoutStr, 64, "%ld", timeout); if (rd_kafka_topic_conf_set(topic_conf, "message.timeout.ms", timeoutStr, errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { if (log_level) { openlog("phpkafka", 0, LOG_USER); syslog( LOG_ERR, "Failed to configure topic param 'message.timeout.ms' to %ld before producing; config err was: %s", timeout, errstr ); } rd_kafka_topic_conf_destroy(topic_conf); return -3; } /* Create topic */ rkt = rd_kafka_topic_new(r, topic, topic_conf); //do we have VLA? rd_kafka_message_t *messages = calloc(msg_cnt, sizeof *messages); if (messages == NULL) {//fallback to individual produce calls for (i=0;i<msg_cnt;++i) { if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, msg[i], msg_len[i], NULL, 0, opaque) == -1) { if (log_level) { openlog("phpkafka", 0, LOG_USER); syslog(LOG_INFO, "phpkafka - %% Failed to produce to topic %s " "partition %i: %s", rd_kafka_topic_name(rkt), partition, rd_kafka_err2str( rd_kafka_errno2err(errno))); } err_cnt += 1; } } } else { for (i=0;i<msg_cnt;++i) { messages[i].payload = msg[i]; messages[i].len = msg_len[i]; } i = rd_kafka_produce_batch(rkt, partition, RD_KAFKA_MSG_F_COPY, messages, msg_cnt); if (i < msg_cnt) { if (log_level) { openlog("phpkafka", 0, LOG_USER); syslog(LOG_WARNING, "Failed to queue full message batch, %d of %d were put in queue", i, msg_cnt); } } err_cnt = msg_cnt - i; free(messages); messages = NULL; } /* Poll to handle delivery reports */ rd_kafka_poll(r, 0); /* Wait for messages to be delivered */ while (report && pcb.msg_count && rd_kafka_outq_len(r) > 0) rd_kafka_poll(r, 10); //destroy topic handle rd_kafka_topic_destroy(rkt); if (report) err_cnt = pcb.err_count; return err_cnt; }
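Since rd_kafka_produce_batch() records per-message failures in each rd_kafka_message_t's err field, a hedged sketch of a finer-grained check than the enqueued-count comparison used above:

/* Sketch: inspect each message after the batch call instead of relying
 * only on the returned count of enqueued messages. */
int enqueued = rd_kafka_produce_batch(rkt, partition, RD_KAFKA_MSG_F_COPY,
                                      messages, msg_cnt);
if (enqueued < msg_cnt) {
        int j;
        for (j = 0; j < msg_cnt; j++)
                if (messages[j].err)
                        fprintf(stderr, "%% message %d not enqueued: %s\n",
                                j, rd_kafka_err2str(messages[j].err));
}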
int kafka_produce(rd_kafka_t *r, char* topic, char* msg, int msg_len, int report, long timeout) { char errstr[512]; rd_kafka_topic_t *rkt; struct produce_cb_params pcb = {1, 0, 0, 0, 0, NULL}; void *opaque; int partition = RD_KAFKA_PARTITION_UA; //decide whether to pass callback params or not... if (report) opaque = &pcb; else opaque = NULL; rd_kafka_topic_conf_t *topic_conf; if (r == NULL) { if (log_level) { openlog("phpkafka", 0, LOG_USER); syslog(LOG_ERR, "phpkafka - no connection to produce to topic: %s", topic); } return -2; } /* Topic configuration */ topic_conf = rd_kafka_topic_conf_new(); char timeoutStr[64]; snprintf(timeoutStr, 64, "%ld", timeout); if (rd_kafka_topic_conf_set(topic_conf, "message.timeout.ms", timeoutStr, errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { if (log_level) { openlog("phpkafka", 0, LOG_USER); syslog( LOG_ERR, "Failed to configure topic param 'message.timeout.ms' to %ld before producing; config err was: %s", timeout, errstr ); } rd_kafka_topic_conf_destroy(topic_conf); return -3; } /* Create topic */ rkt = rd_kafka_topic_new(r, topic, topic_conf); if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, /* Payload and length */ msg, msg_len, /* Optional key and its length */ NULL, 0, /* Message opaque, provided in * delivery report callback as * msg_opaque. */ opaque) == -1) { if (log_level) { openlog("phpkafka", 0, LOG_USER); syslog(LOG_INFO, "phpkafka - %% Failed to produce to topic %s " "partition %i: %s", rd_kafka_topic_name(rkt), partition, rd_kafka_err2str( rd_kafka_errno2err(errno))); } rd_kafka_topic_destroy(rkt); return -1; } /* Poll to handle delivery reports */ rd_kafka_poll(r, 0); /* Wait for messages to be delivered */ while (report && pcb.msg_count && rd_kafka_outq_len(r) > 0) rd_kafka_poll(r, 10); //destroy topic handle rd_kafka_topic_destroy(rkt); return 0; }
static void queue_consume(rd_kafka_message_t *message, void *opaque) { struct consume_cb_params *params = opaque; zval *return_value = params->return_value; //all partitions EOF if (params->eop < 1) return; //nothing more to read... if (params->read_count == 0) return; if (message->err) { params->error_count += 1; //if auto-commit is disabled: if (params->auto_commit == 0) //store offset rd_kafka_offset_store( message->rkt, message->partition, message->offset == 0 ? 0 : message->offset -1 ); if (message->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { if (params->partition_ends[message->partition] == 0) { params->eop -= 1; params->partition_ends[message->partition] = 1; } if (log_level) { openlog("phpkafka", 0, LOG_USER); syslog(LOG_INFO, "phpkafka - %% Consumer reached end of %s [%"PRId32"] " "message queue at offset %"PRId64"\n", rd_kafka_topic_name(message->rkt), message->partition, message->offset); } return; } //add_next_index_string(return_value, rd_kafka_message_errstr(message), 1); if (log_level) { openlog("phpkafka", 0, LOG_USER); syslog(LOG_INFO, "phpkafka - %% Consume error for topic \"%s\" [%"PRId32"] " "offset %"PRId64": %s\n", rd_kafka_topic_name(message->rkt), message->partition, message->offset, rd_kafka_message_errstr(message) ); } return; } //only count successful reads! //-1 means read all from offset until end if (params->read_count != -1) params->read_count -= 1; //add message to return value (perhaps add as array -> offset + msg? if (message->len > 0) { //ensure there is a payload (+1 leaves room for the NUL terminator) char payload[(int) message->len + 1]; sprintf(payload, "%.*s", (int) message->len, (char *) message->payload); //add_index_string(return_value, (int) message->offset, payload, 1); add_next_index_string(return_value, payload, 1); } else { add_next_index_string(return_value, "", 1); } //store offset if autocommit is disabled if (params->auto_commit == 0) rd_kafka_offset_store( message->rkt, message->partition, message->offset ); }
static void consume_messages_with_queues (uint64_t testid, const char *topic, int partition_cnt, int msgcnt) { rd_kafka_t *rk; rd_kafka_topic_t *rkt; rd_kafka_conf_t *conf; rd_kafka_topic_conf_t *topic_conf; rd_kafka_queue_t *rkqu; int i; int32_t partition; int batch_cnt = msgcnt / partition_cnt; test_conf_init(&conf, &topic_conf, 20); /* Create kafka instance */ rk = test_create_handle(RD_KAFKA_CONSUMER, conf); /* Create queue */ rkqu = rd_kafka_queue_new(rk); rkt = rd_kafka_topic_new(rk, topic, topic_conf); if (!rkt) TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno)); TEST_SAY("Consuming %i messages from one queue serving %i partitions\n", msgcnt, partition_cnt); /* Start consuming each partition */ for (partition = 0 ; partition < partition_cnt ; partition++) { /* Consume messages */ TEST_SAY("Start consuming partition %i at offset -%i\n", partition, batch_cnt); if (rd_kafka_consume_start_queue(rkt, partition, RD_KAFKA_OFFSET_TAIL(batch_cnt), rkqu) == -1) TEST_FAIL("consume_start_queue(%i) failed: %s", (int)partition, rd_kafka_err2str(rd_kafka_errno2err(errno))); } /* Consume messages from queue */ for (i = 0 ; i < msgcnt ; ) { rd_kafka_message_t *rkmessage; rkmessage = rd_kafka_consume_queue(rkqu, tmout_multip(5000)); if (!rkmessage) TEST_FAIL("Failed to consume message %i/%i from " "queue: %s", i, msgcnt, rd_kafka_err2str(rd_kafka_errno2err(errno))); if (rkmessage->err) { if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF){ TEST_SAY("Topic %s [%"PRId32"] reached " "EOF at offset %"PRId64"\n", rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, rkmessage->offset); rd_kafka_message_destroy(rkmessage); continue; } TEST_FAIL("Consume message %i/%i from queue " "has error (offset %"PRId64 ", partition %"PRId32"): %s", i, msgcnt, rkmessage->offset, rkmessage->partition, rd_kafka_err2str(rkmessage->err)); } verify_consumed_msg(testid, -1, -1, rkmessage); rd_kafka_message_destroy(rkmessage); i++; } /* Stop consuming each partition */ for (partition = 0 ; partition < partition_cnt ; partition++) rd_kafka_consume_stop(rkt, partition); /* Destroy queue */ rd_kafka_queue_destroy(rkqu); /* Destroy topic */ rd_kafka_topic_destroy(rkt); /* Destroy rdkafka instance */ TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); rd_kafka_destroy(rk); }