void kfc_rdkafka_init(rd_kafka_type_t type) {
    char errstr[512];

    if (type == RD_KAFKA_PRODUCER) {
        char tmp[16];
        snprintf(tmp, sizeof(tmp), "%i", SIGIO);
        rd_kafka_conf_set(conf.rk_conf, "internal.termination.signal",
                          tmp, NULL, 0);
    }

    /* Create handle */
    if (!(conf.rk = rd_kafka_new(type, conf.rk_conf,
                                 errstr, sizeof(errstr))))
        FATAL("Failed to create rd_kafka struct: %s", errstr);

    rd_kafka_set_logger(conf.rk, rd_kafka_log_print);
    if (conf.debug)
        rd_kafka_set_log_level(conf.rk, LOG_DEBUG);
    else if (conf.verbosity == 0)
        rd_kafka_set_log_level(conf.rk, 0);

    /* Create topic, if specified */
    if (conf.topic &&
        !(conf.rkt = rd_kafka_topic_new(conf.rk, conf.topic,
                                        conf.rkt_conf)))
        FATAL("Failed to create rd_kafka_topic %s: %s", conf.topic,
              rd_kafka_err2str(rd_kafka_errno2err(errno)));

    /* Ownership of both configs has passed to librdkafka */
    conf.rk_conf = NULL;
    conf.rkt_conf = NULL;
}
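/* FATAL() is defined elsewhere in this program and is not shown here.
 * A minimal hypothetical stand-in (an assumption, not the project's real
 * macro), assuming it simply reports the error and exits: */
#include <stdio.h>
#include <stdlib.h>

#define FATAL(fmt, ...) do {                                    \
        fprintf(stderr, "%% FATAL: " fmt "\n", ##__VA_ARGS__);  \
        exit(1);                                                \
} while (0)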
int main (int argc, char **argv) {
    /* Link-check stub: reference every public librdkafka symbol inside a
     * branch that can never be taken, so the linker must resolve the whole
     * API without any of these calls actually executing at runtime. */
    if (argc < 0 /* always false */) {
        rd_kafka_version();
        rd_kafka_version_str();
        rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR);
        rd_kafka_errno2err(EINVAL);
        rd_kafka_conf_new();
        rd_kafka_conf_destroy(NULL);
        rd_kafka_conf_dup(NULL);
        rd_kafka_conf_set(NULL, NULL, NULL, NULL, 0);
        rd_kafka_conf_set_dr_cb(NULL, NULL);
        rd_kafka_conf_set_error_cb(NULL, NULL);
        rd_kafka_conf_set_stats_cb(NULL, NULL);
        rd_kafka_conf_set_opaque(NULL, NULL);
        rd_kafka_conf_dump(NULL, NULL);
        rd_kafka_topic_conf_dump(NULL, NULL);
        rd_kafka_conf_dump_free(NULL, 0);
        rd_kafka_conf_properties_show(NULL);
        rd_kafka_topic_conf_new();
        rd_kafka_topic_conf_dup(NULL);
        rd_kafka_topic_conf_destroy(NULL);
        rd_kafka_topic_conf_set(NULL, NULL, NULL, NULL, 0);
        rd_kafka_topic_conf_set_opaque(NULL, NULL);
        rd_kafka_topic_conf_set_partitioner_cb(NULL, NULL);
        rd_kafka_topic_partition_available(NULL, 0);
        rd_kafka_msg_partitioner_random(NULL, NULL, 0, 0, NULL, NULL);
        rd_kafka_new(0, NULL, NULL, 0);
        rd_kafka_destroy(NULL);
        rd_kafka_name(NULL);
        rd_kafka_topic_new(NULL, NULL, NULL);
        rd_kafka_topic_destroy(NULL);
        rd_kafka_topic_name(NULL);
        rd_kafka_message_destroy(NULL);
        rd_kafka_message_errstr(NULL);
        rd_kafka_consume_start(NULL, 0, 0);
        rd_kafka_consume_stop(NULL, 0);
        rd_kafka_consume(NULL, 0, 0);
        rd_kafka_consume_batch(NULL, 0, 0, NULL, 0);
        rd_kafka_consume_callback(NULL, 0, 0, NULL, NULL);
        rd_kafka_offset_store(NULL, 0, 0);
        rd_kafka_produce(NULL, 0, 0, NULL, 0, NULL, 0, NULL);
        rd_kafka_poll(NULL, 0);
        rd_kafka_brokers_add(NULL, NULL);
        rd_kafka_set_logger(NULL, NULL);
        rd_kafka_set_log_level(NULL, 0);
        rd_kafka_log_print(NULL, 0, NULL, NULL);
        rd_kafka_log_syslog(NULL, 0, NULL, NULL);
        rd_kafka_outq_len(NULL);
        rd_kafka_dump(NULL, NULL);
        rd_kafka_thread_cnt();
        rd_kafka_wait_destroyed(0);
    }
    return 0;
}
/**
 * producer_init_kafka
 *
 * Initialize the producer instance, setting up the Kafka topic and context.
 *
 * @param self   VALUE Instance of the Producer Ruby object
 * @param config HermannInstanceConfig* the instance configuration associated
 *               with this producer.
 */
void producer_init_kafka(VALUE self, HermannInstanceConfig* config) {

    TRACER("initing (%p)\n", config);

    config->quiet = !isatty(STDIN_FILENO);

    /* Kafka configuration */
    config->conf = rd_kafka_conf_new();

    /* Store the config (which wraps `self`) in the opaque pointer for the
     * error and logging callbacks */
    rd_kafka_conf_set_opaque(config->conf, (void*)config);
    rd_kafka_conf_set_error_cb(config->conf, producer_error_callback);

    /* Topic configuration */
    config->topic_conf = rd_kafka_topic_conf_new();

    /* Set the partitioner callback before creating the topic:
     * rd_kafka_topic_new() takes ownership of topic_conf, so the
     * configuration must not be modified afterwards. */
    rd_kafka_topic_conf_set_partitioner_cb(config->topic_conf,
                                           producer_partitioner_callback);

    /* Set up a message delivery report callback.
     * It will be called once for each message, either on successful
     * delivery to broker, or upon failure to deliver to broker. */
    rd_kafka_conf_set_dr_msg_cb(config->conf, msg_delivered);

    /* Create Kafka handle */
    if (!(config->rk = rd_kafka_new(RD_KAFKA_PRODUCER,
                                    config->conf,
                                    config->errstr,
                                    sizeof(config->errstr)))) {
        /* TODO: Use proper logger */
        fprintf(stderr, "%% Failed to create new producer: %s\n",
                config->errstr);
        rb_raise(rb_eRuntimeError, "Failed to create new producer: %s",
                 config->errstr);
    }

    /* Set logger */
    rd_kafka_set_logger(config->rk, logger);
    rd_kafka_set_log_level(config->rk, LOG_DEBUG);

    if (rd_kafka_brokers_add(config->rk, config->brokers) == 0) {
        /* TODO: Use proper logger */
        fprintf(stderr, "%% No valid brokers specified\n");
        rb_raise(rb_eRuntimeError, "No valid brokers specified");
        return;
    }

    /* Create topic */
    config->rkt = rd_kafka_topic_new(config->rk,
                                     config->topic,
                                     config->topic_conf);

    /* We're now initialized */
    config->isInitialized = 1;

    TRACER("completed kafka init\n");
}
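/* For reference, the partitioner registered above must match librdkafka's
 * partitioner signature. A minimal sketch of what Hermann's
 * producer_partitioner_callback could look like (an assumption; the real
 * implementation may differ), delegating to the built-in random
 * partitioner: */
static int32_t producer_partitioner_callback(const rd_kafka_topic_t *rkt,
                                             const void *keydata,
                                             size_t keylen,
                                             int32_t partition_cnt,
                                             void *rkt_opaque,
                                             void *msg_opaque) {
    /* Sketch only: ignore the key and pick a random available partition */
    return rd_kafka_msg_partitioner_random(rkt, keydata, keylen,
                                           partition_cnt,
                                           rkt_opaque, msg_opaque);
}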
static int kafka_handle(struct kafka_topic_context *ctx) /* {{{ */
{
    char                    errbuf[1024];
    rd_kafka_conf_t        *conf;
    rd_kafka_topic_conf_t  *topic_conf;

    if (ctx->kafka != NULL && ctx->topic != NULL)
        return(0);

    if (ctx->kafka == NULL) {
        if ((conf = rd_kafka_conf_dup(ctx->kafka_conf)) == NULL) {
            ERROR("write_kafka plugin: cannot duplicate kafka config");
            return(1);
        }

        if ((ctx->kafka = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                                       errbuf, sizeof(errbuf))) == NULL) {
            ERROR("write_kafka plugin: cannot create kafka handle.");
            return(1);
        }

        rd_kafka_conf_destroy(ctx->kafka_conf);
        ctx->kafka_conf = NULL;

        INFO("write_kafka plugin: created KAFKA handle : %s",
             rd_kafka_name(ctx->kafka));

#if defined(HAVE_LIBRDKAFKA_LOGGER) && !defined(HAVE_LIBRDKAFKA_LOG_CB)
        rd_kafka_set_logger(ctx->kafka, kafka_log);
#endif
    }

    if (ctx->topic == NULL) {
        if ((topic_conf = rd_kafka_topic_conf_dup(ctx->conf)) == NULL) {
            ERROR("write_kafka plugin: cannot duplicate kafka topic config");
            return(1);
        }

        if ((ctx->topic = rd_kafka_topic_new(ctx->kafka, ctx->topic_name,
                                             topic_conf)) == NULL) {
            ERROR("write_kafka plugin: cannot create topic : %s",
                  rd_kafka_err2str(rd_kafka_errno2err(errno)));
            return(errno);
        }

        rd_kafka_topic_conf_destroy(ctx->conf);
        ctx->conf = NULL;

        INFO("write_kafka plugin: handle created for topic : %s",
             rd_kafka_topic_name(ctx->topic));
    }

    return(0);
} /* }}} int kafka_handle */
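/* For context, kafka_handle() is designed to be called lazily from the
 * plugin's write path. A hypothetical sketch of such a caller (kafka_write
 * and its payload argument are assumptions, not collectd's actual code): */
static int kafka_write(struct kafka_topic_context *ctx,
                       const char *payload, size_t len)
{
    if (kafka_handle(ctx) != 0)
        return(-1);

    /* RD_KAFKA_MSG_F_COPY: librdkafka copies the payload, so the caller's
     * buffer may be reused immediately after this call returns. */
    return(rd_kafka_produce(ctx->topic, RD_KAFKA_PARTITION_UA,
                            RD_KAFKA_MSG_F_COPY,
                            (void *) payload, len,
                            /* no key */ NULL, 0,
                            /* no per-message opaque */ NULL));
}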
/**
 * @brief setup_kafka initialises librdkafka based on the config
 *        wrapped in kafka_t
 * @param k kafka configuration
 **/
int setup_kafka(kafka_t* k)
{
    char* brokers = "localhost:9092";
    char* zookeepers = NULL;
    char* topic = NULL;
    rd_kafka_topic_conf_t *topic_conf;
    rd_kafka_conf_t *conf;
    config* fk_conf = (config*) fuse_get_context()->private_data;

    if (fk_conf->zookeepers_n > 0) zookeepers = fk_conf->zookeepers[0];
    if (fk_conf->brokers_n > 0) brokers = fk_conf->brokers[0];
    topic = fk_conf->topic[0];

    conf = rd_kafka_conf_new();
    rd_kafka_conf_set_dr_cb(conf, msg_delivered);

    if (rd_kafka_conf_set(conf, "debug", "all",
                          errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
        rd_kafka_conf_set(conf, "batch.num.messages", "1",
                          errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
        printf("%% Configuration failed: %s\n", errstr);
        return(1);
    }

    if (!(k->rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                               errstr, sizeof(errstr)))) {
        fprintf(stderr, "%% Failed to create new producer: %s\n", errstr);
        return(1);
    }

    rd_kafka_set_logger(k->rk, logger);
    rd_kafka_set_log_level(k->rk, LOG_DEBUG);

    if (zookeepers != NULL) {
        initialize_zookeeper(zookeepers, k);
        return 0;
    }

    if (rd_kafka_brokers_add(k->rk, brokers) == 0) {
        fprintf(stderr, "%% No valid brokers specified\n");
        return(1);
    }

    topic_conf = rd_kafka_topic_conf_new();
    k->rkt = rd_kafka_topic_new(k->rk, topic, topic_conf);
    if (k->rkt == NULL)
        printf("topic %s creation failed\n", topic);
    return k->rkt == NULL;
}
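/* The msg_delivered delivery-report callback registered with
 * rd_kafka_conf_set_dr_cb() above must match librdkafka's legacy dr_cb
 * signature. A minimal sketch (an assumption; the project's real callback
 * may log differently): */
static void msg_delivered(rd_kafka_t *rk, void *payload, size_t len,
                          rd_kafka_resp_err_t err,
                          void *opaque, void *msg_opaque)
{
    if (err)
        fprintf(stderr, "%% Message delivery failed: %s\n",
                rd_kafka_err2str(err));
    else
        fprintf(stderr, "%% Message delivered (%zu bytes)\n", len);
}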
/**
 * consumer_init_kafka
 *
 * Initialize the Kafka context and instantiate a consumer.
 *
 * @param config HermannInstanceConfig* pointer to the instance configuration
 *               for this producer or consumer
 */
void consumer_init_kafka(HermannInstanceConfig* config) {

    TRACER("configuring rd_kafka\n");

    config->quiet = !isatty(STDIN_FILENO);

    /* Kafka configuration */
    config->conf = rd_kafka_conf_new();

    /* Topic configuration */
    config->topic_conf = rd_kafka_topic_conf_new();

    /* Create Kafka handle */
    if (!(config->rk = rd_kafka_new(RD_KAFKA_CONSUMER,
                                    config->conf,
                                    config->errstr,
                                    sizeof(config->errstr)))) {
        fprintf(stderr, "%% Failed to create new consumer: %s\n",
                config->errstr);
        rb_raise(rb_eRuntimeError, "Failed to create new consumer: %s",
                 config->errstr);
    }

    /* Set logger */
    rd_kafka_set_logger(config->rk, logger);
    rd_kafka_set_log_level(config->rk, LOG_DEBUG);

    /* Add brokers */
    if (rd_kafka_brokers_add(config->rk, config->brokers) == 0) {
        fprintf(stderr, "%% No valid brokers specified\n");
        rb_raise(rb_eRuntimeError, "No valid brokers specified");
        return;
    }

    /* Create topic */
    config->rkt = rd_kafka_topic_new(config->rk,
                                     config->topic,
                                     config->topic_conf);

    /* We're now initialized */
    config->isInitialized = 1;
}
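/* The logger installed with rd_kafka_set_logger() above must match
 * librdkafka's logger signature. A minimal sketch, assuming a plain stderr
 * logger similar to librdkafka's own examples: */
static void logger(const rd_kafka_t *rk, int level,
                   const char *fac, const char *buf)
{
    fprintf(stderr, "RDKAFKA-%i-%s: %s: %s\n",
            level, fac, rk ? rd_kafka_name(rk) : "", buf);
}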
/*
 * kafka_consume_main
 *
 * Main function for Kafka consumers running as background workers
 */
void kafka_consume_main(Datum arg)
{
    char err_msg[512];
    rd_kafka_topic_conf_t *topic_conf;
    rd_kafka_t *kafka;
    rd_kafka_topic_t *topic;
    rd_kafka_message_t **messages;
    const struct rd_kafka_metadata *meta;
    struct rd_kafka_metadata_topic topic_meta;
    rd_kafka_resp_err_t err;
    bool found;
    Oid id = (Oid) arg;
    ListCell *lc;
    KafkaConsumerProc *proc = hash_search(consumer_procs, &id, HASH_FIND, &found);
    KafkaConsumer consumer;
    CopyStmt *copy;
    int valid_brokers = 0;
    int i;
    int my_partitions = 0;

    if (!found)
        elog(ERROR, "kafka consumer %d not found", id);

    pqsignal(SIGTERM, kafka_consume_main_sigterm);
#define BACKTRACE_SEGFAULTS
#ifdef BACKTRACE_SEGFAULTS
    pqsignal(SIGSEGV, debug_segfault);
#endif

    /* we're now ready to receive signals */
    BackgroundWorkerUnblockSignals();

    /* give this proc access to the database */
    BackgroundWorkerInitializeConnection(NameStr(proc->dbname), NULL);

    /* load saved consumer state */
    StartTransactionCommand();
    load_consumer_state(proc->consumer_id, &consumer);
    copy = get_copy_statement(&consumer);

    topic_conf = rd_kafka_topic_conf_new();
    kafka = rd_kafka_new(RD_KAFKA_CONSUMER, NULL, err_msg, sizeof(err_msg));
    rd_kafka_set_logger(kafka, logger);

    /*
     * Add all brokers currently in pipeline_kafka_brokers
     */
    if (consumer.brokers == NIL)
        elog(ERROR, "no valid brokers were found");

    foreach(lc, consumer.brokers)
        valid_brokers += rd_kafka_brokers_add(kafka, lfirst(lc));

    if (!valid_brokers)
        elog(ERROR, "no valid brokers were found");

    /*
     * Set up our topic to read from
     */
    topic = rd_kafka_topic_new(kafka, consumer.topic, topic_conf);
    err = rd_kafka_metadata(kafka, false, topic, &meta, CONSUMER_TIMEOUT);

    if (err != RD_KAFKA_RESP_ERR_NO_ERROR)
        elog(ERROR, "failed to acquire metadata: %s", rd_kafka_err2str(err));

    Assert(meta->topic_cnt == 1);
    topic_meta = meta->topics[0];

    load_consumer_offsets(&consumer, &topic_meta, proc->offset);
    CommitTransactionCommand();

    /*
     * Begin consuming all partitions that this process is responsible for
     */
    for (i = 0; i < topic_meta.partition_cnt; i++)
    {
        int partition = topic_meta.partitions[i].id;

        Assert(partition <= consumer.num_partitions);
        if (partition % consumer.parallelism != proc->partition_group)
            continue;

        elog(LOG, "[kafka consumer] %s <- %s consuming partition %d from offset %ld",
             consumer.rel->relname, consumer.topic,
             partition, consumer.offsets[partition]);

        if (rd_kafka_consume_start(topic, partition, consumer.offsets[partition]) == -1)
            elog(ERROR, "failed to start consuming: %s",
                 rd_kafka_err2str(rd_kafka_errno2err(errno)));

        my_partitions++;
    }

    /*
     * No point doing anything if we don't have any partitions assigned to us
     */
    if (my_partitions == 0)
    {
        elog(LOG, "[kafka consumer] %s <- %s consumer %d doesn't have any partitions to read from",
             consumer.rel->relname, consumer.topic, MyProcPid);
        goto done;
    }

    /* this is an array of message pointers, so size by the pointer type */
    messages = palloc0(sizeof(rd_kafka_message_t *) * consumer.batch_size);

    /*
     * Consume messages until we are terminated
     */
    while (!got_sigterm)
    {
        ssize_t num_consumed;
        int i;
        int messages_buffered = 0;
        int partition;
        StringInfoData buf;
        bool xact = false;

        for (partition = 0; partition < consumer.num_partitions; partition++)
        {
            if (partition % consumer.parallelism != proc->partition_group)
                continue;

            num_consumed = rd_kafka_consume_batch(topic, partition,
                                                  CONSUMER_TIMEOUT, messages,
                                                  consumer.batch_size);
            if (num_consumed <= 0)
                continue;

            if (!xact)
            {
                StartTransactionCommand();
                xact = true;
            }

            initStringInfo(&buf);
            for (i = 0; i < num_consumed; i++)
            {
                if (messages[i]->payload != NULL)
                {
                    appendBinaryStringInfo(&buf, messages[i]->payload,
                                           messages[i]->len);
                    if (buf.len > 0 && buf.data[buf.len - 1] != '\n')
                        appendStringInfoChar(&buf, '\n');
                    messages_buffered++;
                }
                consumer.offsets[partition] = messages[i]->offset;
                rd_kafka_message_destroy(messages[i]);
            }
        }

        if (!xact)
        {
            pg_usleep(1 * 1000);
            continue;
        }

        /* we don't want to die in the event of any errors */
        PG_TRY();
        {
            if (messages_buffered)
                execute_copy(copy, &buf);
        }
        PG_CATCH();
        {
            elog(LOG, "[kafka consumer] %s <- %s failed to process batch, dropped %d message%s:",
                 consumer.rel->relname, consumer.topic,
                 (int) num_consumed, (num_consumed == 1 ? "" : "s"));
            EmitErrorReport();
            FlushErrorState();

            AbortCurrentTransaction();
            xact = false;
        }
        PG_END_TRY();

        if (!xact)
            StartTransactionCommand();

        if (messages_buffered)
            save_consumer_state(&consumer, proc->partition_group);

        CommitTransactionCommand();
    }

done:
    hash_search(consumer_procs, &id, HASH_REMOVE, NULL);

    rd_kafka_topic_destroy(topic);
    rd_kafka_destroy(kafka);
    rd_kafka_wait_destroyed(CONSUMER_TIMEOUT);
}
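/* debug_segfault() is referenced above but not shown. A plausible sketch,
 * assuming glibc's <execinfo.h> backtrace facilities and PostgreSQL's
 * SIGNAL_ARGS handler convention (the real implementation may differ): */
#include <execinfo.h>
#include <unistd.h>

static void debug_segfault(SIGNAL_ARGS)
{
    void *frames[32];
    int depth = backtrace(frames, 32);

    /* backtrace_symbols_fd() writes directly to the fd and is
     * async-signal-safe, unlike the malloc-based backtrace_symbols() */
    backtrace_symbols_fd(frames, depth, STDERR_FILENO);
    abort();
}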
int main (int argc, char **argv) {
    rd_kafka_topic_t *rkt;
    char *brokers = "localhost:9092";
    char mode = 'C';
    char *topic = NULL;
    int partition = RD_KAFKA_PARTITION_UA;
    int opt;
    rd_kafka_conf_t *conf;
    rd_kafka_topic_conf_t *topic_conf;
    char errstr[512];
    const char *debug = NULL;
    int64_t start_offset = 0;
    int report_offsets = 0;
    int do_conf_dump = 0;

    quiet = !isatty(STDIN_FILENO);

    /* Kafka configuration */
    conf = rd_kafka_conf_new();

    /* Topic configuration */
    topic_conf = rd_kafka_topic_conf_new();

    while ((opt = getopt(argc, argv, "PCLt:p:b:z:qd:o:eX:A")) != -1) {
        switch (opt) {
        case 'P':
        case 'C':
        case 'L':
            mode = opt;
            break;
        case 't':
            topic = optarg;
            break;
        case 'p':
            partition = atoi(optarg);
            break;
        case 'b':
            brokers = optarg;
            break;
        case 'z':
            if (rd_kafka_conf_set(conf, "compression.codec", optarg,
                                  errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "%% %s\n", errstr);
                exit(1);
            }
            break;
        case 'o':
            if (!strcmp(optarg, "end"))
                start_offset = RD_KAFKA_OFFSET_END;
            else if (!strcmp(optarg, "beginning"))
                start_offset = RD_KAFKA_OFFSET_BEGINNING;
            else if (!strcmp(optarg, "stored"))
                start_offset = RD_KAFKA_OFFSET_STORED;
            else if (!strcmp(optarg, "report"))
                report_offsets = 1;
            else
                start_offset = strtoll(optarg, NULL, 10);
            break;
        case 'e':
            exit_eof = 1;
            break;
        case 'd':
            debug = optarg;
            break;
        case 'q':
            quiet = 1;
            break;
        case 'A':
            output = OUTPUT_RAW;
            break;
        case 'X':
        {
            char *name, *val;
            rd_kafka_conf_res_t res;

            if (!strcmp(optarg, "list") || !strcmp(optarg, "help")) {
                rd_kafka_conf_properties_show(stdout);
                exit(0);
            }

            if (!strcmp(optarg, "dump")) {
                do_conf_dump = 1;
                continue;
            }

            name = optarg;
            if (!(val = strchr(name, '='))) {
                fprintf(stderr, "%% Expected -X property=value, not %s\n",
                        name);
                exit(1);
            }

            *val = '\0';
            val++;

            res = RD_KAFKA_CONF_UNKNOWN;
            /* Try "topic." prefixed properties on topic
             * conf first, and then fall through to global if
             * it didn't match a topic configuration property. */
            if (!strncmp(name, "topic.", strlen("topic.")))
                res = rd_kafka_topic_conf_set(topic_conf,
                                              name + strlen("topic."),
                                              val, errstr, sizeof(errstr));

            if (res == RD_KAFKA_CONF_UNKNOWN)
                res = rd_kafka_conf_set(conf, name, val,
                                        errstr, sizeof(errstr));

            if (res != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "%% %s\n", errstr);
                exit(1);
            }
        }
        break;

        default:
            goto usage;
        }
    }

    if (do_conf_dump) {
        const char **arr;
        size_t cnt;
        int pass;

        for (pass = 0; pass < 2; pass++) {
            int i;

            if (pass == 0) {
                arr = rd_kafka_conf_dump(conf, &cnt);
                printf("# Global config\n");
            } else {
                printf("# Topic config\n");
                arr = rd_kafka_topic_conf_dump(topic_conf, &cnt);
            }

            for (i = 0; i < (int)cnt; i += 2)
                printf("%s = %s\n", arr[i], arr[i+1]);

            printf("\n");

            rd_kafka_conf_dump_free(arr, cnt);
        }

        exit(0);
    }

    if (optind != argc || (mode != 'L' && !topic)) {
    usage:
        fprintf(stderr,
                "Usage: %s -C|-P|-L -t <topic> "
                "[-p <partition>] [-b <host1:port1,host2:port2,..>]\n"
                "\n"
                "librdkafka version %s (0x%08x)\n"
                "\n"
                " Options:\n"
                "  -C | -P         Consumer or Producer mode\n"
                "  -L              Metadata list mode\n"
                "  -t <topic>      Topic to fetch / produce\n"
                "  -p <num>        Partition (random partitioner)\n"
                "  -b <brokers>    Broker address (localhost:9092)\n"
                "  -z <codec>      Enable compression:\n"
                "                  none|gzip|snappy\n"
                "  -o <offset>     Start offset (consumer)\n"
                "  -o report       Report message offsets (producer)\n"
                "  -e              Exit consumer when last message\n"
                "                  in partition has been received.\n"
                "  -d [facs..]     Enable debugging contexts:\n"
                "  -q              Be quiet\n"
                "  -A              Raw payload output (consumer)\n"
                "                  %s\n"
                "  -X <prop=name>  Set arbitrary librdkafka "
                "configuration property\n"
                "                  Properties prefixed with \"topic.\" "
                "will be set on topic object.\n"
                "                  Use '-X list' to see the full list\n"
                "                  of supported properties.\n"
                "\n"
                " In Consumer mode:\n"
                "  writes fetched messages to stdout\n"
                " In Producer mode:\n"
                "  reads messages from stdin and sends to broker\n"
                " In List mode:\n"
                "  queries broker for metadata information, "
                "topic is optional.\n"
                "\n",
                argv[0],
                rd_kafka_version_str(), rd_kafka_version(),
                RD_KAFKA_DEBUG_CONTEXTS);
        exit(1);
    }

    signal(SIGINT, stop);
    signal(SIGUSR1, sig_usr1);

    if (debug &&
        rd_kafka_conf_set(conf, "debug", debug,
                          errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
        fprintf(stderr, "%% Debug configuration failed: %s: %s\n",
                errstr, debug);
        exit(1);
    }

    if (mode == 'P') {
        /*
         * Producer
         */
        char buf[2048];
        int sendcnt = 0;

        /* Set up a message delivery report callback.
         * It will be called once for each message, either on successful
         * delivery to broker, or upon failure to deliver to broker.
         *
         * If offset reporting (-o report) is enabled, use the
         * richer dr_msg_cb instead. */
        if (report_offsets) {
            rd_kafka_topic_conf_set(topic_conf,
                                    "produce.offset.report",
                                    "true", errstr, sizeof(errstr));
            rd_kafka_conf_set_dr_msg_cb(conf, msg_delivered2);
        } else
            rd_kafka_conf_set_dr_cb(conf, msg_delivered);

        /* Create Kafka handle */
        if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                                errstr, sizeof(errstr)))) {
            fprintf(stderr, "%% Failed to create new producer: %s\n",
                    errstr);
            exit(1);
        }

        /* Set logger */
        rd_kafka_set_logger(rk, logger);
        rd_kafka_set_log_level(rk, LOG_DEBUG);

        /* Add brokers */
        if (rd_kafka_brokers_add(rk, brokers) == 0) {
            fprintf(stderr, "%% No valid brokers specified\n");
            exit(1);
        }

        /* Create topic */
        rkt = rd_kafka_topic_new(rk, topic, topic_conf);

        if (!quiet)
            fprintf(stderr, "%% Type stuff and hit enter to send\n");

        while (run && fgets(buf, sizeof(buf), stdin)) {
            size_t len = strlen(buf);
            if (len > 0 && buf[len-1] == '\n')
                buf[--len] = '\0';

            /* Send/Produce message. */
            if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
                                 /* Payload and length */
                                 buf, len,
                                 /* Optional key and its length */
                                 NULL, 0,
                                 /* Message opaque, provided in
                                  * delivery report callback as
                                  * msg_opaque. */
                                 NULL) == -1) {
                fprintf(stderr,
                        "%% Failed to produce to topic %s partition %i: %s\n",
                        rd_kafka_topic_name(rkt), partition,
                        rd_kafka_err2str(rd_kafka_errno2err(errno)));
                /* Poll to handle delivery reports */
                rd_kafka_poll(rk, 0);
                continue;
            }

            if (!quiet)
                fprintf(stderr,
                        "%% Sent %zu bytes to topic %s partition %i\n",
                        len, rd_kafka_topic_name(rkt), partition);
            sendcnt++;
            /* Poll to handle delivery reports */
            rd_kafka_poll(rk, 0);
        }

        /* Poll to handle delivery reports */
        rd_kafka_poll(rk, 0);

        /* Wait for messages to be delivered */
        while (run && rd_kafka_outq_len(rk) > 0)
            rd_kafka_poll(rk, 100);

        /* Destroy the handle */
        rd_kafka_destroy(rk);

    } else if (mode == 'C') {
        /*
         * Consumer
         */

        /* Create Kafka handle */
        if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf,
                                errstr, sizeof(errstr)))) {
            fprintf(stderr, "%% Failed to create new consumer: %s\n",
                    errstr);
            exit(1);
        }

        /* Set logger */
        rd_kafka_set_logger(rk, logger);
        rd_kafka_set_log_level(rk, LOG_DEBUG);

        /* Add brokers */
        if (rd_kafka_brokers_add(rk, brokers) == 0) {
            fprintf(stderr, "%% No valid brokers specified\n");
            exit(1);
        }

        /* Create topic */
        rkt = rd_kafka_topic_new(rk, topic, topic_conf);

        /* Start consuming */
        if (rd_kafka_consume_start(rkt, partition, start_offset) == -1) {
            fprintf(stderr, "%% Failed to start consuming: %s\n",
                    rd_kafka_err2str(rd_kafka_errno2err(errno)));
            exit(1);
        }

        while (run) {
            rd_kafka_message_t *rkmessage;

            /* Consume single message.
             * See rdkafka_performance.c for high speed
             * consuming of messages. */
            rkmessage = rd_kafka_consume(rkt, partition, 1000);
            if (!rkmessage) /* timeout */
                continue;

            msg_consume(rkmessage, NULL);

            /* Return message to rdkafka */
            rd_kafka_message_destroy(rkmessage);
        }

        /* Stop consuming */
        rd_kafka_consume_stop(rkt, partition);

        rd_kafka_topic_destroy(rkt);

        rd_kafka_destroy(rk);

    } else if (mode == 'L') {
        rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;

        /* Create Kafka handle */
        if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                                errstr, sizeof(errstr)))) {
            fprintf(stderr, "%% Failed to create new producer: %s\n",
                    errstr);
            exit(1);
        }

        /* Set logger */
        rd_kafka_set_logger(rk, logger);
        rd_kafka_set_log_level(rk, LOG_DEBUG);

        /* Add brokers */
        if (rd_kafka_brokers_add(rk, brokers) == 0) {
            fprintf(stderr, "%% No valid brokers specified\n");
            exit(1);
        }

        /* Create topic */
        if (topic)
            rkt = rd_kafka_topic_new(rk, topic, topic_conf);
        else
            rkt = NULL;

        while (run) {
            const struct rd_kafka_metadata *metadata;

            /* Fetch metadata */
            err = rd_kafka_metadata(rk, rkt ? 0 : 1, rkt, &metadata, 5000);
            if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
                fprintf(stderr, "%% Failed to acquire metadata: %s\n",
                        rd_kafka_err2str(err));
                run = 0;
                break;
            }

            metadata_print(topic, metadata);

            rd_kafka_metadata_destroy(metadata);
            run = 0;
        }

        /* Destroy the handle */
        rd_kafka_destroy(rk);

        /* Exit right away, don't wait for background cleanup, we haven't
         * done anything important anyway. */
        exit(err ? 2 : 0);
    }

    /* Let background threads clean up and terminate cleanly. */
    rd_kafka_wait_destroyed(2000);

    return 0;
}
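/* msg_consume(), used in the consumer loop above, handles both messages and
 * per-partition events. A sketch close to librdkafka's example code,
 * assuming the globals `run` and `exit_eof` as above and <inttypes.h> for
 * the PRId32/PRId64 macros: */
static void msg_consume(rd_kafka_message_t *rkmessage, void *opaque)
{
    if (rkmessage->err) {
        if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
            fprintf(stderr,
                    "%% Consumer reached end of %s [%"PRId32"] "
                    "message queue at offset %"PRId64"\n",
                    rd_kafka_topic_name(rkmessage->rkt),
                    rkmessage->partition, rkmessage->offset);
            if (exit_eof)
                run = 0;
            return;
        }

        fprintf(stderr,
                "%% Consume error for topic \"%s\" [%"PRId32"] "
                "offset %"PRId64": %s\n",
                rd_kafka_topic_name(rkmessage->rkt),
                rkmessage->partition, rkmessage->offset,
                rd_kafka_message_errstr(rkmessage));
        return;
    }

    /* Write the raw payload to stdout */
    fwrite(rkmessage->payload, 1, rkmessage->len, stdout);
}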
int main(int argc, char *argv[], char *envp[])
{
    rd_kafka_t *rks[1024] = { 0 };
    int rkcount = 0;
    char value[1024] = { 0 };
    char brokers[1024] = "localhost:9092";
    char *broker = NULL;
    char topic[1024] = "topic";
    int sendcnt = 0;
    int partitions = 4;
    int opt;
    int len = 0;
    char *opbuf = NULL;
    char config_file[1024] = "";
    char path[PATH_MAX] = { 0 };
    char processname[1024] = { 0 };

    get_executable_path(path, processname, sizeof(processname));
    snprintf(config_file, sizeof(config_file), "/etc/sendkafka/%s.conf",
             processname);

    if (read_config("brokers", value, sizeof(value), config_file) > 0) {
        strcpy(brokers, value);
    }
    if (read_config("topic", value, sizeof(value), config_file) > 0) {
        strcpy(topic, value);
    }
    if (read_config("partitions", value, sizeof(value), config_file) > 0) {
        partitions = atoi(value);
        if (partitions <= 0 || partitions > 256) {
            partitions = 4;
        }
    }
    if (read_config("data_path", value, sizeof(value), config_file) > 0) {
        strcpy(g_queue_data_filepath, value);
    }
    if (read_config("error_path", value, sizeof(value), config_file) > 0) {
        strcpy(g_error_logpath, value);
    }
    if (read_config("logsavelocal_tag", value, sizeof(value), config_file) > 0) {
        g_logsavelocal_tag = atoi(value);
    }
    if (read_config("lognum_max", value, sizeof(value), config_file) > 0) {
        g_logfilenum_max = atoi(value);
    }
    if (read_config("monitor_period", value, sizeof(value), config_file) > 0) {
        g_monitor_period = atoi(value);
    }
    if (read_config("logsize_max", value, sizeof(value), config_file) > 0) {
        g_logfilesize_max = atoi(value);
    }
    if (read_config("queue_sizepath", value, sizeof(value), config_file) > 0) {
        strcpy(g_monitor_qusizelogpath, value);
    }

    /* note: "r:" added to the option string so that the `case 'r'`
     * branch below is actually reachable */
    while ((opt = getopt(argc, argv, "hb:c:d:p:t:o:m:n:l:x:r:")) != -1) {
        switch (opt) {
        case 'b':
            strncpy(brokers, optarg, sizeof(brokers));
            brokers[sizeof(brokers) - 1] = '\0';
            break;
        case 'c':
            if (read_config("brokers", value, sizeof(value), optarg) > 0) {
                strcpy(brokers, value);
            }
            if (read_config("topic", value, sizeof(value), optarg) > 0) {
                strcpy(topic, value);
            }
            if (read_config("partitions", value, sizeof(value), optarg) > 0) {
                partitions = atoi(value);
                if (partitions <= 0 || partitions > 256) {
                    partitions = 4;
                }
            }
            if (read_config("data_path", value, sizeof(value), optarg) > 0) {
                strcpy(g_queue_data_filepath, value);
            }
            if (read_config("queue_sizepath", value, sizeof(value), optarg) > 0) {
                strcpy(g_monitor_qusizelogpath, value);
            }
            if (read_config("error_path", value, sizeof(value), optarg) > 0) {
                strcpy(g_error_logpath, value);
            }
            if (read_config("savelocal_tag", value, sizeof(value), optarg) > 0) {
                g_logsavelocal_tag = atoi(value);
            }
            if (read_config("monitor_period", value, sizeof(value), optarg) > 0) {
                g_monitor_period = atoi(value);
            }
            if (read_config("lognum_max", value, sizeof(value), optarg) > 0) {
                g_logfilenum_max = atoi(value);
            }
            if (read_config("logsize_max", value, sizeof(value), optarg) > 0) {
                g_logfilesize_max = atoi(value);
            }
            break;
        case 'o':
            if (NULL != optarg) {
                g_logsavelocal_tag = atoi(optarg);
            }
            break;
        case 't':
            if (NULL != optarg) {
                strncpy(topic, optarg, sizeof(topic));
                topic[sizeof(topic) - 1] = '\0';
            }
            break;
        case 'p':
            if (NULL != optarg) {
                partitions = atoi(optarg);
                if (partitions <= 0 || partitions > 256) {
                    partitions = 4;
                }
            }
            break;
        case 'm':
            if (NULL != optarg) {
                g_logfilesize_max = atoi(optarg);
            }
            break;
        case 'l':
            if (NULL != optarg) {
                strcpy(g_error_logpath, optarg);
            }
            break;
        case 'd':
            if (NULL != optarg) {
                strcpy(g_queue_data_filepath, optarg);
            }
            break;
        case 'x':
            if (NULL != optarg) {
                strcpy(g_monitor_qusizelogpath, optarg);
            }
            break;
        case 'n':
            if (NULL != optarg) {
                g_logfilenum_max = atoi(optarg);
            }
            break;
        case 'r':
            if (NULL != optarg) {
                g_monitor_period = atoi(optarg);
            }
            break;
        case 'h':
        default:
            usage(argv[0]);
            break;
        }
    }

    if (g_logsavelocal_tag == 0) {
        rd_kafka_set_logger(save_liberr_tolocal);
    } else {
        rd_kafka_set_logger(rd_kafka_log_syslog);
    }

    signal(SIGINT, stop);
    signal(SIGTERM, stop);
    // see: https://github.com/edenhill/librdkafka/issues/2
    signal(SIGPIPE, SIG_IGN);
    signal(SIGHUP, stop);

    /* Producer */
    char buf[4096];
    int i = 0;

    /* Create one handle per broker (legacy librdkafka 0.7-style API).
     * Note the loop-bound fix: sizeof(rks) counts bytes, not elements. */
    for (broker = strtok(brokers, ","), rkcount = 0;
         broker && rkcount < (int)(sizeof(rks) / sizeof(rks[0]));
         broker = strtok(NULL, ","), ++rkcount) {
        rks[rkcount] = rd_kafka_new(RD_KAFKA_PRODUCER, broker, NULL);
        if (!rks[rkcount]) {
            for (i = 0; i < rkcount; i++) {
                rd_kafka_destroy(rks[i]);
                rks[i] = NULL;
            }
            snprintf(buf, sizeof(buf), "rd_kafka_new() producer failed");
            perror(buf);
            save_error(g_logsavelocal_tag, LOG_CRIT, buf);
            exit(9);
        }
    }

    FILE *fp = NULL;
    opbuf = NULL;

    /* Replay any messages queued to disk by a previous run */
    if (access(g_queue_data_filepath, F_OK) == 0) {
        fp = fopen(g_queue_data_filepath, "r");
        if (fp == NULL) {
            char errmsg[PATH_MAX + 64] = { 0 };
            snprintf(errmsg, sizeof(errmsg), "%d line open %s file fail...",
                     __LINE__ - 4, g_queue_data_filepath);
            perror(errmsg);
            save_error(g_logsavelocal_tag, LOG_CRIT, errmsg);
            exit(8);
        }

        while (fgets(buf, sizeof(buf), fp)) {
            ++sendcnt;
            opbuf = strdup(buf);
            len = strlen(opbuf);
            producer(rks, topic, partitions, RD_KAFKA_OP_F_FREE,
                     opbuf, len, rkcount);
        }

        if (get_file_size(g_queue_data_filepath) > 0) {
            unlink(g_queue_data_filepath);
        }
    }
    if (NULL != fp) {
        fclose(fp);
    }

    char *eptr = NULL;
    while (g_run_tag) {
        eptr = fgets(buf, sizeof(buf), stdin);
        if (EINTR == errno || NULL == eptr) {
            g_run_tag = 0;
            break;
        }
        ++sendcnt;
        opbuf = strdup(buf);
        len = strlen(opbuf);
        producer(rks, topic, partitions, RD_KAFKA_OP_F_FREE,
                 opbuf, len, rkcount);

        if ((sendcnt % 100000) == 0) {
            char timebuf[50] = { 0 };
            strcpy(timebuf, getcurrenttime());
            timebuf[strlen(timebuf) - 1] = '\0';
            fprintf(stderr,
                    "%s sendkafka[%d]: Sent %i messages to topic %s\n",
                    timebuf, getpid(), sendcnt, topic);

            char *msgbuf = calloc(1, strlen(topic) + 128);
            sprintf(msgbuf, "sendkafka[%d]: Sent %i messages to topic %s\n",
                    getpid(), sendcnt, topic);
            save_error(g_logsavelocal_tag, LOG_INFO, msgbuf);
            free(msgbuf);
            msgbuf = NULL;
        }
    }

    printf("sendcnt num %d\n", sendcnt);

    /* Persist anything still queued, then clean up */
    save_queuedata_tofile(rks, rkcount);

    /* Destroy the handles */
    for (i = 0; i < rkcount; i++) {
        rd_kafka_destroy(rks[i]);
    }

    return 0;
}
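/* For reference, the keys read via read_config() above suggest a config
 * file along these lines. This sample is hypothetical: the key=value
 * syntax and the values shown are assumptions; only the key names come
 * from the code.
 *
 *   # /etc/sendkafka/<processname>.conf
 *   brokers=localhost:9092
 *   topic=topic
 *   partitions=4
 *   data_path=/var/lib/sendkafka/queue.dat
 *   error_path=/var/log/sendkafka/error.log
 *   queue_sizepath=/var/log/sendkafka/queue_size.log
 *   logsavelocal_tag=0
 *   lognum_max=10
 *   logsize_max=10485760
 *   monitor_period=60
 */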
int main (int argc, char **argv) {
    rd_kafka_topic_t *rkt;
    char *brokers = 0;
    char *topic = NULL;
    int partition = RD_KAFKA_PARTITION_UA;
    int opt;
    rd_kafka_conf_t *conf;
    rd_kafka_topic_conf_t *topic_conf;
    char errstr[512];
    char tmp[16];
    char *msg = 0;
    char *key = 0;

    /* Kafka configuration */
    conf = rd_kafka_conf_new();

    /* Quick termination */
    snprintf(tmp, sizeof(tmp), "%i", SIGIO);
    rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0);

    /* Topic configuration */
    topic_conf = rd_kafka_topic_conf_new();

    while ((opt = getopt(argc, argv, "t:p:b:z:m:k:")) != -1) {
        switch (opt) {
        case 't':
            topic = optarg;
            break;
        case 'p':
            partition = atoi(optarg);
            break;
        case 'b':
            brokers = optarg;
            break;
        case 'z':
            if (rd_kafka_conf_set(conf, "compression.codec", optarg,
                                  errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "%% %s\n", errstr);
                exit(1);
            }
            break;
        case 'm':
            msg = optarg;
            break;
        case 'k':
            key = optarg;
            break;
        default:
            goto usage;
        }
    }

    if (optind != argc || msg == 0 || brokers == 0) {
    usage:
        fprintf(stderr,
                "Usage: %s -t <topic> -m <message> "
                "[-p <partition>] [-b <host1:port1,host2:port2,..>] [-k <key>]\n"
                "\n"
                "librdkafka version %s (0x%08x)\n"
                "\n"
                " Options:\n"
                "  -t <topic>      Topic to produce to\n"
                "  -p <num>        Partition (random partitioner)\n"
                "  -b <brokers>    Broker address (localhost:9092)\n"
                "  -z <codec>      Enable compression:\n"
                "                  none|gzip|snappy\n"
                "  -m <msg>        Message to send\n"
                "  -k <key>        Key of message\n"
                "\n",
                argv[0], rd_kafka_version_str(), rd_kafka_version());
        exit(1);
    }

    {
        /*
         * Producer
         */

        /* Set up a message delivery report callback.
         * It will be called once for each message, either on successful
         * delivery to broker, or upon failure to deliver to broker. */
        rd_kafka_conf_set_dr_cb(conf, msg_delivered);

        /* Create Kafka handle */
        if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                                errstr, sizeof(errstr)))) {
            fprintf(stderr, "%% Failed to create new producer: %s\n",
                    errstr);
            exit(1);
        }

        /* Set logger */
        rd_kafka_set_logger(rk, logger);
        rd_kafka_set_log_level(rk, LOG_DEBUG);

        /* Add brokers */
        if (rd_kafka_brokers_add(rk, brokers) == 0) {
            fprintf(stderr, "%% No valid brokers specified\n");
            exit(1);
        }

        /* Create topic */
        rkt = rd_kafka_topic_new(rk, topic, topic_conf);

        /* Send/Produce message. */
        if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
                             /* Payload and length */
                             msg, strlen(msg),
                             /* Optional key and its length */
                             key, key ? strlen(key) : 0,
                             /* Message opaque, provided in
                              * delivery report callback as
                              * msg_opaque. */
                             NULL) == -1) {
            fprintf(stderr,
                    "%% Failed to produce to topic %s partition %i: %s\n",
                    rd_kafka_topic_name(rkt), partition,
                    rd_kafka_err2str(rd_kafka_errno2err(errno)));
            /* Poll to handle delivery reports */
            rd_kafka_poll(rk, 0);
            exit(1);
        }

        /* Poll to handle delivery reports */
        rd_kafka_poll(rk, 0);

        /* Wait for messages to be delivered */
        while (rd_kafka_outq_len(rk) > 0)
            rd_kafka_poll(rk, 100);

        /* Destroy topic */
        rd_kafka_topic_destroy(rkt);

        /* Destroy the handle */
        rd_kafka_destroy(rk);
    }

    /* Let background threads clean up and terminate cleanly. */
    rd_kafka_wait_destroyed(2000);

    return 0;
}