static PyObject *Producer_flush (Handle *self, PyObject *args,
                                 PyObject *kwargs) {
    double tmout = -1;
    int qlen;
    static char *kws[] = { "timeout", NULL };
#if RD_KAFKA_VERSION >= 0x00090300
    CallState cs;
#endif

    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|d", kws, &tmout))
        return NULL;

#if RD_KAFKA_VERSION >= 0x00090300
    CallState_begin(self, &cs);

    rd_kafka_flush(self->rk, tmout < 0 ? -1 : (int)(tmout * 1000));

    if (!CallState_end(self, &cs))
        return NULL;

    qlen = rd_kafka_outq_len(self->rk);
#else
    while ((qlen = rd_kafka_outq_len(self->rk)) > 0) {
        if (Producer_poll0(self, 500) == -1)
            return NULL;
    }
#endif
    return cfl_PyInt_FromInt(qlen);
}
static void test_producer_no_connection (void) {
    rd_kafka_t *rk;
    rd_kafka_conf_t *conf;
    rd_kafka_topic_t *rkt;
    int i;
    const int partition_cnt = 2;
    int msgcnt = 0;
    test_timing_t t_destroy;

    test_conf_init(&conf, NULL, 20);

    /* Deliberately clear the broker list: the producer must queue
     * the messages and then time them out without a connection. */
    test_conf_set(conf, "bootstrap.servers", NULL);

    rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
    rkt = test_create_topic_object(rk, __FUNCTION__,
                                   "message.timeout.ms", "5000", NULL);

    test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA, 0, 100,
                             NULL, 100, 0, &msgcnt);
    for (i = 0 ; i < partition_cnt ; i++)
        test_produce_msgs_nowait(rk, rkt, 0, i, 0, 100,
                                 NULL, 100, 0, &msgcnt);

    rd_kafka_poll(rk, 1000);

    TEST_SAY("%d messages in queue\n", rd_kafka_outq_len(rk));

    rd_kafka_topic_destroy(rkt);

    TIMING_START(&t_destroy, "rd_kafka_destroy()");
    rd_kafka_destroy(rk);
    TIMING_STOP(&t_destroy);
}
int p_kafka_check_outq_len(struct p_kafka_host *kafka_host)
{
    int outq_len = 0, old_outq_len = 0;

    if (kafka_host->rk) {
        while ((outq_len = rd_kafka_outq_len(kafka_host->rk)) > 0) {
            if (!old_outq_len) {
                old_outq_len = outq_len;
            }
            else {
                if (outq_len == old_outq_len) {
                    Log(LOG_ERR, "ERROR ( %s/%s ): Connection failed to Kafka: p_kafka_check_outq_len()\n",
                        config.name, config.type);
                    p_kafka_close(kafka_host, TRUE);
                    return outq_len;
                }
            }

            rd_kafka_poll(kafka_host->rk, 100);
            sleep(1);
        }
    }
    else
        return ERR;

    return SUCCESS;
}
/*
 * Check the librdkafka out-queues and, if they are not empty, write
 * their contents to a local file. The path depends on the user's
 * configuration; the default is /var/log/sendkafka.
 */
void save_queuedata_tofile(rd_kafka_t **rks, int rkcount)
{
    int fd = open(g_queue_data_filepath, O_WRONLY | O_APPEND | O_CREAT, 0666);
    if (fd == -1) {
        char buf[100] = { 0 };
        snprintf(buf, sizeof(buf), "%d line open %s file fail...",
                 __LINE__ - 4, g_queue_data_filepath);
        perror(buf);
        save_error(g_logsavelocal_tag, LOG_CRIT, buf);
        exit(5);
    }

    /* NOTE: rd_kafka_q_read() and the rd_kafka_op_t fields used here
     * are librdkafka 0.7 internals; this does not build against 0.8+. */
    rd_kafka_op_t *rko = NULL;
    int i = 0;

    for (i = 0; i < rkcount; i++) {
        while (rd_kafka_outq_len(rks[i]) > 0) {
            rko = rd_kafka_q_read(&(rks[i]->rk_op), RD_POLL_INFINITE);
            write(fd, rko->rko_payload, rko->rko_len);
        }
    }

    close(fd);
}
int main (int argc, char **argv) {
    if (argc < 0 /* always false */) {
        rd_kafka_version();
        rd_kafka_version_str();
        rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR);
        rd_kafka_errno2err(EINVAL);
        rd_kafka_conf_new();
        rd_kafka_conf_destroy(NULL);
        rd_kafka_conf_dup(NULL);
        rd_kafka_conf_set(NULL, NULL, NULL, NULL, 0);
        rd_kafka_conf_set_dr_cb(NULL, NULL);
        rd_kafka_conf_set_error_cb(NULL, NULL);
        rd_kafka_conf_set_stats_cb(NULL, NULL);
        rd_kafka_conf_set_opaque(NULL, NULL);
        rd_kafka_conf_dump(NULL, NULL);
        rd_kafka_topic_conf_dump(NULL, NULL);
        rd_kafka_conf_dump_free(NULL, 0);
        rd_kafka_conf_properties_show(NULL);
        rd_kafka_topic_conf_new();
        rd_kafka_topic_conf_dup(NULL);
        rd_kafka_topic_conf_destroy(NULL);
        rd_kafka_topic_conf_set(NULL, NULL, NULL, NULL, 0);
        rd_kafka_topic_conf_set_opaque(NULL, NULL);
        rd_kafka_topic_conf_set_partitioner_cb(NULL, NULL);
        rd_kafka_topic_partition_available(NULL, 0);
        rd_kafka_msg_partitioner_random(NULL, NULL, 0, 0, NULL, NULL);
        rd_kafka_new(0, NULL, NULL, 0);
        rd_kafka_destroy(NULL);
        rd_kafka_name(NULL);
        rd_kafka_topic_new(NULL, NULL, NULL);
        rd_kafka_topic_destroy(NULL);
        rd_kafka_topic_name(NULL);
        rd_kafka_message_destroy(NULL);
        rd_kafka_message_errstr(NULL);
        rd_kafka_consume_start(NULL, 0, 0);
        rd_kafka_consume_stop(NULL, 0);
        rd_kafka_consume(NULL, 0, 0);
        rd_kafka_consume_batch(NULL, 0, 0, NULL, 0);
        rd_kafka_consume_callback(NULL, 0, 0, NULL, NULL);
        rd_kafka_offset_store(NULL, 0, 0);
        rd_kafka_produce(NULL, 0, 0, NULL, 0, NULL, 0, NULL);
        rd_kafka_poll(NULL, 0);
        rd_kafka_brokers_add(NULL, NULL);
        rd_kafka_set_logger(NULL, NULL);
        rd_kafka_set_log_level(NULL, 0);
        rd_kafka_log_print(NULL, 0, NULL, NULL);
        rd_kafka_log_syslog(NULL, 0, NULL, NULL);
        rd_kafka_outq_len(NULL);
        rd_kafka_dump(NULL, NULL);
        rd_kafka_thread_cnt();
        rd_kafka_wait_destroyed(0);
    }

    return 0;
}
void kafka_destroy(rd_kafka_t *r, int timeout)
{
    if (r != NULL) {
        /* Poll handle status */
        rd_kafka_poll(r, 0);
        if (rd_kafka_outq_len(r) > 0) {
            /* Wait for the out-queue to clear */
            while (rd_kafka_outq_len(r) > 0)
                rd_kafka_poll(r, timeout);
            timeout = 1;
        }
        rd_kafka_destroy(r);
        /* This wait blocks PHP, but skipping it can yield a
         * segfault during shutdown. */
        rd_kafka_wait_destroyed(timeout);
        r = NULL;
    }
}
/**
 * Waits for the messages tracked by counter \p msgcounterp to be delivered.
 */
void test_wait_delivery (rd_kafka_t *rk, int *msgcounterp) {
    test_timing_t t_all;

    TIMING_START(&t_all, "PRODUCE.DELIVERY.WAIT");

    /* Wait for messages to be delivered */
    while (*msgcounterp > 0 && rd_kafka_outq_len(rk) > 0)
        rd_kafka_poll(rk, 10);

    TIMING_STOP(&t_all);
}
RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::close () {
    rd_kafka_resp_err_t err;
    err = rd_kafka_consumer_close(rk_);
    if (err)
        return static_cast<RdKafka::ErrorCode>(err);

    while (rd_kafka_outq_len(rk_) > 0)
        rd_kafka_poll(rk_, 10);
    rd_kafka_destroy(rk_);

    return static_cast<RdKafka::ErrorCode>(err);
}
static PyObject *
RdkHandle_outq_len(RdkHandle *self)
{
    if (RdkHandle_safe_lock(self, /* check_running= */ 1)) return NULL;

    int outq_len = -1;
    Py_BEGIN_ALLOW_THREADS  /* avoid callbacks deadlocking */
        outq_len = rd_kafka_outq_len(self->rdk_handle);
    Py_END_ALLOW_THREADS

    if (RdkHandle_unlock(self)) return NULL;
    return Py_BuildValue("i", outq_len);
}
void test_produce_msgs (rd_kafka_t *rk, rd_kafka_topic_t *rkt,
                        uint64_t testid, int32_t partition,
                        int msg_base, int cnt,
                        const char *payload, size_t size) {
    int msg_id;
    test_timing_t t_all;
    int remains = 0;

    TEST_SAY("Produce to %s [%"PRId32"]: messages #%d..%d\n",
             rd_kafka_topic_name(rkt), partition, msg_base, msg_base+cnt);

    TIMING_START(&t_all, "PRODUCE");

    for (msg_id = msg_base ; msg_id < msg_base + cnt ; msg_id++) {
        char key[128];
        char buf[128];
        const char *use_payload;
        size_t use_size;

        if (payload) {
            use_payload = payload;
            use_size = size;
        } else {
            test_msg_fmt(key, sizeof(key), testid, partition, msg_id);
            rd_snprintf(buf, sizeof(buf), "data: %s", key);
            use_payload = buf;
            use_size = strlen(buf);
        }

        remains++;
        if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
                             (void *)use_payload, use_size,
                             key, strlen(key), &remains) == -1)
            TEST_FAIL("Failed to produce message %i "
                      "to partition %i: %s",
                      msg_id, (int)partition,
                      rd_kafka_err2str(rd_kafka_errno2err(errno)));
    }

    /* Wait for messages to be delivered */
    while (remains > 0 && rd_kafka_outq_len(rk) > 0)
        rd_kafka_poll(rk, 10);

    TIMING_STOP(&t_all);
}
void consumer_close(wrapper_Info* consumer_info)
{
    /* Stop consuming */
    rd_kafka_consume_stop(consumer_info->rkt, consumer_info->partition);

    while (rd_kafka_outq_len(consumer_info->rk) > 0)
        rd_kafka_poll(consumer_info->rk, 10);

    /* Destroy topic */
    rd_kafka_topic_destroy(consumer_info->rkt);

    /* Destroy handle */
    rd_kafka_destroy(consumer_info->rk);
}
static VALUE kafka_destroy()
{
    if (rk) {
        for (int i = 0 ; i < MAX_SHUTDOWN_TRIES; ++i) {
            if (!rd_kafka_outq_len(rk))
                break;
            rd_kafka_poll(rk, 100);
        }

        rd_kafka_destroy(rk);
        int res = rd_kafka_wait_destroyed(100);
        if (res)
            error("wait_destroyed returned: %d\n", res);
        rk = NULL;
    }
    return Qnil;
}
static void KafkaLog_Close (rd_kafka_t* handle)
{
    if ( !handle ) return;

    /* Wait for messaging to finish. */
    unsigned throw_msg_count = 10;
    unsigned msg_left, prev_msg_left = 0;

    /* The original condition assigned (outq_len > 0) to msg_left and
     * left prev_msg_left uninitialized; both are fixed here. */
    while ((msg_left = rd_kafka_outq_len(handle)) > 0 && throw_msg_count) {
        if (prev_msg_left == msg_left)
            /* No messages sent in a second? The broker has probably gone down. */
            throw_msg_count--;
        else
            throw_msg_count = 10;
        DEBUG_WRAP(DebugMessage(DEBUG_OUTPUT_PLUGIN,
            "Waiting for messages to send. Still %u messages to be exported. %u retries left.\n",
            msg_left, throw_msg_count););
        prev_msg_left = msg_left;
        sleep(1);
        //rd_kafka_poll();
    }
}
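The fixed loop above bounds its wait with a stall counter but still leans on plugin-specific logging macros. For comparison, here is a minimal, self-contained sketch of the same bounded-drain idea written against only the public librdkafka API; the ten-interval budget mirrors throw_msg_count above, and rd_kafka_poll() stands in for the commented-out poll call (this is an illustrative helper, not the plugin's code):

#include <librdkafka/rdkafka.h>

/* Drain the out-queue, giving up after ten consecutive one-second
 * intervals with no progress. */
static void drain_outq_bounded(rd_kafka_t *rk)
{
    int budget = 10;
    int prev = -1;
    int len;

    while ((len = rd_kafka_outq_len(rk)) > 0 && budget > 0) {
        if (len == prev)
            budget--;        /* no progress this interval */
        else
            budget = 10;     /* progress made: reset the budget */
        prev = len;
        rd_kafka_poll(rk, 1000); /* serve delivery report callbacks */
    }
}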
/*
 * Monitor the librdkafka out-queue sizes and write them to a local
 * file. The path depends on the user's configuration; the default is
 * /var/log/sendkafka.
 */
void check_queuedata_size(rd_kafka_t **rks, int num, char *queuesize_path)
{
    static time_t lasttime = 0;
    time_t curenttime = getcurrents();

    if ((curenttime % g_monitor_period) == 0 && curenttime != lasttime) {
        char buf[128] = { 0 };
        int i = 0;

        rotate_logs(queuesize_path);

        int fd = open(queuesize_path, O_WRONLY | O_APPEND | O_CREAT, 0666);
        if (fd == -1) {
            char errbuf[1024] = { 0 };
            snprintf(errbuf, sizeof(errbuf), "%d line open %s fail...",
                     __LINE__ - 4, queuesize_path);
            save_error(g_logsavelocal_tag, LOG_CRIT, errbuf);
            exit(3);
        }

        char timebuf[50] = { 0 };
        strcpy(timebuf, getcurrenttime());
        timebuf[strlen(timebuf) - 1] = '\0';

        /* NOTE: rks[i]->rk_broker.name is a librdkafka 0.7 internal
         * and does not exist in 0.8+. */
        for (; i < num; ++i) {
            snprintf(buf, sizeof(buf), "%s|%s| queue size= %d\n",
                     timebuf,
                     rks[i] ? rks[i]->rk_broker.name : "",
                     rd_kafka_outq_len(rks[i]));
            write(fd, buf, strlen(buf));
            memset(buf, '\0', 128);
        }

        close(fd);
        lasttime = curenttime;
    }
}
int main (int argc, char **argv) {
    rd_kafka_topic_t *rkt;
    char *brokers = "localhost:9092";
    char mode = 'C';
    char *topic = NULL;
    int partition = RD_KAFKA_PARTITION_UA;
    int opt;
    rd_kafka_conf_t *conf;
    rd_kafka_topic_conf_t *topic_conf;
    char errstr[512];
    const char *debug = NULL;
    int64_t start_offset = 0;
    int report_offsets = 0;
    int do_conf_dump = 0;

    quiet = !isatty(STDIN_FILENO);

    /* Kafka configuration */
    conf = rd_kafka_conf_new();

    /* Topic configuration */
    topic_conf = rd_kafka_topic_conf_new();

    while ((opt = getopt(argc, argv, "PCLt:p:b:z:qd:o:eX:A")) != -1) {
        switch (opt) {
        case 'P':
        case 'C':
        case 'L':
            mode = opt;
            break;
        case 't':
            topic = optarg;
            break;
        case 'p':
            partition = atoi(optarg);
            break;
        case 'b':
            brokers = optarg;
            break;
        case 'z':
            if (rd_kafka_conf_set(conf, "compression.codec", optarg,
                                  errstr, sizeof(errstr)) !=
                RD_KAFKA_CONF_OK) {
                fprintf(stderr, "%% %s\n", errstr);
                exit(1);
            }
            break;
        case 'o':
            if (!strcmp(optarg, "end"))
                start_offset = RD_KAFKA_OFFSET_END;
            else if (!strcmp(optarg, "beginning"))
                start_offset = RD_KAFKA_OFFSET_BEGINNING;
            else if (!strcmp(optarg, "stored"))
                start_offset = RD_KAFKA_OFFSET_STORED;
            else if (!strcmp(optarg, "report"))
                report_offsets = 1;
            else
                start_offset = strtoll(optarg, NULL, 10);
            break;
        case 'e':
            exit_eof = 1;
            break;
        case 'd':
            debug = optarg;
            break;
        case 'q':
            quiet = 1;
            break;
        case 'A':
            output = OUTPUT_RAW;
            break;
        case 'X':
        {
            char *name, *val;
            rd_kafka_conf_res_t res;

            if (!strcmp(optarg, "list") || !strcmp(optarg, "help")) {
                rd_kafka_conf_properties_show(stdout);
                exit(0);
            }

            if (!strcmp(optarg, "dump")) {
                do_conf_dump = 1;
                continue;
            }

            name = optarg;
            if (!(val = strchr(name, '='))) {
                fprintf(stderr, "%% Expected "
                        "-X property=value, not %s\n", name);
                exit(1);
            }

            *val = '\0';
            val++;

            res = RD_KAFKA_CONF_UNKNOWN;
            /* Try "topic." prefixed properties on topic
             * conf first, and then fall through to global if
             * it didn't match a topic configuration property. */
            if (!strncmp(name, "topic.", strlen("topic.")))
                res = rd_kafka_topic_conf_set(topic_conf,
                                              name + strlen("topic."),
                                              val, errstr, sizeof(errstr));

            if (res == RD_KAFKA_CONF_UNKNOWN)
                res = rd_kafka_conf_set(conf, name, val,
                                        errstr, sizeof(errstr));

            if (res != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "%% %s\n", errstr);
                exit(1);
            }
        }
        break;

        default:
            goto usage;
        }
    }

    if (do_conf_dump) {
        const char **arr;
        size_t cnt;
        int pass;

        for (pass = 0 ; pass < 2 ; pass++) {
            int i;

            if (pass == 0) {
                arr = rd_kafka_conf_dump(conf, &cnt);
                printf("# Global config\n");
            } else {
                printf("# Topic config\n");
                arr = rd_kafka_topic_conf_dump(topic_conf, &cnt);
            }

            for (i = 0 ; i < cnt ; i += 2)
                printf("%s = %s\n", arr[i], arr[i+1]);

            printf("\n");

            rd_kafka_conf_dump_free(arr, cnt);
        }

        exit(0);
    }

    if (optind != argc || (mode != 'L' && !topic)) {
    usage:
        fprintf(stderr,
                "Usage: %s -C|-P|-L -t <topic> "
                "[-p <partition>] [-b <host1:port1,host2:port2,..>]\n"
                "\n"
                "librdkafka version %s (0x%08x)\n"
                "\n"
                " Options:\n"
                "  -C | -P         Consumer or Producer mode\n"
                "  -L              Metadata list mode\n"
                "  -t <topic>      Topic to fetch / produce\n"
                "  -p <num>        Partition (random partitioner)\n"
                "  -b <brokers>    Broker address (localhost:9092)\n"
                "  -z <codec>      Enable compression:\n"
                "                  none|gzip|snappy\n"
                "  -o <offset>     Start offset (consumer)\n"
                "  -o report       Report message offsets (producer)\n"
                "  -e              Exit consumer when last message\n"
                "                  in partition has been received.\n"
                "  -d [facs..]     Enable debugging contexts:\n"
                "  -q              Be quiet\n"
                "  -A              Raw payload output (consumer)\n"
                "                  %s\n"
                "  -X <prop=name>  Set arbitrary librdkafka "
                "configuration property\n"
                "                  Properties prefixed with \"topic.\" "
                "will be set on topic object.\n"
                "                  Use '-X list' to see the full list\n"
                "                  of supported properties.\n"
                "\n"
                " In Consumer mode:\n"
                "  writes fetched messages to stdout\n"
                " In Producer mode:\n"
                "  reads messages from stdin and sends to broker\n"
                " In List mode:\n"
                "  queries broker for metadata information, "
                "topic is optional.\n"
                "\n"
                "\n"
                "\n",
                argv[0],
                rd_kafka_version_str(), rd_kafka_version(),
                RD_KAFKA_DEBUG_CONTEXTS);
        exit(1);
    }

    signal(SIGINT, stop);
    signal(SIGUSR1, sig_usr1);

    if (debug &&
        rd_kafka_conf_set(conf, "debug", debug, errstr, sizeof(errstr)) !=
        RD_KAFKA_CONF_OK) {
        fprintf(stderr, "%% Debug configuration failed: %s: %s\n",
                errstr, debug);
        exit(1);
    }

    if (mode == 'P') {
        /*
         * Producer
         */
        char buf[2048];
        int sendcnt = 0;

        /* Set up a message delivery report callback.
         * It will be called once for each message, either on successful
         * delivery to broker, or upon failure to deliver to broker. */

        /* If offset reporting (-o report) is enabled, use the
         * richer dr_msg_cb instead. */
        if (report_offsets) {
            rd_kafka_topic_conf_set(topic_conf,
                                    "produce.offset.report",
                                    "true", errstr, sizeof(errstr));
            rd_kafka_conf_set_dr_msg_cb(conf, msg_delivered2);
        } else
            rd_kafka_conf_set_dr_cb(conf, msg_delivered);

        /* Create Kafka handle */
        if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                                errstr, sizeof(errstr)))) {
            fprintf(stderr,
                    "%% Failed to create new producer: %s\n", errstr);
            exit(1);
        }

        /* Set logger */
        rd_kafka_set_logger(rk, logger);
        rd_kafka_set_log_level(rk, LOG_DEBUG);

        /* Add brokers */
        if (rd_kafka_brokers_add(rk, brokers) == 0) {
            fprintf(stderr, "%% No valid brokers specified\n");
            exit(1);
        }

        /* Create topic */
        rkt = rd_kafka_topic_new(rk, topic, topic_conf);

        if (!quiet)
            fprintf(stderr, "%% Type stuff and hit enter to send\n");

        while (run && fgets(buf, sizeof(buf), stdin)) {
            size_t len = strlen(buf);
            if (buf[len-1] == '\n')
                buf[--len] = '\0';

            /* Send/Produce message. */
            if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
                                 /* Payload and length */
                                 buf, len,
                                 /* Optional key and its length */
                                 NULL, 0,
                                 /* Message opaque, provided in
                                  * delivery report callback as
                                  * msg_opaque. */
                                 NULL) == -1) {
                fprintf(stderr,
                        "%% Failed to produce to topic %s "
                        "partition %i: %s\n",
                        rd_kafka_topic_name(rkt), partition,
                        rd_kafka_err2str(rd_kafka_errno2err(errno)));
                /* Poll to handle delivery reports */
                rd_kafka_poll(rk, 0);
                continue;
            }

            if (!quiet)
                fprintf(stderr, "%% Sent %zd bytes to topic "
                        "%s partition %i\n",
                        len, rd_kafka_topic_name(rkt), partition);
            sendcnt++;
            /* Poll to handle delivery reports */
            rd_kafka_poll(rk, 0);
        }

        /* Poll to handle delivery reports */
        rd_kafka_poll(rk, 0);

        /* Wait for messages to be delivered */
        while (run && rd_kafka_outq_len(rk) > 0)
            rd_kafka_poll(rk, 100);

        /* Destroy the handle */
        rd_kafka_destroy(rk);

    } else if (mode == 'C') {
        /*
         * Consumer
         */

        /* Create Kafka handle */
        if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf,
                                errstr, sizeof(errstr)))) {
            fprintf(stderr,
                    "%% Failed to create new consumer: %s\n", errstr);
            exit(1);
        }

        /* Set logger */
        rd_kafka_set_logger(rk, logger);
        rd_kafka_set_log_level(rk, LOG_DEBUG);

        /* Add brokers */
        if (rd_kafka_brokers_add(rk, brokers) == 0) {
            fprintf(stderr, "%% No valid brokers specified\n");
            exit(1);
        }

        /* Create topic */
        rkt = rd_kafka_topic_new(rk, topic, topic_conf);

        /* Start consuming */
        if (rd_kafka_consume_start(rkt, partition, start_offset) == -1) {
            fprintf(stderr, "%% Failed to start consuming: %s\n",
                    rd_kafka_err2str(rd_kafka_errno2err(errno)));
            exit(1);
        }

        while (run) {
            rd_kafka_message_t *rkmessage;

            /* Consume single message.
             * See rdkafka_performance.c for high speed
             * consuming of messages. */
            rkmessage = rd_kafka_consume(rkt, partition, 1000);
            if (!rkmessage) /* timeout */
                continue;

            msg_consume(rkmessage, NULL);

            /* Return message to rdkafka */
            rd_kafka_message_destroy(rkmessage);
        }

        /* Stop consuming */
        rd_kafka_consume_stop(rkt, partition);

        rd_kafka_topic_destroy(rkt);

        rd_kafka_destroy(rk);

    } else if (mode == 'L') {
        rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;

        /* Create Kafka handle */
        if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                                errstr, sizeof(errstr)))) {
            fprintf(stderr,
                    "%% Failed to create new producer: %s\n", errstr);
            exit(1);
        }

        /* Set logger */
        rd_kafka_set_logger(rk, logger);
        rd_kafka_set_log_level(rk, LOG_DEBUG);

        /* Add brokers */
        if (rd_kafka_brokers_add(rk, brokers) == 0) {
            fprintf(stderr, "%% No valid brokers specified\n");
            exit(1);
        }

        /* Create topic */
        if (topic)
            rkt = rd_kafka_topic_new(rk, topic, topic_conf);
        else
            rkt = NULL;

        while (run) {
            const struct rd_kafka_metadata *metadata;

            /* Fetch metadata */
            err = rd_kafka_metadata(rk, rkt ? 0 : 1, rkt,
                                    &metadata, 5000);
            if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
                fprintf(stderr,
                        "%% Failed to acquire metadata: %s\n",
                        rd_kafka_err2str(err));
                run = 0;
                break;
            }

            metadata_print(topic, metadata);

            rd_kafka_metadata_destroy(metadata);
            run = 0;
        }

        /* Destroy the handle */
        rd_kafka_destroy(rk);

        /* Exit right away, don't wait for background cleanup; we haven't
         * done anything important anyway. */
        exit(err ? 2 : 0);
    }

    /* Let background threads clean up and terminate cleanly. */
    rd_kafka_wait_destroyed(2000);

    return 0;
}
int main (int argc, char **argv) {
    rd_kafka_t *rk;
    char *broker = NULL;
    char mode = 'C';
    char *topic = NULL;
    int partition = 0;
    int opt;

    while ((opt = getopt(argc, argv, "PCt:p:b:")) != -1) {
        switch (opt) {
        case 'P':
        case 'C':
            mode = opt;
            break;
        case 't':
            topic = optarg;
            break;
        case 'p':
            partition = atoi(optarg);
            break;
        case 'b':
            broker = optarg;
            break;
        default:
            goto usage;
        }
    }

    if (!topic || optind != argc) {
    usage:
        fprintf(stderr,
                "Usage: %s [-C|-P] -t <topic> "
                "[-p <partition>] [-b <broker>]\n"
                "\n"
                " Options:\n"
                "  -C | -P      Consumer or Producer mode\n"
                "  -t <topic>   Topic to fetch / produce\n"
                "  -p <num>     Partition (defaults to 0)\n"
                "  -b <broker>  Broker address (localhost:9092)\n"
                "\n"
                " In Consumer mode:\n"
                "  writes fetched messages to stdout\n"
                " In Producer mode:\n"
                "  reads messages from stdin and sends to broker\n"
                "\n",
                argv[0]);
        exit(1);
    }

    signal(SIGINT, stop);

    if (mode == 'P') {
        /*
         * Producer
         */
        char buf[2048];
        int sendcnt = 0;

        /* Create Kafka handle */
        if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, broker, NULL))) {
            perror("kafka_new producer");
            exit(1);
        }

        fprintf(stderr, "%% Type stuff and hit enter to send\n");
        while (run && (fgets(buf, sizeof(buf), stdin))) {
            int len = strlen(buf);
            char *opbuf = malloc(len + 1);
            strncpy(opbuf, buf, len + 1);

            /* Send/Produce message. */
            rd_kafka_produce(rk, topic, partition, RD_KAFKA_OP_F_FREE,
                             opbuf, len);
            fprintf(stderr, "%% Sent %i bytes to topic "
                    "%s partition %i\n", len, topic, partition);
            sendcnt++;
        }

        /* Wait for messaging to finish. */
        while (rd_kafka_outq_len(rk) > 0)
            usleep(50000);

        /* Since there is no ack for produce messages in 0.7
         * we wait some more for any packets to be sent.
         * This is fixed in protocol version 0.8 */
        if (sendcnt > 0)
            usleep(500000);

        /* Destroy the handle */
        rd_kafka_destroy(rk);

    } else if (mode == 'C') {
        /*
         * Consumer
         */
        rd_kafka_op_t *rko;
        /* Base our configuration on the default config. */
        rd_kafka_conf_t conf = rd_kafka_defaultconf;

        /* The offset storage file is optional but its presence
         * avoids starting all over from offset 0 again when
         * the program restarts.
         * ZooKeeper functionality will be implemented in future
         * versions and then the offset will be stored there instead. */
        conf.consumer.offset_file = "."; /* current directory */

        /* Indicate to rdkafka that the application is responsible
         * for storing the offset. This allows the application to
         * successfully handle a message before storing the offset.
         * If this flag is not set rdkafka will store the offset
         * just prior to returning the message from rd_kafka_consume(). */
        conf.flags |= RD_KAFKA_CONF_F_APP_OFFSET_STORE;

        /* Use the consumer convenience function
         * to create a Kafka handle. */
        if (!(rk = rd_kafka_new_consumer(broker, topic,
                                         (uint32_t)partition,
                                         0, &conf))) {
            perror("kafka_new_consumer");
            exit(1);
        }

        while (run) {
            /* Fetch an "op" which is one of:
             *  - a kafka message (if rko_len>0 && rko_err==0)
             *  - an error (if rko_err) */
            if (!(rko = rd_kafka_consume(rk, 1000/*timeout ms*/)))
                continue;

            if (rko->rko_err)
                fprintf(stderr, "%% Error: %.*s\n",
                        rko->rko_len, rko->rko_payload);
            else if (rko->rko_len) {
                fprintf(stderr, "%% Message with "
                        "next-offset %"PRIu64" is %i bytes\n",
                        rko->rko_offset, rko->rko_len);
                hexdump(stdout, "Message",
                        rko->rko_payload, rko->rko_len);
            }

            /* rko_offset contains the offset of the _next_
             * message. We store it when we're done processing
             * the current message. */
            if (rko->rko_offset)
                rd_kafka_offset_store(rk, rko->rko_offset);

            /* Destroy the op */
            rd_kafka_op_destroy(rk, rko);
        }

        /* Destroy the handle */
        rd_kafka_destroy(rk);
    }

    return 0;
}
int main_0006_symbols (int argc, char **argv) {

    if (argc < 0 /* always false */) {
        rd_kafka_version();
        rd_kafka_version_str();
        rd_kafka_get_debug_contexts();
        rd_kafka_get_err_descs(NULL, NULL);
        rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR);
        rd_kafka_err2name(RD_KAFKA_RESP_ERR_NO_ERROR);
        rd_kafka_errno2err(EINVAL);
        rd_kafka_errno();
        rd_kafka_last_error();
        rd_kafka_conf_new();
        rd_kafka_conf_destroy(NULL);
        rd_kafka_conf_dup(NULL);
        rd_kafka_conf_set(NULL, NULL, NULL, NULL, 0);
        rd_kafka_conf_set_dr_cb(NULL, NULL);
        rd_kafka_conf_set_dr_msg_cb(NULL, NULL);
        rd_kafka_conf_set_error_cb(NULL, NULL);
        rd_kafka_conf_set_stats_cb(NULL, NULL);
        rd_kafka_conf_set_log_cb(NULL, NULL);
        rd_kafka_conf_set_socket_cb(NULL, NULL);
        rd_kafka_conf_set_rebalance_cb(NULL, NULL);
        rd_kafka_conf_set_offset_commit_cb(NULL, NULL);
        rd_kafka_conf_set_throttle_cb(NULL, NULL);
        rd_kafka_conf_set_default_topic_conf(NULL, NULL);
        rd_kafka_conf_get(NULL, NULL, NULL, NULL);
#ifndef _MSC_VER
        rd_kafka_conf_set_open_cb(NULL, NULL);
#endif
        rd_kafka_conf_set_opaque(NULL, NULL);
        rd_kafka_opaque(NULL);
        rd_kafka_conf_dump(NULL, NULL);
        rd_kafka_topic_conf_dump(NULL, NULL);
        rd_kafka_conf_dump_free(NULL, 0);
        rd_kafka_conf_properties_show(NULL);
        rd_kafka_topic_conf_new();
        rd_kafka_topic_conf_dup(NULL);
        rd_kafka_topic_conf_destroy(NULL);
        rd_kafka_topic_conf_set(NULL, NULL, NULL, NULL, 0);
        rd_kafka_topic_conf_set_opaque(NULL, NULL);
        rd_kafka_topic_conf_get(NULL, NULL, NULL, NULL);
        rd_kafka_topic_conf_set_partitioner_cb(NULL, NULL);
        rd_kafka_topic_partition_available(NULL, 0);
        rd_kafka_topic_opaque(NULL);
        rd_kafka_msg_partitioner_random(NULL, NULL, 0, 0, NULL, NULL);
        rd_kafka_msg_partitioner_consistent(NULL, NULL, 0, 0, NULL, NULL);
        rd_kafka_msg_partitioner_consistent_random(NULL, NULL, 0, 0,
                                                   NULL, NULL);
        rd_kafka_new(0, NULL, NULL, 0);
        rd_kafka_destroy(NULL);
        rd_kafka_name(NULL);
        rd_kafka_memberid(NULL);
        rd_kafka_topic_new(NULL, NULL, NULL);
        rd_kafka_topic_destroy(NULL);
        rd_kafka_topic_name(NULL);
        rd_kafka_message_destroy(NULL);
        rd_kafka_message_errstr(NULL);
        rd_kafka_message_timestamp(NULL, NULL);
        rd_kafka_consume_start(NULL, 0, 0);
        rd_kafka_consume_stop(NULL, 0);
        rd_kafka_consume(NULL, 0, 0);
        rd_kafka_consume_batch(NULL, 0, 0, NULL, 0);
        rd_kafka_consume_callback(NULL, 0, 0, NULL, NULL);
        rd_kafka_offset_store(NULL, 0, 0);
        rd_kafka_produce(NULL, 0, 0, NULL, 0, NULL, 0, NULL);
        rd_kafka_produce_batch(NULL, 0, 0, NULL, 0);
        rd_kafka_poll(NULL, 0);
        rd_kafka_brokers_add(NULL, NULL);
        /* DEPRECATED: rd_kafka_set_logger(NULL, NULL); */
        rd_kafka_set_log_level(NULL, 0);
        rd_kafka_log_print(NULL, 0, NULL, NULL);
#ifndef _MSC_VER
        rd_kafka_log_syslog(NULL, 0, NULL, NULL);
#endif
        rd_kafka_outq_len(NULL);
        rd_kafka_dump(NULL, NULL);
        rd_kafka_thread_cnt();
        rd_kafka_wait_destroyed(0);
        rd_kafka_metadata(NULL, 0, NULL, NULL, 0);
        rd_kafka_metadata_destroy(NULL);
        rd_kafka_queue_destroy(NULL);
        rd_kafka_consume_start_queue(NULL, 0, 0, NULL);
        rd_kafka_consume_queue(NULL, 0);
        rd_kafka_consume_batch_queue(NULL, 0, NULL, 0);
        rd_kafka_consume_callback_queue(NULL, 0, NULL, NULL);
        rd_kafka_seek(NULL, 0, 0, 0);
        rd_kafka_yield(NULL);
        rd_kafka_mem_free(NULL, NULL);
        rd_kafka_list_groups(NULL, NULL, NULL, 0);
        rd_kafka_group_list_destroy(NULL);

        /* KafkaConsumer API */
        rd_kafka_subscribe(NULL, NULL);
        rd_kafka_unsubscribe(NULL);
        rd_kafka_subscription(NULL, NULL);
        rd_kafka_consumer_poll(NULL, 0);
        rd_kafka_consumer_close(NULL);
        rd_kafka_assign(NULL, NULL);
        rd_kafka_assignment(NULL, NULL);
        rd_kafka_commit(NULL, NULL, 0);
        rd_kafka_commit_message(NULL, NULL, 0);
        rd_kafka_committed(NULL, NULL, 0);
        rd_kafka_position(NULL, NULL);

        /* TopicPartition */
        rd_kafka_topic_partition_list_new(0);
        rd_kafka_topic_partition_list_destroy(NULL);
        rd_kafka_topic_partition_list_add(NULL, NULL, 0);
        rd_kafka_topic_partition_list_add_range(NULL, NULL, 0, 0);
        rd_kafka_topic_partition_list_del(NULL, NULL, 0);
        rd_kafka_topic_partition_list_del_by_idx(NULL, 0);
        rd_kafka_topic_partition_list_copy(NULL);
        rd_kafka_topic_partition_list_set_offset(NULL, NULL, 0, 0);
        rd_kafka_topic_partition_list_find(NULL, NULL, 0);
        rd_kafka_query_watermark_offsets(NULL, NULL, 0, NULL, NULL, 0);
        rd_kafka_get_watermark_offsets(NULL, NULL, 0, NULL, NULL);
    }

    return 0;
}
/**
 * Produces 'msgcnt' messages split over 'partition_cnt' partitions.
 */
static void produce_messages (uint64_t testid, const char *topic,
                              int partition_cnt, int msgcnt) {
    int r;
    rd_kafka_t *rk;
    rd_kafka_topic_t *rkt;
    rd_kafka_conf_t *conf;
    rd_kafka_topic_conf_t *topic_conf;
    char errstr[512];
    char msg[128];
    int failcnt = 0;
    int i;
    rd_kafka_message_t *rkmessages;
    int32_t partition;
    int msgid = 0;

    test_conf_init(&conf, &topic_conf, 20);

    rd_kafka_conf_set_dr_cb(conf, dr_cb);

    /* Make sure all replicas are in-sync after producing
     * so that the consume test won't fail. */
    rd_kafka_topic_conf_set(topic_conf, "request.required.acks", "-1",
                            errstr, sizeof(errstr));

    /* Create kafka instance */
    rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

    rkt = rd_kafka_topic_new(rk, topic, topic_conf);
    if (!rkt)
        TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));

    /* Create messages. */
    prod_msg_remains = msgcnt;
    rkmessages = calloc(sizeof(*rkmessages), msgcnt / partition_cnt);
    for (partition = 0 ; partition < partition_cnt ; partition++) {
        int batch_cnt = msgcnt / partition_cnt;

        for (i = 0 ; i < batch_cnt ; i++) {
            rd_snprintf(msg, sizeof(msg),
                        "testid=%"PRIu64", partition=%i, msg=%i",
                        testid, (int)partition, msgid);
            rkmessages[i].payload = rd_strdup(msg);
            rkmessages[i].len = strlen(msg);
            msgid++;
        }

        TEST_SAY("Start produce to partition %i: msgs #%d..%d\n",
                 (int)partition, msgid-batch_cnt, msgid);

        /* Produce batch for this partition */
        r = rd_kafka_produce_batch(rkt, partition, RD_KAFKA_MSG_F_FREE,
                                   rkmessages, batch_cnt);
        if (r == -1)
            TEST_FAIL("Failed to produce "
                      "batch for partition %i: %s",
                      (int)partition,
                      rd_kafka_err2str(rd_kafka_errno2err(errno)));

        /* Scan through messages to check for errors. */
        for (i = 0 ; i < batch_cnt ; i++) {
            if (rkmessages[i].err) {
                failcnt++;
                if (failcnt < 100)
                    TEST_SAY("Message #%i failed: %s\n",
                             i,
                             rd_kafka_err2str(rkmessages[i].err));
            }
        }

        /* All messages should've been produced. */
        if (r < batch_cnt) {
            TEST_SAY("Not all messages were accepted "
                     "by produce_batch(): %i < %i\n", r, batch_cnt);

            if (batch_cnt - r != failcnt)
                TEST_SAY("Discrepancy between failed "
                         "messages (%i) "
                         "and return value %i (%i - %i)\n",
                         failcnt, batch_cnt - r, batch_cnt, r);
            TEST_FAIL("%i/%i messages failed\n",
                      batch_cnt - r, batch_cnt);
        }

        TEST_SAY("Produced %i messages to partition %i, "
                 "waiting for deliveries\n", r, partition);
    }

    free(rkmessages);

    /* Wait for messages to be delivered */
    while (rd_kafka_outq_len(rk) > 0)
        rd_kafka_poll(rk, 100);

    if (fails)
        TEST_FAIL("%i failures, see previous errors", fails);

    if (prod_msg_remains != 0)
        TEST_FAIL("Still waiting for %i messages to be produced",
                  prod_msg_remains);

    /* Destroy topic */
    rd_kafka_topic_destroy(rkt);

    /* Destroy rdkafka instance */
    TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
    rd_kafka_destroy(rk);
}
int main (int argc, char **argv) {
    char *brokers = "localhost";
    char mode = 'C';
    char *topic = NULL;
    const char *key = NULL;
    int partition = RD_KAFKA_PARTITION_UA; /* random */
    int opt;
    int msgcnt = -1;
    int sendflags = 0;
    char *msgpattern = "librdkafka_performance testing!";
    int msgsize = strlen(msgpattern);
    const char *debug = NULL;
    rd_ts_t now;
    char errstr[512];
    uint64_t seq = 0;
    int seed = time(NULL);
    rd_kafka_topic_t *rkt;
    rd_kafka_conf_t *conf;
    rd_kafka_topic_conf_t *topic_conf;
    const char *compression = "no";
    int64_t start_offset = 0;
    int batch_size = 0;

    /* Kafka configuration */
    conf = rd_kafka_conf_new();
    rd_kafka_conf_set_error_cb(conf, err_cb);
    rd_kafka_conf_set_dr_cb(conf, msg_delivered);

    /* Producer config */
    rd_kafka_conf_set(conf, "queue.buffering.max.messages", "500000",
                      NULL, 0);
    rd_kafka_conf_set(conf, "message.send.max.retries", "3", NULL, 0);
    rd_kafka_conf_set(conf, "retry.backoff.ms", "500", NULL, 0);

    /* Consumer config */
    /* Tell rdkafka to (try to) maintain 1M messages
     * in its internal receive buffers. This is to avoid
     * application -> rdkafka -> broker per-message ping-pong
     * latency.
     * The larger the local queue, the higher the performance.
     * Try other values with: ... -X queued.min.messages=1000 */
    rd_kafka_conf_set(conf, "queued.min.messages", "1000000", NULL, 0);

    /* Kafka topic configuration */
    topic_conf = rd_kafka_topic_conf_new();
    rd_kafka_topic_conf_set(topic_conf, "message.timeout.ms", "5000",
                            NULL, 0);

    while ((opt = getopt(argc, argv,
                         "PCt:p:b:s:k:c:fi:Dd:m:S:x:R:a:z:o:X:B:eT:q")) !=
           -1) {
        switch (opt) {
        case 'P':
        case 'C':
            mode = opt;
            break;
        case 't':
            topic = optarg;
            break;
        case 'p':
            partition = atoi(optarg);
            break;
        case 'b':
            brokers = optarg;
            break;
        case 's':
            msgsize = atoi(optarg);
            break;
        case 'k':
            key = optarg;
            break;
        case 'c':
            msgcnt = atoi(optarg);
            break;
        case 'D':
            sendflags |= RD_KAFKA_MSG_F_FREE;
            break;
        case 'i':
            dispintvl = atoi(optarg);
            break;
        case 'm':
            msgpattern = optarg;
            break;
        case 'S':
            seq = strtoull(optarg, NULL, 10);
            do_seq = 1;
            break;
        case 'x':
            exit_after = atoi(optarg);
            break;
        case 'R':
            seed = atoi(optarg);
            break;
        case 'a':
            if (rd_kafka_topic_conf_set(topic_conf,
                                        "request.required.acks",
                                        optarg,
                                        errstr, sizeof(errstr)) !=
                RD_KAFKA_CONF_OK) {
                fprintf(stderr, "%% %s\n", errstr);
                exit(1);
            }
            break;
        case 'B':
            batch_size = atoi(optarg);
            break;
        case 'z':
            if (rd_kafka_conf_set(conf, "compression.codec", optarg,
                                  errstr, sizeof(errstr)) !=
                RD_KAFKA_CONF_OK) {
                fprintf(stderr, "%% %s\n", errstr);
                exit(1);
            }
            compression = optarg;
            break;
        case 'o':
            start_offset = strtoll(optarg, NULL, 10);
            break;
        case 'e':
            exit_eof = 1;
            break;
        case 'd':
            debug = optarg;
            break;
        case 'X':
        {
            char *name, *val;
            rd_kafka_conf_res_t res;

            if (!strcmp(optarg, "list") || !strcmp(optarg, "help")) {
                rd_kafka_conf_properties_show(stdout);
                exit(0);
            }

            name = optarg;
            if (!(val = strchr(name, '='))) {
                fprintf(stderr, "%% Expected "
                        "-X property=value, not %s\n", name);
                exit(1);
            }

            *val = '\0';
            val++;

            res = RD_KAFKA_CONF_UNKNOWN;
            /* Try "topic." prefixed properties on topic
             * conf first, and then fall through to global if
             * it didn't match a topic configuration property.
             * (The prefix stripped here must be "topic." including
             * the dot, otherwise the property name is mangled.) */
            if (!strncmp(name, "topic.", strlen("topic.")))
                res = rd_kafka_topic_conf_set(topic_conf,
                                              name + strlen("topic."),
                                              val, errstr,
                                              sizeof(errstr));

            if (res == RD_KAFKA_CONF_UNKNOWN)
                res = rd_kafka_conf_set(conf, name, val,
                                        errstr, sizeof(errstr));

            if (res != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "%% %s\n", errstr);
                exit(1);
            }
        }
        break;

        case 'T':
            if (rd_kafka_conf_set(conf, "statistics.interval.ms",
                                  optarg, errstr, sizeof(errstr)) !=
                RD_KAFKA_CONF_OK) {
                fprintf(stderr, "%% %s\n", errstr);
                exit(1);
            }
            rd_kafka_conf_set_stats_cb(conf, stats_cb);
            break;

        case 'q':
            quiet = 1;
            break;

        default:
            goto usage;
        }
    }

    if (!topic || optind != argc) {
    usage:
        fprintf(stderr,
                "Usage: %s [-C|-P] -t <topic> "
                "[-p <partition>] [-b <broker,broker..>] [options..]\n"
                "\n"
                " Options:\n"
                "  -C | -P       Consumer or Producer mode\n"
                "  -t <topic>    Topic to fetch / produce\n"
                "  -p <num>      Partition (defaults to random)\n"
                "  -b <brokers>  Broker address list (host[:port],..)\n"
                "  -s <size>     Message size (producer)\n"
                "  -k <key>      Message key (producer)\n"
                "  -c <cnt>      Messages to transmit/receive\n"
                "  -D            Copy/Duplicate data buffer (producer)\n"
                "  -i <ms>       Display interval\n"
                "  -m <msg>      Message payload pattern\n"
                "  -S <start>    Send a sequence number starting at "
                "<start> as payload\n"
                "  -R <seed>     Random seed value (defaults to time)\n"
                "  -a <acks>     Required acks (producer): "
                "-1, 0, 1, >1\n"
                "  -B <size>     Consume batch size (# of msgs)\n"
                "  -z <codec>    Enable compression:\n"
                "                none|gzip|snappy\n"
                "  -o <offset>   Start offset (consumer)\n"
                "  -d [facs..]   Enable debugging contexts:\n"
                "                %s\n"
                "  -X <prop=name> Set arbitrary librdkafka "
                "configuration property\n"
                "                Properties prefixed with \"topic.\" "
                "will be set on topic object.\n"
                "                Use '-X list' to see the full list\n"
                "                of supported properties.\n"
                "  -T <intvl>    Enable statistics from librdkafka at "
                "specified interval (ms)\n"
                "  -q            Be more quiet\n"
                "\n"
                " In Consumer mode:\n"
                "  consumes messages and prints throughput\n"
                "  If -B <..> is supplied the batch consumer\n"
                "  mode is used, else the callback mode is used.\n"
                "\n"
                " In Producer mode:\n"
                "  writes messages of size -s <..> and prints throughput\n"
                "\n",
                argv[0],
                RD_KAFKA_DEBUG_CONTEXTS);
        exit(1);
    }

    dispintvl *= 1000; /* us */

    printf("%% Using random seed %i\n", seed);
    srand(seed);
    signal(SIGINT, stop);
    signal(SIGUSR1, sig_usr1);

    if (debug &&
        rd_kafka_conf_set(conf, "debug", debug,
                          errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
        printf("%% Debug configuration failed: %s: %s\n", errstr, debug);
        exit(1);
    }

    /* Socket hangups are gracefully handled in librdkafka on socket error
     * without the use of signals, so SIGPIPE should be ignored by the
     * calling program. */
    signal(SIGPIPE, SIG_IGN);

    if (msgcnt != -1)
        forever = 0;

    if (mode == 'P') {
        /*
         * Producer
         */
        char *sbuf;
        char *pbuf;
        int outq;
        int i;
        int keylen = key ? strlen(key) : 0;
        off_t rof = 0;
        size_t plen = strlen(msgpattern);

        if (do_seq) {
            if (msgsize < strlen("18446744073709551615: ")+1)
                msgsize = strlen("18446744073709551615: ")+1;
            /* Force duplication of payload */
            sendflags |= RD_KAFKA_MSG_F_FREE;
        }

        sbuf = malloc(msgsize);

        /* Copy payload content to new buffer */
        while (rof < msgsize) {
            size_t xlen = RD_MIN(msgsize-rof, plen);
            memcpy(sbuf+rof, msgpattern, xlen);
            rof += xlen;
        }

        if (msgcnt == -1)
            printf("%% Sending messages of size %i bytes\n", msgsize);
        else
            printf("%% Sending %i messages of size %i bytes\n",
                   msgcnt, msgsize);

        /* Create Kafka handle */
        if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                                errstr, sizeof(errstr)))) {
            fprintf(stderr,
                    "%% Failed to create Kafka producer: %s\n", errstr);
            exit(1);
        }

        if (debug)
            rd_kafka_set_log_level(rk, 7);

        /* Add broker(s) */
        if (rd_kafka_brokers_add(rk, brokers) < 1) {
            fprintf(stderr, "%% No valid brokers specified\n");
            exit(1);
        }

        /* Explicitly create topic to avoid per-msg lookups. */
        rkt = rd_kafka_topic_new(rk, topic, topic_conf);

        cnt.t_start = rd_clock();

        while (run && (msgcnt == -1 || cnt.msgs < msgcnt)) {
            /* Send/Produce message. */

            if (do_seq) {
                snprintf(sbuf, msgsize-1, "%"PRIu64": ", seq);
                seq++;
            }

            if (sendflags & RD_KAFKA_MSG_F_FREE) {
                /* Duplicate memory */
                pbuf = malloc(msgsize);
                memcpy(pbuf, sbuf, msgsize);
            } else
                pbuf = sbuf;

            cnt.tx++;
            while (run &&
                   rd_kafka_produce(rkt, partition, sendflags,
                                    pbuf, msgsize,
                                    key, keylen, NULL) == -1) {
                if (!quiet || errno != ENOBUFS)
                    printf("produce error: %s%s\n",
                           strerror(errno),
                           errno == ENOBUFS ?
                           " (backpressure)" : "");
                cnt.tx_err++;
                if (errno != ENOBUFS) {
                    run = 0;
                    break;
                }
                now = rd_clock();
                if (cnt.t_last + dispintvl <= now) {
                    printf("%% Backpressure %i "
                           "(tx %"PRIu64", "
                           "txerr %"PRIu64")\n",
                           rd_kafka_outq_len(rk),
                           cnt.tx, cnt.tx_err);
                    cnt.t_last = now;
                }
                /* Poll to handle delivery reports */
                rd_kafka_poll(rk, 10);
            }

            msgs_wait_cnt++;
            cnt.msgs++;
            cnt.bytes += msgsize;

            print_stats(mode, 0, compression);

            /* Must poll to handle delivery reports */
            rd_kafka_poll(rk, 0);
        }

        forever = 0;
        printf("All messages produced, "
               "now waiting for %li deliveries\n", msgs_wait_cnt);
        rd_kafka_dump(stdout, rk);

        /* Wait for messages to be delivered */
        i = 0;
        while (run && rd_kafka_poll(rk, 1000) != -1) {
            if (!(i++ % (dispintvl/1000)))
                printf("%% Waiting for %li, "
                       "%i messages in outq "
                       "to be sent. Abort with Ctrl-c\n",
                       msgs_wait_cnt,
                       rd_kafka_outq_len(rk));
        }

        outq = rd_kafka_outq_len(rk);
        printf("%% %i messages in outq\n", outq);
        cnt.msgs -= outq;
        cnt.bytes -= msgsize * outq;
        cnt.t_end = rd_clock();

        if (cnt.tx_err > 0)
            printf("%% %"PRIu64" backpressures for %"PRIu64
                   " produce calls: %.3f%% backpressure rate\n",
                   cnt.tx_err, cnt.tx,
                   ((double)cnt.tx_err / (double)cnt.tx) * 100.0);

        rd_kafka_dump(stdout, rk);

        /* Destroy the handle */
        rd_kafka_destroy(rk);

    } else if (mode == 'C') {
        /*
         * Consumer
         */
        rd_kafka_message_t **rkmessages = NULL;

#if 0 /* Future API */
        /* The offset storage file is optional but its presence
         * avoids starting all over from offset 0 again when
         * the program restarts.
         * ZooKeeper functionality will be implemented in future
         * versions and then the offset will be stored there instead. */
        conf.consumer.offset_file = "."; /* current directory */

        /* Indicate to rdkafka that the application is responsible
         * for storing the offset. This allows the application to
         * successfully handle a message before storing the offset.
         * If this flag is not set rdkafka will store the offset
         * just prior to returning the message from rd_kafka_consume(). */
        conf.flags |= RD_KAFKA_CONF_F_APP_OFFSET_STORE;
#endif

        /* Create Kafka handle */
        if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf,
                                errstr, sizeof(errstr)))) {
            fprintf(stderr,
                    "%% Failed to create Kafka consumer: %s\n", errstr);
            exit(1);
        }

        if (debug)
            rd_kafka_set_log_level(rk, 7);

        /* Add broker(s) */
        if (rd_kafka_brokers_add(rk, brokers) < 1) {
            fprintf(stderr, "%% No valid brokers specified\n");
            exit(1);
        }

        /* Create topic to consume from */
        rkt = rd_kafka_topic_new(rk, topic, topic_conf);

        /* Batch consumer */
        if (batch_size)
            rkmessages = malloc(sizeof(*rkmessages) * batch_size);

        /* Start consuming */
        if (rd_kafka_consume_start(rkt, partition, start_offset) == -1) {
            fprintf(stderr, "%% Failed to start consuming: %s\n",
                    strerror(errno));
            exit(1);
        }

        cnt.t_start = rd_clock();
        while (run && (msgcnt == -1 || msgcnt > cnt.msgs)) {
            /* Consume messages.
             * A message may either be a real message, or
             * an error signaling (if rkmessage->err is set). */
            uint64_t latency;
            int r;

            latency = rd_clock();

            if (batch_size) {
                int i;

                /* Batch fetch mode */
                r = rd_kafka_consume_batch(rkt, partition, 1000,
                                           rkmessages, batch_size);
                if (r != -1) {
                    for (i = 0 ; i < r ; i++) {
                        msg_consume(rkmessages[i], NULL);
                        rd_kafka_message_destroy(rkmessages[i]);
                    }
                }
            } else {
                /* Callback mode */
                r = rd_kafka_consume_callback(rkt, partition,
                                              1000/*timeout*/,
                                              msg_consume, NULL);
            }

            cnt.t_latency += rd_clock() - latency;

            if (r == -1)
                fprintf(stderr, "%% Error: %s\n", strerror(errno));

            print_stats(mode, 0, compression);

            /* Poll to handle stats callbacks */
            rd_kafka_poll(rk, 0);
        }
        cnt.t_end = rd_clock();

        /* Stop consuming */
        rd_kafka_consume_stop(rkt, partition);

        /* Destroy topic */
        rd_kafka_topic_destroy(rkt);

        if (batch_size)
            free(rkmessages);

        /* Destroy the handle */
        rd_kafka_destroy(rk);
    }

    print_stats(mode, 1, compression);

    if (cnt.t_latency && cnt.msgs)
        printf("%% Average application fetch latency: %"PRIu64"us\n",
               cnt.t_latency / cnt.msgs);

    /* Let background threads clean up and terminate cleanly. */
    rd_kafka_wait_destroyed(2000);

    return 0;
}
int main_0008_reqacks (int argc, char **argv) {
    int partition = 0;
    int r;
    rd_kafka_t *rk;
    rd_kafka_topic_t *rkt;
    rd_kafka_conf_t *conf;
    rd_kafka_topic_conf_t *topic_conf;
    char errstr[512];
    char msg[128];
    int msgcnt = 100;
    int i;
    int reqacks;
    int idbase = 0;
    const char *topic = NULL;

    TEST_SAY("\033[33mNOTE! This test requires at "
             "least 3 brokers!\033[0m\n");
    TEST_SAY("\033[33mNOTE! This test requires "
             "default.replication.factor=3 to be configured on "
             "all brokers!\033[0m\n");

    /* Try different request.required.acks settings (issue #75) */
    for (reqacks = -1 ; reqacks <= 1 ; reqacks++) {
        char tmp[10];

        test_conf_init(&conf, &topic_conf, 10);

        if (!topic)
            topic = test_mk_topic_name("0008", 0);

        rd_snprintf(tmp, sizeof(tmp), "%i", reqacks);

        if (rd_kafka_topic_conf_set(topic_conf, "request.required.acks",
                                    tmp, errstr, sizeof(errstr)) !=
            RD_KAFKA_CONF_OK)
            TEST_FAIL("%s", errstr);

        /* Set delivery report callback */
        rd_kafka_conf_set_dr_cb(conf, dr_cb);

        /* Create kafka instance */
        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                          errstr, sizeof(errstr));
        if (!rk)
            TEST_FAIL("Failed to create rdkafka instance: %s\n",
                      errstr);

        TEST_SAY("Created kafka instance %s with required acks %i\n",
                 rd_kafka_name(rk), reqacks);

        rkt = rd_kafka_topic_new(rk, topic, topic_conf);
        if (!rkt)
            TEST_FAIL("Failed to create topic: %s\n",
                      rd_strerror(errno));

        /* Produce messages */
        for (i = 0 ; i < msgcnt ; i++) {
            int *msgidp = malloc(sizeof(*msgidp));
            *msgidp = idbase + i;
            rd_snprintf(msg, sizeof(msg),
                        "%s test message #%i (acks=%i)",
                        argv[0], *msgidp, reqacks);
            r = rd_kafka_produce(rkt, partition,
                                 RD_KAFKA_MSG_F_COPY,
                                 msg, strlen(msg), NULL, 0, msgidp);
            if (r == -1)
                TEST_FAIL("Failed to produce message #%i: %s\n",
                          *msgidp, rd_strerror(errno));
        }

        TEST_SAY("Produced %i messages, waiting for deliveries\n",
                 msgcnt);

        /* Wait for messages to time out */
        while (rd_kafka_outq_len(rk) > 0)
            rd_kafka_poll(rk, 50);

        if (fails)
            TEST_FAIL("%i failures, see previous errors", fails);

        if (msgid_next != idbase + msgcnt)
            TEST_FAIL("Still waiting for messages: "
                      "next %i != end %i\n", msgid_next, msgcnt);
        idbase += i;

        /* Destroy topic */
        rd_kafka_topic_destroy(rkt);

        /* Destroy rdkafka instance */
        TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
        rd_kafka_destroy(rk);
    }

    return 0;
}
int kafka_produce_report(rd_kafka_t *r, const char *topic, char *msg,
                         int msg_len, long timeout)
{
    char errstr[512];
    rd_kafka_topic_t *rkt = NULL;
    int partition = RD_KAFKA_PARTITION_UA;
    rd_kafka_topic_conf_t *conf = NULL;
    struct produce_cb_params pcb = {1, 0, 0, 0, 0, NULL};

    if (r == NULL) {
        if (log_level) {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_ERR,
                   "No connection provided to produce to topic %s",
                   topic);
        }
        return -2;
    }

    /* Topic configuration */
    conf = rd_kafka_topic_conf_new();
    rd_kafka_topic_conf_set(conf, "produce.offset.report", "true",
                            errstr, sizeof errstr);

    char timeoutStr[64];
    snprintf(timeoutStr, 64, "%lu", timeout);
    if (rd_kafka_topic_conf_set(conf, "message.timeout.ms", timeoutStr,
                                errstr, sizeof(errstr)) !=
        RD_KAFKA_CONF_OK) {
        if (log_level) {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_ERR,
                   "Failed to configure topic param 'message.timeout.ms' to %lu before producing; config err was: %s",
                   timeout, errstr);
        }
        rd_kafka_topic_conf_destroy(conf);
        return -3;
    }

    /* Callback already set in kafka_set_connection */
    rkt = rd_kafka_topic_new(r, topic, conf);
    if (!rkt) {
        if (log_level) {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_ERR, "Failed to open topic %s", topic);
        }
        rd_kafka_topic_conf_destroy(conf);
        return -1;
    }

    /* Begin producing (poll the handle we produced on, not a global): */
    if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
                         msg, msg_len, NULL, 0, &pcb) == -1) {
        if (log_level) {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_ERR, "Failed to produce message: %s",
                   rd_kafka_err2str(rd_kafka_errno2err(errno)));
        }
        /* Handle delivery response (callback) */
        rd_kafka_poll(r, 0);
        rd_kafka_topic_destroy(rkt);
        return -1;
    }

    rd_kafka_poll(r, 0);
    while (pcb.msg_count && rd_kafka_outq_len(r) > 0)
        rd_kafka_poll(r, 10);

    rd_kafka_topic_destroy(rkt);
    return 0;
}
void kafka_consume_all(rd_kafka_t *rk, zval *return_value,
                       const char *topic, const char *offset,
                       int item_count)
{
    char errstr[512];
    rd_kafka_topic_t *rkt;
    rd_kafka_topic_conf_t *conf;
    const struct rd_kafka_metadata *meta = NULL;
    rd_kafka_queue_t *rkqu = NULL;
    int current, p, i = 0;
    int32_t partition = 0;
    int64_t start;
    struct consume_cb_params cb_params =
        {item_count, return_value, NULL, 0, 0, 0};

    /* Check for NULL pointers; all arguments are required! */
    if (rk == NULL || return_value == NULL || topic == NULL ||
        offset == NULL || strlen(offset) == 0)
        return;

    if (!strcmp(offset, "end"))
        start = RD_KAFKA_OFFSET_END;
    else if (!strcmp(offset, "beginning"))
        start = RD_KAFKA_OFFSET_BEGINNING;
    else if (!strcmp(offset, "stored"))
        start = RD_KAFKA_OFFSET_STORED;
    else
        start = strtoll(offset, NULL, 10);

    /* Topic configuration */
    conf = rd_kafka_topic_conf_new();

    /* Disable autocommit; queue_consume sets offsets automatically */
    if (rd_kafka_topic_conf_set(conf, "auto.commit.enable", "false",
                                errstr, sizeof(errstr)) !=
        RD_KAFKA_CONF_OK) {
        if (log_level) {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_WARNING,
                   "failed to turn autocommit off when consuming %d messages (start offset %"PRId64") from topic %s: %s",
                   item_count, start, topic, errstr);
        }
        cb_params.auto_commit = 1;
    }

    /* Create topic */
    rkt = rd_kafka_topic_new(rk, topic, conf);
    if (!rkt) {
        if (log_level) {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_INFO,
                   "phpkafka - Failed to read %s from %"PRId64" (%s)",
                   topic, start, offset);
        }
        return;
    }

    rkqu = rd_kafka_queue_new(rk);
    if (RD_KAFKA_RESP_ERR_NO_ERROR ==
        rd_kafka_metadata(rk, 0, rkt, &meta, 5)) {
        p = meta->topics->partition_cnt;
        cb_params.partition_ends =
            calloc(sizeof *cb_params.partition_ends, p);
        if (cb_params.partition_ends == NULL) {
            if (log_level) {
                openlog("phpkafka", 0, LOG_USER);
                syslog(LOG_INFO,
                       "phpkafka - Failed to read %s from %"PRId64" (%s)",
                       topic, start, offset);
            }
            rd_kafka_metadata_destroy(meta);
            meta = NULL;
            rd_kafka_queue_destroy(rkqu);
            rd_kafka_topic_destroy(rkt);
            return;
        }
        cb_params.eop = p;

        for (i = 0; i < p; ++i) {
            partition = meta->topics[0].partitions[i].id;
            if (rd_kafka_consume_start_queue(rkt, partition,
                                             start, rkqu)) {
                if (log_level) {
                    openlog("phpkafka", 0, LOG_USER);
                    syslog(LOG_ERR,
                           "Failed to start consuming topic %s [%"PRId32"]: %s",
                           topic, partition, offset);
                }
                continue;
            }
        }

        while (cb_params.read_count && cb_params.eop)
            rd_kafka_consume_callback_queue(rkqu, 200, queue_consume,
                                            &cb_params);

        free(cb_params.partition_ends);
        cb_params.partition_ends = NULL;

        for (i = 0; i < p; ++i) {
            partition = meta->topics[0].partitions[i].id;
            rd_kafka_consume_stop(rkt, partition);
        }

        rd_kafka_metadata_destroy(meta);
        meta = NULL;
        rd_kafka_queue_destroy(rkqu);

        while (rd_kafka_outq_len(rk) > 0)
            rd_kafka_poll(rk, 50);

        rd_kafka_topic_destroy(rkt);
    }

    if (meta)
        rd_kafka_metadata_destroy(meta);
}
/* Produce a batch of messages to a single partition. */
static void test_single_partition (void) {
    char *topic = "rdkafkatest1";
    int partition = 0;
    int r;
    rd_kafka_t *rk;
    rd_kafka_topic_t *rkt;
    rd_kafka_conf_t *conf;
    rd_kafka_topic_conf_t *topic_conf;
    char errstr[512];
    char msg[128];
    int msgcnt = 100000;
    int failcnt = 0;
    int i;
    rd_kafka_message_t *rkmessages;

    msgid_next = 0;

    test_conf_init(&conf, &topic_conf, 20);

    /* Set delivery report callback */
    rd_kafka_conf_set_dr_cb(conf, dr_single_partition_cb);

    /* Create kafka instance */
    rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
    if (!rk)
        TEST_FAIL("Failed to create rdkafka instance: %s\n", errstr);

    TEST_SAY("Created kafka instance %s\n", rd_kafka_name(rk));

    rkt = rd_kafka_topic_new(rk, topic, topic_conf);
    if (!rkt)
        TEST_FAIL("Failed to create topic: %s\n", strerror(errno));

    /* Create messages */
    rkmessages = calloc(sizeof(*rkmessages), msgcnt);
    for (i = 0 ; i < msgcnt ; i++) {
        int *msgidp = malloc(sizeof(*msgidp));
        *msgidp = i;
        snprintf(msg, sizeof(msg), "%s:%s test message #%i",
                 __FILE__, __FUNCTION__, i);

        rkmessages[i].payload  = strdup(msg);
        rkmessages[i].len      = strlen(msg);
        rkmessages[i]._private = msgidp;
    }

    r = rd_kafka_produce_batch(rkt, partition, RD_KAFKA_MSG_F_FREE,
                               rkmessages, msgcnt);

    /* Scan through messages to check for errors. */
    for (i = 0 ; i < msgcnt ; i++) {
        if (rkmessages[i].err) {
            failcnt++;
            if (failcnt < 100)
                TEST_SAY("Message #%i failed: %s\n",
                         i, rd_kafka_err2str(rkmessages[i].err));
        }
    }

    /* All messages should've been produced. */
    if (r < msgcnt) {
        TEST_SAY("Not all messages were accepted "
                 "by produce_batch(): %i < %i\n", r, msgcnt);
        if (msgcnt - r != failcnt)
            TEST_SAY("Discrepancy between failed messages (%i) "
                     "and return value %i (%i - %i)\n",
                     failcnt, msgcnt - r, msgcnt, r);
        TEST_FAIL("%i/%i messages failed\n", msgcnt - r, msgcnt);
    }

    free(rkmessages);
    TEST_SAY("Single partition: "
             "Produced %i messages, waiting for deliveries\n", r);

    /* Wait for messages to be delivered */
    while (rd_kafka_outq_len(rk) > 0)
        rd_kafka_poll(rk, 50);

    if (fails)
        TEST_FAIL("%i failures, see previous errors", fails);

    if (msgid_next != msgcnt)
        TEST_FAIL("Still waiting for messages: next %i != end %i\n",
                  msgid_next, msgcnt);

    /* Destroy topic */
    rd_kafka_topic_destroy(rkt);

    /* Destroy rdkafka instance */
    TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
    rd_kafka_destroy(rk);

    /* Wait for everything to be cleaned up since broker destroys are
     * handled in its own thread. */
    test_wait_exit(10);

    /* If we haven't failed at this point then
     * there were no threads leaked */
    return;
}
/**
 * @brief Get all partitions for a topic and their beginning offsets; useful
 * if we're consuming messages without knowing the actual partition beforehand.
 * @param int **partitions should be a pointer to NULL, will be allocated here
 * @param const char *topic the topic name
 * @return int (0 == meta error, -2: no connection, -1: allocation error,
 *         all other values indicate success (nr of elems in array))
 */
int kafka_partition_offsets(rd_kafka_t *r, long **partitions,
                            const char *topic)
{
    rd_kafka_topic_t *rkt = NULL;
    rd_kafka_topic_conf_t *conf = NULL;
    rd_kafka_queue_t *rkqu = NULL;
    struct consume_cb_params cb_params = {0, NULL, NULL, 0, 0, 0};
    int i = 0;
    /* Make life easier: one level of indirection... */
    long *values = *partitions;

    /* Connect as consumer if required */
    if (r == NULL) {
        if (log_level) {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_ERR,
                   "phpkafka - no connection to get offsets of topic: %s",
                   topic);
        }
        return -2;
    }

    /* Topic configuration */
    conf = rd_kafka_topic_conf_new();

    /* Create topic and queue on the connection we were given
     * (the original passed the global handle here by mistake). */
    rkt = rd_kafka_topic_new(r, topic, conf);
    rkqu = rd_kafka_queue_new(r);

    const struct rd_kafka_metadata *meta = NULL;
    if (RD_KAFKA_RESP_ERR_NO_ERROR ==
        rd_kafka_metadata(r, 0, rkt, &meta, 5)) {
        values = realloc(values,
                         meta->topics->partition_cnt * sizeof *values);
        if (values == NULL) {
            *partitions = values; /* possibly a corrupted pointer now */
            /* Free metadata, return error */
            rd_kafka_metadata_destroy(meta);
            return -1;
        }

        /* We need eop to reach 0; if there are 4 partitions,
         * start at 3 (0, 1, 2, 3) */
        cb_params.eop = meta->topics->partition_cnt - 1;
        cb_params.partition_offset = values;
        for (i = 0; i < meta->topics->partition_cnt; ++i) {
            /* Initialize: set to -2 for the callback */
            values[i] = -2;
            if (rd_kafka_consume_start_queue(rkt,
                                             meta->topics->partitions[i].id,
                                             RD_KAFKA_OFFSET_BEGINNING,
                                             rkqu)) {
                if (log_level) {
                    openlog("phpkafka", 0, LOG_USER);
                    syslog(LOG_ERR,
                           "Failed to start consuming topic %s [%"PRId32"]",
                           topic, meta->topics->partitions[i].id);
                }
                continue;
            }
        }

        /* Either eop has reached 0, or the read errors >= nr of
         * partitions; either way, we've consumed a message from each
         * partition, and therefore we're done. */
        while (cb_params.eop &&
               cb_params.error_count < meta->topics->partition_cnt)
            rd_kafka_consume_callback_queue(rkqu, 100,
                                            offset_queue_consume,
                                            &cb_params);

        /* Stop consuming for all partitions */
        for (i = 0; i < meta->topics->partition_cnt; ++i)
            rd_kafka_consume_stop(rkt, meta->topics[0].partitions[i].id);
        rd_kafka_queue_destroy(rkqu);

        /* Do we need this poll here? */
        while (rd_kafka_outq_len(r) > 0)
            rd_kafka_poll(r, 5);

        /* Let's be sure to pass along the correct values here... */
        *partitions = values;
        i = meta->topics->partition_cnt;
    }

    if (meta)
        rd_kafka_metadata_destroy(meta);
    rd_kafka_topic_destroy(rkt);

    return i;
}
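Given the contract in the docblock above (pass the address of a NULL pointer; a non-negative return is the element count), a hypothetical caller could look like the following sketch. get_connection() and "mytopic" are stand-ins for however the binding obtains its rd_kafka_t handle and for a real topic name:

#include <stdio.h>
#include <stdlib.h>

void print_topic_offsets(rd_kafka_t *conn)
{
    long *offsets = NULL;  /* must start out NULL per the docblock */
    int cnt = kafka_partition_offsets(conn, &offsets, "mytopic");
    int i;

    if (cnt > 0) {
        for (i = 0; i < cnt; i++)
            printf("partition %d begins at offset %ld\n",
                   i, offsets[i]);
    } else if (cnt == -2) {
        fprintf(stderr, "no connection\n");
    } else if (cnt == -1) {
        fprintf(stderr, "allocation error\n");
    } else {
        fprintf(stderr, "metadata error\n"); /* cnt == 0 */
    }
    free(offsets);
}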
int kafka_produce(rd_kafka_t *r, char *topic, char *msg, int msg_len,
                  int report, long timeout)
{
    char errstr[512];
    rd_kafka_topic_t *rkt;
    struct produce_cb_params pcb = {1, 0, 0, 0, 0, NULL};
    void *opaque;
    int partition = RD_KAFKA_PARTITION_UA;

    /* Decide whether to pass callback params or not... */
    if (report)
        opaque = &pcb;
    else
        opaque = NULL;

    rd_kafka_topic_conf_t *topic_conf;

    if (r == NULL) {
        if (log_level) {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_ERR,
                   "phpkafka - no connection to produce to topic: %s",
                   topic);
        }
        return -2;
    }

    /* Topic configuration */
    topic_conf = rd_kafka_topic_conf_new();

    char timeoutStr[64];
    snprintf(timeoutStr, 64, "%lu", timeout);
    if (rd_kafka_topic_conf_set(topic_conf, "message.timeout.ms",
                                timeoutStr, errstr, sizeof(errstr)) !=
        RD_KAFKA_CONF_OK) {
        if (log_level) {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_ERR,
                   "Failed to configure topic param 'message.timeout.ms' to %lu before producing; config err was: %s",
                   timeout, errstr);
        }
        rd_kafka_topic_conf_destroy(topic_conf);
        return -3;
    }

    /* Create topic */
    rkt = rd_kafka_topic_new(r, topic, topic_conf);

    if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
                         /* Payload and length */
                         msg, msg_len,
                         /* Optional key and its length */
                         NULL, 0,
                         /* Message opaque, provided in
                          * delivery report callback as
                          * msg_opaque. */
                         opaque) == -1) {
        if (log_level) {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_INFO,
                   "phpkafka - %% Failed to produce to topic %s "
                   "partition %i: %s",
                   rd_kafka_topic_name(rkt), partition,
                   rd_kafka_err2str(rd_kafka_errno2err(errno)));
        }
        rd_kafka_topic_destroy(rkt);
        return -1;
    }

    /* Poll to handle delivery reports */
    rd_kafka_poll(r, 0);

    /* Wait for messages to be delivered */
    while (report && pcb.msg_count && rd_kafka_outq_len(r) > 0)
        rd_kafka_poll(r, 10);

    rd_kafka_topic_destroy(rkt);
    return 0;
}
/**
 * Produces 'msgcnt' messages split over 'partition_cnt' partitions.
 */
static void produce_null_messages (uint64_t testid, const char *topic,
                                   int partition_cnt, int msgcnt) {
    int r;
    rd_kafka_t *rk;
    rd_kafka_topic_t *rkt;
    rd_kafka_conf_t *conf;
    rd_kafka_topic_conf_t *topic_conf;
    char errstr[512];
    int i;
    int32_t partition;
    int msgid = 0;

    test_conf_init(&conf, &topic_conf, 20);

    rd_kafka_conf_set_dr_cb(conf, dr_cb);

    /* Make sure all replicas are in-sync after producing
     * so that the consume test won't fail. */
    rd_kafka_topic_conf_set(topic_conf, "request.required.acks", "-1",
                            errstr, sizeof(errstr));

    /* Create kafka instance */
    rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

    rkt = rd_kafka_topic_new(rk, topic, topic_conf);
    if (!rkt)
        TEST_FAIL("Failed to create topic: %s\n",
                  rd_kafka_err2str(rd_kafka_last_error()));

    /* Produce messages */
    prod_msg_remains = msgcnt;
    for (partition = 0 ; partition < partition_cnt ; partition++) {
        int batch_cnt = msgcnt / partition_cnt;

        for (i = 0 ; i < batch_cnt ; i++) {
            char key[128];
            rd_snprintf(key, sizeof(key),
                        "testid=%"PRIu64", partition=%i, msg=%i",
                        testid, (int)partition, msgid);
            r = rd_kafka_produce(rkt, partition, 0,
                                 NULL, 0,
                                 key, strlen(key),
                                 NULL);
            if (r == -1)
                TEST_FAIL("Failed to produce message %i "
                          "to partition %i: %s",
                          msgid, (int)partition,
                          rd_kafka_err2str(rd_kafka_last_error()));
            msgid++;
        }
    }

    TEST_SAY("Produced %d messages to %d partition(s), "
             "waiting for deliveries\n", msgcnt, partition_cnt);

    /* Wait for messages to be delivered */
    while (rd_kafka_outq_len(rk) > 0)
        rd_kafka_poll(rk, 100);

    if (fails)
        TEST_FAIL("%i failures, see previous errors", fails);

    if (prod_msg_remains != 0)
        TEST_FAIL("Still waiting for %i messages to be produced",
                  prod_msg_remains);
    else
        TEST_SAY("All messages delivered\n");

    /* Destroy topic */
    rd_kafka_topic_destroy(rkt);

    /* Destroy rdkafka instance */
    TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
    rd_kafka_destroy(rk);
}
VCL_STRING vmod_send_msg(const struct vrt_ctx *ctx, VCL_STRING broker,
                         VCL_STRING topic_name, VCL_STRING name)
{
    char *p;
    unsigned u, v;
    char errstr[512];
    char *brokers = (char *)broker;
    char *topic = (char *)topic_name;
    rd_kafka_topic_t *rkt;
    int partition = RD_KAFKA_PARTITION_UA;

    u = WS_Reserve(ctx->ws, 0); /* Reserve some work space */
    p = ctx->ws->f;             /* Front of workspace area */
    v = snprintf(p, u, "%s", name);
    v++;
    if (v > u) {
        /* No space, reset and leave */
        WS_Release(ctx->ws, 0);
        return (NULL);
    }

    /* Update work space with what we've used */
    WS_Release(ctx->ws, v);

    /*
     * Producer
     */
    //char buf[2048];
    //char *buf = "test of sending the Test msg on topic fred";
    char *buf = name;
    int sendcnt = 0;

    /* Set up a message delivery report callback.
     * It will be called once for each message, either on successful
     * delivery to broker, or upon failure to deliver to broker. */
    // rd_kafka_conf_set_dr_cb(conf, msg_delivered);

    /* Create Kafka handle (conf, topic_conf, rk and quiet are
     * file-scope globals in this vmod). */
    if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                            errstr, sizeof(errstr)))) {
        fprintf(stderr, "%% Failed to create new producer: %s\n",
                errstr);
        exit(1);
    }

    /* Add brokers */
    if (rd_kafka_brokers_add(rk, brokers) == 0) {
        fprintf(stderr, "%% No valid brokers specified\n");
        exit(1);
    }

    /* Create topic */
    rkt = rd_kafka_topic_new(rk, topic, topic_conf);

    size_t len = strlen(buf);
    if (buf[len-1] == '\n')
        buf[--len] = '\0';

    /* Send/Produce message. */
    if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
                         /* Payload and length */
                         buf, len,
                         /* Optional key and its length */
                         NULL, 0,
                         /* Message opaque, provided in
                          * delivery report callback as
                          * msg_opaque. */
                         NULL) == -1) {
        fprintf(stderr,
                "%% Failed to produce to topic %s partition %i: %s\n",
                rd_kafka_topic_name(rkt), partition,
                rd_kafka_err2str(rd_kafka_errno2err(errno)));
        /* Poll to handle delivery reports */
        rd_kafka_poll(rk, 0);
        exit(2);
    }

    if (!quiet)
        fprintf(stderr, "%% Sent %zd bytes to topic "
                "%s partition %i\n",
                len, rd_kafka_topic_name(rkt), partition);
    sendcnt++;

    /* Poll to handle delivery reports */
    rd_kafka_poll(rk, 0);

    /* Wait for messages to be delivered */
    while (rd_kafka_outq_len(rk) > 0)
        rd_kafka_poll(rk, 100);

    /* Destroy topic */
    rd_kafka_topic_destroy(rkt);

    /* Destroy the handle */
    rd_kafka_destroy(rk);

    return (p);
}
int main (int argc, char **argv) {
        char *topic = "rdkafkatest1";
        int partition = 0;
        int r;
        rd_kafka_t *rk;
        rd_kafka_topic_t *rkt;
        rd_kafka_conf_t *conf;
        rd_kafka_topic_conf_t *topic_conf;
        char errstr[512];
        char msg[100000];
        int msgcnt = 10;
        int i;

        test_conf_init(&conf, &topic_conf, 10);

        /* Set a small maximum message size. */
        if (rd_kafka_conf_set(conf, "message.max.bytes", "100000",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
                TEST_FAIL("%s\n", errstr);

        /* Set delivery report callback */
        rd_kafka_conf_set_dr_cb(conf, dr_cb);

        /* Create kafka instance */
        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
        if (!rk)
                TEST_FAIL("Failed to create rdkafka instance: %s\n", errstr);

        TEST_SAY("Created kafka instance %s\n", rd_kafka_name(rk));

        rkt = rd_kafka_topic_new(rk, topic, topic_conf);
        if (!rkt)
                TEST_FAIL("Failed to create topic: %s\n", strerror(errno));

        memset(msg, 0, sizeof(msg));

        /* Produce 'msgcnt' messages: odd ones larger than
         * message.max.bytes, even ones smaller. */
        for (i = 0 ; i < msgcnt ; i++) {
                int *msgidp = malloc(sizeof(*msgidp));
                size_t len;
                int toobig = i & 1;

                *msgidp = i;
                if (toobig) {
                        /* Too big */
                        len = 200000;
                } else {
                        /* Good size */
                        len = 5000;
                        msgs_wait |= (1 << i);
                }

                snprintf(msg, sizeof(msg), "%s test message #%i",
                         argv[0], i);
                r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
                                     msg, len, NULL, 0, msgidp);

                if (toobig) {
                        if (r != -1)
                                TEST_FAIL("Managed to produce too "
                                          "large message #%i\n", i);
                        free(msgidp);
                } else if (r == -1)
                        TEST_FAIL("Failed to produce message #%i: %s\n",
                                  i, strerror(errno));
        }

        /* Wait for messages to be delivered. */
        while (rd_kafka_outq_len(rk) > 0)
                rd_kafka_poll(rk, 50);

        if (msgs_wait != 0)
                TEST_FAIL("Still waiting for messages: 0x%x\n", msgs_wait);

        /* Destroy topic */
        rd_kafka_topic_destroy(rkt);

        /* Destroy rdkafka instance */
        TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
        rd_kafka_destroy(rk);

        /* Wait for everything to be cleaned up since broker destruction
         * is handled in its own thread. */
        test_wait_exit(10);

        /* If we haven't failed by this point then no threads were leaked */
        return 0;
}
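The oversized produce in this test fails synchronously: rd_kafka_produce() returns -1 with errno set, and the legacy errno mapping lets the caller distinguish the size error from other failures. A small sketch of that check; the helper name is ours, not the test's:

static void produce_or_report (rd_kafka_topic_t *rkt, int partition,
                               void *payload, size_t len) {
        if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
                             payload, len, NULL, 0, NULL) == 0)
                return;
        /* errno EMSGSIZE maps to the librdkafka size error */
        if (rd_kafka_errno2err(errno) ==
            RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE)
                fprintf(stderr, "%zu bytes exceeds message.max.bytes\n",
                        len);
        else
                fprintf(stderr, "produce failed: %s\n",
                        rd_kafka_err2str(rd_kafka_errno2err(errno)));
}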
int kafka_produce_batch(rd_kafka_t *r, char *topic, char **msg, int *msg_len,
                        int msg_cnt, int report, long timeout)
{
        char errstr[512];
        rd_kafka_topic_t *rkt;
        rd_kafka_topic_conf_t *topic_conf;
        struct produce_cb_params pcb = {msg_cnt, 0, 0, 0, 0, NULL};
        void *opaque;
        int partition = RD_KAFKA_PARTITION_UA;
        int i, err_cnt = 0;

        if (report)
                opaque = &pcb;
        else
                opaque = NULL;

        if (r == NULL) {
                if (log_level) {
                        openlog("phpkafka", 0, LOG_USER);
                        syslog(LOG_ERR,
                               "phpkafka - no connection to produce to "
                               "topic: %s", topic);
                }
                return -2;
        }

        /* Topic configuration */
        topic_conf = rd_kafka_topic_conf_new();
        char timeoutStr[64];
        snprintf(timeoutStr, sizeof(timeoutStr), "%ld", timeout);
        if (rd_kafka_topic_conf_set(topic_conf, "message.timeout.ms",
                                    timeoutStr, errstr,
                                    sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                if (log_level) {
                        openlog("phpkafka", 0, LOG_USER);
                        syslog(LOG_ERR,
                               "Failed to set topic param "
                               "'message.timeout.ms' to %ld before "
                               "producing; config error was: %s",
                               timeout, errstr);
                }
                rd_kafka_topic_conf_destroy(topic_conf);
                return -3;
        }

        /* Create topic */
        rkt = rd_kafka_topic_new(r, topic, topic_conf);

        rd_kafka_message_t *messages = calloc(msg_cnt, sizeof(*messages));
        if (messages == NULL) {
                /* Allocation failed: fall back to individual produce calls */
                for (i = 0; i < msg_cnt; ++i) {
                        if (rd_kafka_produce(rkt, partition,
                                             RD_KAFKA_MSG_F_COPY,
                                             msg[i], msg_len[i],
                                             NULL, 0, opaque) == -1) {
                                if (log_level) {
                                        openlog("phpkafka", 0, LOG_USER);
                                        syslog(LOG_INFO,
                                               "phpkafka - %% Failed to "
                                               "produce to topic %s "
                                               "partition %i: %s",
                                               rd_kafka_topic_name(rkt),
                                               partition,
                                               rd_kafka_err2str(
                                                   rd_kafka_errno2err(errno)));
                                }
                        }
                }
        } else {
                for (i = 0; i < msg_cnt; ++i) {
                        messages[i].payload = msg[i];
                        messages[i].len = msg_len[i];
                }
                i = rd_kafka_produce_batch(rkt, partition,
                                           RD_KAFKA_MSG_F_COPY,
                                           messages, msg_cnt);
                if (i < msg_cnt) {
                        if (log_level) {
                                openlog("phpkafka", 0, LOG_USER);
                                syslog(LOG_WARNING,
                                       "Failed to queue full message batch, "
                                       "%d of %d were put in queue",
                                       i, msg_cnt);
                        }
                }
                err_cnt = msg_cnt - i;
                free(messages);
                messages = NULL;
        }

        /* Poll to handle delivery reports */
        rd_kafka_poll(r, 0);

        /* Wait for messages to be delivered */
        while (report && pcb.msg_count && rd_kafka_outq_len(r) > 0)
                rd_kafka_poll(r, 10);

        rd_kafka_topic_destroy(rkt);
        if (report)
                err_cnt = pcb.err_count;
        return err_cnt;
}
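The batch producer relies on a delivery report callback to decrement pcb.msg_count and count failures, but the callback itself is not shown here. A plausible minimal version, assuming the produce_cb_params field names used above and registration via rd_kafka_conf_set_dr_cb() at connection setup (note that in the batch path the per-message opaque would have to be supplied through each message's _private field, which the code above leaves NULL):

static void produce_dr_cb (rd_kafka_t *rk, void *payload, size_t len,
                           rd_kafka_resp_err_t err, void *opaque,
                           void *msg_opaque) {
        /* msg_opaque is the per-message opaque passed to produce() */
        struct produce_cb_params *pcb = msg_opaque;

        if (!pcb)
                return;
        pcb->msg_count--;         /* one fewer delivery outstanding */
        if (err)
                pcb->err_count++; /* delivery failed */
}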
int main (int argc, char **argv) {
        rd_kafka_t *rk;
        char *broker = NULL;
        char mode = 'C';
        char *topic = NULL;
        int partition = 0;
        int opt;
        int msgsize = 1024;
        int msgcnt = -1;
        int sendflags = 0;
        int dispintvl = 1000;
        struct {
                rd_ts_t t_start;
                rd_ts_t t_end;
                rd_ts_t t_end_send;
                uint64_t msgs;
                uint64_t bytes;
                rd_ts_t t_latency;
                rd_ts_t t_last;
                rd_ts_t t_total;
        } cnt = {};
        rd_ts_t now;
        char *dirstr = "";

        while ((opt = getopt(argc, argv, "PCt:p:b:s:c:fi:D")) != -1) {
                switch (opt) {
                case 'P':
                case 'C':
                        mode = opt;
                        break;
                case 't':
                        topic = optarg;
                        break;
                case 'p':
                        partition = atoi(optarg);
                        break;
                case 'b':
                        broker = optarg;
                        break;
                case 's':
                        msgsize = atoi(optarg);
                        break;
                case 'c':
                        msgcnt = atoi(optarg);
                        break;
                case 'D':
                        sendflags |= RD_KAFKA_OP_F_FREE;
                        break;
                case 'i':
                        dispintvl = atoi(optarg);
                        break;
                default:
                        goto usage;
                }
        }

        if (!topic || optind != argc) {
        usage:
                fprintf(stderr,
                        "Usage: %s [-C|-P] -t <topic> "
                        "[-p <partition>] [-b <broker>] [options..]\n"
                        "\n"
                        " Options:\n"
                        "  -C | -P      Consumer or Producer mode\n"
                        "  -t <topic>   Topic to fetch / produce\n"
                        "  -p <num>     Partition (defaults to 0)\n"
                        "  -b <broker>  Broker address (localhost:9092)\n"
                        "  -s <size>    Message size (producer)\n"
                        "  -c <cnt>     Messages to transmit/receive\n"
                        "  -D           Copy/Duplicate data buffer (producer)\n"
                        "  -i <ms>      Display interval\n"
                        "\n"
                        " In Consumer mode:\n"
                        "  consumes messages and prints throughput\n"
                        " In Producer mode:\n"
                        "  writes messages of size -s <..> and prints "
                        "throughput\n"
                        "\n",
                        argv[0]);
                exit(1);
        }

        dispintvl *= 1000; /* ms -> us */

        signal(SIGINT, stop);

        /* Socket hangups are gracefully handled in librdkafka on socket
         * error without the use of signals, so SIGPIPE should be ignored
         * by the calling program. */
        signal(SIGPIPE, SIG_IGN);

        if (mode == 'P') {
                /*
                 * Producer
                 */
                char *sbuf = malloc(msgsize);
                int endwait;
                int outq;
                int i;

                memset(sbuf, 'R', msgsize);

                if (msgcnt == -1)
                        printf("%% Sending messages of size %i bytes\n",
                               msgsize);
                else
                        printf("%% Sending %i messages of size %i bytes\n",
                               msgcnt, msgsize);

                /* Create Kafka handle (librdkafka 0.7 API) */
                if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, broker, NULL))) {
                        perror("kafka_new producer");
                        exit(1);
                }

                cnt.t_start = rd_clock();

                while (run && (msgcnt == -1 || cnt.msgs < msgcnt)) {
                        char *pbuf = sbuf;

                        /* Send/Produce message. */
                        if (sendflags & RD_KAFKA_OP_F_FREE) {
                                /* Duplicate memory */
                                pbuf = malloc(msgsize);
                                memcpy(pbuf, sbuf, msgsize);
                        }

                        rd_kafka_produce(rk, topic, partition, sendflags,
                                         pbuf, msgsize);
                        cnt.msgs++;
                        cnt.bytes += msgsize;

                        now = rd_clock();
                        if (cnt.t_last + dispintvl <= now) {
                                printf("%% %"PRIu64" messages and %"PRIu64
                                       " bytes: %"PRIu64" msgs/s and "
                                       "%.2f Mb/s\n",
                                       cnt.msgs, cnt.bytes,
                                       (cnt.msgs / (now - cnt.t_start))
                                       * 1000000,
                                       (float)(cnt.bytes /
                                               (now - cnt.t_start)));
                                cnt.t_last = now;
                        }
                }

                /* Wait for messaging to finish. */
                i = 0;
                while (run && rd_kafka_outq_len(rk) > 0) {
                        if (!(i++ % (dispintvl / 1000)))
                                printf("%% Waiting for %i messages in outq "
                                       "to be sent. Abort with Ctrl-c\n",
                                       rd_kafka_outq_len(rk));
                        usleep(1000);
                }

                cnt.t_end_send = rd_clock();
                outq = rd_kafka_outq_len(rk);
                cnt.msgs -= outq;
                cnt.bytes -= msgsize * outq;
                cnt.t_end = rd_clock();

                /* Since there are no acks for produced messages in the 0.7
                 * protocol we wait some more for any packets in the socket
                 * buffers to be sent.
                 * This is fixed in protocol version 0.8. */
                endwait = cnt.msgs * 10;
                printf("%% Test timers stopped, but waiting %ims more "
                       "for the %"PRIu64" messages to be transmitted from "
                       "socket buffers.\n"
                       "%% End with Ctrl-c\n",
                       endwait / 1000, cnt.msgs);
                run = 1;
                while (run && endwait > 0) {
                        usleep(10000);
                        endwait -= 10000;
                }

                /* Destroy the handle */
                rd_kafka_destroy(rk);

                dirstr = "sent";

        } else if (mode == 'C') {
                /*
                 * Consumer
                 */
                rd_kafka_op_t *rko;

                /* Base our configuration on the default config. */
                rd_kafka_conf_t conf = rd_kafka_defaultconf;

                /* The offset storage file is optional but its presence
                 * avoids starting all over from offset 0 again when
                 * the program restarts.
                 * ZooKeeper functionality will be implemented in future
                 * versions and then the offset will be stored there
                 * instead. */
                conf.consumer.offset_file = "."; /* current directory */

                /* Indicate to rdkafka that the application is responsible
                 * for storing the offset. This allows the application to
                 * successfully handle a message before storing the offset.
                 * If this flag is not set rdkafka will store the offset
                 * just prior to returning the message from
                 * rd_kafka_consume(). */
                conf.flags |= RD_KAFKA_CONF_F_APP_OFFSET_STORE;

                /* Tell rdkafka to (try to) maintain 100000 messages
                 * in its internal receive buffers. This is to avoid
                 * application -> rdkafka -> broker per-message ping-pong
                 * latency. */
                conf.consumer.replyq_low_thres = 100000;

                /* Use the consumer convenience function
                 * to create a Kafka handle. */
                if (!(rk = rd_kafka_new_consumer(broker, topic,
                                                 (uint32_t)partition,
                                                 0, &conf))) {
                        perror("kafka_new_consumer");
                        exit(1);
                }

                cnt.t_start = rd_clock();
                while (run && (msgcnt == -1 || msgcnt > cnt.msgs)) {
                        /* Fetch an "op" which is one of:
                         *  - a kafka message (if rko_len>0 && rko_err==0)
                         *  - an error (if rko_err) */
                        uint64_t latency;

                        latency = rd_clock();
                        if (!(rko = rd_kafka_consume(rk, 1000/*timeout ms*/)))
                                continue;
                        cnt.t_latency += rd_clock() - latency;

                        if (rko->rko_err)
                                fprintf(stderr, "%% Error: %.*s\n",
                                        rko->rko_len, rko->rko_payload);
                        else if (rko->rko_len) {
                                cnt.msgs++;
                                cnt.bytes += rko->rko_len;
                        }

                        /* rko_offset contains the offset of the _next_
                         * message. We store it when we're done processing
                         * the current message. */
                        if (rko->rko_offset)
                                rd_kafka_offset_store(rk, rko->rko_offset);

                        /* Destroy the op */
                        rd_kafka_op_destroy(rk, rko);

                        now = rd_clock();
                        if (cnt.t_last + dispintvl <= now &&
                            cnt.t_start + 1000000 < now) {
                                printf("%% %"PRIu64" messages and %"PRIu64
                                       " bytes: %"PRIu64" msgs/s and "
                                       "%.2f Mb/s\n",
                                       cnt.msgs, cnt.bytes,
                                       (cnt.msgs /
                                        ((now - cnt.t_start) / 1000)) * 1000,
                                       (float)(cnt.bytes /
                                               ((now - cnt.t_start) / 1000)));
                                cnt.t_last = now;
                        }
                }
                cnt.t_end = rd_clock();

                /* Destroy the handle */
                rd_kafka_destroy(rk);

                dirstr = "received";
        }

        if (cnt.t_end_send)
                cnt.t_total = cnt.t_end_send - cnt.t_start;
        else
                cnt.t_total = cnt.t_end - cnt.t_start;

        printf("%% %"PRIu64" messages and %"PRIu64" bytes "
               "%s in %"PRIu64"ms: %"PRIu64" msgs/s and %.02f Mb/s\n",
               cnt.msgs, cnt.bytes, dirstr, cnt.t_total / 1000,
               (cnt.msgs / (cnt.t_total / 1000)) * 1000,
               (float)(cnt.bytes / (cnt.t_total / 1000)));

        if (cnt.t_latency)
                printf("%% Average application fetch latency: %"PRIu64"us\n",
                       cnt.t_latency / cnt.msgs);

        return 0;
}
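The socket-drain wait in the producer path above is an artifact of the 0.7 wire protocol's lack of produce acknowledgements. With the 0.8+ protocol, delivery reports make that heuristic unnecessary; a sketch of the equivalent modern shutdown sequence:

/* Drain the out-queue, then destroy the handle and wait for its
 * background threads to terminate. */
while (rd_kafka_outq_len(rk) > 0)
        rd_kafka_poll(rk, 100);
rd_kafka_destroy(rk);
rd_kafka_wait_destroyed(5000); /* timeout in ms */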