Example #1
int main (int argc, char **argv) {

        if (argc < 0 /* always false */) {
                rd_kafka_version();
                rd_kafka_version_str();
                rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR);
                rd_kafka_errno2err(EINVAL);
                rd_kafka_conf_new();
                rd_kafka_conf_destroy(NULL);
                rd_kafka_conf_dup(NULL);
                rd_kafka_conf_set(NULL, NULL, NULL, NULL, 0);
                rd_kafka_conf_set_dr_cb(NULL, NULL);
                rd_kafka_conf_set_error_cb(NULL, NULL);
                rd_kafka_conf_set_stats_cb(NULL, NULL);
                rd_kafka_conf_set_opaque(NULL, NULL);
                rd_kafka_conf_dump(NULL, NULL);
                rd_kafka_topic_conf_dump(NULL, NULL);
                rd_kafka_conf_dump_free(NULL, 0);
                rd_kafka_conf_properties_show(NULL);
                rd_kafka_topic_conf_new();
                rd_kafka_topic_conf_dup(NULL);
                rd_kafka_topic_conf_destroy(NULL);
                rd_kafka_topic_conf_set(NULL, NULL, NULL, NULL, 0);
                rd_kafka_topic_conf_set_opaque(NULL, NULL);
                rd_kafka_topic_conf_set_partitioner_cb(NULL, NULL);
                rd_kafka_topic_partition_available(NULL, 0);
                rd_kafka_msg_partitioner_random(NULL, NULL, 0, 0, NULL, NULL);
                rd_kafka_new(0, NULL, NULL, 0);
                rd_kafka_destroy(NULL);
                rd_kafka_name(NULL);
                rd_kafka_topic_new(NULL, NULL, NULL);
                rd_kafka_topic_destroy(NULL);
                rd_kafka_topic_name(NULL);
                rd_kafka_message_destroy(NULL);
                rd_kafka_message_errstr(NULL);
                rd_kafka_consume_start(NULL, 0, 0);
                rd_kafka_consume_stop(NULL, 0);
                rd_kafka_consume(NULL, 0, 0);
                rd_kafka_consume_batch(NULL, 0, 0, NULL, 0);
                rd_kafka_consume_callback(NULL, 0, 0, NULL, NULL);
                rd_kafka_offset_store(NULL, 0, 0);
                rd_kafka_produce(NULL, 0, 0, NULL, 0, NULL, 0, NULL);
                rd_kafka_poll(NULL, 0);
                rd_kafka_brokers_add(NULL, NULL);
                rd_kafka_set_logger(NULL, NULL);
                rd_kafka_set_log_level(NULL, 0);
                rd_kafka_log_print(NULL, 0, NULL, NULL);
                rd_kafka_log_syslog(NULL, 0, NULL, NULL);
                rd_kafka_outq_len(NULL);
                rd_kafka_dump(NULL, NULL);
                rd_kafka_thread_cnt();
                rd_kafka_wait_destroyed(0);
        }


	return 0;
}
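
Example #1 only references the API symbols from a branch that never executes, so it checks linkage rather than behaviour. For comparison, here is a minimal sketch of how rd_kafka_conf_set_stats_cb() is typically wired up in real code; the include path, the callback name and the 5-second interval are illustrative assumptions, not taken from the example above.

#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Statistics callback: librdkafka delivers the stats as a JSON string.
 * Returning 0 tells librdkafka to free the json buffer itself. */
static int print_stats_cb (rd_kafka_t *rk, char *json, size_t json_len,
                           void *opaque) {
        fprintf(stderr, "%% Stats (%zu bytes): %.*s\n",
                json_len, (int)json_len, json);
        return 0;
}

int stats_setup_sketch (void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        rd_kafka_t *rk;

        /* Statistics are only emitted when statistics.interval.ms is set. */
        if (rd_kafka_conf_set(conf, "statistics.interval.ms", "5000",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "%% %s\n", errstr);
                rd_kafka_conf_destroy(conf);
                return -1;
        }

        rd_kafka_conf_set_stats_cb(conf, print_stats_cb);

        /* rd_kafka_new() takes ownership of conf on success. */
        if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                                errstr, sizeof(errstr)))) {
                fprintf(stderr, "%% rd_kafka_new failed: %s\n", errstr);
                rd_kafka_conf_destroy(conf);
                return -1;
        }

        /* The stats callback is served from rd_kafka_poll(). */
        rd_kafka_poll(rk, 1000);

        rd_kafka_destroy(rk);
        return 0;
}
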
Example #2
/**
 * Enable statistics with a set interval, make sure the stats callbacks are
 * called within reasonable intervals.
 */
static void do_test_stats_timer (void) {
    rd_kafka_t *rk;
    rd_kafka_conf_t *conf;
    const int exp_calls = 10;
    char errstr[512];
    struct state state;
    test_timing_t t_new;

    memset(&state, 0, sizeof(state));

    state.interval = 600*1000;

    test_conf_init(&conf, NULL, 200);

    test_conf_set(conf, "statistics.interval.ms", "600");
    rd_kafka_conf_set_stats_cb(conf, stats_cb);
    rd_kafka_conf_set_opaque(conf, &state);


    TIMING_START(&t_new, "rd_kafka_new()");
    rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
    TIMING_STOP(&t_new);
    if (!rk)
        TEST_FAIL("Failed to create instance: %s\n", errstr);

    TEST_SAY("Starting wait loop for %d expected stats_cb calls "
             "with an interval of %dms\n",
             exp_calls, state.interval/1000);


    while (state.calls < exp_calls) {
        test_timing_t t_poll;
        TIMING_START(&t_poll, "rd_kafka_poll()");
        rd_kafka_poll(rk, 100);
        TIMING_STOP(&t_poll);

        if (TIMING_DURATION(&t_poll) > 150*1000)
            TEST_WARN("rd_kafka_poll(rk,100) "
                      "took more than 50%% extra\n");
    }

    rd_kafka_destroy(rk);

    if (state.calls > exp_calls)
        TEST_SAY("Got more calls than expected: %d > %d\n",
                 state.calls, exp_calls);

    if (state.fails)
        TEST_FAIL("%d/%d intervals failed\n", state.fails, state.calls);
    else
        TEST_SAY("All %d intervals okay\n", state.calls);
}
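
The test above depends on a struct state and a stats_cb that are defined elsewhere in the test file and not shown here. A hedged sketch of what they might look like follows; the field names, the test_clock() helper and the 0.5x-2x tolerance window are assumptions made for illustration.

struct state {
    int calls;          /* stats callbacks seen so far               */
    int fails;          /* intervals outside the accepted window     */
    int interval;       /* expected interval between callbacks (us)  */
    int64_t ts_last;    /* timestamp of the previous callback (us)   */
};

static int stats_cb (rd_kafka_t *rk, char *json, size_t json_len,
                     void *opaque) {
    struct state *state = opaque;  /* set via rd_kafka_conf_set_opaque() */
    int64_t now = test_clock();    /* microsecond clock helper from the
                                    * test framework (assumed) */

    if (state->ts_last) {
        int64_t elapsed = now - state->ts_last;

        /* Accept anything between half and twice the configured interval;
         * count everything else as a failed interval. */
        if (elapsed < state->interval / 2 ||
            elapsed > state->interval * 2)
            state->fails++;
    }

    state->ts_last = now;
    state->calls++;

    return 0;  /* let librdkafka free the json buffer */
}
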
Example #3
void RdKafka::HandleImpl::set_common_config (RdKafka::ConfImpl *confimpl) {

    rd_kafka_conf_set_opaque(confimpl->rk_conf_, this);

    if (confimpl->event_cb_) {
        rd_kafka_conf_set_log_cb(confimpl->rk_conf_,
                                 RdKafka::log_cb_trampoline);
        rd_kafka_conf_set_error_cb(confimpl->rk_conf_,
                                   RdKafka::error_cb_trampoline);
        rd_kafka_conf_set_throttle_cb(confimpl->rk_conf_,
                                      RdKafka::throttle_cb_trampoline);
        rd_kafka_conf_set_stats_cb(confimpl->rk_conf_,
                                   RdKafka::stats_cb_trampoline);
        event_cb_ = confimpl->event_cb_;
    }

    if (confimpl->socket_cb_) {
        rd_kafka_conf_set_socket_cb(confimpl->rk_conf_,
                                    RdKafka::socket_cb_trampoline);
        socket_cb_ = confimpl->socket_cb_;
    }

    if (confimpl->open_cb_) {
#ifndef _MSC_VER
        rd_kafka_conf_set_open_cb(confimpl->rk_conf_, RdKafka::open_cb_trampoline);
        open_cb_ = confimpl->open_cb_;
#endif
    }

    if (confimpl->rebalance_cb_) {
        rd_kafka_conf_set_rebalance_cb(confimpl->rk_conf_,
                                       RdKafka::rebalance_cb_trampoline);
        rebalance_cb_ = confimpl->rebalance_cb_;
    }

    if (confimpl->offset_commit_cb_) {
        rd_kafka_conf_set_offset_commit_cb(confimpl->rk_conf_,
                                           RdKafka::offset_commit_cb_trampoline);
        offset_commit_cb_ = confimpl->offset_commit_cb_;
    }

    if (confimpl->consume_cb_) {
        rd_kafka_conf_set_consume_cb(confimpl->rk_conf_,
                                     RdKafka::consume_cb_trampoline);
        consume_cb_ = confimpl->consume_cb_;
    }

}
Example #4
/**
 * Creates and sets up Kafka configuration objects.
 * Reads the local "test.conf" file if it exists.
 */
void test_conf_init (rd_kafka_conf_t **conf, rd_kafka_topic_conf_t **topic_conf,
		     int timeout) {
        char buf[512];
	const char *test_conf =
#ifndef _MSC_VER
		getenv("RDKAFKA_TEST_CONF") ? getenv("RDKAFKA_TEST_CONF") : 
#endif
		"test.conf";

        if (conf) {
#ifndef _MSC_VER
                char *tmp;
#endif

                *conf = rd_kafka_conf_new();
                rd_kafka_conf_set_error_cb(*conf, test_error_cb);
                rd_kafka_conf_set_stats_cb(*conf, test_stats_cb);

#ifndef _MSC_VER
                if ((tmp = getenv("TEST_DEBUG")) && *tmp)
                        test_conf_set(*conf, "debug", tmp);
#endif

#ifdef SIGIO
                /* Quick termination */
                rd_snprintf(buf, sizeof(buf), "%i", SIGIO);
                rd_kafka_conf_set(*conf, "internal.termination.signal",
                                  buf, NULL, 0);
                signal(SIGIO, SIG_IGN);
#endif
        }

	if (topic_conf)
		*topic_conf = rd_kafka_topic_conf_new();

	/* Open and read optional local test configuration file, if any. */
        test_read_conf_file(test_conf,
                            conf ? *conf : NULL,
                            topic_conf ? *topic_conf : NULL, &timeout);

        if (timeout)
                test_timeout_set(timeout);
}
Example #5
int main (int argc, char **argv) {
	char *brokers = "localhost";
	char mode = 'C';
	char *topic = NULL;
	const char *key = NULL;
	int partition = RD_KAFKA_PARTITION_UA; /* random */
	int opt;
	int msgcnt = -1;
	int sendflags = 0;
	char *msgpattern = "librdkafka_performance testing!";
	int msgsize = strlen(msgpattern);
	const char *debug = NULL;
	rd_ts_t now;
	char errstr[512];
	uint64_t seq = 0;
	int seed = time(NULL);
	rd_kafka_topic_t *rkt;
	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *topic_conf;
	const char *compression = "no";
	int64_t start_offset = 0;
	int batch_size = 0;

	/* Kafka configuration */
	conf = rd_kafka_conf_new();
	rd_kafka_conf_set_error_cb(conf, err_cb);
	rd_kafka_conf_set_dr_cb(conf, msg_delivered);

	/* Producer config */
	rd_kafka_conf_set(conf, "queue.buffering.max.messages", "500000",
			  NULL, 0);
	rd_kafka_conf_set(conf, "message.send.max.retries", "3", NULL, 0);
	rd_kafka_conf_set(conf, "retry.backoff.ms", "500", NULL, 0);
	
	/* Consumer config */
	/* Tell rdkafka to (try to) maintain 1M messages
	 * in its internal receive buffers. This is to avoid
	 * application -> rdkafka -> broker  per-message ping-pong
	 * latency.
	 * The larger the local queue, the higher the performance.
	 * Try other values with: ... -X queued.min.messages=1000
	 */
	rd_kafka_conf_set(conf, "queued.min.messages", "1000000", NULL, 0);



	/* Kafka topic configuration */
	topic_conf = rd_kafka_topic_conf_new();
	rd_kafka_topic_conf_set(topic_conf, "message.timeout.ms", "5000",
				NULL, 0);

	while ((opt =
		getopt(argc, argv,
		       "PCt:p:b:s:k:c:fi:Dd:m:S:x:R:a:z:o:X:B:eT:q")) != -1) {
		switch (opt) {
		case 'P':
		case 'C':
			mode = opt;
			break;
		case 't':
			topic = optarg;
			break;
		case 'p':
			partition = atoi(optarg);
			break;
		case 'b':
			brokers = optarg;
			break;
		case 's':
			msgsize = atoi(optarg);
			break;
		case 'k':
			key = optarg;
			break;
		case 'c':
			msgcnt = atoi(optarg);
			break;
		case 'D':
			sendflags |= RD_KAFKA_MSG_F_FREE;
			break;
		case 'i':
			dispintvl = atoi(optarg);
			break;
		case 'm':
			msgpattern = optarg;
			break;
		case 'S':
			seq = strtoull(optarg, NULL, 10);
			do_seq = 1;
			break;
		case 'x':
			exit_after = atoi(optarg);
			break;
		case 'R':
			seed = atoi(optarg);
			break;
		case 'a':
			if (rd_kafka_topic_conf_set(topic_conf,
						    "request.required.acks",
						    optarg,
						    errstr, sizeof(errstr)) !=
			    RD_KAFKA_CONF_OK) {
				fprintf(stderr, "%% %s\n", errstr);
				exit(1);
			}
			break;
		case 'B':
			batch_size = atoi(optarg);
			break;
		case 'z':
			if (rd_kafka_conf_set(conf, "compression.codec",
					      optarg,
					      errstr, sizeof(errstr)) !=
			    RD_KAFKA_CONF_OK) {
				fprintf(stderr, "%% %s\n", errstr);
				exit(1);
			}
			compression = optarg;
			break;
		case 'o':
			start_offset = strtoll(optarg, NULL, 10);
			break;
		case 'e':
			exit_eof = 1;
			break;
		case 'd':
			debug = optarg;
			break;
		case 'X':
		{
			char *name, *val;
			rd_kafka_conf_res_t res;

			if (!strcmp(optarg, "list") ||
			    !strcmp(optarg, "help")) {
				rd_kafka_conf_properties_show(stdout);
				exit(0);
			}

			name = optarg;
			if (!(val = strchr(name, '='))) {
				fprintf(stderr, "%% Expected "
					"-X property=value, not %s\n", name);
				exit(1);
			}

			*val = '\0';
			val++;

			res = RD_KAFKA_CONF_UNKNOWN;
			/* Try "topic." prefixed properties on topic
			 * conf first, and then fall through to global if
			 * it didn't match a topic configuration property. */
			if (!strncmp(name, "topic.", strlen("topic.")))
				res = rd_kafka_topic_conf_set(topic_conf,
							      name+
							      strlen("topic"),
							      val,
							      errstr,
							      sizeof(errstr));

			if (res == RD_KAFKA_CONF_UNKNOWN)
				res = rd_kafka_conf_set(conf, name, val,
							errstr, sizeof(errstr));

			if (res != RD_KAFKA_CONF_OK) {
				fprintf(stderr, "%% %s\n", errstr);
				exit(1);
			}
		}
		break;

		case 'T':
			if (rd_kafka_conf_set(conf, "statistics.interval.ms",
					      optarg, errstr, sizeof(errstr)) !=
			    RD_KAFKA_CONF_OK) {
				fprintf(stderr, "%% %s\n", errstr);
				exit(1);
			}
			rd_kafka_conf_set_stats_cb(conf, stats_cb);
			break;

		case 'q':
			quiet = 1;
			break;

		default:
			goto usage;
		}
	}

	if (!topic || optind != argc) {
	usage:
		fprintf(stderr,
			"Usage: %s [-C|-P] -t <topic> "
			"[-p <partition>] [-b <broker,broker..>] [options..]\n"
			"\n"
			" Options:\n"
			"  -C | -P      Consumer or Producer mode\n"
			"  -t <topic>   Topic to fetch / produce\n"
			"  -p <num>     Partition (defaults to random)\n"
			"  -b <brokers> Broker address list (host[:port],..)\n"
			"  -s <size>    Message size (producer)\n"
			"  -k <key>     Message key (producer)\n"
			"  -c <cnt>     Messages to transmit/receive\n"
			"  -D           Copy/Duplicate data buffer (producer)\n"
			"  -i <ms>      Display interval\n"
			"  -m <msg>     Message payload pattern\n"
			"  -S <start>   Send a sequence number starting at "
			"<start> as payload\n"
			"  -R <seed>    Random seed value (defaults to time)\n"
			"  -a <acks>    Required acks (producer): "
			"-1, 0, 1, >1\n"
			"  -B <size>    Consume batch size (# of msgs)\n"
			"  -z <codec>   Enable compression:\n"
			"               none|gzip|snappy\n"
			"  -o <offset>  Start offset (consumer)\n"
			"  -d [facs..]  Enable debugging contexts:\n"
			"               %s\n"
			"  -X <prop=name> Set arbitrary librdkafka "
			"configuration property\n"
			"               Properties prefixed with \"topic.\" "
			"will be set on topic object.\n"
			"               Use '-X list' to see the full list\n"
			"               of supported properties.\n"
			"  -T <intvl>   Enable statistics from librdkafka at "
			"specified interval (ms)\n"
			"  -q           Be more quiet\n"
			"\n"
			" In Consumer mode:\n"
			"  consumes messages and prints thruput\n"
			"  If -B <..> is supplied the batch consumer\n"
			"  mode is used, else the callback mode is used.\n"
			"\n"
			" In Producer mode:\n"
			"  writes messages of size -s <..> and prints thruput\n"
			"\n",
			argv[0],
			RD_KAFKA_DEBUG_CONTEXTS);
		exit(1);
	}


	dispintvl *= 1000; /* us */

	printf("%% Using random seed %i\n", seed);
	srand(seed);
	signal(SIGINT, stop);
	signal(SIGUSR1, sig_usr1);


	if (debug &&
	    rd_kafka_conf_set(conf, "debug", debug, errstr, sizeof(errstr)) !=
	    RD_KAFKA_CONF_OK) {
		printf("%% Debug configuration failed: %s: %s\n",
		       errstr, debug);
		exit(1);
	}

	/* Socket hangups are gracefully handled in librdkafka on socket error
	 * without the use of signals, so SIGPIPE should be ignored by the
	 * calling program. */
	signal(SIGPIPE, SIG_IGN);

	if (msgcnt != -1)
		forever = 0;

	if (mode == 'P') {
		/*
		 * Producer
		 */
		char *sbuf;
		char *pbuf;
		int outq;
		int i;
		int keylen = key ? strlen(key) : 0;
		off_t rof = 0;
		size_t plen = strlen(msgpattern);

		if (do_seq) {
			if (msgsize < strlen("18446744073709551615: ")+1)
				msgsize = strlen("18446744073709551615: ")+1;
			/* Force duplication of payload */
			sendflags |= RD_KAFKA_MSG_F_FREE;
		}

		sbuf = malloc(msgsize);

		/* Copy payload content to new buffer */
		while (rof < msgsize) {
			size_t xlen = RD_MIN(msgsize-rof, plen);
			memcpy(sbuf+rof, msgpattern, xlen);
			rof += xlen;
		}

		if (msgcnt == -1)
			printf("%% Sending messages of size %i bytes\n",
			       msgsize);
		else
			printf("%% Sending %i messages of size %i bytes\n",
			       msgcnt, msgsize);

		/* Create Kafka handle */
		if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
 					errstr, sizeof(errstr)))) {
			fprintf(stderr,
				"%% Failed to create Kafka producer: %s\n",
				errstr);
			exit(1);
		}

		if (debug)
			rd_kafka_set_log_level(rk, 7);

		/* Add broker(s) */
		if (rd_kafka_brokers_add(rk, brokers) < 1) {
			fprintf(stderr, "%% No valid brokers specified\n");
			exit(1);
		}

		/* Explicitly create topic to avoid per-msg lookups. */
		rkt = rd_kafka_topic_new(rk, topic, topic_conf);

		cnt.t_start = rd_clock();

		while (run && (msgcnt == -1 || cnt.msgs < msgcnt)) {
			/* Send/Produce message. */

			if (do_seq) {
				snprintf(sbuf, msgsize-1, "%"PRIu64": ", seq);
				seq++;
			}

			if (sendflags & RD_KAFKA_MSG_F_FREE) {
				/* Duplicate memory */
				pbuf = malloc(msgsize);
				memcpy(pbuf, sbuf, msgsize);
			} else
				pbuf = sbuf;

			cnt.tx++;
			while (run &&
			       rd_kafka_produce(rkt, partition,
						sendflags, pbuf, msgsize,
						key, keylen, NULL) == -1) {
				if (!quiet || errno != ENOBUFS)
					printf("produce error: %s%s\n",
					       strerror(errno),
					       errno == ENOBUFS ?
					       " (backpressure)":"");
				cnt.tx_err++;
				if (errno != ENOBUFS) {
					run = 0;
					break;
				}
				now = rd_clock();
				if (cnt.t_last + dispintvl <= now) {
					printf("%% Backpressure %i "
					       "(tx %"PRIu64", "
					       "txerr %"PRIu64")\n",
					       rd_kafka_outq_len(rk),
					       cnt.tx, cnt.tx_err);
					cnt.t_last = now;
				}
				/* Poll to handle delivery reports */
				rd_kafka_poll(rk, 10);
			}

			msgs_wait_cnt++;
			cnt.msgs++;
			cnt.bytes += msgsize;

			print_stats(mode, 0, compression);

			/* Must poll to handle delivery reports */
			rd_kafka_poll(rk, 0);
			
		}

		forever = 0;
		printf("All messages produced, "
		       "now waiting for %li deliveries\n",
		       msgs_wait_cnt);
		rd_kafka_dump(stdout, rk);

		/* Wait for messages to be delivered */
		i = 0;
		while (run && rd_kafka_poll(rk, 1000) != -1) {
			if (!(i++ % (dispintvl/1000)))
				printf("%% Waiting for %li, "
				       "%i messages in outq "
				       "to be sent. Abort with Ctrl-c\n",
				       msgs_wait_cnt,
				       rd_kafka_outq_len(rk));
		}


		outq = rd_kafka_outq_len(rk);
		printf("%% %i messages in outq\n", outq);
		cnt.msgs -= outq;
		cnt.bytes -= msgsize * outq;

		cnt.t_end = t_end;

		if (cnt.tx_err > 0)
			printf("%% %"PRIu64" backpressures for %"PRIu64
			       " produce calls: %.3f%% backpressure rate\n",
			       cnt.tx_err, cnt.tx,
			       ((double)cnt.tx_err / (double)cnt.tx) * 100.0);

		rd_kafka_dump(stdout, rk);

		/* Destroy the handle */
		rd_kafka_destroy(rk);

	} else if (mode == 'C') {
		/*
		 * Consumer
		 */

		rd_kafka_message_t **rkmessages = NULL;

#if 0 /* Future API */
		/* The offset storage file is optional but its presence
		 * avoids starting all over from offset 0 again when
		 * the program restarts.
		 * ZooKeeper functionality will be implemented in future
		 * versions and then the offset will be stored there instead. */
		conf.consumer.offset_file = "."; /* current directory */

		/* Indicate to rdkafka that the application is responsible
		 * for storing the offset. This allows the application to
		 * successfully handle a message before storing the offset.
		 * If this flag is not set rdkafka will store the offset
		 * just prior to returning the message from rd_kafka_consume().
		 */
		conf.flags |= RD_KAFKA_CONF_F_APP_OFFSET_STORE;
#endif

		/* Create Kafka handle */
		if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf,
					errstr, sizeof(errstr)))) {
			fprintf(stderr,
				"%% Failed to create Kafka producer: %s\n",
				errstr);
			exit(1);
		}

		if (debug)
			rd_kafka_set_log_level(rk, 7);

		/* Add broker(s) */
		if (rd_kafka_brokers_add(rk, brokers) < 1) {
			fprintf(stderr, "%% No valid brokers specified\n");
			exit(1);
		}

		/* Create topic to consume from */
		rkt = rd_kafka_topic_new(rk, topic, topic_conf);

		/* Batch consumer */
		if (batch_size)
			rkmessages = malloc(sizeof(*rkmessages) * batch_size);

		/* Start consuming */
		if (rd_kafka_consume_start(rkt, partition, start_offset) == -1){
			fprintf(stderr, "%% Failed to start consuming: %s\n",
				strerror(errno));
			exit(1);
		}
		
		cnt.t_start = rd_clock();
		while (run && (msgcnt == -1 || msgcnt > cnt.msgs)) {
			/* Consume messages.
			 * A message may either be a real message, or
			 * an error signaling (if rkmessage->err is set).
			 */
			uint64_t latency;
			int r;

			latency = rd_clock();
			
			if (batch_size) {
				int i;

				/* Batch fetch mode */
				r = rd_kafka_consume_batch(rkt, partition,
							   1000,
							   rkmessages,
							   batch_size);
				if (r != -1) {
					for (i = 0 ; i < r ; i++) {
						msg_consume(rkmessages[i],NULL);
						rd_kafka_message_destroy(
							rkmessages[i]);
					}
				}
			} else {
				/* Callback mode */
				r = rd_kafka_consume_callback(rkt, partition,
							      1000/*timeout*/,
							      msg_consume,
							      NULL);
			}

			cnt.t_latency += rd_clock() - latency;
			
			if (r == -1)
				fprintf(stderr, "%% Error: %s\n",
					strerror(errno));

			print_stats(mode, 0, compression);

			/* Poll to handle stats callbacks */
			rd_kafka_poll(rk, 0);
		}
		cnt.t_end = rd_clock();

		/* Stop consuming */
		rd_kafka_consume_stop(rkt, partition);

		/* Destroy topic */
		rd_kafka_topic_destroy(rkt);

		if (batch_size)
			free(rkmessages);

		/* Destroy the handle */
		rd_kafka_destroy(rk);

	}

	print_stats(mode, 1, compression);

	if (cnt.t_latency && cnt.msgs)
		printf("%% Average application fetch latency: %"PRIu64"us\n",
		       cnt.t_latency / cnt.msgs);

	/* Let background threads clean up and terminate cleanly. */
	rd_kafka_wait_destroyed(2000);

	return 0;
}
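
The program above registers err_cb, msg_delivered and, with -T, stats_cb, none of which appear in the snippet. Below is a hedged sketch of what the error and statistics callbacks could look like; the bodies are illustrative and not the tool's actual implementation.

/* Error callback: invoked from rd_kafka_poll() on e.g. broker failures. */
static void err_cb (rd_kafka_t *rk, int err, const char *reason,
		    void *opaque) {
	fprintf(stderr, "%% ERROR CALLBACK: %s: %s: %s\n",
		rd_kafka_name(rk),
		rd_kafka_err2str((rd_kafka_resp_err_t)err),
		reason ? reason : "");
}

/* Statistics callback: enabled by -T <intvl>, which sets
 * statistics.interval.ms and registers this callback on the conf. */
static int stats_cb (rd_kafka_t *rk, char *json, size_t json_len,
		     void *opaque) {
	printf("%% Statistics (%zu bytes):\n%.*s\n",
	       json_len, (int)json_len, json);
	return 0;	/* librdkafka frees the json buffer */
}
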
Example #6
/* transmitter worker */
void *kaf_worker(void *thread_id) {
  char buf[MAX_BUF], *b;
  int rc=-1, nr, len, l, count=0,kr;

  /* kafka connection setup */
  char errstr[512];
  rd_kafka_t *k;
  rd_kafka_topic_t *t;
  rd_kafka_conf_t *conf;
  rd_kafka_topic_conf_t *topic_conf;
  int partition = RD_KAFKA_PARTITION_UA;
  char *key = NULL;
  int keylen = key ? strlen(key) : 0;

  /* set up global options */
  conf = rd_kafka_conf_new();
  rd_kafka_conf_set_error_cb(conf, err_cb);
  //rd_kafka_conf_set_throttle_cb(conf, throttle_cb);
  rd_kafka_conf_set_stats_cb(conf, stats_cb);
  kr = rd_kafka_conf_set(conf, "statistics.interval.ms", "60000", errstr, sizeof(errstr));
  if (kr != RD_KAFKA_CONF_OK) {
    fprintf(stderr,"error: rd_kafka_conf_set: statistics.interval.ms 60000 => %s\n", errstr);
    goto done;
  }
  char **opt=NULL;
  while( (opt=(char **)utarray_next(CF.rdkafka_options,opt))) {
    char *eq = strchr(*opt,'=');
    if (eq == NULL) {
      fprintf(stderr,"error: specify rdkafka params as key=value\n");
      goto done;
    }
    char *k = strdup(*opt), *v;
    k[eq-*opt] = '\0';
    v = &k[eq-*opt + 1];
    if (CF.verbose) fprintf(stderr,"setting %s %s\n", k, v); 
    kr = rd_kafka_conf_set(conf, k, v, errstr, sizeof(errstr));
    if (kr != RD_KAFKA_CONF_OK) {
      fprintf(stderr,"error: rd_kafka_conf_set: %s %s => %s\n", k, v, errstr);
      goto done;
    }
    free(k);
  }

  
  /* set up topic options */
  topic_conf = rd_kafka_topic_conf_new();
  opt=NULL;
  while( (opt=(char **)utarray_next(CF.rdkafka_topic_options,opt))) {
    char *eq = strchr(*opt,'=');
    if (eq == NULL) {
      fprintf(stderr,"error: specify rdkafka topic params as key=value\n");
      goto done;
    }
    char *k = strdup(*opt), *v;
    k[eq-*opt] = '\0';
    v = &k[eq-*opt + 1];
    if (CF.verbose) fprintf(stderr,"setting %s %s\n", k, v); 
    kr = rd_kafka_topic_conf_set(topic_conf, k, v, errstr, sizeof(errstr));
    if (kr != RD_KAFKA_CONF_OK) {
      fprintf(stderr,"error: rd_kafka_conf_set: %s %s => %s\n", k, v, errstr);
      goto done;
    }
    free(k);
  }


  k = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
  if (k == NULL) {
    fprintf(stderr, "rd_kafka_new: %s\n", errstr);
    goto done;
  }

  if (rd_kafka_brokers_add(k, CF.broker) < 1) {
    fprintf(stderr, "invalid broker\n");
    goto done;
  }

  t = rd_kafka_topic_new(k, CF.topic, topic_conf);

  while (CF.shutdown == 0) {
    len = nn_recv(CF.egress_socket_pull, buf, MAX_BUF, 0);
    if (len < 0) {
      fprintf(stderr,"nn_recv: %s\n", nn_strerror(errno));
      goto done;
    }

    rc = rd_kafka_produce(t, partition, RD_KAFKA_MSG_F_COPY, buf, len, 
                     key, keylen, NULL);
    if (rc == -1) {
      fprintf(stderr,"rd_kafka_produce: %s %s\n", 
        rd_kafka_err2str( rd_kafka_errno2err(errno)), 
        ((errno == ENOBUFS) ? "(backpressure)" : ""));
      goto done;
    }

    // cause rdkafka to invoke optional callbacks (msg delivery reports or error)
    if ((++count % 1000) == 0) rd_kafka_poll(k, 0);
    
    if (thread_id == 0) {
      /* only emit these stats from the first worker thread (not thread safe) */
      ts_add(CF.kaf_bytes_ts, CF.now, &len);
      ts_add(CF.kaf_msgs_ts, CF.now, NULL);
    }
  }

  rc = 0;

 done:
  CF.shutdown = 1;
  return NULL;
}
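
Note that the worker above exits without draining the producer: anything still in the outbound queue when done: is reached is simply dropped with the handle. A hypothetical teardown helper, built only from calls already shown in these examples, could look like this.

/* Hypothetical helper (not part of the original worker): serve delivery
 * reports until the outbound queue is empty, then release the topic and
 * handle. The 100-iteration bound is an arbitrary safety valve. */
static void kaf_drain_and_destroy(rd_kafka_t *k, rd_kafka_topic_t *t) {
  int tries = 0;

  while (rd_kafka_outq_len(k) > 0 && tries++ < 100)
    rd_kafka_poll(k, 100);  /* serves delivery-report/error/stats callbacks */

  rd_kafka_topic_destroy(t);
  rd_kafka_destroy(k);

  /* Give librdkafka's background threads a moment to terminate. */
  rd_kafka_wait_destroyed(2000);
}
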
Example #7
int main_0006_symbols (int argc, char **argv) {

        if (argc < 0 /* always false */) {
                rd_kafka_version();
                rd_kafka_version_str();
		rd_kafka_get_debug_contexts();
		rd_kafka_get_err_descs(NULL, NULL);
                rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR);
		rd_kafka_err2name(RD_KAFKA_RESP_ERR_NO_ERROR);
                rd_kafka_errno2err(EINVAL);
                rd_kafka_errno();
		rd_kafka_last_error();
                rd_kafka_conf_new();
                rd_kafka_conf_destroy(NULL);
                rd_kafka_conf_dup(NULL);
                rd_kafka_conf_set(NULL, NULL, NULL, NULL, 0);
                rd_kafka_conf_set_dr_cb(NULL, NULL);
                rd_kafka_conf_set_dr_msg_cb(NULL, NULL);
                rd_kafka_conf_set_error_cb(NULL, NULL);
                rd_kafka_conf_set_stats_cb(NULL, NULL);
                rd_kafka_conf_set_log_cb(NULL, NULL);
                rd_kafka_conf_set_socket_cb(NULL, NULL);
		rd_kafka_conf_set_rebalance_cb(NULL, NULL);
		rd_kafka_conf_set_offset_commit_cb(NULL, NULL);
		rd_kafka_conf_set_throttle_cb(NULL, NULL);
		rd_kafka_conf_set_default_topic_conf(NULL, NULL);
		rd_kafka_conf_get(NULL, NULL, NULL, NULL);
#ifndef _MSC_VER
		rd_kafka_conf_set_open_cb(NULL, NULL);
#endif
		rd_kafka_conf_set_opaque(NULL, NULL);
                rd_kafka_opaque(NULL);
                rd_kafka_conf_dump(NULL, NULL);
                rd_kafka_topic_conf_dump(NULL, NULL);
                rd_kafka_conf_dump_free(NULL, 0);
                rd_kafka_conf_properties_show(NULL);
                rd_kafka_topic_conf_new();
                rd_kafka_topic_conf_dup(NULL);
                rd_kafka_topic_conf_destroy(NULL);
                rd_kafka_topic_conf_set(NULL, NULL, NULL, NULL, 0);
                rd_kafka_topic_conf_set_opaque(NULL, NULL);
		rd_kafka_topic_conf_get(NULL, NULL, NULL, NULL);
                rd_kafka_topic_conf_set_partitioner_cb(NULL, NULL);
                rd_kafka_topic_partition_available(NULL, 0);
		rd_kafka_topic_opaque(NULL);
                rd_kafka_msg_partitioner_random(NULL, NULL, 0, 0, NULL, NULL);
                rd_kafka_msg_partitioner_consistent(NULL, NULL, 0, 0, NULL, NULL);
                rd_kafka_msg_partitioner_consistent_random(NULL, NULL, 0, 0, NULL, NULL);
                rd_kafka_new(0, NULL, NULL, 0);
                rd_kafka_destroy(NULL);
                rd_kafka_name(NULL);
		rd_kafka_memberid(NULL);
                rd_kafka_topic_new(NULL, NULL, NULL);
                rd_kafka_topic_destroy(NULL);
                rd_kafka_topic_name(NULL);
                rd_kafka_message_destroy(NULL);
                rd_kafka_message_errstr(NULL);
		rd_kafka_message_timestamp(NULL, NULL);
                rd_kafka_consume_start(NULL, 0, 0);
                rd_kafka_consume_stop(NULL, 0);
                rd_kafka_consume(NULL, 0, 0);
                rd_kafka_consume_batch(NULL, 0, 0, NULL, 0);
                rd_kafka_consume_callback(NULL, 0, 0, NULL, NULL);
                rd_kafka_offset_store(NULL, 0, 0);
                rd_kafka_produce(NULL, 0, 0, NULL, 0, NULL, 0, NULL);
                rd_kafka_produce_batch(NULL, 0, 0, NULL, 0);
                rd_kafka_poll(NULL, 0);
                rd_kafka_brokers_add(NULL, NULL);
                /* DEPRECATED: rd_kafka_set_logger(NULL, NULL); */
                rd_kafka_set_log_level(NULL, 0);
                rd_kafka_log_print(NULL, 0, NULL, NULL);
#ifndef _MSC_VER
                rd_kafka_log_syslog(NULL, 0, NULL, NULL);
#endif
                rd_kafka_outq_len(NULL);
                rd_kafka_dump(NULL, NULL);
                rd_kafka_thread_cnt();
                rd_kafka_wait_destroyed(0);
                rd_kafka_metadata(NULL, 0, NULL, NULL, 0);
                rd_kafka_metadata_destroy(NULL);
                rd_kafka_queue_destroy(NULL);
                rd_kafka_consume_start_queue(NULL, 0, 0, NULL);
                rd_kafka_consume_queue(NULL, 0);
                rd_kafka_consume_batch_queue(NULL, 0, NULL, 0);
                rd_kafka_consume_callback_queue(NULL, 0, NULL, NULL);
                rd_kafka_seek(NULL, 0, 0, 0);
                rd_kafka_yield(NULL);
                rd_kafka_mem_free(NULL, NULL);
                rd_kafka_list_groups(NULL, NULL, NULL, 0);
                rd_kafka_group_list_destroy(NULL);

		/* KafkaConsumer API */
		rd_kafka_subscribe(NULL, NULL);
		rd_kafka_unsubscribe(NULL);
		rd_kafka_subscription(NULL, NULL);
		rd_kafka_consumer_poll(NULL, 0);
		rd_kafka_consumer_close(NULL);
		rd_kafka_assign(NULL, NULL);
		rd_kafka_assignment(NULL, NULL);
		rd_kafka_commit(NULL, NULL, 0);
		rd_kafka_commit_message(NULL, NULL, 0);
                rd_kafka_committed(NULL, NULL, 0);
		rd_kafka_position(NULL, NULL);

		/* TopicPartition */
		rd_kafka_topic_partition_list_new(0);
		rd_kafka_topic_partition_list_destroy(NULL);
		rd_kafka_topic_partition_list_add(NULL, NULL, 0);
		rd_kafka_topic_partition_list_add_range(NULL, NULL, 0, 0);
		rd_kafka_topic_partition_list_del(NULL, NULL, 0);
		rd_kafka_topic_partition_list_del_by_idx(NULL, 0);
		rd_kafka_topic_partition_list_copy(NULL);
		rd_kafka_topic_partition_list_set_offset(NULL, NULL, 0, 0);
		rd_kafka_topic_partition_list_find(NULL, NULL, 0);
		rd_kafka_query_watermark_offsets(NULL, NULL, 0, NULL, NULL, 0);
		rd_kafka_get_watermark_offsets(NULL, NULL, 0, NULL, NULL);
        }


	return 0;
}