Example #1
File: kafka.c Project: dwieland/phpkafka
static
void offset_queue_consume(rd_kafka_message_t *message, void *opaque)
{
    struct consume_cb_params *params = opaque;
    if (params->eop == 0)
        return;
    if (message->err)
    {
        params->error_count += 1;
        if (params->auto_commit == 0)
            rd_kafka_offset_store(
                message->rkt,
                message->partition,
                message->offset == 0 ? 0 : message->offset -1
            );
        if (message->err == RD_KAFKA_RESP_ERR__PARTITION_EOF)
        {
            if (params->partition_offset[message->partition] == -2)
            {//no previous message read from this partition
             //set offset value to last possible value (-1 or last existing)
             //reduce eop count
                params->eop -= 1;
                params->read_count += 1;
                params->partition_offset[message->partition] = message->offset -1;
            }
            if (log_level)
            {
                openlog("phpkafka", 0, LOG_USER);
                syslog(LOG_INFO,
                    "phpkafka - %% Consumer reached end of %s [%"PRId32"] "
                    "message queue at offset %"PRId64"\n",
                    rd_kafka_topic_name(message->rkt),
                    message->partition, message->offset);
            }
        }
        return;
    }
    if (params->partition_offset[message->partition] == -1)
        params->eop -= 1;
    //we have an offset, save it
    params->partition_offset[message->partition] = message->offset;
    //tally read_count
    params->read_count += 1;
    if (params->auto_commit == 0)
        rd_kafka_offset_store(
            message->rkt,
            message->partition,
            message->offset == 0 ? 0 : message->offset -1
        );
}
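offset_queue_consume() above is a callback for librdkafka's legacy consumer API. The following is only a rough sketch of how such a callback might be driven; the topic handle rkt, the partition variable, and the way struct consume_cb_params is initialized are assumptions for illustration and are not taken from the phpkafka source.

/* Hypothetical driver loop (not from phpkafka) */
struct consume_cb_params params;
memset(&params, 0, sizeof(params));
/* ...the caller would fill in eop, auto_commit, partition_offset, etc. here... */

if (rd_kafka_consume_start(rkt, partition, RD_KAFKA_OFFSET_STORED) == -1) {
    /* handle error, e.g. via rd_kafka_errno2err(errno) */
}

while (params.eop > 0)
    /* dispatch messages received within 200 ms to offset_queue_consume() */
    rd_kafka_consume_callback(rkt, partition, 200, offset_queue_consume, &params);

rd_kafka_consume_stop(rkt, partition);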
Example #2
int main (int argc, char **argv) {

        if (argc < 0 /* always false */) {
                rd_kafka_version();
                rd_kafka_version_str();
                rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR);
                rd_kafka_errno2err(EINVAL);
                rd_kafka_conf_new();
                rd_kafka_conf_destroy(NULL);
                rd_kafka_conf_dup(NULL);
                rd_kafka_conf_set(NULL, NULL, NULL, NULL, 0);
                rd_kafka_conf_set_dr_cb(NULL, NULL);
                rd_kafka_conf_set_error_cb(NULL, NULL);
                rd_kafka_conf_set_stats_cb(NULL, NULL);
                rd_kafka_conf_set_opaque(NULL, NULL);
                rd_kafka_conf_dump(NULL, NULL);
                rd_kafka_topic_conf_dump(NULL, NULL);
                rd_kafka_conf_dump_free(NULL, 0);
                rd_kafka_conf_properties_show(NULL);
                rd_kafka_topic_conf_new();
                rd_kafka_topic_conf_dup(NULL);
                rd_kafka_topic_conf_destroy(NULL);
                rd_kafka_topic_conf_set(NULL, NULL, NULL, NULL, 0);
                rd_kafka_topic_conf_set_opaque(NULL, NULL);
                rd_kafka_topic_conf_set_partitioner_cb(NULL, NULL);
                rd_kafka_topic_partition_available(NULL, 0);
                rd_kafka_msg_partitioner_random(NULL, NULL, 0, 0, NULL, NULL);
                rd_kafka_new(0, NULL, NULL, 0);
                rd_kafka_destroy(NULL);
                rd_kafka_name(NULL);
                rd_kafka_topic_new(NULL, NULL, NULL);
                rd_kafka_topic_destroy(NULL);
                rd_kafka_topic_name(NULL);
                rd_kafka_message_destroy(NULL);
                rd_kafka_message_errstr(NULL);
                rd_kafka_consume_start(NULL, 0, 0);
                rd_kafka_consume_stop(NULL, 0);
                rd_kafka_consume(NULL, 0, 0);
                rd_kafka_consume_batch(NULL, 0, 0, NULL, 0);
                rd_kafka_consume_callback(NULL, 0, 0, NULL, NULL);
                rd_kafka_offset_store(NULL, 0, 0);
                rd_kafka_produce(NULL, 0, 0, NULL, 0, NULL, 0, NULL);
                rd_kafka_poll(NULL, 0);
                rd_kafka_brokers_add(NULL, NULL);
                rd_kafka_set_logger(NULL, NULL);
                rd_kafka_set_log_level(NULL, 0);
                rd_kafka_log_print(NULL, 0, NULL, NULL);
                rd_kafka_log_syslog(NULL, 0, NULL, NULL);
                rd_kafka_outq_len(NULL);
                rd_kafka_dump(NULL, NULL);
                rd_kafka_thread_cnt();
                rd_kafka_wait_destroyed(0);
        }


	return 0;
}
Example #3
static void msg_consume (rd_kafka_message_t *rkmessage, void *opaque) {

	if (rkmessage->err) {
		if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
			printf("%% Consumer reached end of %s [%"PRId32"] "
			       "message queue at offset %"PRId64"\n",
			       rd_kafka_topic_name(rkmessage->rkt),
			       rkmessage->partition, rkmessage->offset);

			if (exit_eof)
				run = 0;

			return;
		}

		printf("%% Consume error for topic \"%s\" [%"PRId32"] "
		       "offset %"PRId64": %s\n",
		       rd_kafka_topic_name(rkmessage->rkt),
		       rkmessage->partition,
		       rkmessage->offset,
		       rd_kafka_message_errstr(rkmessage));

		msgs_failed++;
		return;
	}

	cnt.msgs++;
	cnt.bytes += rkmessage->len;

	if (!(cnt.msgs % 1000000))
		printf("@%"PRId64": %.*s\n",
		       rkmessage->offset,
		       (int)rkmessage->len, (char *)rkmessage->payload);
		
#if 0 /* Future API */
	/* We store offset when we're done processing
	 * the current message. */
	rd_kafka_offset_store(rkmessage->rkt, rkmessage->partition,
			      rd_kafka_offset_next(rkmessage));
#endif

}
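The #if 0 block above refers to rd_kafka_offset_next(), which was still a planned API when this example was written. A minimal sketch with the released rd_kafka_offset_store() call, assuming the application is configured to store offsets itself, could look like this (not part of the original example):

/* Sketch: store the offset of the message we just finished processing;
 * librdkafka commits stored offsets according to its commit settings. */
rd_kafka_resp_err_t err = rd_kafka_offset_store(rkmessage->rkt,
                                                rkmessage->partition,
                                                rkmessage->offset);
if (err)
        printf("%% offset_store failed: %s\n", rd_kafka_err2str(err));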
Example #4
File: kafka.c Project: dwieland/phpkafka
static
void queue_consume(rd_kafka_message_t *message, void *opaque)
{
    struct consume_cb_params *params = opaque;
    zval *return_value = params->return_value;
    //all partitions EOF
    if (params->eop < 1)
        return;
    //nothing more to read...
    if (params->read_count == 0)
        return;
    if (message->err)
    {
        params->error_count += 1;
        //if auto-commit is disabled:
        if (params->auto_commit == 0)
            //store offset
            rd_kafka_offset_store(
                message->rkt,
                message->partition,
                message->offset == 0 ? 0 : message->offset -1
            );
        if (message->err == RD_KAFKA_RESP_ERR__PARTITION_EOF)
        {
            if (params->partition_ends[message->partition] == 0)
            {
                params->eop -= 1;
                params->partition_ends[message->partition] = 1;
            }
            if (log_level)
            {
                openlog("phpkafka", 0, LOG_USER);
                syslog(LOG_INFO,
                    "phpkafka - %% Consumer reached end of %s [%"PRId32"] "
                    "message queue at offset %"PRId64"\n",
                    rd_kafka_topic_name(message->rkt),
                    message->partition, message->offset);
            }
            return;
        }
        //add_next_index_string(return_value, rd_kafka_message_errstr(message), 1);
        if (log_level)
        {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_INFO, "phpkafka - %% Consume error for topic \"%s\" [%"PRId32"] "
                "offset %"PRId64": %s\n",
                rd_kafka_topic_name(message->rkt),
                message->partition,
                message->offset,
                rd_kafka_message_errstr(message)
            );
        }
        return;
    }
    //only count successful reads!
    //-1 means read all from offset until end
    if (params->read_count != -1)
        params->read_count -= 1;
    //add message to return value (perhaps add as array -> offset + msg?
    if (message->len > 0) {
        //ensure there is a payload
        //+1 for the terminating NUL byte written by snprintf
        char payload[(int) message->len + 1];
        snprintf(payload, sizeof payload, "%.*s", (int) message->len, (char *) message->payload);
        //add_index_string(return_value, (int) message->offset, payload, 1);
        add_next_index_string(return_value, payload, 1);
    } else {
        add_next_index_string(return_value, "", 1);
    }

    //store offset if autocommit is disabled
    if (params->auto_commit == 0)
        rd_kafka_offset_store(
            message->rkt,
            message->partition,
            message->offset
        );
}
Example #5
int main (int argc, char **argv) {
	rd_kafka_t *rk;
	char *broker = NULL;
	char mode = 'C';
	char *topic = NULL;
	int partition = 0;
	int opt;
	int msgsize = 1024;
	int msgcnt = -1;
	int sendflags = 0;
	int dispintvl = 1000;
	struct {
		rd_ts_t  t_start;
		rd_ts_t  t_end;
		rd_ts_t  t_end_send;
		uint64_t msgs;
		uint64_t bytes;
		rd_ts_t  t_latency;
		rd_ts_t  t_last;
		rd_ts_t  t_total;
	} cnt = {};
	rd_ts_t now;
	char *dirstr = "";

	while ((opt = getopt(argc, argv, "PCt:p:b:s:c:fi:D")) != -1) {
		switch (opt) {
		case 'P':
		case 'C':
			mode = opt;
			break;
		case 't':
			topic = optarg;
			break;
		case 'p':
			partition = atoi(optarg);
			break;
		case 'b':
			broker = optarg;
			break;
		case 's':
			msgsize = atoi(optarg);
			break;
		case 'c':
			msgcnt = atoi(optarg);
			break;
		case 'D':
			sendflags |= RD_KAFKA_OP_F_FREE;
			break;
		case 'i':
			dispintvl = atoi(optarg);
			break;
		default:
			goto usage;
		}
	}

	if (!topic || optind != argc) {
	usage:
		fprintf(stderr,
			"Usage: %s [-C|-P] -t <topic> "
			"[-p <partition>] [-b <broker>] [options..]\n"
			"\n"
			" Options:\n"
			"  -C | -P      Consumer or Producer mode\n"
			"  -t <topic>   Topic to fetch / produce\n"
			"  -p <num>     Partition (defaults to 0)\n"
			"  -b <broker>  Broker address (localhost:9092)\n"
			"  -s <size>    Message size (producer)\n"
			"  -c <cnt>     Messages to transmit/receive\n"
			"  -D           Copy/Duplicate data buffer (producer)\n"
			"  -i <ms>      Display interval\n"
			"\n"
			" In Consumer mode:\n"
			"  consumes messages and prints thruput\n"
			" In Producer mode:\n"
			"  writes messages of size -s <..> and prints thruput\n"
			"\n",
			argv[0]);
		exit(1);
	}

	dispintvl *= 1000; /* us */

	signal(SIGINT, stop);

	/* Socket hangups are gracefully handled in librdkafka on socket error
	 * without the use of signals, so SIGPIPE should be ignored by the
	 * calling program. */
	signal(SIGPIPE, SIG_IGN);

	if (mode == 'P') {
		/*
		 * Producer
		 */
		char *sbuf = malloc(msgsize);
		int endwait;
		int outq;
		int i;

		memset(sbuf, 'R', msgsize);

		if (msgcnt == -1)
			printf("%% Sending messages of size %i bytes\n",
			       msgsize);
		else
			printf("%% Sending %i messages of size %i bytes\n",
			       msgcnt, msgsize);

		/* Create Kafka handle */
		if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, broker, NULL))) {
			perror("kafka_new producer");
			exit(1);
		}

		cnt.t_start = rd_clock();

		while (run && (msgcnt == -1 || cnt.msgs < msgcnt)) {
			char *pbuf = sbuf;
			/* Send/Produce message. */

			if (sendflags & RD_KAFKA_OP_F_FREE) {
				/* Duplicate memory */
				pbuf = malloc(msgsize);
				memcpy(pbuf, sbuf, msgsize);
			}

			rd_kafka_produce(rk, topic, partition,
					 sendflags, pbuf, msgsize);
			cnt.msgs++;
			cnt.bytes += msgsize;
			
			now = rd_clock();
			if (cnt.t_last + dispintvl <= now) {
				printf("%% %"PRIu64" messages and %"PRIu64 
				       "bytes: %"PRIu64" msgs/s and "
				       "%.2f Mb/s\n",
				       cnt.msgs, cnt.bytes,
				       (cnt.msgs / (now - cnt.t_start)) *
				       1000000,
				       (float)(cnt.bytes /
					       (now - cnt.t_start)));
				cnt.t_last = now;
			}

		}



		/* Wait for messaging to finish. */
		i = 0;
		while (run && rd_kafka_outq_len(rk) > 0) {
			if (!(i++ % (dispintvl/1000)))
				printf("%% Waiting for %i messages in outq "
				       "to be sent. Abort with Ctrl-c\n",
				       rd_kafka_outq_len(rk));
			usleep(1000);
		}

		cnt.t_end_send = rd_clock();

		outq = rd_kafka_outq_len(rk);
		cnt.msgs -= outq;
		cnt.bytes -= msgsize * outq;

		cnt.t_end = rd_clock();

		/* Since there is no ack for produce messages in 0.7 
		 * we wait some more for any packets in the socket buffers
		 * to be sent.
		 * This is fixed in protocol version 0.8 */
		endwait = cnt.msgs * 10;
		printf("%% Test timers stopped, but waiting %ims more "
		       "for the %"PRIu64 " messages to be transmitted from "
		       "socket buffers.\n"
		       "%% End with Ctrl-c\n",
		       endwait / 1000,
		       cnt.msgs);
		run = 1;
		while (run && endwait > 0) {
			usleep(10000);
			endwait -= 10000;
		}

		/* Destroy the handle */
		rd_kafka_destroy(rk);

		dirstr = "sent";

	} else if (mode == 'C') {
		/*
		 * Consumer
		 */
		rd_kafka_op_t *rko;
		/* Base our configuration on the default config. */
		rd_kafka_conf_t conf = rd_kafka_defaultconf;

		/* The offset storage file is optional but its presence
		 * avoids starting all over from offset 0 again when
		 * the program restarts.
		 * ZooKeeper functionality will be implemented in future
		 * versions and then the offset will be stored there instead. */
		conf.consumer.offset_file = "."; /* current directory */

		/* Indicate to rdkafka that the application is responsible
		 * for storing the offset. This allows the application to
		 * successfully handle a message before storing the offset.
		 * If this flag is not set rdkafka will store the offset
		 * just prior to returning the message from rd_kafka_consume().
		 */
		conf.flags |= RD_KAFKA_CONF_F_APP_OFFSET_STORE;


		/* Tell rdkafka to (try to) maintain 10000 messages
		 * in its internal receive buffers. This is to avoid
		 * application -> rdkafka -> broker  per-message ping-pong
		 * latency. */
		conf.consumer.replyq_low_thres = 100000;

		/* Use the consumer convenience function
		 * to create a Kafka handle. */
		if (!(rk = rd_kafka_new_consumer(broker, topic,
						 (uint32_t)partition,
						 0, &conf))) {
			perror("kafka_new_consumer");
			exit(1);
		}
		
		cnt.t_start = rd_clock();
		while (run && (msgcnt == -1 || msgcnt > cnt.msgs)) {
			/* Fetch an "op" which is one of:
			 *  - a kafka message (if rko_len>0 && rko_err==0)
			 *  - an error (if rko_err)
			 */
			uint64_t latency;

			latency = rd_clock();
			if (!(rko = rd_kafka_consume(rk, 1000/*timeout ms*/)))
				continue;
			cnt.t_latency += rd_clock() - latency;
			
			if (rko->rko_err)
				fprintf(stderr, "%% Error: %.*s\n",
					rko->rko_len, rko->rko_payload);
			else if (rko->rko_len) {
				cnt.msgs++;
				cnt.bytes += rko->rko_len;
			}

			/* rko_offset contains the offset of the _next_
			 * message. We store it when we're done processing
			 * the current message. */
			if (rko->rko_offset)
				rd_kafka_offset_store(rk, rko->rko_offset);

			/* Destroy the op */
			rd_kafka_op_destroy(rk, rko);

			now = rd_clock();
			if (cnt.t_last + dispintvl <= now &&
				cnt.t_start + 1000000 < now) {
				printf("%% %"PRIu64" messages and %"PRIu64 
				       " bytes: %"PRIu64" msgs/s and "
				       "%.2f Mb/s\n",
				       cnt.msgs, cnt.bytes,
				       (cnt.msgs / ((now - cnt.t_start)/1000))
				       * 1000,
				       (float)(cnt.bytes /
					       ((now - cnt.t_start) / 1000)));
				cnt.t_last = now;
			}

		}
		cnt.t_end = rd_clock();

		/* Destroy the handle */
		rd_kafka_destroy(rk);

		dirstr = "received";
	}

	if (cnt.t_end_send)
		cnt.t_total = cnt.t_end_send - cnt.t_start;
	else
		cnt.t_total = cnt.t_end - cnt.t_start;

	printf("%% %"PRIu64" messages and %"PRIu64" bytes "
	       "%s in %"PRIu64"ms: %"PRIu64" msgs/s and %.02f Mb/s\n",
	       cnt.msgs, cnt.bytes,
	       dirstr,
	       cnt.t_total / 1000,
	       (cnt.msgs / (cnt.t_total / 1000)) * 1000,
	       (float)(cnt.bytes / (cnt.t_total / 1000)));

	if (cnt.t_latency)
		printf("%% Average application fetch latency: %"PRIu64"us\n",
		       cnt.t_latency / cnt.msgs);


	return 0;
}
Example #6
static void do_offset_test (const char *what, int auto_commit, int auto_store,
			    int async) {
	test_timing_t t_all;
	char groupid[64];
	rd_kafka_t *rk;
	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *tconf;
	int cnt = 0;
	const int extra_cnt = 5;
	rd_kafka_resp_err_t err;
	rd_kafka_topic_partition_list_t *parts;
	rd_kafka_topic_partition_t *rktpar;
	int64_t next_offset = -1;

	test_conf_init(&conf, &tconf, 20);
	test_conf_set(conf, "enable.auto.commit", auto_commit ? "true":"false");
	test_conf_set(conf, "enable.auto.offset.store", auto_store ?"true":"false");
	test_conf_set(conf, "auto.commit.interval.ms", "500");
	rd_kafka_conf_set_offset_commit_cb(conf, offset_commit_cb);
	test_topic_conf_set(tconf, "auto.offset.reset", "smallest");
	test_str_id_generate(groupid, sizeof(groupid));
	test_conf_set(conf, "group.id", groupid);
	rd_kafka_conf_set_default_topic_conf(conf, tconf);

	TEST_SAY(_C_MAG "[ do_offset_test: %s with group.id %s ]\n",
		 what, groupid);

	TIMING_START(&t_all, what);

	expected_offset  = 0;
	committed_offset = -1;

	/* MO:
	 *  - Create consumer.
	 *  - Start consuming from beginning
	 *  - Perform store & commits according to settings
	 *  - Stop storing&committing when half of the messages are consumed,
	 *  - but consume 5 more to check against.
	 *  - Query position.
	 *  - Destroy consumer.
	 *  - Create new consumer with same group.id using stored offsets
	 *  - Should consume the expected message.
	 */

	/* Create kafka instance */
	rk = test_create_handle(RD_KAFKA_CONSUMER, rd_kafka_conf_dup(conf));

	rd_kafka_poll_set_consumer(rk);

	test_consumer_subscribe(rk, topic);

	while (cnt - extra_cnt < msgcnt / 2) {
		rd_kafka_message_t *rkm;

		rkm = rd_kafka_consumer_poll(rk, 10*1000);
		if (!rkm)
			continue;

		if (rkm->err == RD_KAFKA_RESP_ERR__TIMED_OUT)
			TEST_FAIL("%s: Timed out waiting for message %d", what,cnt);
		else if (rkm->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
			rd_kafka_message_destroy(rkm);
			continue;
		} else if (rkm->err)
			TEST_FAIL("%s: Consumer error: %s",
				  what, rd_kafka_message_errstr(rkm));

		/* Offset of next message. */
		next_offset = rkm->offset + 1;

		if (cnt < msgcnt / 2) {
			if (!auto_store) {
				err = rd_kafka_offset_store(rkm->rkt,rkm->partition,
							    rkm->offset);
				if (err)
					TEST_FAIL("%s: offset_store failed: %s\n",
						  what, rd_kafka_err2str(err));
			}
			expected_offset = rkm->offset+1;
			if (!auto_commit) {
				test_timing_t t_commit;
				TIMING_START(&t_commit,
					     async?"commit.async":"commit.sync");
				err = rd_kafka_commit_message(rk, rkm, async);
				TIMING_STOP(&t_commit);
				if (err)
					TEST_FAIL("%s: commit failed: %s\n",
						  what, rd_kafka_err2str(err));
			}

		} else if (auto_store && auto_commit)
			expected_offset = rkm->offset+1;

		rd_kafka_message_destroy(rkm);
		cnt++;
	}

	TEST_SAY("%s: done consuming after %d messages, at offset %"PRId64"\n",
		 what, cnt, expected_offset);

	if ((err = rd_kafka_assignment(rk, &parts)))
		TEST_FAIL("%s: failed to get assignment(): %s\n",
			  what, rd_kafka_err2str(err));

	/* Verify position */
	if ((err = rd_kafka_position(rk, parts)))
		TEST_FAIL("%s: failed to get position(): %s\n",
			  what, rd_kafka_err2str(err));
	if (!(rktpar = rd_kafka_topic_partition_list_find(parts,
							  topic, partition)))
		TEST_FAIL("%s: position(): topic lost\n", what);
	if (rktpar->offset != next_offset)
		TEST_FAIL("%s: Expected position() offset %"PRId64", got %"PRId64,
			  what, next_offset, rktpar->offset);
	TEST_SAY("%s: Position is at %"PRId64", good!\n",
		 what, rktpar->offset);

	/* Pause messages while waiting so we can serve callbacks
	 * without having more messages received. */
	if ((err = rd_kafka_pause_partitions(rk, parts)))
		TEST_FAIL("%s: failed to pause partitions: %s\n",
			  what, rd_kafka_err2str(err));
	rd_kafka_topic_partition_list_destroy(parts);

	/* Fire off any enqueued offset_commit_cb */
	test_consumer_poll_no_msgs(what, rk, testid, 0);

	TEST_SAY("%s: committed_offset %"PRId64", expected_offset %"PRId64"\n",
		 what, committed_offset, expected_offset);

	if (!auto_commit && !async) {
		/* Sync commits should be up to date at this point. */
		if (committed_offset != expected_offset)
			TEST_FAIL("%s: Sync commit: committed offset %"PRId64
				  " should be same as expected offset "
				  "%"PRId64,
				  what, committed_offset, expected_offset);
	} else {

		/* Wait for offset commits to catch up */
		while (committed_offset < expected_offset) {
			TEST_SAYL(3, "%s: Wait for committed offset %"PRId64
				  " to reach expected offset %"PRId64"\n",
				  what, committed_offset, expected_offset);
			test_consumer_poll_no_msgs(what, rk, testid, 1000);
		}

	}

	TEST_SAY("%s: phase 1 complete, %d messages consumed, "
		 "next expected offset is %"PRId64"\n",
		 what, cnt, expected_offset);

        /* Issue #827: cause committed() to return prematurely by specifying
         *             low timeout. The bug (use after free) will only
         *             be caught by valgrind. */
        do {
                parts = rd_kafka_topic_partition_list_new(1);
                rd_kafka_topic_partition_list_add(parts, topic, partition);
                err = rd_kafka_committed(rk, parts, 1);
                rd_kafka_topic_partition_list_destroy(parts);
                TEST_SAY("Issue #827: committed() returned %s\n",
                         rd_kafka_err2str(err));
        } while (err != RD_KAFKA_RESP_ERR__TIMED_OUT);

	/* Query position */
	parts = rd_kafka_topic_partition_list_new(1);
	rd_kafka_topic_partition_list_add(parts, topic, partition);

	err = rd_kafka_committed(rk, parts, tmout_multip(5*1000));
	if (err)
		TEST_FAIL("%s: committed() failed: %s", what, rd_kafka_err2str(err));
	if (!(rktpar = rd_kafka_topic_partition_list_find(parts,
							  topic, partition)))
		TEST_FAIL("%s: committed(): topic lost\n", what);
	if (rktpar->offset != expected_offset)
		TEST_FAIL("%s: Expected committed() offset %"PRId64", got %"PRId64,
			  what, expected_offset, rktpar->offset);
	TEST_SAY("%s: Committed offset is at %"PRId64", good!\n",
		 what, rktpar->offset);

	rd_kafka_topic_partition_list_destroy(parts);
	test_consumer_close(rk);
	rd_kafka_destroy(rk);



	/* Fire up a new consumer and continue from where we left off. */
	TEST_SAY("%s: phase 2: starting new consumer to resume consumption\n",what);
	rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
	rd_kafka_poll_set_consumer(rk);

	test_consumer_subscribe(rk, topic);

	while (cnt < msgcnt) {
		rd_kafka_message_t *rkm;

		rkm = rd_kafka_consumer_poll(rk, 10*1000);
		if (!rkm)
			continue;

		if (rkm->err == RD_KAFKA_RESP_ERR__TIMED_OUT)
			TEST_FAIL("%s: Timed out waiting for message %d", what,cnt);
		else if (rkm->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
			rd_kafka_message_destroy(rkm);
			continue;
		} else if (rkm->err)
			TEST_FAIL("%s: Consumer error: %s",
				  what, rd_kafka_message_errstr(rkm));

		if (rkm->offset != expected_offset)
			TEST_FAIL("%s: Received message offset %"PRId64
				  ", expected %"PRId64" at msgcnt %d/%d\n",
				  what, rkm->offset, expected_offset,
				  cnt, msgcnt);

		rd_kafka_message_destroy(rkm);
		expected_offset++;
		cnt++;
	}


	TEST_SAY("%s: phase 2: complete\n", what);
	test_consumer_close(rk);
	rd_kafka_destroy(rk);
	

	TIMING_STOP(&t_all);
}
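For context, the auto_commit/auto_store/async parameters let this test cover the different offset-handling modes. A hypothetical caller (the exact invocations in the real test file may differ) could exercise them like so:

/* Hypothetical invocations; arguments are what, auto_commit, auto_store, async. */
do_offset_test("AUTO.COMMIT & AUTO.STORE",          1, 1, 0);
do_offset_test("MANUAL.COMMIT.ASYNC & AUTO.STORE",  0, 1, 1);
do_offset_test("MANUAL.COMMIT.SYNC & MANUAL.STORE", 0, 0, 0);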
Example #7
int main_0006_symbols (int argc, char **argv) {

        if (argc < 0 /* always false */) {
                rd_kafka_version();
                rd_kafka_version_str();
		rd_kafka_get_debug_contexts();
		rd_kafka_get_err_descs(NULL, NULL);
                rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR);
		rd_kafka_err2name(RD_KAFKA_RESP_ERR_NO_ERROR);
                rd_kafka_errno2err(EINVAL);
                rd_kafka_errno();
		rd_kafka_last_error();
                rd_kafka_conf_new();
                rd_kafka_conf_destroy(NULL);
                rd_kafka_conf_dup(NULL);
                rd_kafka_conf_set(NULL, NULL, NULL, NULL, 0);
                rd_kafka_conf_set_dr_cb(NULL, NULL);
                rd_kafka_conf_set_dr_msg_cb(NULL, NULL);
                rd_kafka_conf_set_error_cb(NULL, NULL);
                rd_kafka_conf_set_stats_cb(NULL, NULL);
                rd_kafka_conf_set_log_cb(NULL, NULL);
                rd_kafka_conf_set_socket_cb(NULL, NULL);
		rd_kafka_conf_set_rebalance_cb(NULL, NULL);
		rd_kafka_conf_set_offset_commit_cb(NULL, NULL);
		rd_kafka_conf_set_throttle_cb(NULL, NULL);
		rd_kafka_conf_set_default_topic_conf(NULL, NULL);
		rd_kafka_conf_get(NULL, NULL, NULL, NULL);
#ifndef _MSC_VER
		rd_kafka_conf_set_open_cb(NULL, NULL);
#endif
		rd_kafka_conf_set_opaque(NULL, NULL);
                rd_kafka_opaque(NULL);
                rd_kafka_conf_dump(NULL, NULL);
                rd_kafka_topic_conf_dump(NULL, NULL);
                rd_kafka_conf_dump_free(NULL, 0);
                rd_kafka_conf_properties_show(NULL);
                rd_kafka_topic_conf_new();
                rd_kafka_topic_conf_dup(NULL);
                rd_kafka_topic_conf_destroy(NULL);
                rd_kafka_topic_conf_set(NULL, NULL, NULL, NULL, 0);
                rd_kafka_topic_conf_set_opaque(NULL, NULL);
		rd_kafka_topic_conf_get(NULL, NULL, NULL, NULL);
                rd_kafka_topic_conf_set_partitioner_cb(NULL, NULL);
                rd_kafka_topic_partition_available(NULL, 0);
		rd_kafka_topic_opaque(NULL);
                rd_kafka_msg_partitioner_random(NULL, NULL, 0, 0, NULL, NULL);
                rd_kafka_msg_partitioner_consistent(NULL, NULL, 0, 0, NULL, NULL);
                rd_kafka_msg_partitioner_consistent_random(NULL, NULL, 0, 0, NULL, NULL);
                rd_kafka_new(0, NULL, NULL, 0);
                rd_kafka_destroy(NULL);
                rd_kafka_name(NULL);
		rd_kafka_memberid(NULL);
                rd_kafka_topic_new(NULL, NULL, NULL);
                rd_kafka_topic_destroy(NULL);
                rd_kafka_topic_name(NULL);
                rd_kafka_message_destroy(NULL);
                rd_kafka_message_errstr(NULL);
		rd_kafka_message_timestamp(NULL, NULL);
                rd_kafka_consume_start(NULL, 0, 0);
                rd_kafka_consume_stop(NULL, 0);
                rd_kafka_consume(NULL, 0, 0);
                rd_kafka_consume_batch(NULL, 0, 0, NULL, 0);
                rd_kafka_consume_callback(NULL, 0, 0, NULL, NULL);
                rd_kafka_offset_store(NULL, 0, 0);
                rd_kafka_produce(NULL, 0, 0, NULL, 0, NULL, 0, NULL);
                rd_kafka_produce_batch(NULL, 0, 0, NULL, 0);
                rd_kafka_poll(NULL, 0);
                rd_kafka_brokers_add(NULL, NULL);
                /* DEPRECATED: rd_kafka_set_logger(NULL, NULL); */
                rd_kafka_set_log_level(NULL, 0);
                rd_kafka_log_print(NULL, 0, NULL, NULL);
#ifndef _MSC_VER
                rd_kafka_log_syslog(NULL, 0, NULL, NULL);
#endif
                rd_kafka_outq_len(NULL);
                rd_kafka_dump(NULL, NULL);
                rd_kafka_thread_cnt();
                rd_kafka_wait_destroyed(0);
                rd_kafka_metadata(NULL, 0, NULL, NULL, 0);
                rd_kafka_metadata_destroy(NULL);
                rd_kafka_queue_destroy(NULL);
                rd_kafka_consume_start_queue(NULL, 0, 0, NULL);
                rd_kafka_consume_queue(NULL, 0);
                rd_kafka_consume_batch_queue(NULL, 0, NULL, 0);
                rd_kafka_consume_callback_queue(NULL, 0, NULL, NULL);
                rd_kafka_seek(NULL, 0, 0, 0);
                rd_kafka_yield(NULL);
                rd_kafka_mem_free(NULL, NULL);
                rd_kafka_list_groups(NULL, NULL, NULL, 0);
                rd_kafka_group_list_destroy(NULL);

		/* KafkaConsumer API */
		rd_kafka_subscribe(NULL, NULL);
		rd_kafka_unsubscribe(NULL);
		rd_kafka_subscription(NULL, NULL);
		rd_kafka_consumer_poll(NULL, 0);
		rd_kafka_consumer_close(NULL);
		rd_kafka_assign(NULL, NULL);
		rd_kafka_assignment(NULL, NULL);
		rd_kafka_commit(NULL, NULL, 0);
		rd_kafka_commit_message(NULL, NULL, 0);
                rd_kafka_committed(NULL, NULL, 0);
		rd_kafka_position(NULL, NULL);

		/* TopicPartition */
		rd_kafka_topic_partition_list_new(0);
		rd_kafka_topic_partition_list_destroy(NULL);
		rd_kafka_topic_partition_list_add(NULL, NULL, 0);
		rd_kafka_topic_partition_list_add_range(NULL, NULL, 0, 0);
		rd_kafka_topic_partition_list_del(NULL, NULL, 0);
		rd_kafka_topic_partition_list_del_by_idx(NULL, 0);
		rd_kafka_topic_partition_list_copy(NULL);
		rd_kafka_topic_partition_list_set_offset(NULL, NULL, 0, 0);
		rd_kafka_topic_partition_list_find(NULL, NULL, 0);
		rd_kafka_query_watermark_offsets(NULL, NULL, 0, NULL, NULL, 0);
		rd_kafka_get_watermark_offsets(NULL, NULL, 0, NULL, NULL);
        }


	return 0;
}
Example #8
int main (int argc, char **argv) {
	rd_kafka_t *rk;
	char *broker = NULL;
	char mode = 'C';
	char *topic = NULL;
	int partition = 0;
	int opt;


	while ((opt = getopt(argc, argv, "PCt:p:b:")) != -1) {
		switch (opt) {
		case 'P':
		case 'C':
			mode = opt;
			break;
		case 't':
			topic = optarg;
			break;
		case 'p':
			partition = atoi(optarg);
			break;
		case 'b':
			broker = optarg;
			break;
		default:
			goto usage;
		}
	}

	if (!topic || optind != argc) {
	usage:
		fprintf(stderr,
			"Usage: %s [-C|-P] -t <topic> "
			"[-p <partition>] [-b <broker>]\n"
			"\n"
			" Options:\n"
			"  -C | -P      Consumer or Producer mode\n"
			"  -t <topic>   Topic to fetch / produce\n"
			"  -p <num>     Partition (defaults to 0)\n"
			"  -b <broker>  Broker address (localhost:9092)\n"
			"\n"
			" In Consumer mode:\n"
			"  writes fetched messages to stdout\n"
			" In Producer mode:\n"
			"  reads messages from stdin and sends to broker\n"
			"\n",
			argv[0]);
		exit(1);
	}


	signal(SIGINT, stop);

	if (mode == 'P') {
		/*
		 * Producer
		 */
		char buf[2048];
		int sendcnt = 0;

		/* Create Kafka handle */
		if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, broker, NULL))) {
			perror("kafka_new producer");
			exit(1);
		}

		fprintf(stderr, "%% Type stuff and hit enter to send\n");
		while (run && (fgets(buf, sizeof(buf), stdin))) {
			int len = strlen(buf);
			char *opbuf = malloc(len + 1);
			strncpy(opbuf, buf, len + 1);

			/* Send/Produce message. */
			rd_kafka_produce(rk, topic, partition, RD_KAFKA_OP_F_FREE, opbuf, len);
			fprintf(stderr, "%% Sent %i bytes to topic "
				"%s partition %i\n", len, topic, partition);
			sendcnt++;
		}

		/* Wait for messaging to finish. */
		while (rd_kafka_outq_len(rk) > 0)
			usleep(50000);

		/* Since there is no ack for produce messages in 0.7 
		 * we wait some more for any packets to be sent.
		 * This is fixed in protocol version 0.8 */
		if (sendcnt > 0)
			usleep(500000);

		/* Destroy the handle */
		rd_kafka_destroy(rk);

	} else if (mode == 'C') {
		/*
		 * Consumer
		 */
		rd_kafka_op_t *rko;
		/* Base our configuration on the default config. */
		rd_kafka_conf_t conf = rd_kafka_defaultconf;


		/* The offset storage file is optional but its presence
		 * avoids starting all over from offset 0 again when
		 * the program restarts.
		 * ZooKeeper functionality will be implemented in future
		 * versions and then the offset will be stored there instead. */
		conf.consumer.offset_file = "."; /* current directory */

		/* Indicate to rdkafka that the application is responsible
		 * for storing the offset. This allows the application to
		 * successfully handle a message before storing the offset.
		 * If this flag is not set rdkafka will store the offset
		 * just prior to returning the message from rd_kafka_consume().
		 */
		conf.flags |= RD_KAFKA_CONF_F_APP_OFFSET_STORE;



		/* Use the consumer convenience function
		 * to create a Kafka handle. */
		if (!(rk = rd_kafka_new_consumer(broker, topic,
						 (uint32_t)partition,
						 0, &conf))) {
			perror("kafka_new_consumer");
			exit(1);
		}

		while (run) {
			/* Fetch an "op" which is one of:
			 *  - a kafka message (if rko_len>0 && rko_err==0)
			 *  - an error (if rko_err)
			 */
			if (!(rko = rd_kafka_consume(rk, 1000/*timeout ms*/)))
				continue;
			
			if (rko->rko_err)
				fprintf(stderr, "%% Error: %.*s\n",
					rko->rko_len, rko->rko_payload);
			else if (rko->rko_len) {
				fprintf(stderr, "%% Message with "
					"next-offset %"PRIu64" is %i bytes\n",
					rko->rko_offset, rko->rko_len);
				hexdump(stdout, "Message",
					rko->rko_payload, rko->rko_len);
			}

			/* rko_offset contains the offset of the _next_
			 * message. We store it when we're done processing
			 * the current message. */
			if (rko->rko_offset)
				rd_kafka_offset_store(rk, rko->rko_offset);

			/* Destroy the op */
			rd_kafka_op_destroy(rk, rko);
		}

		/* Destroy the handle */
		rd_kafka_destroy(rk);
	}

	return 0;
}
Example #9
int main (int argc, char **argv) {
	rd_kafka_t *rk;
	char *broker = NULL;
	int mode_p = 0;
	int mode_c = 0;
	char *topic;
	int partition;
	/* Command line argument option definition. */
	rd_opt_t opts[] = {
		{ RD_OPT_BOOL|RD_OPT_MUT1|RD_OPT_REQ, 'P', "produce",
		  0, &mode_p, "Run as producer" },
		{ RD_OPT_BOOL|RD_OPT_MUT1|RD_OPT_REQ, 'C', "consume",
		  0, &mode_c, "Run as consumer" },
		{ RD_OPT_STR|RD_OPT_REQ, 't', "topic", 1, &topic, "Topic" },
		{ RD_OPT_INT|RD_OPT_REQ, 'p', "partition", 1, &partition,
		  "Partition" },
		{ RD_OPT_STR, 'b', "broker", 1, &broker, "Broker host:port" },
		{ RD_OPT_END },
	};
	

	/* Initialize librd */
	rd_init();

	/* Parse command line arguments. */
	if (!rd_opt_get(opts, argc, argv, NULL, NULL))
		exit(1);

	signal(SIGINT, stop);

	if (mode_p) {
		/*
		 * Producer
		 */
		char buf[1024];

		/* Create Kafka handle */
		if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, broker, NULL))) {
			perror("kafka_new producer");
			exit(1);
		}

		fprintf(stderr, "%% Type stuff and hit enter to send\n");
		while (run && (fgets(buf, sizeof(buf), stdin))) {
			int len = strlen(buf);
			/* Send/Produce message. */
			rd_kafka_produce(rk, topic, partition, 0, buf, len);
			fprintf(stderr, "%% Sent %i bytes to topic "
				"%s partition %i\n", len, topic, partition);
		}

		/* Destroy the handle */
		rd_kafka_destroy(rk);

	} else {
		/*
		 * Consumer
		 */
		rd_kafka_op_t *rko;
		/* Base our configuration on the default config. */
		rd_kafka_conf_t conf = rd_kafka_defaultconf;


		/* The offset storage file is optional but its presence
		 * avoids starting all over from offset 0 again when
		 * the program restarts.
		 * ZooKeeper functionality will be implemented in future
		 * versions and then the offset will be stored there instead. */
		conf.consumer.offset_file = "."; /* current directory */

		/* Indicate to rdkafka that the application is responsible
		 * for storing the offset. This allows the application to
		 * successfully handle a message before storing the offset.
		 * If this flag is not set rdkafka will store the offset
		 * just prior to returning the message from rd_kafka_consume().
		 */
		conf.flags |= RD_KAFKA_CONF_F_APP_OFFSET_STORE;



		/* Use the consumer convenience function
		 * to create a Kafka handle. */
		if (!(rk = rd_kafka_new_consumer(broker, topic,
						 (uint32_t)partition,
						 0, &conf))) {
			perror("kafka_new_consumer");
			exit(1);
		}

		while (run) {
			/* Fetch an "op" which is one of:
			 *  - a kafka message (if rko_len>0 && rko_err==0)
			 *  - an error (if rko_err)
			 */
			if (!(rko = rd_kafka_consume(rk, 1000/*timeout ms*/)))
				continue;
			
			if (rko->rko_err)
				fprintf(stderr, "%% Error: %.*s\n",
					rko->rko_len, rko->rko_payload);
			else if (rko->rko_len) {
				fprintf(stderr, "%% Message with "
					"next-offset %"PRIu64" is %i bytes\n",
					rko->rko_offset, rko->rko_len);
				rd_hexdump(stdout, "Message",
					   rko->rko_payload, rko->rko_len);
			}

			/* rko_offset contains the offset of the _next_
			 * message. We store it when we're done processing
			 * the current message. */
			if (rko->rko_offset)
				rd_kafka_offset_store(rk, rko->rko_offset);

			/* Destroy the op */
			rd_kafka_op_destroy(rk, rko);
		}

		/* Destroy the handle */
		rd_kafka_destroy(rk);
	}

	return 0;
}