static PyObject *Producer_flush (Handle *self, PyObject *args,
                                 PyObject *kwargs) {
        double tmout = -1;
        int qlen;
        static char *kws[] = { "timeout", NULL };
#if RD_KAFKA_VERSION >= 0x00090300
        CallState cs;
#endif

        if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|d", kws, &tmout))
                return NULL;

#if RD_KAFKA_VERSION >= 0x00090300
        /* librdkafka provides rd_kafka_flush(): release the GIL (CallState)
         * and block until the outbound queue drains or the timeout expires. */
        CallState_begin(self, &cs);
        rd_kafka_flush(self->rk, tmout < 0 ? -1 : (int)(tmout * 1000));
        if (!CallState_end(self, &cs))
                return NULL;
        qlen = rd_kafka_outq_len(self->rk);
#else
        /* Older librdkafka without rd_kafka_flush(): poll in 500 ms
         * intervals until the outbound queue is empty. */
        while ((qlen = rd_kafka_outq_len(self->rk)) > 0) {
                if (Producer_poll0(self, 500) == -1)
                        return NULL;
        }
#endif
        return cfl_PyInt_FromInt(qlen);
}
Example #2
/**
 * @brief Test handling of implicit acks.
 *
 * @param batch_cnt Total number of batches, ProduceRequests, sent.
 * @param initial_fail_batch_cnt How many of the initial batches should
 *                               fail with an emulated network timeout.
 */
static void do_test_implicit_ack (const char *what,
                                  int batch_cnt, int initial_fail_batch_cnt) {
        rd_kafka_t *rk;
        const char *topic = test_mk_topic_name("0090_idempotence_impl_ack", 1);
        const int32_t partition = 0;
        uint64_t testid;
        int msgcnt = 10*batch_cnt;
        rd_kafka_conf_t *conf;
        rd_kafka_topic_t *rkt;
        test_msgver_t mv;

        TEST_SAY(_C_MAG "[ Test implicit ack: %s ]\n", what);

        rd_atomic32_init(&state.produce_cnt, 0);
        state.batch_cnt = batch_cnt;
        state.initial_fail_batch_cnt = initial_fail_batch_cnt;

        testid = test_id_generate();

        test_conf_init(&conf, NULL, 60);
        rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
        test_conf_set(conf, "enable.idempotence", "true");
        test_conf_set(conf, "batch.num.messages", "10");
        test_conf_set(conf, "linger.ms", "500");
        test_conf_set(conf, "retry.backoff.ms", "2000");

        /* The ProduceResponse handler will inject timed-out-in-flight
         * errors for the first N ProduceRequests, which will trigger retries
         * that in turn will result in OutOfSequence errors. */
        test_conf_set(conf, "ut_handle_ProduceResponse",
                      (char *)handle_ProduceResponse);

        test_create_topic(topic, 1, 1);

        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
        rkt = test_create_producer_topic(rk, topic, NULL);


        TEST_SAY("Producing %d messages\n", msgcnt);
        test_produce_msgs(rk, rkt, testid, -1, 0, msgcnt, NULL, 0);

        TEST_SAY("Flushing..\n");
        rd_kafka_flush(rk, 10000);

        rd_kafka_topic_destroy(rkt);
        rd_kafka_destroy(rk);

        TEST_SAY("Verifying messages with consumer\n");
        test_msgver_init(&mv, testid);
        test_consume_msgs_easy_mv(NULL, topic, partition,
                                  testid, 1, msgcnt, NULL, &mv);
        test_msgver_verify("verify", &mv, TEST_MSGVER_ALL, 0, msgcnt);
        test_msgver_clear(&mv);

        TEST_SAY(_C_GRN "[ Test implicit ack: %s : PASS ]\n", what);
}
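For context, a caller would typically exercise both a light and a heavier failure scenario; the entry-point name and parameter values below are illustrative assumptions, not taken from the actual test suite:

/* Illustrative only: function name and values are assumptions. */
int main_0090_idempotence_impl_ack (int argc, char **argv) {
        do_test_implicit_ack("single failed batch", 5, 1);
        do_test_implicit_ack("several failed batches", 10, 4);
        return 0;
}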
Example #3
static void do_produce (const char *topic, int msgcnt) {
        rd_kafka_t *rk;
        rd_kafka_conf_t *conf;
        int i;
        rd_kafka_resp_err_t err;

        test_conf_init(&conf, NULL, 0);
        test_conf_set(conf, "acks", "all");
        rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);

        rd_kafka_conf_interceptor_add_on_new(conf, __FILE__, on_new, NULL);

        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

        /* First message is without headers (negative testing) */
        i = 0;
        err = rd_kafka_producev(
                rk,
                RD_KAFKA_V_TOPIC(topic),
                RD_KAFKA_V_PARTITION(0),
                RD_KAFKA_V_VALUE(&i, sizeof(i)),
                RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
                RD_KAFKA_V_END);
        TEST_ASSERT(!err,
                    "producev() failed: %s", rd_kafka_err2str(err));
        exp_msgid++;

        for (i = 1 ; i < msgcnt ; i++, exp_msgid++) {
                err = rd_kafka_producev(
                        rk,
                        RD_KAFKA_V_TOPIC(topic),
                        RD_KAFKA_V_PARTITION(0),
                        RD_KAFKA_V_VALUE(&i, sizeof(i)),
                        RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
                        RD_KAFKA_V_HEADER("msgid", &i, sizeof(i)),
                        RD_KAFKA_V_HEADER("static", "hey", -1),
                        RD_KAFKA_V_HEADER("multi", "multi1", -1),
                        RD_KAFKA_V_HEADER("multi", "multi2", 6),
                        RD_KAFKA_V_HEADER("multi", "multi3", strlen("multi3")),
                        RD_KAFKA_V_HEADER("null", NULL, 0),
                        RD_KAFKA_V_HEADER("empty", "", 0),
                        RD_KAFKA_V_END);
                TEST_ASSERT(!err,
                            "producev() failed: %s", rd_kafka_err2str(err));
        }

        /* Reset expected message id for dr */
        exp_msgid = 0;

        /* Wait for timeouts and delivery reports */
        rd_kafka_flush(rk, tmout_multip(5000));

        rd_kafka_destroy(rk);
}
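Every message except the first carries headers, so the natural counterpart is a consumer-side check. A minimal sketch, assuming `rkmessage` is a message returned by the consumer and that librdkafka headers are included; `dump_headers` is a hypothetical helper, not part of the example above:

/* Sketch: read back the headers produced above. */
static void dump_headers (const rd_kafka_message_t *rkmessage) {
        rd_kafka_headers_t *hdrs;
        const char *name;
        const void *val;
        size_t size, idx = 0;

        /* The first message was produced without headers, so an error
         * here simply means there is nothing to print. */
        if (rd_kafka_message_headers(rkmessage, &hdrs))
                return;

        /* Headers are owned by the message; no destroy needed here. */
        while (!rd_kafka_header_get_all(hdrs, idx++, &name, &val, &size))
                printf("  header \"%s\" (%zu bytes)\n", name, size);
}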
Example #4
int main_0001_multiobj (int argc, char **argv) {
	int partition = RD_KAFKA_PARTITION_UA; /* random */
	int i;
	const int NUM_ITER = 10;
        const char *topic = NULL;

	TEST_SAY("Creating and destroying %i kafka instances\n", NUM_ITER);

	/* Create, use and destroy NUM_ITER kafka instances. */
	for (i = 0 ; i < NUM_ITER ; i++) {
		rd_kafka_t *rk;
		rd_kafka_topic_t *rkt;
		rd_kafka_conf_t *conf;
		rd_kafka_topic_conf_t *topic_conf;
		char msg[128];
                test_timing_t t_destroy;

		test_conf_init(&conf, &topic_conf, 30);

                if (!topic)
                        topic = test_mk_topic_name("0001", 0);

		rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

		rkt = rd_kafka_topic_new(rk, topic, topic_conf);
		if (!rkt)
			TEST_FAIL("Failed to create topic for "
				  "rdkafka instance #%i: %s\n",
				  i, rd_kafka_err2str(rd_kafka_errno2err(errno)));

		rd_snprintf(msg, sizeof(msg), "%s test message for iteration #%i",
			 argv[0], i);

		/* Produce a message */
		rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
				 msg, strlen(msg), NULL, 0, NULL);
		
		/* Wait for it to be sent (and possibly acked) */
		rd_kafka_flush(rk, -1);

		/* Destroy topic */
		rd_kafka_topic_destroy(rkt);

		/* Destroy rdkafka instance */
                TIMING_START(&t_destroy, "rd_kafka_destroy()");
		rd_kafka_destroy(rk);
                TIMING_STOP(&t_destroy);
	}

	return 0;
}
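The rd_kafka_flush(rk, -1) call above blocks indefinitely until the queue is empty. A minimal sketch of a bounded variant (illustrative, not part of the test) that reports anything left behind:

/* Sketch: bounded flush instead of the infinite wait above.
 * rd_kafka_flush() returns RD_KAFKA_RESP_ERR__TIMED_OUT if messages
 * remain in the queue when the timeout expires. */
if (rd_kafka_flush(rk, 10*1000) == RD_KAFKA_RESP_ERR__TIMED_OUT)
        TEST_FAIL("%d message(s) not delivered within 10s\n",
                  rd_kafka_outq_len(rk));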
Example #5
int main_0002_unkpart (int argc, char **argv) {
	int partition = 99; /* non-existent */
	int r;
	rd_kafka_t *rk;
	rd_kafka_topic_t *rkt;
	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *topic_conf;
	char msg[128];
	int msgcnt = 10;
	int i;
	int fails = 0;
        const struct rd_kafka_metadata *metadata;

	test_conf_init(&conf, &topic_conf, 10);

	/* Set delivery report callback */
	rd_kafka_conf_set_dr_cb(conf, dr_cb);

	/* Create kafka instance */
	rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

	rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0002", 0),
                                 topic_conf);
	if (!rkt)
		TEST_FAIL("Failed to create topic: %s\n",
			  rd_strerror(errno));

        /* Request metadata so that we know the cluster is up before producing
         * messages, otherwise erroneous partitions will not fail immediately.*/
        if ((r = rd_kafka_metadata(rk, 0, rkt, &metadata,
				   tmout_multip(15000))) !=
            RD_KAFKA_RESP_ERR_NO_ERROR)
                TEST_FAIL("Failed to acquire metadata: %s\n",
                          rd_kafka_err2str(r));

        rd_kafka_metadata_destroy(metadata);

	/* Produce a message */
	for (i = 0 ; i < msgcnt ; i++) {
		int *msgidp = malloc(sizeof(*msgidp));
		*msgidp = i;
		rd_snprintf(msg, sizeof(msg), "%s test message #%i", argv[0], i);
		r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
				     msg, strlen(msg), NULL, 0, msgidp);
		if (r == -1) {
			if (errno == ESRCH)
				TEST_SAY("Failed to produce message #%i: "
					 "unknown partition: good!\n", i);
			else
				TEST_FAIL("Failed to produce message #%i: %s\n",
					  i, rd_kafka_err2str(
						  rd_kafka_errno2err(errno)));
                        free(msgidp);
		} else {
			if (i > 5) {
				fails++;
				TEST_SAY("Message #%i produced: "
					 "should've failed\n", i);
			}
			msgs_wait |= (1 << i);
		}

		/* After half the messages: sleep to allow the metadata
		 * to be fetched from broker and update the actual partition
		 * count: this will make subsequent produce() calls fail
		 * immediately. */
		if (i == 5)
			rd_sleep(2);
	}

	/* Wait for messages to time out */
	rd_kafka_flush(rk, -1);

	if (msgs_wait != 0)
		TEST_FAIL("Still waiting for messages: 0x%x\n", msgs_wait);


	if (fails > 0)
		TEST_FAIL("See previous error(s)\n");

	/* Destroy topic */
	rd_kafka_topic_destroy(rkt);
		
	/* Destroy rdkafka instance */
	TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
	rd_kafka_destroy(rk);

	return 0;
}
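The errno / rd_kafka_errno2err() pattern used above is the legacy error API. A minimal sketch of the same unknown-partition check using rd_kafka_last_error(), available in newer librdkafka releases (illustrative only, not part of the test):

/* Sketch: equivalent error check without relying on thread-local errno. */
if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
                     msg, strlen(msg), NULL, 0, msgidp) == -1) {
        rd_kafka_resp_err_t perr = rd_kafka_last_error();

        if (perr == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
                TEST_SAY("Produce failed as expected: %s\n",
                         rd_kafka_err2str(perr));
        else
                TEST_FAIL("Unexpected produce error: %s\n",
                          rd_kafka_err2str(perr));
}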