static void do_test_apis (rd_kafka_type_t cltype) { rd_kafka_t *rk; char errstr[512]; rd_kafka_queue_t *mainq, *backgroundq; rd_kafka_conf_t *conf; mtx_init(&last_event_lock, mtx_plain); cnd_init(&last_event_cnd); do_test_unclean_destroy(cltype, 0/*tempq*/); do_test_unclean_destroy(cltype, 1/*mainq*/); test_conf_init(&conf, NULL, 0); /* Remove brokers, if any, since this is a local test and we * rely on the controller not being found. */ test_conf_set(conf, "bootstrap.servers", ""); test_conf_set(conf, "socket.timeout.ms", MY_SOCKET_TIMEOUT_MS_STR); /* For use with the background queue */ rd_kafka_conf_set_background_event_cb(conf, background_event_cb); rk = rd_kafka_new(cltype, conf, errstr, sizeof(errstr)); TEST_ASSERT(rk, "kafka_new(%d): %s", cltype, errstr); mainq = rd_kafka_queue_get_main(rk); backgroundq = rd_kafka_queue_get_background(rk); do_test_options(rk); do_test_CreateTopics("temp queue, no options", rk, NULL, 0, 0); do_test_CreateTopics("temp queue, no options, background_event_cb", rk, backgroundq, 1, 0); do_test_CreateTopics("temp queue, options", rk, NULL, 0, 1); do_test_CreateTopics("main queue, options", rk, mainq, 0, 1); do_test_DeleteTopics("temp queue, no options", rk, NULL, 0); do_test_DeleteTopics("temp queue, options", rk, NULL, 1); do_test_DeleteTopics("main queue, options", rk, mainq, 1); do_test_mix(rk, mainq); do_test_configs(rk, mainq); rd_kafka_queue_destroy(backgroundq); rd_kafka_queue_destroy(mainq); rd_kafka_destroy(rk); mtx_destroy(&last_event_lock); cnd_destroy(&last_event_cnd); }
static PyObject *Consumer_close (Handle *self, PyObject *ignore) {
        CallState cs;

        if (!self->rk) {
                PyErr_SetString(PyExc_RuntimeError, "Consumer already closed");
                return NULL;
        }

        CallState_begin(self, &cs);

        rd_kafka_consumer_close(self->rk);

        if (self->u.Consumer.rkqu) {
                rd_kafka_queue_destroy(self->u.Consumer.rkqu);
                self->u.Consumer.rkqu = NULL;
        }

        rd_kafka_destroy(self->rk);
        self->rk = NULL;

        if (!CallState_end(self, &cs))
                return NULL;

        Py_RETURN_NONE;
}
static void do_test_non_exist_and_partchange (void) { char *topic_a = rd_strdup(test_mk_topic_name("topic_a", 1)); rd_kafka_t *rk; rd_kafka_conf_t *conf; rd_kafka_queue_t *queue; /** * Test #1: * - Subscribe to non-existing topic. * - Verify empty assignment * - Create topic * - Verify new assignment containing topic */ TEST_SAY("#1 & #2 testing\n"); test_conf_init(&conf, NULL, 60); /* Decrease metadata interval to speed up topic change discovery. */ test_conf_set(conf, "metadata.max.age.ms", "5000"); rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE); rk = test_create_consumer(test_str_id_generate_tmp(), NULL, conf, NULL, NULL); queue = rd_kafka_queue_get_consumer(rk); TEST_SAY("#1: Subscribing to %s\n", topic_a); test_consumer_subscribe(rk, topic_a); /* Should not see a rebalance since no topics are matched. */ await_no_rebalance("#1: empty", rk, queue, 10000); TEST_SAY("#1: creating topic %s\n", topic_a); test_create_topic(topic_a, 2, 1); await_assignment("#1: proper", rk, queue, 1, topic_a, 2); /** * Test #2 (continue with #1 consumer) * - Increase the partition count * - Verify updated assignment */ test_kafka_topics("--alter --topic %s --partitions 4", topic_a); await_revoke("#2", rk, queue); await_assignment("#2: more partitions", rk, queue, 1, topic_a, 4); test_consumer_close(rk); rd_kafka_queue_destroy(queue); rd_kafka_destroy(rk); rd_free(topic_a); }
static void do_test_apis (rd_kafka_type_t cltype) {
        rd_kafka_t *rk;
        rd_kafka_conf_t *conf;
        rd_kafka_queue_t *mainq;

        /* Get the available brokers, but use a separate rd_kafka_t instance
         * so we don't jinx the tests by having up-to-date metadata. */
        avail_brokers = test_get_broker_ids(NULL, &avail_broker_cnt);
        TEST_SAY("%"PRIusz" brokers in cluster "
                 "which will be used for replica sets\n",
                 avail_broker_cnt);

        do_test_unclean_destroy(cltype, 0/*tempq*/);
        do_test_unclean_destroy(cltype, 1/*mainq*/);

        test_conf_init(&conf, NULL, 60);
        test_conf_set(conf, "socket.timeout.ms", "10000");
        rk = test_create_handle(cltype, conf);

        mainq = rd_kafka_queue_get_main(rk);

        /* Create topics */
        do_test_CreateTopics("temp queue, op timeout 0", rk, NULL, 0, 0);
        do_test_CreateTopics("temp queue, op timeout 15000", rk, NULL,
                             15000, 0);
        do_test_CreateTopics("temp queue, op timeout 300, "
                             "validate only", rk, NULL, 300, rd_true);
        do_test_CreateTopics("temp queue, op timeout 9000, validate_only",
                             rk, NULL, 9000, rd_true);
        do_test_CreateTopics("main queue, options", rk, mainq, -1, 0);

        /* Delete topics */
        do_test_DeleteTopics("temp queue, op timeout 0", rk, NULL, 0);
        do_test_DeleteTopics("main queue, op timeout 1500", rk, mainq, 1500);

        /* Create Partitions */
        do_test_CreatePartitions("temp queue, op timeout 6500", rk, NULL,
                                 6500);
        do_test_CreatePartitions("main queue, op timeout 0", rk, mainq, 0);

        /* AlterConfigs */
        do_test_AlterConfigs(rk, mainq);

        /* DescribeConfigs */
        do_test_DescribeConfigs(rk, mainq);

        rd_kafka_queue_destroy(mainq);

        rd_kafka_destroy(rk);

        free(avail_brokers);
}
static int partition_consume (void *args) { part_consume_info_t *info = (part_consume_info_t *)args; rd_kafka_queue_t *rkqu = info->rkqu; int partition = info->partition; int64_t ts_start = test_clock(); int max_time = (test_session_timeout_ms + 3000) * 1000; int running = 1; free(args); /* Free the parameter struct dynamically allocated for us */ while (ts_start + max_time > test_clock() && running && is_consuming()) { rd_kafka_message_t *rkmsg; rkmsg = rd_kafka_consume_queue(rkqu, 500); if (!rkmsg) continue; else if (rkmsg->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) running = 0; else if (rkmsg->err) { mtx_lock(&lock); TEST_FAIL("Message error " "(at offset %" PRId64 " after " "%d/%d messages and %dms): %s", rkmsg->offset, consumed_msg_cnt, exp_msg_cnt, (int)(test_clock() - ts_start) / 1000, rd_kafka_message_errstr(rkmsg)); mtx_unlock(&lock); } else { if (rkmsg->partition != partition) { mtx_lock(&lock); TEST_FAIL("Message consumed has partition %d " "but we expected partition %d.", rkmsg->partition, partition); mtx_unlock(&lock); } } rd_kafka_message_destroy(rkmsg); mtx_lock(&lock); if (running && ++consumed_msg_cnt >= exp_msg_cnt) { TEST_SAY("All messages consumed\n"); running = 0; } mtx_unlock(&lock); } rd_kafka_queue_destroy(rkqu); return thrd_success; }
/** * @brief Local test: test event generation */ int main_0039_event (int argc, char **argv) { rd_kafka_t *rk; rd_kafka_conf_t *conf; rd_kafka_queue_t *eventq; int waitevent = 1; /* Set up a config with ERROR events enabled and * configure an invalid broker so that _TRANSPORT or ALL_BROKERS_DOWN * is promptly generated. */ conf = rd_kafka_conf_new(); rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_ERROR); rd_kafka_conf_set(conf, "bootstrap.servers", "0:65534", NULL, 0); /* Create kafka instance */ rk = test_create_handle(RD_KAFKA_PRODUCER, conf); eventq = rd_kafka_queue_get_main(rk); while (waitevent) { rd_kafka_event_t *rkev; rkev = rd_kafka_queue_poll(eventq, 1000); switch (rd_kafka_event_type(rkev)) { case RD_KAFKA_EVENT_ERROR: TEST_SAY("Got %s%s event: %s: %s\n", rd_kafka_event_error_is_fatal(rkev) ? "FATAL " : "", rd_kafka_event_name(rkev), rd_kafka_err2name(rd_kafka_event_error(rkev)), rd_kafka_event_error_string(rkev)); waitevent = 0; break; default: TEST_SAY("Unhandled event: %s\n", rd_kafka_event_name(rkev)); break; } rd_kafka_event_destroy(rkev); } rd_kafka_queue_destroy(eventq); /* Destroy rdkafka instance */ TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); rd_kafka_destroy(rk); return 0; }
/** * @brief Verify that an unclean rd_kafka_destroy() does not hang. */ static void do_test_unclean_destroy (rd_kafka_type_t cltype, int with_mainq) { rd_kafka_t *rk; char errstr[512]; rd_kafka_conf_t *conf; rd_kafka_queue_t *q; rd_kafka_event_t *rkev; rd_kafka_DeleteTopic_t *topic; test_timing_t t_destroy; test_conf_init(&conf, NULL, 0); /* Remove brokers, if any, since this is a local test and we * rely on the controller not being found. */ test_conf_set(conf, "bootstrap.servers", ""); test_conf_set(conf, "socket.timeout.ms", "60000"); rk = rd_kafka_new(cltype, conf, errstr, sizeof(errstr)); TEST_ASSERT(rk, "kafka_new(%d): %s", cltype, errstr); TEST_SAY(_C_MAG "[ Test unclean destroy for %s using %s]\n", rd_kafka_name(rk), with_mainq ? "mainq" : "tempq"); if (with_mainq) q = rd_kafka_queue_get_main(rk); else q = rd_kafka_queue_new(rk); topic = rd_kafka_DeleteTopic_new("test"); rd_kafka_DeleteTopics(rk, &topic, 1, NULL, q); rd_kafka_DeleteTopic_destroy(topic); /* We're not expecting a result yet since DeleteTopics will attempt * to look up the controller for socket.timeout.ms (1 minute). */ rkev = rd_kafka_queue_poll(q, 100); TEST_ASSERT(!rkev, "Did not expect result: %s", rd_kafka_event_name(rkev)); rd_kafka_queue_destroy(q); TEST_SAY("Giving rd_kafka_destroy() 5s to finish, " "despite Admin API request being processed\n"); test_timeout_set(5); TIMING_START(&t_destroy, "rd_kafka_destroy()"); rd_kafka_destroy(rk); TIMING_STOP(&t_destroy); /* Restore timeout */ test_timeout_set(60); }
static void Consumer_clear0 (Handle *self) {
        if (self->u.Consumer.on_assign) {
                Py_DECREF(self->u.Consumer.on_assign);
                self->u.Consumer.on_assign = NULL;
        }
        if (self->u.Consumer.on_revoke) {
                Py_DECREF(self->u.Consumer.on_revoke);
                self->u.Consumer.on_revoke = NULL;
        }
        if (self->u.Consumer.on_commit) {
                Py_DECREF(self->u.Consumer.on_commit);
                self->u.Consumer.on_commit = NULL;
        }
        if (self->u.Consumer.rkqu) {
                rd_kafka_queue_destroy(self->u.Consumer.rkqu);
                self->u.Consumer.rkqu = NULL;
        }
}
static PyObject *
RdkHandle_stop(RdkHandle *self)
{
    /* We'll only ever get a locking error if we programmed ourselves into a
     * deadlock.  We'd have to admit defeat, abort, and leak this RdkHandle */
    if (RdkHandle_excl_lock(self)) return NULL;

    Py_BEGIN_ALLOW_THREADS  /* avoid callbacks deadlocking */
    if (self->rdk_queue_handle) {
        rd_kafka_queue_destroy(self->rdk_queue_handle);
        self->rdk_queue_handle = NULL;
    }
    if (self->rdk_topic_handle) {
        rd_kafka_topic_destroy(self->rdk_topic_handle);
        self->rdk_topic_handle = NULL;
    }
    if (self->rdk_handle) {
        PyObject *opaque = (PyObject *)rd_kafka_opaque(self->rdk_handle);
        Py_XDECREF(opaque);
        rd_kafka_destroy(self->rdk_handle);
        self->rdk_handle = NULL;
    }
    if (self->rdk_conf) {
        rd_kafka_conf_destroy(self->rdk_conf);
        self->rdk_conf = NULL;
    }
    if (self->rdk_topic_conf) {
        rd_kafka_topic_conf_destroy(self->rdk_topic_conf);
        self->rdk_topic_conf = NULL;
    }
    Py_END_ALLOW_THREADS

    Py_CLEAR(self->partition_ids);

    if (RdkHandle_unlock(self)) return NULL;
    Py_INCREF(Py_None);
    return Py_None;
}
/**
 * @brief Verify that an unclean rd_kafka_destroy() does not hang.
 */
static void do_test_unclean_destroy (rd_kafka_type_t cltype, int with_mainq) {
        rd_kafka_t *rk;
        char errstr[512];
        rd_kafka_conf_t *conf;
        rd_kafka_queue_t *q;
        rd_kafka_NewTopic_t *topic;
        test_timing_t t_destroy;

        test_conf_init(&conf, NULL, 0);

        rk = rd_kafka_new(cltype, conf, errstr, sizeof(errstr));
        TEST_ASSERT(rk, "kafka_new(%d): %s", cltype, errstr);

        TEST_SAY(_C_MAG "[ Test unclean destroy for %s using %s]\n",
                 rd_kafka_name(rk), with_mainq ? "mainq" : "tempq");

        if (with_mainq)
                q = rd_kafka_queue_get_main(rk);
        else
                q = rd_kafka_queue_new(rk);

        topic = rd_kafka_NewTopic_new(test_mk_topic_name(__FUNCTION__, 1),
                                      3, 1, NULL, 0);
        rd_kafka_CreateTopics(rk, &topic, 1, NULL, q);
        rd_kafka_NewTopic_destroy(topic);

        rd_kafka_queue_destroy(q);

        TEST_SAY("Giving rd_kafka_destroy() 5s to finish, "
                 "despite Admin API request being processed\n");
        test_timeout_set(5);

        TIMING_START(&t_destroy, "rd_kafka_destroy()");
        rd_kafka_destroy(rk);
        TIMING_STOP(&t_destroy);

        /* Restore timeout */
        test_timeout_set(60);
}
/** * @brief CreateTopics tests * * * */ static void do_test_CreateTopics (const char *what, rd_kafka_t *rk, rd_kafka_queue_t *useq, int with_background_event_cb, int with_options) { rd_kafka_queue_t *q = useq ? useq : rd_kafka_queue_new(rk); #define MY_NEW_TOPICS_CNT 6 rd_kafka_NewTopic_t *new_topics[MY_NEW_TOPICS_CNT]; rd_kafka_AdminOptions_t *options = NULL; int exp_timeout = MY_SOCKET_TIMEOUT_MS; int i; char errstr[512]; const char *errstr2; rd_kafka_resp_err_t err; test_timing_t timing; rd_kafka_event_t *rkev; const rd_kafka_CreateTopics_result_t *res; const rd_kafka_topic_result_t **restopics; size_t restopic_cnt; void *my_opaque = NULL, *opaque; TEST_SAY(_C_MAG "[ %s CreateTopics with %s, timeout %dms ]\n", rd_kafka_name(rk), what, exp_timeout); /** * Construct NewTopic array with different properties for * different partitions. */ for (i = 0 ; i < MY_NEW_TOPICS_CNT ; i++) { const char *topic = test_mk_topic_name(__FUNCTION__, 1); int num_parts = i * 51 + 1; int num_replicas = jitter(1, MY_NEW_TOPICS_CNT-1); int set_config = (i & 2); int set_replicas = !(i % 1); new_topics[i] = rd_kafka_NewTopic_new(topic, num_parts, set_replicas ? -1 : num_replicas, NULL, 0); if (set_config) { /* * Add various (unverified) configuration properties */ err = rd_kafka_NewTopic_set_config(new_topics[i], "dummy.doesntexist", "butThere'sNothing " "to verify that"); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); err = rd_kafka_NewTopic_set_config(new_topics[i], "try.a.null.value", NULL); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); err = rd_kafka_NewTopic_set_config(new_topics[i], "or.empty", ""); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); } if (set_replicas) { int32_t p; int32_t replicas[MY_NEW_TOPICS_CNT]; int j; for (j = 0 ; j < num_replicas ; j++) replicas[j] = j; /* * Set valid replica assignments */ for (p = 0 ; p < num_parts ; p++) { /* Try adding an existing out of order, * should fail */ if (p == 1) { err = rd_kafka_NewTopic_set_replica_assignment( new_topics[i], p+1, replicas, num_replicas, errstr, sizeof(errstr)); TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG, "%s", rd_kafka_err2str(err)); } err = rd_kafka_NewTopic_set_replica_assignment( new_topics[i], p, replicas, num_replicas, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", errstr); } /* Try to add an existing partition, should fail */ err = rd_kafka_NewTopic_set_replica_assignment( new_topics[i], 0, replicas, num_replicas, NULL, 0); TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG, "%s", rd_kafka_err2str(err)); } else { int32_t dummy_replicas[1] = {1}; /* Test invalid partition */ err = rd_kafka_NewTopic_set_replica_assignment( new_topics[i], num_parts+1, dummy_replicas, 1, errstr, sizeof(errstr)); TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG, "%s: %s", rd_kafka_err2str(err), err == RD_KAFKA_RESP_ERR_NO_ERROR ? "" : errstr); /* Setting replicas with with default replicas != -1 * is an error. */ err = rd_kafka_NewTopic_set_replica_assignment( new_topics[i], 0, dummy_replicas, 1, errstr, sizeof(errstr)); TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG, "%s: %s", rd_kafka_err2str(err), err == RD_KAFKA_RESP_ERR_NO_ERROR ? 
"" : errstr); } } if (with_options) { options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY); exp_timeout = MY_SOCKET_TIMEOUT_MS * 2; err = rd_kafka_AdminOptions_set_request_timeout( options, exp_timeout, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); my_opaque = (void *)123; rd_kafka_AdminOptions_set_opaque(options, my_opaque); } TIMING_START(&timing, "CreateTopics"); TEST_SAY("Call CreateTopics, timeout is %dms\n", exp_timeout); rd_kafka_CreateTopics(rk, new_topics, MY_NEW_TOPICS_CNT, options, q); TIMING_ASSERT_LATER(&timing, 0, 50); if (with_background_event_cb) { /* Result event will be triggered by callback from * librdkafka background queue thread. */ TIMING_START(&timing, "CreateTopics.wait_background_event_cb"); rkev = wait_background_event_cb(); } else { /* Poll result queue */ TIMING_START(&timing, "CreateTopics.queue_poll"); rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); } TIMING_ASSERT_LATER(&timing, exp_timeout-100, exp_timeout+100); TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout); TEST_SAY("CreateTopics: got %s in %.3fs\n", rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); /* Convert event to proper result */ res = rd_kafka_event_CreateTopics_result(rkev); TEST_ASSERT(res, "expected CreateTopics_result, not %s", rd_kafka_event_name(rkev)); opaque = rd_kafka_event_opaque(rkev); TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p", my_opaque, opaque); /* Expecting error */ err = rd_kafka_event_error(rkev); errstr2 = rd_kafka_event_error_string(rkev); TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT, "expected CreateTopics to return error %s, not %s (%s)", rd_kafka_err2str(RD_KAFKA_RESP_ERR__TIMED_OUT), rd_kafka_err2str(err), err ? errstr2 : "n/a"); /* Attempt to extract topics anyway, should return NULL. */ restopics = rd_kafka_CreateTopics_result_topics(res, &restopic_cnt); TEST_ASSERT(!restopics && restopic_cnt == 0, "expected no result_topics, got %p cnt %"PRIusz, restopics, restopic_cnt); rd_kafka_event_destroy(rkev); rd_kafka_NewTopic_destroy_array(new_topics, MY_NEW_TOPICS_CNT); if (options) rd_kafka_AdminOptions_destroy(options); if (!useq) rd_kafka_queue_destroy(q); }
void kafka_consume_all(rd_kafka_t *rk, zval *return_value, const char *topic, const char *offset, int item_count) { char errstr[512]; rd_kafka_topic_t *rkt; rd_kafka_topic_conf_t *conf; const struct rd_kafka_metadata *meta = NULL; rd_kafka_queue_t *rkqu = NULL; int current, p, i = 0; int32_t partition = 0; int64_t start; struct consume_cb_params cb_params = {item_count, return_value, NULL, 0, 0, 0}; //check for NULL pointers, all arguments are required! if (rk == NULL || return_value == NULL || topic == NULL || offset == NULL || strlen(offset) == 0) return; if (!strcmp(offset, "end")) start = RD_KAFKA_OFFSET_END; else if (!strcmp(offset, "beginning")) start = RD_KAFKA_OFFSET_BEGINNING; else if (!strcmp(offset, "stored")) start = RD_KAFKA_OFFSET_STORED; else start = strtoll(offset, NULL, 10); /* Topic configuration */ conf = rd_kafka_topic_conf_new(); /* Disable autocommit, queue_consume sets offsets automatically */ if (rd_kafka_topic_conf_set(conf, "auto.commit.enable", "false", errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { if (log_level) { openlog("phpkafka", 0, LOG_USER); syslog( LOG_WARNING, "failed to turn autocommit off consuming %d messages (start offset %"PRId64") from topic %s: %s", item_count, start, topic, errstr ); } cb_params.auto_commit = 1; } /* Create topic */ rkt = rd_kafka_topic_new(rk, topic, conf); if (!rkt) { if (log_level) { openlog("phpkafka", 0, LOG_USER); syslog(LOG_INFO, "phpkafka - Failed to read %s from %"PRId64" (%s)", topic, start, offset); } return; } rkqu = rd_kafka_queue_new(rk); if (RD_KAFKA_RESP_ERR_NO_ERROR == rd_kafka_metadata(rk, 0, rkt, &meta, 5)) { p = meta->topics->partition_cnt; cb_params.partition_ends = calloc(sizeof *cb_params.partition_ends, p); if (cb_params.partition_ends == NULL) { if (log_level) { openlog("phpkafka", 0, LOG_USER); syslog(LOG_INFO, "phpkafka - Failed to read %s from %"PRId64" (%s)", topic, start, offset); } rd_kafka_metadata_destroy(meta); meta = NULL; rd_kafka_queue_destroy(rkqu); rd_kafka_topic_destroy(rkt); return; } cb_params.eop = p; for (i=0;i<p;++i) { partition = meta->topics[0].partitions[i].id; if (rd_kafka_consume_start_queue(rkt, partition, start, rkqu)) { if (log_level) { openlog("phpkafka", 0, LOG_USER); syslog(LOG_ERR, "Failed to start consuming topic %s [%"PRId32"]: %s", topic, partition, offset ); } continue; } } while(cb_params.read_count && cb_params.eop) rd_kafka_consume_callback_queue(rkqu, 200, queue_consume, &cb_params); free(cb_params.partition_ends); cb_params.partition_ends = NULL; for (i=0;i<p;++i) { partition = meta->topics[0].partitions[i].id; rd_kafka_consume_stop(rkt, partition); } rd_kafka_metadata_destroy(meta); meta = NULL; rd_kafka_queue_destroy(rkqu); while(rd_kafka_outq_len(rk) > 0) rd_kafka_poll(rk, 50); rd_kafka_topic_destroy(rkt); } if (meta) rd_kafka_metadata_destroy(meta); }
static void consume_messages_with_queues (uint64_t testid, const char *topic, int partition_cnt, int msgcnt) { rd_kafka_t *rk; rd_kafka_topic_t *rkt; rd_kafka_conf_t *conf; rd_kafka_topic_conf_t *topic_conf; rd_kafka_queue_t *rkqu; int i; int32_t partition; int batch_cnt = msgcnt / partition_cnt; test_conf_init(&conf, &topic_conf, 20); /* Create kafka instance */ rk = test_create_handle(RD_KAFKA_CONSUMER, conf); /* Create queue */ rkqu = rd_kafka_queue_new(rk); rkt = rd_kafka_topic_new(rk, topic, topic_conf); if (!rkt) TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno)); TEST_SAY("Consuming %i messages from one queue serving %i partitions\n", msgcnt, partition_cnt); /* Start consuming each partition */ for (partition = 0 ; partition < partition_cnt ; partition++) { /* Consume messages */ TEST_SAY("Start consuming partition %i at offset -%i\n", partition, batch_cnt); if (rd_kafka_consume_start_queue(rkt, partition, RD_KAFKA_OFFSET_TAIL(batch_cnt), rkqu) == -1) TEST_FAIL("consume_start_queue(%i) failed: %s", (int)partition, rd_kafka_err2str(rd_kafka_errno2err(errno))); } /* Consume messages from queue */ for (i = 0 ; i < msgcnt ; ) { rd_kafka_message_t *rkmessage; rkmessage = rd_kafka_consume_queue(rkqu, tmout_multip(5000)); if (!rkmessage) TEST_FAIL("Failed to consume message %i/%i from " "queue: %s", i, msgcnt, rd_kafka_err2str(rd_kafka_errno2err(errno))); if (rkmessage->err) { if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF){ TEST_SAY("Topic %s [%"PRId32"] reached " "EOF at offset %"PRId64"\n", rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, rkmessage->offset); rd_kafka_message_destroy(rkmessage); continue; } TEST_FAIL("Consume message %i/%i from queue " "has error (offset %"PRId64 ", partition %"PRId32"): %s", i, msgcnt, rkmessage->offset, rkmessage->partition, rd_kafka_err2str(rkmessage->err)); } verify_consumed_msg(testid, -1, -1, rkmessage); rd_kafka_message_destroy(rkmessage); i++; } /* Stop consuming each partition */ for (partition = 0 ; partition < partition_cnt ; partition++) rd_kafka_consume_stop(rkt, partition); /* Destroy queue */ rd_kafka_queue_destroy(rkqu); /* Destroy topic */ rd_kafka_topic_destroy(rkt); /* Destroy rdkafka instance */ TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); rd_kafka_destroy(rk); }
/* @remark This test will fail if auto topic creation is enabled on the broker * since the client will issue a topic-creating metadata request to find * a new leader when the topic is removed. * * To run with trivup, do: * ./interactive_broker_version.py .. -conf '{"auto_create_topics":"false"}' .. * TESTS=0045 ./run-test.sh -k ./merged */ static void do_test_topic_remove (void) { char *topic_f = rd_strdup(test_mk_topic_name("topic_f", 1)); char *topic_g = rd_strdup(test_mk_topic_name("topic_g", 1)); int parts_f = 5; int parts_g = 9; rd_kafka_t *rk; rd_kafka_conf_t *conf; rd_kafka_queue_t *queue; rd_kafka_topic_partition_list_t *topics; rd_kafka_resp_err_t err; /** * Topic removal test: * - Create topic f & g * - Subscribe to f & g * - Verify f & g assignment * - Remove topic f * - Verify g assignment * - Remove topic g * - Verify empty assignment */ TEST_SAY("Topic removal testing\n"); test_conf_init(&conf, NULL, 60); /* Decrease metadata interval to speed up topic change discovery. */ test_conf_set(conf, "metadata.max.age.ms", "5000"); rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE); rk = test_create_consumer(test_str_id_generate_tmp(), NULL, conf, NULL, NULL); queue = rd_kafka_queue_get_consumer(rk); TEST_SAY("Topic removal: creating topic %s (subscribed)\n", topic_f); test_create_topic(topic_f, parts_f, 1); TEST_SAY("Topic removal: creating topic %s (subscribed)\n", topic_g); test_create_topic(topic_g, parts_g, 1); rd_sleep(1); // FIXME: do check&wait loop instead TEST_SAY("Topic removal: Subscribing to %s & %s\n", topic_f, topic_g); topics = rd_kafka_topic_partition_list_new(2); rd_kafka_topic_partition_list_add(topics, topic_f, RD_KAFKA_PARTITION_UA); rd_kafka_topic_partition_list_add(topics, topic_g, RD_KAFKA_PARTITION_UA); err = rd_kafka_subscribe(rk, topics); TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NO_ERROR, "%s", rd_kafka_err2str(err)); rd_kafka_topic_partition_list_destroy(topics); await_assignment("Topic removal: both topics exist", rk, queue, 2, topic_f, parts_f, topic_g, parts_g); TEST_SAY("Topic removal: removing %s\n", topic_f); test_kafka_topics("--delete --topic %s", topic_f); await_revoke("Topic removal: rebalance after topic removal", rk, queue); await_assignment("Topic removal: one topic exists", rk, queue, 1, topic_g, parts_g); TEST_SAY("Topic removal: removing %s\n", topic_g); test_kafka_topics("--delete --topic %s", topic_g); await_revoke("Topic removal: rebalance after 2nd topic removal", rk, queue); /* Should not see another rebalance since all topics now removed */ await_no_rebalance("Topic removal: empty", rk, queue, 10000); test_consumer_close(rk); rd_kafka_queue_destroy(queue); rd_kafka_destroy(rk); rd_free(topic_f); rd_free(topic_g); }
/**
 * @brief Get all partitions for topic and their beginning offsets, useful
 * if we're consuming messages without knowing the actual partition beforehand
 * @param long **partitions should be pointer to NULL, will be allocated here
 * @param const char * topic topic name
 * @return int (0 == meta error, -2: no connection, -1: allocation error, all others indicate success (nr of elems in array))
 */
int kafka_partition_offsets(rd_kafka_t *r, long **partitions, const char *topic)
{
    rd_kafka_topic_t *rkt = NULL;
    rd_kafka_topic_conf_t *conf = NULL;
    rd_kafka_queue_t *rkqu = NULL;
    struct consume_cb_params cb_params = {0, NULL, NULL, 0, 0, 0};
    int i = 0;
    //make life easier, 1 level of indirection...
    long *values = *partitions;
    //connect as consumer if required
    if (r == NULL)
    {
        if (log_level)
        {
            openlog("phpkafka", 0, LOG_USER);
            syslog(LOG_ERR, "phpkafka - no connection to get offsets of topic: %s", topic);
        }
        return -2;
    }
    /* Topic configuration */
    conf = rd_kafka_topic_conf_new();
    /* Create topic */
    rkt = rd_kafka_topic_new(r, topic, conf);
    rkqu = rd_kafka_queue_new(r);
    const struct rd_kafka_metadata *meta = NULL;
    if (RD_KAFKA_RESP_ERR_NO_ERROR == rd_kafka_metadata(r, 0, rkt, &meta, 5))
    {
        values = realloc(values, meta->topics->partition_cnt * sizeof *values);
        if (values == NULL)
        {
            *partitions = values; //possible corrupted pointer now
            //free metadata, return error
            rd_kafka_metadata_destroy(meta);
            return -1;
        }
        //we need eop to reach 0, if there are 4 partitions, start at 3 (0, 1, 2, 3)
        cb_params.eop = meta->topics->partition_cnt - 1;
        cb_params.partition_offset = values;
        for (i = 0; i < meta->topics->partition_cnt; ++i)
        {
            //initialize: set to -2 for callback
            values[i] = -2;
            if (rd_kafka_consume_start_queue(rkt, meta->topics->partitions[i].id, RD_KAFKA_OFFSET_BEGINNING, rkqu))
            {
                if (log_level)
                {
                    openlog("phpkafka", 0, LOG_USER);
                    syslog(LOG_ERR,
                        "Failed to start consuming topic %s [%"PRId32"]",
                        topic, meta->topics->partitions[i].id
                    );
                }
                continue;
            }
        }
        //either eop reached 0, or the read errors >= nr of partitions
        //either way, we've consumed a message from each partition, and therefore, we're done
        while (cb_params.eop && cb_params.error_count < meta->topics->partition_cnt)
            rd_kafka_consume_callback_queue(rkqu, 100, offset_queue_consume, &cb_params);
        //stop consuming for all partitions
        for (i = 0; i < meta->topics->partition_cnt; ++i)
            rd_kafka_consume_stop(rkt, meta->topics[0].partitions[i].id);
        rd_kafka_queue_destroy(rkqu);
        //do we need this poll here?
        while (rd_kafka_outq_len(r) > 0)
            rd_kafka_poll(r, 5);
        //let's be sure to pass along the correct values here...
        *partitions = values;
        i = meta->topics->partition_cnt;
    }
    if (meta)
        rd_kafka_metadata_destroy(meta);
    rd_kafka_topic_destroy(rkt);
    return i;
}
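/* Usage sketch (not part of the original sources): the doc comment above
 * describes kafka_partition_offsets() returning -2 for no connection, -1 for
 * allocation failure, 0 for a metadata error, and otherwise the number of
 * partitions, with *partitions realloc'd to hold one beginning offset per
 * partition.  The caller below is hypothetical (`print_offsets` and the
 * already-connected handle `r` are assumptions), and it relies on stdio.h /
 * stdlib.h being available as in the surrounding extension code. */
static void print_offsets(rd_kafka_t *r, const char *topic)
{
    long *offsets = NULL;   /* must start out NULL, see doc comment above */
    int cnt = kafka_partition_offsets(r, &offsets, topic);
    int i;

    if (cnt < 1) {
        fprintf(stderr, "no offsets for topic %s (rc %d)\n", topic, cnt);
        free(offsets);      /* free(NULL) is a no-op */
        return;
    }
    for (i = 0; i < cnt; ++i)
        printf("%s partition #%d begins at offset %ld\n", topic, i, offsets[i]);
    free(offsets);
}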
/** * @brief Test delivery report events */ int main_0039_event_dr (int argc, char **argv) { int partition = 0; int r; rd_kafka_t *rk; rd_kafka_topic_t *rkt; rd_kafka_conf_t *conf; rd_kafka_topic_conf_t *topic_conf; char msg[128]; int msgcnt = test_on_ci ? 5000 : 50000; int i; test_timing_t t_produce, t_delivery; rd_kafka_queue_t *eventq; test_conf_init(&conf, &topic_conf, 10); /* Set delivery report callback */ rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_DR); /* Create kafka instance */ rk = test_create_handle(RD_KAFKA_PRODUCER, conf); eventq = rd_kafka_queue_get_main(rk); rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0005", 0), topic_conf); if (!rkt) TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno)); /* Produce messages */ TIMING_START(&t_produce, "PRODUCE"); for (i = 0 ; i < msgcnt ; i++) { int *msgidp = malloc(sizeof(*msgidp)); *msgidp = i; rd_snprintf(msg, sizeof(msg), "%s test message #%i", argv[0], i); r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, msg, strlen(msg), NULL, 0, msgidp); if (r == -1) TEST_FAIL("Failed to produce message #%i: %s\n", i, rd_strerror(errno)); } TIMING_STOP(&t_produce); TEST_SAY("Produced %i messages, waiting for deliveries\n", msgcnt); /* Wait for messages to be delivered */ TIMING_START(&t_delivery, "DELIVERY"); while (rd_kafka_outq_len(rk) > 0) { rd_kafka_event_t *rkev; rkev = rd_kafka_queue_poll(eventq, 1000); switch (rd_kafka_event_type(rkev)) { case RD_KAFKA_EVENT_DR: TEST_SAYL(3, "%s event with %zd messages\n", rd_kafka_event_name(rkev), rd_kafka_event_message_count(rkev)); handle_drs(rkev); break; default: TEST_SAY("Unhandled event: %s\n", rd_kafka_event_name(rkev)); break; } rd_kafka_event_destroy(rkev); } TIMING_STOP(&t_delivery); if (fails) TEST_FAIL("%i failures, see previous errors", fails); if (msgid_next != msgcnt) TEST_FAIL("Still waiting for messages: next %i != end %i\n", msgid_next, msgcnt); rd_kafka_queue_destroy(eventq); /* Destroy topic */ rd_kafka_topic_destroy(rkt); /* Destroy rdkafka instance */ TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); rd_kafka_destroy(rk); return 0; }
static void do_test_regex (void) { char *base_topic = rd_strdup(test_mk_topic_name("topic", 1)); char *topic_b = rd_strdup(tsprintf("%s_b", base_topic)); char *topic_c = rd_strdup(tsprintf("%s_c", base_topic)); char *topic_d = rd_strdup(tsprintf("%s_d", base_topic)); char *topic_e = rd_strdup(tsprintf("%s_e", base_topic)); rd_kafka_t *rk; rd_kafka_conf_t *conf; rd_kafka_queue_t *queue; /** * Regex test: * - Create topic b * - Subscribe to b & d & e * - Verify b assignment * - Create topic c * - Verify no rebalance * - Create topic d * - Verify b & d assignment */ TEST_SAY("Regex testing\n"); test_conf_init(&conf, NULL, 60); /* Decrease metadata interval to speed up topic change discovery. */ test_conf_set(conf, "metadata.max.age.ms", "5000"); rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE); rk = test_create_consumer(test_str_id_generate_tmp(), NULL, conf, NULL, NULL); queue = rd_kafka_queue_get_consumer(rk); TEST_SAY("Regex: creating topic %s (subscribed)\n", topic_b); test_create_topic(topic_b, 2, 1); rd_sleep(1); // FIXME: do check&wait loop instead TEST_SAY("Regex: Subscribing to %s & %s & %s\n", topic_b, topic_d, topic_e); test_consumer_subscribe(rk, tsprintf("^%s_[bde]$", base_topic)); await_assignment("Regex: just one topic exists", rk, queue, 1, topic_b, 2); TEST_SAY("Regex: creating topic %s (not subscribed)\n", topic_c); test_create_topic(topic_c, 4, 1); /* Should not see a rebalance since no topics are matched. */ await_no_rebalance("Regex: empty", rk, queue, 10000); TEST_SAY("Regex: creating topic %s (subscribed)\n", topic_d); test_create_topic(topic_d, 1, 1); await_revoke("Regex: rebalance after topic creation", rk, queue); await_assignment("Regex: two topics exist", rk, queue, 2, topic_b, 2, topic_d, 1); test_consumer_close(rk); rd_kafka_queue_destroy(queue); rd_kafka_destroy(rk); rd_free(base_topic); rd_free(topic_b); rd_free(topic_c); rd_free(topic_d); rd_free(topic_e); }
int main_0062_stats_event (int argc, char **argv) { rd_kafka_t *rk; rd_kafka_conf_t *conf; test_timing_t t_delivery; rd_kafka_queue_t *eventq; const int iterations = 5; int i; test_conf_init(NULL, NULL, 10); /* Set up a global config object */ conf = rd_kafka_conf_new(); rd_kafka_conf_set(conf,"statistics.interval.ms", "100", NULL, 0); rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_STATS); /* Create kafka instance */ rk = test_create_handle(RD_KAFKA_PRODUCER, conf); eventq = rd_kafka_queue_get_main(rk); /* Wait for stats event */ for (i = 0 ; i < iterations ; i++) { TIMING_START(&t_delivery, "STATS_EVENT"); stats_count = 0; while (stats_count == 0) { rd_kafka_event_t *rkev; rkev = rd_kafka_queue_poll(eventq, 100); switch (rd_kafka_event_type(rkev)) { case RD_KAFKA_EVENT_STATS: TEST_SAY("%s event\n", rd_kafka_event_name(rkev)); handle_stats(rkev); break; case RD_KAFKA_EVENT_NONE: break; default: TEST_SAY("Ignore event: %s\n", rd_kafka_event_name(rkev)); break; } rd_kafka_event_destroy(rkev); } TIMING_STOP(&t_delivery); if (!strcmp(test_mode, "bare")) { /* valgrind is too slow to make this meaningful. */ if (TIMING_DURATION(&t_delivery) < 1000 * 100 * 0.8 || TIMING_DURATION(&t_delivery) > 1000 * 100 * 1.2) TEST_FAIL("Stats duration %.3fms is >= 20%% " "outside statistics.interval.ms 100", (float)TIMING_DURATION(&t_delivery)/ 1000.0f); } } rd_kafka_queue_destroy(eventq); rd_kafka_destroy(rk); return 0; }
/** * @brief Test creation of partitions * * */ static void do_test_CreatePartitions (const char *what, rd_kafka_t *rk, rd_kafka_queue_t *useq, int op_timeout) { rd_kafka_queue_t *q = useq ? useq : rd_kafka_queue_new(rk); #define MY_CRP_TOPICS_CNT 9 char *topics[MY_CRP_TOPICS_CNT]; rd_kafka_NewTopic_t *new_topics[MY_CRP_TOPICS_CNT]; rd_kafka_NewPartitions_t *crp_topics[MY_CRP_TOPICS_CNT]; rd_kafka_AdminOptions_t *options = NULL; /* Expected topics in metadata */ rd_kafka_metadata_topic_t exp_mdtopics[MY_CRP_TOPICS_CNT] = {{0}}; rd_kafka_metadata_partition_t exp_mdparts[2] = {{0}}; int exp_mdtopic_cnt = 0; int i; char errstr[512]; rd_kafka_resp_err_t err; test_timing_t timing; int metadata_tmout; int num_replicas = (int)avail_broker_cnt; TEST_SAY(_C_MAG "[ %s CreatePartitions with %s, op_timeout %d ]\n", rd_kafka_name(rk), what, op_timeout); /* Set up two expected partitions with different replication sets * so they can be matched by the metadata checker later. * Even partitions use exp_mdparts[0] while odd partitions * use exp_mdparts[1]. */ /* Set valid replica assignments (even, and odd (reverse) ) */ exp_mdparts[0].replicas = rd_alloca(sizeof(*exp_mdparts[0].replicas) * num_replicas); exp_mdparts[1].replicas = rd_alloca(sizeof(*exp_mdparts[1].replicas) * num_replicas); exp_mdparts[0].replica_cnt = num_replicas; exp_mdparts[1].replica_cnt = num_replicas; for (i = 0 ; i < num_replicas ; i++) { exp_mdparts[0].replicas[i] = avail_brokers[i]; exp_mdparts[1].replicas[i] = avail_brokers[num_replicas-i-1]; } /** * Construct CreatePartitions array */ for (i = 0 ; i < MY_CRP_TOPICS_CNT ; i++) { char *topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); int initial_part_cnt = 1 + (i * 2); int new_part_cnt = 1 + (i / 2); int final_part_cnt = initial_part_cnt + new_part_cnt; int set_replicas = !(i % 2); int pi; topics[i] = topic; /* Topic to create with initial partition count */ new_topics[i] = rd_kafka_NewTopic_new(topic, initial_part_cnt, set_replicas ? -1 : num_replicas, NULL, 0); /* .. and later add more partitions to */ crp_topics[i] = rd_kafka_NewPartitions_new(topic, final_part_cnt, errstr, sizeof(errstr)); if (set_replicas) { exp_mdtopics[exp_mdtopic_cnt].partitions = rd_alloca(final_part_cnt * sizeof(*exp_mdtopics[exp_mdtopic_cnt]. partitions)); for (pi = 0 ; pi < final_part_cnt ; pi++) { const rd_kafka_metadata_partition_t *exp_mdp = &exp_mdparts[pi & 1]; exp_mdtopics[exp_mdtopic_cnt]. partitions[pi] = *exp_mdp; /* copy */ exp_mdtopics[exp_mdtopic_cnt]. partitions[pi].id = pi; if (pi < initial_part_cnt) { /* Set replica assignment * for initial partitions */ err = rd_kafka_NewTopic_set_replica_assignment( new_topics[i], pi, exp_mdp->replicas, (size_t)exp_mdp->replica_cnt, errstr, sizeof(errstr)); TEST_ASSERT(!err, "NewTopic_set_replica_assignment: %s", errstr); } else { /* Set replica assignment for new * partitions */ err = rd_kafka_NewPartitions_set_replica_assignment( crp_topics[i], pi - initial_part_cnt, exp_mdp->replicas, (size_t)exp_mdp->replica_cnt, errstr, sizeof(errstr)); TEST_ASSERT(!err, "NewPartitions_set_replica_assignment: %s", errstr); } } } TEST_SAY(_C_YEL "Topic %s with %d initial partitions will grow " "by %d to %d total partitions with%s replicas set\n", topics[i], initial_part_cnt, new_part_cnt, final_part_cnt, set_replicas ? 
"" : "out"); exp_mdtopics[exp_mdtopic_cnt].topic = topic; exp_mdtopics[exp_mdtopic_cnt].partition_cnt = final_part_cnt; exp_mdtopic_cnt++; } if (op_timeout != -1) { options = rd_kafka_AdminOptions_new( rk, RD_KAFKA_ADMIN_OP_ANY); err = rd_kafka_AdminOptions_set_operation_timeout( options, op_timeout, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); } /* * Create topics with initial partition count */ TIMING_START(&timing, "CreateTopics"); TEST_SAY("Creating topics with initial partition counts\n"); rd_kafka_CreateTopics(rk, new_topics, MY_CRP_TOPICS_CNT, options, q); TIMING_ASSERT_LATER(&timing, 0, 50); err = test_wait_topic_admin_result(q, RD_KAFKA_EVENT_CREATETOPICS_RESULT, NULL, 15000); TEST_ASSERT(!err, "CreateTopics failed: %s", rd_kafka_err2str(err)); rd_kafka_NewTopic_destroy_array(new_topics, MY_CRP_TOPICS_CNT); /* * Create new partitions */ TIMING_START(&timing, "CreatePartitions"); TEST_SAY("Creating partitions\n"); rd_kafka_CreatePartitions(rk, crp_topics, MY_CRP_TOPICS_CNT, options, q); TIMING_ASSERT_LATER(&timing, 0, 50); err = test_wait_topic_admin_result(q, RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT, NULL, 15000); TEST_ASSERT(!err, "CreatePartitions failed: %s", rd_kafka_err2str(err)); rd_kafka_NewPartitions_destroy_array(crp_topics, MY_CRP_TOPICS_CNT); /** * Verify that the expected topics are deleted and the non-expected * are not. Allow it some time to propagate. */ if (op_timeout > 0) metadata_tmout = op_timeout + 1000; else metadata_tmout = 10 * 1000; test_wait_metadata_update(rk, exp_mdtopics, exp_mdtopic_cnt, NULL, 0, metadata_tmout); for (i = 0 ; i < MY_CRP_TOPICS_CNT ; i++) rd_free(topics[i]); if (options) rd_kafka_AdminOptions_destroy(options); if (!useq) rd_kafka_queue_destroy(q); #undef MY_CRP_TOPICS_CNT }
/** * @brief Test deletion of topics * * */ static void do_test_DeleteTopics (const char *what, rd_kafka_t *rk, rd_kafka_queue_t *useq, int op_timeout) { rd_kafka_queue_t *q = useq ? useq : rd_kafka_queue_new(rk); const int skip_topic_cnt = 2; #define MY_DEL_TOPICS_CNT 9 char *topics[MY_DEL_TOPICS_CNT]; rd_kafka_DeleteTopic_t *del_topics[MY_DEL_TOPICS_CNT]; rd_kafka_AdminOptions_t *options = NULL; rd_kafka_resp_err_t exp_topicerr[MY_DEL_TOPICS_CNT] = {0}; rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; /* Expected topics in metadata */ rd_kafka_metadata_topic_t exp_mdtopics[MY_DEL_TOPICS_CNT] = {{0}}; int exp_mdtopic_cnt = 0; /* Not expected topics in metadata */ rd_kafka_metadata_topic_t exp_not_mdtopics[MY_DEL_TOPICS_CNT] = {{0}}; int exp_not_mdtopic_cnt = 0; int i; char errstr[512]; const char *errstr2; rd_kafka_resp_err_t err; test_timing_t timing; rd_kafka_event_t *rkev; const rd_kafka_DeleteTopics_result_t *res; const rd_kafka_topic_result_t **restopics; size_t restopic_cnt; int metadata_tmout; TEST_SAY(_C_MAG "[ %s DeleteTopics with %s, op_timeout %d ]\n", rd_kafka_name(rk), what, op_timeout); /** * Construct DeleteTopic array */ for (i = 0 ; i < MY_DEL_TOPICS_CNT ; i++) { char *topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); int notexist_topic = i >= MY_DEL_TOPICS_CNT - skip_topic_cnt; topics[i] = topic; del_topics[i] = rd_kafka_DeleteTopic_new(topic); if (notexist_topic) exp_topicerr[i] = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART; else { exp_topicerr[i] = RD_KAFKA_RESP_ERR_NO_ERROR; exp_mdtopics[exp_mdtopic_cnt++].topic = topic; } exp_not_mdtopics[exp_not_mdtopic_cnt++].topic = topic; } if (op_timeout != -1) { options = rd_kafka_AdminOptions_new( rk, RD_KAFKA_ADMIN_OP_ANY); err = rd_kafka_AdminOptions_set_operation_timeout( options, op_timeout, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); } /* Create the topics first, minus the skip count. */ test_CreateTopics_simple(rk, NULL, topics, MY_DEL_TOPICS_CNT-skip_topic_cnt, 2/*num_partitions*/, NULL); /* Verify that topics are reported by metadata */ test_wait_metadata_update(rk, exp_mdtopics, exp_mdtopic_cnt, NULL, 0, 15*1000); TIMING_START(&timing, "DeleteTopics"); TEST_SAY("Call DeleteTopics\n"); rd_kafka_DeleteTopics(rk, del_topics, MY_DEL_TOPICS_CNT, options, q); TIMING_ASSERT_LATER(&timing, 0, 50); /* Poll result queue for DeleteTopics result. * Print but otherwise ignore other event types * (typically generic Error events). */ TIMING_START(&timing, "DeleteTopics.queue_poll"); while (1) { rkev = rd_kafka_queue_poll(q, tmout_multip(20*1000)); TEST_SAY("DeleteTopics: got %s in %.3fms\n", rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); if (rd_kafka_event_error(rkev)) TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev), rd_kafka_event_error_string(rkev)); if (rd_kafka_event_type(rkev) == RD_KAFKA_EVENT_DELETETOPICS_RESULT) break; rd_kafka_event_destroy(rkev); } /* Convert event to proper result */ res = rd_kafka_event_DeleteTopics_result(rkev); TEST_ASSERT(res, "expected DeleteTopics_result, not %s", rd_kafka_event_name(rkev)); /* Expecting error */ err = rd_kafka_event_error(rkev); errstr2 = rd_kafka_event_error_string(rkev); TEST_ASSERT(err == exp_err, "expected DeleteTopics to return %s, not %s (%s)", rd_kafka_err2str(exp_err), rd_kafka_err2str(err), err ? errstr2 : "n/a"); TEST_SAY("DeleteTopics: returned %s (%s)\n", rd_kafka_err2str(err), err ? 
errstr2 : "n/a"); /* Extract topics */ restopics = rd_kafka_DeleteTopics_result_topics(res, &restopic_cnt); /* Scan topics for proper fields and expected failures. */ for (i = 0 ; i < (int)restopic_cnt ; i++) { const rd_kafka_topic_result_t *terr = restopics[i]; /* Verify that topic order matches our request. */ if (strcmp(rd_kafka_topic_result_name(terr), topics[i])) TEST_FAIL_LATER("Topic result order mismatch at #%d: " "expected %s, got %s", i, topics[i], rd_kafka_topic_result_name(terr)); TEST_SAY("DeleteTopics result: #%d: %s: %s: %s\n", i, rd_kafka_topic_result_name(terr), rd_kafka_err2name(rd_kafka_topic_result_error(terr)), rd_kafka_topic_result_error_string(terr)); if (rd_kafka_topic_result_error(terr) != exp_topicerr[i]) TEST_FAIL_LATER( "Expected %s, not %d: %s", rd_kafka_err2name(exp_topicerr[i]), rd_kafka_topic_result_error(terr), rd_kafka_err2name(rd_kafka_topic_result_error( terr))); } /** * Verify that the expected topics are deleted and the non-expected * are not. Allow it some time to propagate. */ if (op_timeout > 0) metadata_tmout = op_timeout + 1000; else metadata_tmout = 10 * 1000; test_wait_metadata_update(rk, NULL, 0, exp_not_mdtopics, exp_not_mdtopic_cnt, metadata_tmout); rd_kafka_event_destroy(rkev); for (i = 0 ; i < MY_DEL_TOPICS_CNT ; i++) { rd_kafka_DeleteTopic_destroy(del_topics[i]); rd_free(topics[i]); } if (options) rd_kafka_AdminOptions_destroy(options); if (!useq) rd_kafka_queue_destroy(q); #undef MY_DEL_TOPICS_CNT }
int main_0006_symbols (int argc, char **argv) { if (argc < 0 /* always false */) { rd_kafka_version(); rd_kafka_version_str(); rd_kafka_get_debug_contexts(); rd_kafka_get_err_descs(NULL, NULL); rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR); rd_kafka_err2name(RD_KAFKA_RESP_ERR_NO_ERROR); rd_kafka_errno2err(EINVAL); rd_kafka_errno(); rd_kafka_last_error(); rd_kafka_conf_new(); rd_kafka_conf_destroy(NULL); rd_kafka_conf_dup(NULL); rd_kafka_conf_set(NULL, NULL, NULL, NULL, 0); rd_kafka_conf_set_dr_cb(NULL, NULL); rd_kafka_conf_set_dr_msg_cb(NULL, NULL); rd_kafka_conf_set_error_cb(NULL, NULL); rd_kafka_conf_set_stats_cb(NULL, NULL); rd_kafka_conf_set_log_cb(NULL, NULL); rd_kafka_conf_set_socket_cb(NULL, NULL); rd_kafka_conf_set_rebalance_cb(NULL, NULL); rd_kafka_conf_set_offset_commit_cb(NULL, NULL); rd_kafka_conf_set_throttle_cb(NULL, NULL); rd_kafka_conf_set_default_topic_conf(NULL, NULL); rd_kafka_conf_get(NULL, NULL, NULL, NULL); #ifndef _MSC_VER rd_kafka_conf_set_open_cb(NULL, NULL); #endif rd_kafka_conf_set_opaque(NULL, NULL); rd_kafka_opaque(NULL); rd_kafka_conf_dump(NULL, NULL); rd_kafka_topic_conf_dump(NULL, NULL); rd_kafka_conf_dump_free(NULL, 0); rd_kafka_conf_properties_show(NULL); rd_kafka_topic_conf_new(); rd_kafka_topic_conf_dup(NULL); rd_kafka_topic_conf_destroy(NULL); rd_kafka_topic_conf_set(NULL, NULL, NULL, NULL, 0); rd_kafka_topic_conf_set_opaque(NULL, NULL); rd_kafka_topic_conf_get(NULL, NULL, NULL, NULL); rd_kafka_topic_conf_set_partitioner_cb(NULL, NULL); rd_kafka_topic_partition_available(NULL, 0); rd_kafka_topic_opaque(NULL); rd_kafka_msg_partitioner_random(NULL, NULL, 0, 0, NULL, NULL); rd_kafka_msg_partitioner_consistent(NULL, NULL, 0, 0, NULL, NULL); rd_kafka_msg_partitioner_consistent_random(NULL, NULL, 0, 0, NULL, NULL); rd_kafka_new(0, NULL, NULL, 0); rd_kafka_destroy(NULL); rd_kafka_name(NULL); rd_kafka_memberid(NULL); rd_kafka_topic_new(NULL, NULL, NULL); rd_kafka_topic_destroy(NULL); rd_kafka_topic_name(NULL); rd_kafka_message_destroy(NULL); rd_kafka_message_errstr(NULL); rd_kafka_message_timestamp(NULL, NULL); rd_kafka_consume_start(NULL, 0, 0); rd_kafka_consume_stop(NULL, 0); rd_kafka_consume(NULL, 0, 0); rd_kafka_consume_batch(NULL, 0, 0, NULL, 0); rd_kafka_consume_callback(NULL, 0, 0, NULL, NULL); rd_kafka_offset_store(NULL, 0, 0); rd_kafka_produce(NULL, 0, 0, NULL, 0, NULL, 0, NULL); rd_kafka_produce_batch(NULL, 0, 0, NULL, 0); rd_kafka_poll(NULL, 0); rd_kafka_brokers_add(NULL, NULL); /* DEPRECATED: rd_kafka_set_logger(NULL, NULL); */ rd_kafka_set_log_level(NULL, 0); rd_kafka_log_print(NULL, 0, NULL, NULL); #ifndef _MSC_VER rd_kafka_log_syslog(NULL, 0, NULL, NULL); #endif rd_kafka_outq_len(NULL); rd_kafka_dump(NULL, NULL); rd_kafka_thread_cnt(); rd_kafka_wait_destroyed(0); rd_kafka_metadata(NULL, 0, NULL, NULL, 0); rd_kafka_metadata_destroy(NULL); rd_kafka_queue_destroy(NULL); rd_kafka_consume_start_queue(NULL, 0, 0, NULL); rd_kafka_consume_queue(NULL, 0); rd_kafka_consume_batch_queue(NULL, 0, NULL, 0); rd_kafka_consume_callback_queue(NULL, 0, NULL, NULL); rd_kafka_seek(NULL, 0, 0, 0); rd_kafka_yield(NULL); rd_kafka_mem_free(NULL, NULL); rd_kafka_list_groups(NULL, NULL, NULL, 0); rd_kafka_group_list_destroy(NULL); /* KafkaConsumer API */ rd_kafka_subscribe(NULL, NULL); rd_kafka_unsubscribe(NULL); rd_kafka_subscription(NULL, NULL); rd_kafka_consumer_poll(NULL, 0); rd_kafka_consumer_close(NULL); rd_kafka_assign(NULL, NULL); rd_kafka_assignment(NULL, NULL); rd_kafka_commit(NULL, NULL, 0); rd_kafka_commit_message(NULL, NULL, 0); rd_kafka_committed(NULL, NULL, 
0); rd_kafka_position(NULL, NULL); /* TopicPartition */ rd_kafka_topic_partition_list_new(0); rd_kafka_topic_partition_list_destroy(NULL); rd_kafka_topic_partition_list_add(NULL, NULL, 0); rd_kafka_topic_partition_list_add_range(NULL, NULL, 0, 0); rd_kafka_topic_partition_list_del(NULL, NULL, 0); rd_kafka_topic_partition_list_del_by_idx(NULL, 0); rd_kafka_topic_partition_list_copy(NULL); rd_kafka_topic_partition_list_set_offset(NULL, NULL, 0, 0); rd_kafka_topic_partition_list_find(NULL, NULL, 0); rd_kafka_query_watermark_offsets(NULL, NULL, 0, NULL, NULL, 0); rd_kafka_get_watermark_offsets(NULL, NULL, 0, NULL, NULL); } return 0; }
/** * @brief DeleteTopics tests * * * */ static void do_test_DeleteTopics (const char *what, rd_kafka_t *rk, rd_kafka_queue_t *useq, int with_options) { rd_kafka_queue_t *q = useq ? useq : rd_kafka_queue_new(rk); #define MY_DEL_TOPICS_CNT 4 rd_kafka_DeleteTopic_t *del_topics[MY_DEL_TOPICS_CNT]; rd_kafka_AdminOptions_t *options = NULL; int exp_timeout = MY_SOCKET_TIMEOUT_MS; int i; char errstr[512]; const char *errstr2; rd_kafka_resp_err_t err; test_timing_t timing; rd_kafka_event_t *rkev; const rd_kafka_DeleteTopics_result_t *res; const rd_kafka_topic_result_t **restopics; size_t restopic_cnt; void *my_opaque = NULL, *opaque; TEST_SAY(_C_MAG "[ %s DeleteTopics with %s, timeout %dms ]\n", rd_kafka_name(rk), what, exp_timeout); for (i = 0 ; i < MY_DEL_TOPICS_CNT ; i++) del_topics[i] = rd_kafka_DeleteTopic_new(test_mk_topic_name(__FUNCTION__, 1)); if (with_options) { options = rd_kafka_AdminOptions_new( rk, RD_KAFKA_ADMIN_OP_DELETETOPICS); exp_timeout = MY_SOCKET_TIMEOUT_MS * 2; err = rd_kafka_AdminOptions_set_request_timeout( options, exp_timeout, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); if (useq) { my_opaque = (void *)456; rd_kafka_AdminOptions_set_opaque(options, my_opaque); } } TIMING_START(&timing, "DeleteTopics"); TEST_SAY("Call DeleteTopics, timeout is %dms\n", exp_timeout); rd_kafka_DeleteTopics(rk, del_topics, MY_DEL_TOPICS_CNT, options, q); TIMING_ASSERT_LATER(&timing, 0, 50); /* Poll result queue */ TIMING_START(&timing, "DeleteTopics.queue_poll"); rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); TIMING_ASSERT_LATER(&timing, exp_timeout-100, exp_timeout+100); TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout); TEST_SAY("DeleteTopics: got %s in %.3fs\n", rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); /* Convert event to proper result */ res = rd_kafka_event_DeleteTopics_result(rkev); TEST_ASSERT(res, "expected DeleteTopics_result, not %s", rd_kafka_event_name(rkev)); opaque = rd_kafka_event_opaque(rkev); TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p", my_opaque, opaque); /* Expecting error */ err = rd_kafka_event_error(rkev); errstr2 = rd_kafka_event_error_string(rkev); TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT, "expected DeleteTopics to return error %s, not %s (%s)", rd_kafka_err2str(RD_KAFKA_RESP_ERR__TIMED_OUT), rd_kafka_err2str(err), err ? errstr2 : "n/a"); /* Attempt to extract topics anyway, should return NULL. */ restopics = rd_kafka_DeleteTopics_result_topics(res, &restopic_cnt); TEST_ASSERT(!restopics && restopic_cnt == 0, "expected no result_topics, got %p cnt %"PRIusz, restopics, restopic_cnt); rd_kafka_event_destroy(rkev); rd_kafka_DeleteTopic_destroy_array(del_topics, MY_DEL_TOPICS_CNT); if (options) rd_kafka_AdminOptions_destroy(options); if (!useq) rd_kafka_queue_destroy(q); }
static void do_test_CreateTopics (const char *what, rd_kafka_t *rk, rd_kafka_queue_t *useq, int op_timeout, rd_bool_t validate_only) { rd_kafka_queue_t *q = useq ? useq : rd_kafka_queue_new(rk); #define MY_NEW_TOPICS_CNT 6 char *topics[MY_NEW_TOPICS_CNT]; rd_kafka_NewTopic_t *new_topics[MY_NEW_TOPICS_CNT]; rd_kafka_AdminOptions_t *options = NULL; rd_kafka_resp_err_t exp_topicerr[MY_NEW_TOPICS_CNT] = {0}; rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; /* Expected topics in metadata */ rd_kafka_metadata_topic_t exp_mdtopics[MY_NEW_TOPICS_CNT] = {{0}}; int exp_mdtopic_cnt = 0; /* Not expected topics in metadata */ rd_kafka_metadata_topic_t exp_not_mdtopics[MY_NEW_TOPICS_CNT] = {{0}}; int exp_not_mdtopic_cnt = 0; int i; char errstr[512]; const char *errstr2; rd_kafka_resp_err_t err; test_timing_t timing; rd_kafka_event_t *rkev; const rd_kafka_CreateTopics_result_t *res; const rd_kafka_topic_result_t **restopics; size_t restopic_cnt; int metadata_tmout ; int num_replicas = (int)avail_broker_cnt; int32_t *replicas; /* Set up replicas */ replicas = rd_alloca(sizeof(*replicas) * num_replicas); for (i = 0 ; i < num_replicas ; i++) replicas[i] = avail_brokers[i]; TEST_SAY(_C_MAG "[ %s CreateTopics with %s, " "op_timeout %d, validate_only %d ]\n", rd_kafka_name(rk), what, op_timeout, validate_only); /** * Construct NewTopic array with different properties for * different partitions. */ for (i = 0 ; i < MY_NEW_TOPICS_CNT ; i++) { char *topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); int num_parts = i * 7 + 1; int set_config = (i & 1); int add_invalid_config = (i == 1); int set_replicas = !(i % 3); rd_kafka_resp_err_t this_exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; topics[i] = topic; new_topics[i] = rd_kafka_NewTopic_new(topic, num_parts, set_replicas ? 
-1 : num_replicas, NULL, 0); if (set_config) { /* * Add various configuration properties */ err = rd_kafka_NewTopic_set_config( new_topics[i], "compression.type", "lz4"); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); err = rd_kafka_NewTopic_set_config( new_topics[i], "delete.retention.ms", "900"); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); } if (add_invalid_config) { /* Add invalid config property */ err = rd_kafka_NewTopic_set_config( new_topics[i], "dummy.doesntexist", "broker is verifying this"); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); this_exp_err = RD_KAFKA_RESP_ERR_INVALID_CONFIG; } TEST_SAY("Expected result for topic #%d: %s " "(set_config=%d, add_invalid_config=%d, " "set_replicas=%d)\n", i, rd_kafka_err2name(this_exp_err), set_config, add_invalid_config, set_replicas); if (set_replicas) { int32_t p; /* * Set valid replica assignments */ for (p = 0 ; p < num_parts ; p++) { err = rd_kafka_NewTopic_set_replica_assignment( new_topics[i], p, replicas, num_replicas, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", errstr); } } if (this_exp_err || validate_only) { exp_topicerr[i] = this_exp_err; exp_not_mdtopics[exp_not_mdtopic_cnt++].topic = topic; } else { exp_mdtopics[exp_mdtopic_cnt].topic = topic; exp_mdtopics[exp_mdtopic_cnt].partition_cnt = num_parts; exp_mdtopic_cnt++; } } if (op_timeout != -1 || validate_only) { options = rd_kafka_AdminOptions_new( rk, RD_KAFKA_ADMIN_OP_CREATETOPICS); if (op_timeout != -1) { err = rd_kafka_AdminOptions_set_operation_timeout( options, op_timeout, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); } if (validate_only) { err = rd_kafka_AdminOptions_set_validate_only( options, validate_only, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); } } TIMING_START(&timing, "CreateTopics"); TEST_SAY("Call CreateTopics\n"); rd_kafka_CreateTopics(rk, new_topics, MY_NEW_TOPICS_CNT, options, q); TIMING_ASSERT_LATER(&timing, 0, 50); /* Poll result queue for CreateTopics result. * Print but otherwise ignore other event types * (typically generic Error events). */ TIMING_START(&timing, "CreateTopics.queue_poll"); do { rkev = rd_kafka_queue_poll(q, tmout_multip(20*1000)); TEST_SAY("CreateTopics: got %s in %.3fms\n", rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); if (rd_kafka_event_error(rkev)) TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev), rd_kafka_event_error_string(rkev)); } while (rd_kafka_event_type(rkev) != RD_KAFKA_EVENT_CREATETOPICS_RESULT); /* Convert event to proper result */ res = rd_kafka_event_CreateTopics_result(rkev); TEST_ASSERT(res, "expected CreateTopics_result, not %s", rd_kafka_event_name(rkev)); /* Expecting error */ err = rd_kafka_event_error(rkev); errstr2 = rd_kafka_event_error_string(rkev); TEST_ASSERT(err == exp_err, "expected CreateTopics to return %s, not %s (%s)", rd_kafka_err2str(exp_err), rd_kafka_err2str(err), err ? errstr2 : "n/a"); TEST_SAY("CreateTopics: returned %s (%s)\n", rd_kafka_err2str(err), err ? errstr2 : "n/a"); /* Extract topics */ restopics = rd_kafka_CreateTopics_result_topics(res, &restopic_cnt); /* Scan topics for proper fields and expected failures. */ for (i = 0 ; i < (int)restopic_cnt ; i++) { const rd_kafka_topic_result_t *terr = restopics[i]; /* Verify that topic order matches our request. 
*/ if (strcmp(rd_kafka_topic_result_name(terr), topics[i])) TEST_FAIL_LATER("Topic result order mismatch at #%d: " "expected %s, got %s", i, topics[i], rd_kafka_topic_result_name(terr)); TEST_SAY("CreateTopics result: #%d: %s: %s: %s\n", i, rd_kafka_topic_result_name(terr), rd_kafka_err2name(rd_kafka_topic_result_error(terr)), rd_kafka_topic_result_error_string(terr)); if (rd_kafka_topic_result_error(terr) != exp_topicerr[i]) TEST_FAIL_LATER( "Expected %s, not %d: %s", rd_kafka_err2name(exp_topicerr[i]), rd_kafka_topic_result_error(terr), rd_kafka_err2name(rd_kafka_topic_result_error( terr))); } /** * Verify that the expected topics are created and the non-expected * are not. Allow it some time to propagate. */ if (validate_only) { /* No topics should have been created, give it some time * before checking. */ rd_sleep(2); metadata_tmout = 5 * 1000; } else { if (op_timeout > 0) metadata_tmout = op_timeout + 1000; else metadata_tmout = 10 * 1000; } test_wait_metadata_update(rk, exp_mdtopics, exp_mdtopic_cnt, exp_not_mdtopics, exp_not_mdtopic_cnt, metadata_tmout); rd_kafka_event_destroy(rkev); for (i = 0 ; i < MY_NEW_TOPICS_CNT ; i++) { rd_kafka_NewTopic_destroy(new_topics[i]); rd_free(topics[i]); } if (options) rd_kafka_AdminOptions_destroy(options); if (!useq) rd_kafka_queue_destroy(q); #undef MY_NEW_TOPICS_CNT }
static int do_test_consume_batch (void) { #define topic_cnt 2 const char *topics[topic_cnt] = { test_mk_topic_name(__FUNCTION__, 1), test_mk_topic_name(__FUNCTION__, 1) }; const int partition_cnt = 2; rd_kafka_t *rk; rd_kafka_queue_t *rkq; rd_kafka_topic_t *rkts[topic_cnt]; rd_kafka_resp_err_t err; const int msgcnt = 10000; uint64_t testid; int i, p; int batch_cnt = 0; int remains; testid = test_id_generate(); /* Produce messages */ for (i = 0 ; i < topic_cnt ; i++) { for (p = 0 ; p < partition_cnt ; p++) test_produce_msgs_easy(topics[i], testid, p, msgcnt / topic_cnt / partition_cnt); } /* Create simple consumer */ rk = test_create_consumer(NULL, NULL, NULL, NULL); /* Create generic consume queue */ rkq = rd_kafka_queue_new(rk); for (i = 0 ; i < topic_cnt ; i++) { /* Create topic object */ rkts[i] = test_create_topic(rk, topics[i], "auto.offset.reset", "smallest", NULL); /* Start consuming each partition and redirect * messages to queue */ TEST_SAY("Start consuming topic %s partitions 0..%d\n", rd_kafka_topic_name(rkts[i]), partition_cnt); for (p = 0 ; p < partition_cnt ; p++) { err = rd_kafka_consume_start_queue( rkts[i], p, RD_KAFKA_OFFSET_BEGINNING, rkq); if (err) TEST_FAIL("Failed to start consuming: %s\n", rd_kafka_err2str(err)); } } remains = msgcnt; /* Consume messages from common queue using batch interface. */ TEST_SAY("Consume %d messages from queue\n", remains); while (remains > 0) { rd_kafka_message_t *rkmessage[1000]; ssize_t r; test_timing_t t_batch; TIMING_START(&t_batch, "CONSUME.BATCH"); r = rd_kafka_consume_batch_queue(rkq, 1000, rkmessage, 1000); TIMING_STOP(&t_batch); TEST_SAY("Batch consume iteration #%d: Consumed %"PRIdsz "/1000 messages\n", batch_cnt, r); if (r == -1) TEST_FAIL("Failed to consume messages: %s\n", rd_kafka_err2str(rd_kafka_errno2err(errno))); remains -= r; for (i = 0 ; i < r ; i++) rd_kafka_message_destroy(rkmessage[i]); batch_cnt++; } TEST_SAY("Stopping consumer\n"); for (i = 0 ; i < topic_cnt ; i++) { for (p = 0 ; p < partition_cnt ; p++) { err = rd_kafka_consume_stop(rkts[i], p); if (err) TEST_FAIL("Failed to stop consuming: %s\n", rd_kafka_err2str(err)); } rd_kafka_topic_destroy(rkts[i]); } rd_kafka_queue_destroy(rkq); rd_kafka_destroy(rk); return 0; }
int main_0040_io_event (int argc, char **argv) { rd_kafka_conf_t *conf; rd_kafka_topic_conf_t *tconf; rd_kafka_t *rk_p, *rk_c; const char *topic; rd_kafka_topic_t *rkt_p; rd_kafka_queue_t *queue; uint64_t testid; int msgcnt = 100; int recvd = 0; int fds[2]; int wait_multiplier = 1; struct pollfd pfd; int r; enum { _NOPE, _YEP, _REBALANCE } expecting_io = _REBALANCE; testid = test_id_generate(); topic = test_mk_topic_name(__FUNCTION__, 1); rk_p = test_create_producer(); rkt_p = test_create_producer_topic(rk_p, topic, NULL); test_auto_create_topic_rkt(rk_p, rkt_p); test_conf_init(&conf, &tconf, 0); rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE); test_conf_set(conf, "session.timeout.ms", "6000"); test_conf_set(conf, "enable.partition.eof", "false"); /* Speed up propagation of new topics */ test_conf_set(conf, "metadata.max.age.ms", "5000"); test_topic_conf_set(tconf, "auto.offset.reset", "earliest"); rk_c = test_create_consumer(topic, NULL, conf, tconf); queue = rd_kafka_queue_get_consumer(rk_c); test_consumer_subscribe(rk_c, topic); #ifndef _MSC_VER r = pipe(fds); #else r = _pipe(fds, 2, _O_BINARY); #endif if (r == -1) TEST_FAIL("pipe() failed: %s\n", strerror(errno)); rd_kafka_queue_io_event_enable(queue, fds[1], "1", 1); pfd.fd = fds[0]; pfd.events = POLLIN; pfd.revents = 0; /** * 1) Wait for rebalance event * 2) Wait 1 interval (1s) expecting no IO (nothing produced). * 3) Produce half the messages * 4) Expect IO * 5) Consume the available messages * 6) Wait 1 interval expecting no IO. * 7) Produce remaining half * 8) Expect IO * 9) Done. */ while (recvd < msgcnt) { int r; #ifndef _MSC_VER r = poll(&pfd, 1, 1000 * wait_multiplier); #else r = WSAPoll(&pfd, 1, 1000 * wait_multiplier); #endif if (r == -1) { TEST_FAIL("poll() failed: %s", strerror(errno)); } else if (r == 1) { rd_kafka_event_t *rkev; char b; int eventcnt = 0; if (pfd.revents & POLLERR) TEST_FAIL("Poll error\n"); if (!(pfd.revents & POLLIN)) { TEST_SAY("Stray event 0x%x\n", (int)pfd.revents); continue; } TEST_SAY("POLLIN\n"); /* Read signaling token to purge socket queue and * eventually silence POLLIN */ #ifndef _MSC_VER r = read(pfd.fd, &b, 1); #else r = _read((int)pfd.fd, &b, 1); #endif if (r == -1) TEST_FAIL("read failed: %s\n", strerror(errno)); if (!expecting_io) TEST_WARN("Got unexpected IO after %d/%d msgs\n", recvd, msgcnt); while ((rkev = rd_kafka_queue_poll(queue, 0))) { eventcnt++; switch (rd_kafka_event_type(rkev)) { case RD_KAFKA_EVENT_REBALANCE: TEST_SAY("Got %s: %s\n", rd_kafka_event_name(rkev), rd_kafka_err2str(rd_kafka_event_error(rkev))); if (expecting_io != _REBALANCE) TEST_FAIL("Got Rebalance when expecting message\n"); if (rd_kafka_event_error(rkev) == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) { rd_kafka_assign(rk_c, rd_kafka_event_topic_partition_list(rkev)); expecting_io = _NOPE; } else rd_kafka_assign(rk_c, NULL); break; case RD_KAFKA_EVENT_FETCH: if (expecting_io != _YEP) TEST_FAIL("Did not expect more messages at %d/%d\n", recvd, msgcnt); recvd++; if (recvd == (msgcnt / 2) || recvd == msgcnt) expecting_io = _NOPE; break; case RD_KAFKA_EVENT_ERROR: TEST_FAIL("Error: %s\n", rd_kafka_event_error_string(rkev)); break; default: TEST_SAY("Ignoring event %s\n", rd_kafka_event_name(rkev)); } rd_kafka_event_destroy(rkev); } TEST_SAY("%d events, Consumed %d/%d messages\n", eventcnt, recvd, msgcnt); wait_multiplier = 1; } else { if (expecting_io == _REBALANCE) { continue; } else if (expecting_io == _YEP) { TEST_FAIL("Did not see expected IO after %d/%d msgs\n", recvd, msgcnt); } TEST_SAY("IO poll timeout (good)\n");
TEST_SAY("Got idle period, producing\n"); test_produce_msgs(rk_p, rkt_p, testid, 0, recvd, msgcnt/2, NULL, 10); expecting_io = _YEP; /* When running slowly (e.g., valgrind) it might take * some time before the first message is received * after producing. */ wait_multiplier = 3; } } TEST_SAY("Done\n"); rd_kafka_topic_destroy(rkt_p); rd_kafka_destroy(rk_p); rd_kafka_queue_destroy(queue); rd_kafka_consumer_close(rk_c); rd_kafka_destroy(rk_c); #ifndef _MSC_VER close(fds[0]); close(fds[1]); #else _close(fds[0]); _close(fds[1]); #endif return 0; }
static void consume_messages_with_queues (uint64_t testid, const char *topic, int partition_cnt, int msgcnt) { int r; rd_kafka_t *rk; rd_kafka_topic_t *rkt; rd_kafka_conf_t *conf; rd_kafka_topic_conf_t *topic_conf; rd_kafka_queue_t *rkqu; char errstr[512]; char msg[128]; int failcnt; int i; rd_kafka_message_t *rkmessages; int32_t partition; int cnt = 0; int batch_cnt = msgcnt / partition_cnt; test_conf_init(&conf, &topic_conf, 20); /* Create kafka instance */ rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr)); if (!rk) TEST_FAIL("Failed to create rdkafka instance: %s\n", errstr); TEST_SAY("Created kafka instance %s\n", rd_kafka_name(rk)); /* Create queue */ rkqu = rd_kafka_queue_new(rk); rkt = rd_kafka_topic_new(rk, topic, topic_conf); if (!rkt) TEST_FAIL("Failed to create topic: %s\n", strerror(errno)); TEST_SAY("Consuming %i messages from one queue serving %i partitions\n", msgcnt, partition_cnt); /* Start consuming each partition */ for (partition = 0 ; partition < partition_cnt ; partition++) { /* Consume messages */ TEST_SAY("Start consuming partition %i at offset -%i\n", partition, batch_cnt); if (rd_kafka_consume_start_queue(rkt, partition, RD_KAFKA_OFFSET_TAIL(batch_cnt), rkqu) == -1) TEST_FAIL("consume_start_queue(%i) failed: %s", (int)partition, rd_kafka_err2str(rd_kafka_errno2err(errno))); } /* Consume messages from queue */ for (i = 0 ; i < msgcnt ; i++) { rd_kafka_message_t *rkmessage; rkmessage = rd_kafka_consume_queue(rkqu, 5000); if (!rkmessage) TEST_FAIL("Failed to consume message %i/%i from " "queue: %s", i, msgcnt, rd_kafka_err2str(rd_kafka_errno2err(errno))); if (rkmessage->err) TEST_FAIL("Consume message %i/%i from queue " "has error (partition %"PRId32"): %s", i, msgcnt, rkmessage->partition, rd_kafka_err2str(rkmessage->err)); verify_consumed_msg(testid, -1, -1, rkmessage); rd_kafka_message_destroy(rkmessage); } /* Stop consuming each partition */ for (partition = 0 ; partition < partition_cnt ; partition++) rd_kafka_consume_stop(rkt, partition); /* Destroy queue */ rd_kafka_queue_destroy(rkqu); /* Destroy topic */ rd_kafka_topic_destroy(rkt); /* Destroy rdkafka instance */ TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); rd_kafka_destroy(rk); }