/**
 * Wait for REBALANCE ASSIGN event and perform assignment
 *
 * Va-args are \p topic_cnt tuples of the expected assignment:
 *   { const char *topic, int partition_cnt }
 */
static void await_assignment (const char *pfx, rd_kafka_t *rk,
                              rd_kafka_queue_t *queue,
                              int topic_cnt, ...) {
        rd_kafka_event_t *rkev;
        rd_kafka_topic_partition_list_t *tps;
        int i;
        va_list ap;
        int fails = 0;
        int exp_part_cnt = 0;

        TEST_SAY("%s: waiting for assignment\n", pfx);
        rkev = test_wait_event(queue, RD_KAFKA_EVENT_REBALANCE, 30000);
        if (!rkev)
                TEST_FAIL("timed out waiting for assignment");
        TEST_ASSERT(rd_kafka_event_error(rkev) ==
                    RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
                    "expected ASSIGN, got %s",
                    rd_kafka_err2str(rd_kafka_event_error(rkev)));
        tps = rd_kafka_event_topic_partition_list(rkev);

        TEST_SAY("%s: assignment:\n", pfx);
        test_print_partition_list(tps);

        va_start(ap, topic_cnt);
        for (i = 0 ; i < topic_cnt ; i++) {
                const char *topic = va_arg(ap, const char *);
                int partition_cnt = va_arg(ap, int);
                int p;
                TEST_SAY("%s: expecting %s with %d partitions\n",
                         pfx, topic, partition_cnt);
                for (p = 0 ; p < partition_cnt ; p++) {
                        if (!rd_kafka_topic_partition_list_find(tps,
                                                                topic, p)) {
                                TEST_FAIL_LATER("%s: expected partition "
                                                "%s [%d] not found in "
                                                "assignment",
                                                pfx, topic, p);
                                fails++;
                        }
                }
                exp_part_cnt += partition_cnt;
        }
        va_end(ap);

        TEST_ASSERT(exp_part_cnt == tps->cnt,
                    "expected assignment of %d partitions, got %d",
                    exp_part_cnt, tps->cnt);

        if (fails > 0)
                TEST_FAIL("%s: assignment mismatch: see above", pfx);

        rd_kafka_assign(rk, tps);
        rd_kafka_event_destroy(rkev);
}
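/* Illustrative usage sketch, not part of the original test: the topic
 * names and partition counts here are hypothetical. This blocks until a
 * REBALANCE ASSIGN event arrives, verifies that exactly 4+2 partitions
 * covering topic_a [0..3] and topic_b [0..1] were assigned, and then
 * performs the assignment. */
static void example_await_assignment (rd_kafka_t *rk,
                                      rd_kafka_queue_t *queue) {
        await_assignment("example consumer", rk, queue,
                         2 /* two (topic, partition_cnt) tuples follow */,
                         "topic_a", 4,
                         "topic_b", 2);
}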
static void dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage,
                       void *opaque) {
        msg_dr_cnt++;
        TEST_SAYL(3, "Delivery for message %.*s: %s\n",
                  (int)rkmessage->len, (const char *)rkmessage->payload,
                  rd_kafka_err2name(rkmessage->err));

        if (rkmessage->err) {
                TEST_FAIL_LATER("Expected message to succeed, got %s",
                                rd_kafka_err2str(rkmessage->err));
                msg_dr_fail_cnt++;
        }
}
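/* Hedged usage sketch (not part of the original test): the delivery
 * report callback must be registered on the conf object before the
 * producer handle is created; the conf/handle flow below is
 * illustrative only. */
static void example_register_dr_msg_cb (void) {
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        rd_kafka_t *rk;

        /* Register the delivery report callback defined above.
         * It will be invoked once per produced message from
         * rd_kafka_poll()/rd_kafka_flush(). */
        rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);

        /* test_create_handle() takes ownership of conf. */
        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
        rd_kafka_destroy(rk);
}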
int main_0004_conf (int argc, char **argv) {
        rd_kafka_t *rk;
        rd_kafka_topic_t *rkt;
        rd_kafka_conf_t *ignore_conf, *conf, *conf2;
        rd_kafka_topic_conf_t *ignore_topic_conf, *tconf, *tconf2;
        char errstr[512];
        const char **arr_orig, **arr_dup;
        size_t cnt_orig, cnt_dup;
        int i;
        const char *topic;
        static const char *gconfs[] = {
                "message.max.bytes", "12345", /* int property */
                "client.id", "my id",         /* string property */
                "debug", "topic,metadata",    /* S2F property */
                "topic.blacklist", "__.*",    /* #778 */
                "auto.offset.reset", "earliest", /* Global->Topic fallthru */
#if WITH_ZLIB
                "compression.codec", "gzip",  /* S2I property */
#endif
                NULL
        };
        static const char *tconfs[] = {
                "request.required.acks", "-1", /* int */
                "auto.commit.enable", "false", /* bool */
                "auto.offset.reset", "error",  /* S2I */
                "offset.store.path", "my/path", /* string */
                NULL
        };

        test_conf_init(&ignore_conf, &ignore_topic_conf, 10);
        rd_kafka_conf_destroy(ignore_conf);
        rd_kafka_topic_conf_destroy(ignore_topic_conf);

        topic = test_mk_topic_name("0004", 0);

        /* Set up a global config object */
        conf = rd_kafka_conf_new();

        rd_kafka_conf_set_dr_cb(conf, dr_cb);
        rd_kafka_conf_set_error_cb(conf, error_cb);

        for (i = 0 ; gconfs[i] ; i += 2) {
                if (rd_kafka_conf_set(conf, gconfs[i], gconfs[i+1],
                                      errstr, sizeof(errstr)) !=
                    RD_KAFKA_CONF_OK)
                        TEST_FAIL("%s\n", errstr);
        }

        /* Set up a topic config object */
        tconf = rd_kafka_topic_conf_new();

        rd_kafka_topic_conf_set_partitioner_cb(tconf, partitioner);
        rd_kafka_topic_conf_set_opaque(tconf, (void *)0xbeef);

        for (i = 0 ; tconfs[i] ; i += 2) {
                if (rd_kafka_topic_conf_set(tconf, tconfs[i], tconfs[i+1],
                                            errstr, sizeof(errstr)) !=
                    RD_KAFKA_CONF_OK)
                        TEST_FAIL("%s\n", errstr);
        }

        /* Verify global config */
        arr_orig = rd_kafka_conf_dump(conf, &cnt_orig);
        conf_verify(__LINE__, arr_orig, cnt_orig, gconfs);

        /* Verify copied global config */
        conf2 = rd_kafka_conf_dup(conf);
        arr_dup = rd_kafka_conf_dump(conf2, &cnt_dup);
        conf_verify(__LINE__, arr_dup, cnt_dup, gconfs);

        conf_cmp("global", arr_orig, cnt_orig, arr_dup, cnt_dup);
        rd_kafka_conf_dump_free(arr_orig, cnt_orig);
        rd_kafka_conf_dump_free(arr_dup, cnt_dup);

        /* Verify topic config */
        arr_orig = rd_kafka_topic_conf_dump(tconf, &cnt_orig);
        conf_verify(__LINE__, arr_orig, cnt_orig, tconfs);

        /* Verify copied topic config */
        tconf2 = rd_kafka_topic_conf_dup(tconf);
        arr_dup = rd_kafka_topic_conf_dump(tconf2, &cnt_dup);
        conf_verify(__LINE__, arr_dup, cnt_dup, tconfs);

        conf_cmp("topic", arr_orig, cnt_orig, arr_dup, cnt_dup);
        rd_kafka_conf_dump_free(arr_orig, cnt_orig);
        rd_kafka_conf_dump_free(arr_dup, cnt_dup);

        /*
         * Create kafka instances using original and copied confs
         */

        /* original */
        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

        rkt = rd_kafka_topic_new(rk, topic, tconf);
        if (!rkt)
                TEST_FAIL("Failed to create topic: %s\n",
                          rd_strerror(errno));

        rd_kafka_topic_destroy(rkt);
        rd_kafka_destroy(rk);

        /* copied */
        rk = test_create_handle(RD_KAFKA_PRODUCER, conf2);

        rkt = rd_kafka_topic_new(rk, topic, tconf2);
        if (!rkt)
                TEST_FAIL("Failed to create topic: %s\n",
                          rd_strerror(errno));

        rd_kafka_topic_destroy(rkt);
        rd_kafka_destroy(rk);

        /* Incremental S2F property.
         * NOTE: The order of fields returned in get() is hardcoded here. */
        {
                static const char *s2fs[] = {
                        "generic,broker,queue,cgrp",
                        "generic,broker,queue,cgrp",

                        "-broker,+queue,topic",
                        "generic,topic,queue,cgrp",

                        "-all,security,-fetch,+metadata",
                        "metadata,security",

                        NULL
                };

                TEST_SAY("Incremental S2F tests\n");
                conf = rd_kafka_conf_new();

                for (i = 0 ; s2fs[i] ; i += 2) {
                        const char *val;

                        TEST_SAY("  Set: %s\n", s2fs[i]);
                        test_conf_set(conf, "debug", s2fs[i]);
                        val = test_conf_get(conf, "debug");
                        TEST_SAY("  Now: %s\n", val);

                        if (strcmp(val, s2fs[i+1]))
                                TEST_FAIL_LATER("\n"
                                                "Expected: %s\n"
                                                "     Got: %s",
                                                s2fs[i+1], val);
                }

                rd_kafka_conf_destroy(conf);
        }

        /* Canonical int values, aliases, s2i-verified strings */
        {
                static const struct {
                        const char *prop;
                        const char *val;
                        const char *exp;
                        int is_global;
                } props[] = {
                        { "request.required.acks", "0", "0" },
                        { "request.required.acks", "-1", "-1" },
                        { "request.required.acks", "1", "1" },
                        { "acks", "3", "3" }, /* alias test */
                        { "request.required.acks", "393", "393" },
                        { "request.required.acks", "bad", NULL },
                        { "request.required.acks", "all", "-1" },
                        { "request.required.acks", "all", "-1",
                          1/*fallthru*/ },
                        { "acks", "0", "0" }, /* alias test */
#if WITH_SASL
                        { "sasl.mechanisms", "GSSAPI", "GSSAPI", 1 },
                        { "sasl.mechanisms", "PLAIN", "PLAIN", 1 },
                        { "sasl.mechanisms", "GSSAPI,PLAIN", NULL, 1 },
                        { "sasl.mechanisms", "", NULL, 1 },
#endif
                        { NULL }
                };

                TEST_SAY("Canonical tests\n");
                tconf = rd_kafka_topic_conf_new();
                conf = rd_kafka_conf_new();

                for (i = 0 ; props[i].prop ; i++) {
                        char dest[64];
                        size_t destsz;
                        rd_kafka_conf_res_t res;

                        TEST_SAY("  Set: %s=%s expect %s (%s)\n",
                                 props[i].prop, props[i].val, props[i].exp,
                                 props[i].is_global ? "global" : "topic");

                        /* Set value */
                        if (props[i].is_global)
                                res = rd_kafka_conf_set(conf,
                                                        props[i].prop,
                                                        props[i].val,
                                                        errstr,
                                                        sizeof(errstr));
                        else
                                res = rd_kafka_topic_conf_set(
                                        tconf,
                                        props[i].prop, props[i].val,
                                        errstr, sizeof(errstr));
                        if ((res == RD_KAFKA_CONF_OK ? 1:0) !=
                            (props[i].exp ? 1:0))
                                TEST_FAIL("Expected %s, got %s",
                                          props[i].exp ?
                                          "success" : "failure",
                                          (res == RD_KAFKA_CONF_OK ? "OK" :
                                           (res == RD_KAFKA_CONF_INVALID ?
                                            "INVALID" : "UNKNOWN")));

                        if (!props[i].exp)
                                continue;

                        /* Get value and compare to expected result */
                        destsz = sizeof(dest);
                        if (props[i].is_global)
                                res = rd_kafka_conf_get(conf,
                                                        props[i].prop,
                                                        dest, &destsz);
                        else
                                res = rd_kafka_topic_conf_get(tconf,
                                                              props[i].prop,
                                                              dest, &destsz);
                        TEST_ASSERT(res == RD_KAFKA_CONF_OK,
                                    ".._conf_get(%s) returned %d",
                                    props[i].prop, res);

                        TEST_ASSERT(!strcmp(props[i].exp, dest),
                                    "Expected \"%s\", got \"%s\"",
                                    props[i].exp, dest);
                }
                rd_kafka_topic_conf_destroy(tconf);
                rd_kafka_conf_destroy(conf);
        }

        return 0;
}
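/* Minimal sketch of the incremental S2F behavior exercised above
 * (illustrative only, not part of the original test): a bare flag list
 * replaces the current set, while '+'/'-' prefixed entries modify it,
 * and get() returns the canonical string in the library's internal
 * flag order. */
static void example_s2f_incremental (void) {
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        test_conf_set(conf, "debug", "broker,topic");      /* replace */
        test_conf_set(conf, "debug", "-broker,+metadata"); /* modify  */
        /* test_conf_get(conf, "debug") should now return
         * "topic,metadata". */

        rd_kafka_conf_destroy(conf);
}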
/**
 * @brief Test DescribeConfigs
 */
static void do_test_DescribeConfigs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) {
#define MY_CONFRES_CNT 3
        char *topics[MY_CONFRES_CNT];
        rd_kafka_ConfigResource_t *configs[MY_CONFRES_CNT];
        rd_kafka_AdminOptions_t *options;
        rd_kafka_resp_err_t exp_err[MY_CONFRES_CNT];
        rd_kafka_event_t *rkev;
        rd_kafka_resp_err_t err;
        const rd_kafka_DescribeConfigs_result_t *res;
        const rd_kafka_ConfigResource_t **rconfigs;
        size_t rconfig_cnt;
        char errstr[128];
        const char *errstr2;
        int ci = 0;
        int i;
        int fails = 0;

        /*
         * Only create one topic, the others will be non-existent.
         */
        rd_strdupa(&topics[0],
                   test_mk_topic_name("DescribeConfigs_exist", 1));
        for (i = 1 ; i < MY_CONFRES_CNT ; i++)
                rd_strdupa(&topics[i],
                           test_mk_topic_name("DescribeConfigs_notexist", 1));

        test_CreateTopics_simple(rk, NULL, topics, 1, 1, NULL);

        /*
         * ConfigResource #0: topic config, no config entries.
         */
        configs[ci] = rd_kafka_ConfigResource_new(
                RD_KAFKA_RESOURCE_TOPIC, topics[ci]);
        exp_err[ci] = RD_KAFKA_RESP_ERR_NO_ERROR;
        ci++;

        /*
         * ConfigResource #1: broker config, no config entries.
         */
        configs[ci] = rd_kafka_ConfigResource_new(
                RD_KAFKA_RESOURCE_BROKER,
                tsprintf("%"PRId32, avail_brokers[0]));
        exp_err[ci] = RD_KAFKA_RESP_ERR_NO_ERROR;
        ci++;

        /*
         * ConfigResource #2: topic config, non-existent topic,
         * no config entries.
         */
        configs[ci] = rd_kafka_ConfigResource_new(
                RD_KAFKA_RESOURCE_TOPIC, topics[ci]);
        /* FIXME: This is a bug in the broker (<v2.0.0): it returns a
         *        full response for unknown topics.
         *        https://issues.apache.org/jira/browse/KAFKA-6778 */
        if (test_broker_version < TEST_BRKVER(2,0,0,0))
                exp_err[ci] = RD_KAFKA_RESP_ERR_NO_ERROR;
        else
                exp_err[ci] = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
        ci++;

        /*
         * Timeout options
         */
        options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY);
        err = rd_kafka_AdminOptions_set_request_timeout(options, 10000,
                                                        errstr,
                                                        sizeof(errstr));
        TEST_ASSERT(!err, "%s", errstr);

        /*
         * Fire off request
         */
        rd_kafka_DescribeConfigs(rk, configs, ci, options, rkqu);

        rd_kafka_AdminOptions_destroy(options);

        /*
         * Wait for result
         */
        rkev = test_wait_admin_result(rkqu,
                                      RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT,
                                      10000+1000);

        /*
         * Extract result
         */
        res = rd_kafka_event_DescribeConfigs_result(rkev);
        TEST_ASSERT(res, "Expected DescribeConfigs result, not %s",
                    rd_kafka_event_name(rkev));

        err = rd_kafka_event_error(rkev);
        errstr2 = rd_kafka_event_error_string(rkev);
        TEST_ASSERT(!err, "Expected success, not %s: %s",
                    rd_kafka_err2name(err), errstr2);

        rconfigs = rd_kafka_DescribeConfigs_result_resources(res,
                                                             &rconfig_cnt);
        TEST_ASSERT((int)rconfig_cnt == ci,
                    "Expected %d result resources, got %"PRIusz"\n",
                    ci, rconfig_cnt);

        /*
         * Verify status per resource
         */
        for (i = 0 ; i < (int)rconfig_cnt ; i++) {
                const rd_kafka_ConfigEntry_t **entries;
                size_t entry_cnt;

                err = rd_kafka_ConfigResource_error(rconfigs[i]);
                errstr2 = rd_kafka_ConfigResource_error_string(rconfigs[i]);

                entries = rd_kafka_ConfigResource_configs(rconfigs[i],
                                                          &entry_cnt);

                TEST_SAY("ConfigResource #%d: type %s (%d), \"%s\": "
                         "%"PRIusz" ConfigEntries, error %s (%s)\n",
                         i,
                         rd_kafka_ResourceType_name(
                                 rd_kafka_ConfigResource_type(rconfigs[i])),
                         rd_kafka_ConfigResource_type(rconfigs[i]),
                         rd_kafka_ConfigResource_name(rconfigs[i]),
                         entry_cnt,
                         rd_kafka_err2name(err), errstr2 ? errstr2 : "");

                test_print_ConfigEntry_array(entries, entry_cnt, 1);

                if (rd_kafka_ConfigResource_type(rconfigs[i]) !=
                    rd_kafka_ConfigResource_type(configs[i]) ||
                    strcmp(rd_kafka_ConfigResource_name(rconfigs[i]),
                           rd_kafka_ConfigResource_name(configs[i]))) {
                        TEST_FAIL_LATER(
                                "ConfigResource #%d: "
                                "expected type %s name %s, "
                                "got type %s name %s",
                                i,
                                rd_kafka_ResourceType_name(
                                        rd_kafka_ConfigResource_type(
                                                configs[i])),
                                rd_kafka_ConfigResource_name(configs[i]),
                                rd_kafka_ResourceType_name(
                                        rd_kafka_ConfigResource_type(
                                                rconfigs[i])),
                                rd_kafka_ConfigResource_name(rconfigs[i]));
                        fails++;
                        continue;
                }

                if (err != exp_err[i]) {
                        TEST_FAIL_LATER("ConfigResource #%d: "
                                        "expected %s (%d), got %s (%s)",
                                        i,
                                        rd_kafka_err2name(exp_err[i]),
                                        exp_err[i],
                                        rd_kafka_err2name(err),
                                        errstr2 ? errstr2 : "");
                        fails++;
                }
        }

        TEST_ASSERT(!fails, "See %d previous failure(s)", fails);

        rd_kafka_event_destroy(rkev);

        rd_kafka_ConfigResource_destroy_array(configs, ci);

#undef MY_CONFRES_CNT
}
/**
 * @brief Test AlterConfigs
 */
static void do_test_AlterConfigs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) {
#define MY_CONFRES_CNT 3
        char *topics[MY_CONFRES_CNT];
        rd_kafka_ConfigResource_t *configs[MY_CONFRES_CNT];
        rd_kafka_AdminOptions_t *options;
        rd_kafka_resp_err_t exp_err[MY_CONFRES_CNT];
        rd_kafka_event_t *rkev;
        rd_kafka_resp_err_t err;
        const rd_kafka_AlterConfigs_result_t *res;
        const rd_kafka_ConfigResource_t **rconfigs;
        size_t rconfig_cnt;
        char errstr[128];
        const char *errstr2;
        int ci = 0;
        int i;
        int fails = 0;

        /*
         * Only create one topic, the others will be non-existent.
         */
        for (i = 0 ; i < MY_CONFRES_CNT ; i++)
                rd_strdupa(&topics[i], test_mk_topic_name(__FUNCTION__, 1));

        test_CreateTopics_simple(rk, NULL, topics, 1, 1, NULL);

        /*
         * ConfigResource #0: valid topic config
         */
        configs[ci] = rd_kafka_ConfigResource_new(
                RD_KAFKA_RESOURCE_TOPIC, topics[ci]);

        err = rd_kafka_ConfigResource_set_config(configs[ci],
                                                 "compression.type", "gzip");
        TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));

        err = rd_kafka_ConfigResource_set_config(configs[ci],
                                                 "flush.ms", "12345678");
        TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));

        exp_err[ci] = RD_KAFKA_RESP_ERR_NO_ERROR;
        ci++;

        if (test_broker_version >= TEST_BRKVER(1, 1, 0, 0)) {
                /*
                 * ConfigResource #1: valid broker config
                 */
                configs[ci] = rd_kafka_ConfigResource_new(
                        RD_KAFKA_RESOURCE_BROKER,
                        tsprintf("%"PRId32, avail_brokers[0]));

                err = rd_kafka_ConfigResource_set_config(
                        configs[ci],
                        "sasl.kerberos.min.time.before.relogin", "58000");
                TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));

                exp_err[ci] = RD_KAFKA_RESP_ERR_NO_ERROR;
                ci++;
        } else {
                TEST_WARN("Skipping RESOURCE_BROKER test on unsupported "
                          "broker version\n");
        }

        /*
         * ConfigResource #2: valid topic config, non-existent topic
         */
        configs[ci] = rd_kafka_ConfigResource_new(
                RD_KAFKA_RESOURCE_TOPIC, topics[ci]);

        err = rd_kafka_ConfigResource_set_config(configs[ci],
                                                 "compression.type", "lz4");
        TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));

        err = rd_kafka_ConfigResource_set_config(
                configs[ci], "offset.metadata.max.bytes", "12345");
        TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));

        exp_err[ci] = RD_KAFKA_RESP_ERR_UNKNOWN;
        ci++;

        /*
         * Timeout options
         */
        options = rd_kafka_AdminOptions_new(rk,
                                            RD_KAFKA_ADMIN_OP_ALTERCONFIGS);
        err = rd_kafka_AdminOptions_set_request_timeout(options, 10000,
                                                        errstr,
                                                        sizeof(errstr));
        TEST_ASSERT(!err, "%s", errstr);

        /*
         * Fire off request
         */
        rd_kafka_AlterConfigs(rk, configs, ci, options, rkqu);

        rd_kafka_AdminOptions_destroy(options);

        /*
         * Wait for result
         */
        rkev = test_wait_admin_result(rkqu,
                                      RD_KAFKA_EVENT_ALTERCONFIGS_RESULT,
                                      10000+1000);

        /*
         * Extract result
         */
        res = rd_kafka_event_AlterConfigs_result(rkev);
        TEST_ASSERT(res, "Expected AlterConfigs result, not %s",
                    rd_kafka_event_name(rkev));

        err = rd_kafka_event_error(rkev);
        errstr2 = rd_kafka_event_error_string(rkev);
        TEST_ASSERT(!err, "Expected success, not %s: %s",
                    rd_kafka_err2name(err), errstr2);

        rconfigs = rd_kafka_AlterConfigs_result_resources(res, &rconfig_cnt);
        TEST_ASSERT((int)rconfig_cnt == ci,
                    "Expected %d result resources, got %"PRIusz"\n",
                    ci, rconfig_cnt);

        /*
         * Verify status per resource
         */
        for (i = 0 ; i < (int)rconfig_cnt ; i++) {
                const rd_kafka_ConfigEntry_t **entries;
                size_t entry_cnt;

                err = rd_kafka_ConfigResource_error(rconfigs[i]);
                errstr2 = rd_kafka_ConfigResource_error_string(rconfigs[i]);

                entries = rd_kafka_ConfigResource_configs(rconfigs[i],
                                                          &entry_cnt);

                TEST_SAY("ConfigResource #%d: type %s (%d), \"%s\": "
                         "%"PRIusz" ConfigEntries, error %s (%s)\n",
                         i,
                         rd_kafka_ResourceType_name(
                                 rd_kafka_ConfigResource_type(rconfigs[i])),
                         rd_kafka_ConfigResource_type(rconfigs[i]),
                         rd_kafka_ConfigResource_name(rconfigs[i]),
                         entry_cnt,
                         rd_kafka_err2name(err), errstr2 ? errstr2 : "");

                test_print_ConfigEntry_array(entries, entry_cnt, 1);

                if (rd_kafka_ConfigResource_type(rconfigs[i]) !=
                    rd_kafka_ConfigResource_type(configs[i]) ||
                    strcmp(rd_kafka_ConfigResource_name(rconfigs[i]),
                           rd_kafka_ConfigResource_name(configs[i]))) {
                        TEST_FAIL_LATER(
                                "ConfigResource #%d: "
                                "expected type %s name %s, "
                                "got type %s name %s",
                                i,
                                rd_kafka_ResourceType_name(
                                        rd_kafka_ConfigResource_type(
                                                configs[i])),
                                rd_kafka_ConfigResource_name(configs[i]),
                                rd_kafka_ResourceType_name(
                                        rd_kafka_ConfigResource_type(
                                                rconfigs[i])),
                                rd_kafka_ConfigResource_name(rconfigs[i]));
                        fails++;
                        continue;
                }

                if (err != exp_err[i]) {
                        TEST_FAIL_LATER("ConfigResource #%d: "
                                        "expected %s (%d), got %s (%s)",
                                        i,
                                        rd_kafka_err2name(exp_err[i]),
                                        exp_err[i],
                                        rd_kafka_err2name(err),
                                        errstr2 ? errstr2 : "");
                        fails++;
                }
        }

        TEST_ASSERT(!fails, "See %d previous failure(s)", fails);

        rd_kafka_event_destroy(rkev);

        rd_kafka_ConfigResource_destroy_array(configs, ci);

#undef MY_CONFRES_CNT
}
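/* Minimal sketch of a single-resource AlterConfigs call, using a
 * hypothetical topic name. Note (broker-side AlterConfigs semantics,
 * not specific to this test): the request replaces the resource's
 * entire dynamic config, so any property not included reverts to its
 * default. */
static void example_alter_one_topic (rd_kafka_t *rk,
                                     rd_kafka_queue_t *rkqu) {
        rd_kafka_ConfigResource_t *cr = rd_kafka_ConfigResource_new(
                RD_KAFKA_RESOURCE_TOPIC, "some_topic" /* hypothetical */);
        rd_kafka_event_t *rkev;

        rd_kafka_ConfigResource_set_config(cr, "compression.type", "gzip");

        rd_kafka_AlterConfigs(rk, &cr, 1, NULL, rkqu);

        rkev = test_wait_admin_result(
                rkqu, RD_KAFKA_EVENT_ALTERCONFIGS_RESULT, 10000);
        /* Per-resource errors would be inspected here, as in the full
         * test above. */
        rd_kafka_event_destroy(rkev);
        rd_kafka_ConfigResource_destroy(cr);
}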
static void do_test_CreateTopics (const char *what,
                                  rd_kafka_t *rk, rd_kafka_queue_t *useq,
                                  int op_timeout, rd_bool_t validate_only) {
        rd_kafka_queue_t *q = useq ? useq : rd_kafka_queue_new(rk);
#define MY_NEW_TOPICS_CNT 6
        char *topics[MY_NEW_TOPICS_CNT];
        rd_kafka_NewTopic_t *new_topics[MY_NEW_TOPICS_CNT];
        rd_kafka_AdminOptions_t *options = NULL;
        rd_kafka_resp_err_t exp_topicerr[MY_NEW_TOPICS_CNT] = {0};
        rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
        /* Expected topics in metadata */
        rd_kafka_metadata_topic_t exp_mdtopics[MY_NEW_TOPICS_CNT] = {{0}};
        int exp_mdtopic_cnt = 0;
        /* Not expected topics in metadata */
        rd_kafka_metadata_topic_t exp_not_mdtopics[MY_NEW_TOPICS_CNT] =
                {{0}};
        int exp_not_mdtopic_cnt = 0;
        int i;
        char errstr[512];
        const char *errstr2;
        rd_kafka_resp_err_t err;
        test_timing_t timing;
        rd_kafka_event_t *rkev;
        const rd_kafka_CreateTopics_result_t *res;
        const rd_kafka_topic_result_t **restopics;
        size_t restopic_cnt;
        int metadata_tmout;
        int num_replicas = (int)avail_broker_cnt;
        int32_t *replicas;

        /* Set up replicas */
        replicas = rd_alloca(sizeof(*replicas) * num_replicas);
        for (i = 0 ; i < num_replicas ; i++)
                replicas[i] = avail_brokers[i];

        TEST_SAY(_C_MAG "[ %s CreateTopics with %s, "
                 "op_timeout %d, validate_only %d ]\n",
                 rd_kafka_name(rk), what, op_timeout, validate_only);

        /**
         * Construct NewTopic array with different properties for
         * different partitions.
         */
        for (i = 0 ; i < MY_NEW_TOPICS_CNT ; i++) {
                char *topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
                int num_parts = i * 7 + 1;
                int set_config = (i & 1);
                int add_invalid_config = (i == 1);
                int set_replicas = !(i % 3);
                rd_kafka_resp_err_t this_exp_err =
                        RD_KAFKA_RESP_ERR_NO_ERROR;

                topics[i] = topic;
                new_topics[i] = rd_kafka_NewTopic_new(topic,
                                                      num_parts,
                                                      set_replicas ? -1 :
                                                      num_replicas,
                                                      NULL, 0);

                if (set_config) {
                        /*
                         * Add various configuration properties
                         */
                        err = rd_kafka_NewTopic_set_config(
                                new_topics[i], "compression.type", "lz4");
                        TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));

                        err = rd_kafka_NewTopic_set_config(
                                new_topics[i], "delete.retention.ms",
                                "900");
                        TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
                }

                if (add_invalid_config) {
                        /* Add invalid config property */
                        err = rd_kafka_NewTopic_set_config(
                                new_topics[i], "dummy.doesntexist",
                                "broker is verifying this");
                        TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
                        this_exp_err = RD_KAFKA_RESP_ERR_INVALID_CONFIG;
                }

                TEST_SAY("Expected result for topic #%d: %s "
                         "(set_config=%d, add_invalid_config=%d, "
                         "set_replicas=%d)\n",
                         i, rd_kafka_err2name(this_exp_err),
                         set_config, add_invalid_config, set_replicas);

                if (set_replicas) {
                        int32_t p;

                        /*
                         * Set valid replica assignments
                         */
                        for (p = 0 ; p < num_parts ; p++) {
                                err = rd_kafka_NewTopic_set_replica_assignment(
                                        new_topics[i], p,
                                        replicas, num_replicas,
                                        errstr, sizeof(errstr));
                                TEST_ASSERT(!err, "%s", errstr);
                        }
                }

                if (this_exp_err || validate_only) {
                        exp_topicerr[i] = this_exp_err;
                        exp_not_mdtopics[exp_not_mdtopic_cnt++].topic =
                                topic;

                } else {
                        exp_mdtopics[exp_mdtopic_cnt].topic = topic;
                        exp_mdtopics[exp_mdtopic_cnt].partition_cnt =
                                num_parts;
                        exp_mdtopic_cnt++;
                }
        }

        if (op_timeout != -1 || validate_only) {
                options = rd_kafka_AdminOptions_new(
                        rk, RD_KAFKA_ADMIN_OP_CREATETOPICS);

                if (op_timeout != -1) {
                        err = rd_kafka_AdminOptions_set_operation_timeout(
                                options, op_timeout,
                                errstr, sizeof(errstr));
                        TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
                }

                if (validate_only) {
                        err = rd_kafka_AdminOptions_set_validate_only(
                                options, validate_only,
                                errstr, sizeof(errstr));
                        TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
                }
        }

        TIMING_START(&timing, "CreateTopics");
        TEST_SAY("Call CreateTopics\n");
        rd_kafka_CreateTopics(rk, new_topics, MY_NEW_TOPICS_CNT,
                              options, q);
        TIMING_ASSERT_LATER(&timing, 0, 50);

        /* Poll result queue for CreateTopics result.
         * Print but otherwise ignore other event types
         * (typically generic Error events). */
        TIMING_START(&timing, "CreateTopics.queue_poll");
        do {
                rkev = rd_kafka_queue_poll(q, tmout_multip(20*1000));
                TEST_SAY("CreateTopics: got %s in %.3fms\n",
                         rd_kafka_event_name(rkev),
                         TIMING_DURATION(&timing) / 1000.0f);
                if (rd_kafka_event_error(rkev))
                        TEST_SAY("%s: %s\n",
                                 rd_kafka_event_name(rkev),
                                 rd_kafka_event_error_string(rkev));
        } while (rd_kafka_event_type(rkev) !=
                 RD_KAFKA_EVENT_CREATETOPICS_RESULT);

        /* Convert event to proper result */
        res = rd_kafka_event_CreateTopics_result(rkev);
        TEST_ASSERT(res, "expected CreateTopics_result, not %s",
                    rd_kafka_event_name(rkev));

        /* Expecting error */
        err = rd_kafka_event_error(rkev);
        errstr2 = rd_kafka_event_error_string(rkev);
        TEST_ASSERT(err == exp_err,
                    "expected CreateTopics to return %s, not %s (%s)",
                    rd_kafka_err2str(exp_err),
                    rd_kafka_err2str(err),
                    err ? errstr2 : "n/a");

        TEST_SAY("CreateTopics: returned %s (%s)\n",
                 rd_kafka_err2str(err), err ? errstr2 : "n/a");

        /* Extract topics */
        restopics = rd_kafka_CreateTopics_result_topics(res, &restopic_cnt);

        /* Scan topics for proper fields and expected failures. */
        for (i = 0 ; i < (int)restopic_cnt ; i++) {
                const rd_kafka_topic_result_t *terr = restopics[i];

                /* Verify that topic order matches our request. */
                if (strcmp(rd_kafka_topic_result_name(terr), topics[i]))
                        TEST_FAIL_LATER("Topic result order mismatch "
                                        "at #%d: expected %s, got %s",
                                        i, topics[i],
                                        rd_kafka_topic_result_name(terr));

                TEST_SAY("CreateTopics result: #%d: %s: %s: %s\n",
                         i,
                         rd_kafka_topic_result_name(terr),
                         rd_kafka_err2name(
                                 rd_kafka_topic_result_error(terr)),
                         rd_kafka_topic_result_error_string(terr));
                if (rd_kafka_topic_result_error(terr) != exp_topicerr[i])
                        TEST_FAIL_LATER(
                                "Expected %s, not %d: %s",
                                rd_kafka_err2name(exp_topicerr[i]),
                                rd_kafka_topic_result_error(terr),
                                rd_kafka_err2name(
                                        rd_kafka_topic_result_error(terr)));
        }

        /**
         * Verify that the expected topics are created and the
         * non-expected are not. Allow it some time to propagate.
         */
        if (validate_only) {
                /* No topics should have been created, give it some time
                 * before checking. */
                rd_sleep(2);
                metadata_tmout = 5 * 1000;
        } else {
                if (op_timeout > 0)
                        metadata_tmout = op_timeout + 1000;
                else
                        metadata_tmout = 10 * 1000;
        }

        test_wait_metadata_update(rk,
                                  exp_mdtopics, exp_mdtopic_cnt,
                                  exp_not_mdtopics, exp_not_mdtopic_cnt,
                                  metadata_tmout);

        rd_kafka_event_destroy(rkev);

        for (i = 0 ; i < MY_NEW_TOPICS_CNT ; i++) {
                rd_kafka_NewTopic_destroy(new_topics[i]);
                rd_free(topics[i]);
        }

        if (options)
                rd_kafka_AdminOptions_destroy(options);

        if (!useq)
                rd_kafka_queue_destroy(q);

#undef MY_NEW_TOPICS_CNT
}
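/* Minimal sketch of a single-topic CreateTopics request (the topic
 * name is hypothetical). The full test above passes a replication
 * factor of -1 when it supplies explicit per-partition replica
 * assignments; here a fixed factor of 1 lets the broker choose
 * placement. */
static void example_create_one_topic (rd_kafka_t *rk,
                                      rd_kafka_queue_t *q) {
        char errstr[512];
        rd_kafka_NewTopic_t *nt = rd_kafka_NewTopic_new(
                "some_topic" /* hypothetical */,
                3 /* partitions */, 1 /* replication factor */,
                errstr, sizeof(errstr));
        rd_kafka_event_t *rkev;

        TEST_ASSERT(nt, "NewTopic_new() failed: %s", errstr);

        rd_kafka_CreateTopics(rk, &nt, 1, NULL, q);

        /* As in the full test: wait for the CreateTopics result event,
         * then inspect per-topic results. */
        rkev = test_wait_admin_result(q,
                                      RD_KAFKA_EVENT_CREATETOPICS_RESULT,
                                      10000);
        rd_kafka_event_destroy(rkev);
        rd_kafka_NewTopic_destroy(nt);
}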
/**
 * @brief Test deletion of topics
 */
static void do_test_DeleteTopics (const char *what,
                                  rd_kafka_t *rk, rd_kafka_queue_t *useq,
                                  int op_timeout) {
        rd_kafka_queue_t *q = useq ? useq : rd_kafka_queue_new(rk);
        const int skip_topic_cnt = 2;
#define MY_DEL_TOPICS_CNT 9
        char *topics[MY_DEL_TOPICS_CNT];
        rd_kafka_DeleteTopic_t *del_topics[MY_DEL_TOPICS_CNT];
        rd_kafka_AdminOptions_t *options = NULL;
        rd_kafka_resp_err_t exp_topicerr[MY_DEL_TOPICS_CNT] = {0};
        rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
        /* Expected topics in metadata */
        rd_kafka_metadata_topic_t exp_mdtopics[MY_DEL_TOPICS_CNT] = {{0}};
        int exp_mdtopic_cnt = 0;
        /* Not expected topics in metadata */
        rd_kafka_metadata_topic_t exp_not_mdtopics[MY_DEL_TOPICS_CNT] =
                {{0}};
        int exp_not_mdtopic_cnt = 0;
        int i;
        char errstr[512];
        const char *errstr2;
        rd_kafka_resp_err_t err;
        test_timing_t timing;
        rd_kafka_event_t *rkev;
        const rd_kafka_DeleteTopics_result_t *res;
        const rd_kafka_topic_result_t **restopics;
        size_t restopic_cnt;
        int metadata_tmout;

        TEST_SAY(_C_MAG "[ %s DeleteTopics with %s, op_timeout %d ]\n",
                 rd_kafka_name(rk), what, op_timeout);

        /**
         * Construct DeleteTopic array
         */
        for (i = 0 ; i < MY_DEL_TOPICS_CNT ; i++) {
                char *topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
                int notexist_topic =
                        i >= MY_DEL_TOPICS_CNT - skip_topic_cnt;

                topics[i] = topic;

                del_topics[i] = rd_kafka_DeleteTopic_new(topic);

                if (notexist_topic)
                        exp_topicerr[i] =
                                RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
                else {
                        exp_topicerr[i] = RD_KAFKA_RESP_ERR_NO_ERROR;

                        exp_mdtopics[exp_mdtopic_cnt++].topic = topic;
                }

                exp_not_mdtopics[exp_not_mdtopic_cnt++].topic = topic;
        }

        if (op_timeout != -1) {
                options = rd_kafka_AdminOptions_new(
                        rk, RD_KAFKA_ADMIN_OP_ANY);

                err = rd_kafka_AdminOptions_set_operation_timeout(
                        options, op_timeout, errstr, sizeof(errstr));
                TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
        }

        /* Create the topics first, minus the skip count. */
        test_CreateTopics_simple(rk, NULL, topics,
                                 MY_DEL_TOPICS_CNT-skip_topic_cnt,
                                 2/*num_partitions*/,
                                 NULL);

        /* Verify that topics are reported by metadata */
        test_wait_metadata_update(rk,
                                  exp_mdtopics, exp_mdtopic_cnt,
                                  NULL, 0,
                                  15*1000);

        TIMING_START(&timing, "DeleteTopics");
        TEST_SAY("Call DeleteTopics\n");
        rd_kafka_DeleteTopics(rk, del_topics, MY_DEL_TOPICS_CNT,
                              options, q);
        TIMING_ASSERT_LATER(&timing, 0, 50);

        /* Poll result queue for DeleteTopics result.
         * Print but otherwise ignore other event types
         * (typically generic Error events). */
        TIMING_START(&timing, "DeleteTopics.queue_poll");
        while (1) {
                rkev = rd_kafka_queue_poll(q, tmout_multip(20*1000));
                TEST_SAY("DeleteTopics: got %s in %.3fms\n",
                         rd_kafka_event_name(rkev),
                         TIMING_DURATION(&timing) / 1000.0f);
                if (rd_kafka_event_error(rkev))
                        TEST_SAY("%s: %s\n",
                                 rd_kafka_event_name(rkev),
                                 rd_kafka_event_error_string(rkev));

                if (rd_kafka_event_type(rkev) ==
                    RD_KAFKA_EVENT_DELETETOPICS_RESULT)
                        break;

                rd_kafka_event_destroy(rkev);
        }

        /* Convert event to proper result */
        res = rd_kafka_event_DeleteTopics_result(rkev);
        TEST_ASSERT(res, "expected DeleteTopics_result, not %s",
                    rd_kafka_event_name(rkev));

        /* Expecting error */
        err = rd_kafka_event_error(rkev);
        errstr2 = rd_kafka_event_error_string(rkev);
        TEST_ASSERT(err == exp_err,
                    "expected DeleteTopics to return %s, not %s (%s)",
                    rd_kafka_err2str(exp_err),
                    rd_kafka_err2str(err),
                    err ? errstr2 : "n/a");

        TEST_SAY("DeleteTopics: returned %s (%s)\n",
                 rd_kafka_err2str(err), err ? errstr2 : "n/a");

        /* Extract topics */
        restopics = rd_kafka_DeleteTopics_result_topics(res, &restopic_cnt);

        /* Scan topics for proper fields and expected failures. */
        for (i = 0 ; i < (int)restopic_cnt ; i++) {
                const rd_kafka_topic_result_t *terr = restopics[i];

                /* Verify that topic order matches our request. */
                if (strcmp(rd_kafka_topic_result_name(terr), topics[i]))
                        TEST_FAIL_LATER("Topic result order mismatch "
                                        "at #%d: expected %s, got %s",
                                        i, topics[i],
                                        rd_kafka_topic_result_name(terr));

                TEST_SAY("DeleteTopics result: #%d: %s: %s: %s\n",
                         i,
                         rd_kafka_topic_result_name(terr),
                         rd_kafka_err2name(
                                 rd_kafka_topic_result_error(terr)),
                         rd_kafka_topic_result_error_string(terr));
                if (rd_kafka_topic_result_error(terr) != exp_topicerr[i])
                        TEST_FAIL_LATER(
                                "Expected %s, not %d: %s",
                                rd_kafka_err2name(exp_topicerr[i]),
                                rd_kafka_topic_result_error(terr),
                                rd_kafka_err2name(
                                        rd_kafka_topic_result_error(terr)));
        }

        /**
         * Verify that the expected topics are deleted and the
         * non-expected are not. Allow it some time to propagate.
         */
        if (op_timeout > 0)
                metadata_tmout = op_timeout + 1000;
        else
                metadata_tmout = 10 * 1000;

        test_wait_metadata_update(rk,
                                  NULL, 0,
                                  exp_not_mdtopics, exp_not_mdtopic_cnt,
                                  metadata_tmout);

        rd_kafka_event_destroy(rkev);

        for (i = 0 ; i < MY_DEL_TOPICS_CNT ; i++) {
                rd_kafka_DeleteTopic_destroy(del_topics[i]);
                rd_free(topics[i]);
        }

        if (options)
                rd_kafka_AdminOptions_destroy(options);

        if (!useq)
                rd_kafka_queue_destroy(q);

#undef MY_DEL_TOPICS_CNT
}
/**
 * @brief Test AdminOptions
 */
static void do_test_options (rd_kafka_t *rk) {
#define _all_apis { RD_KAFKA_ADMIN_OP_CREATETOPICS, \
                    RD_KAFKA_ADMIN_OP_DELETETOPICS, \
                    RD_KAFKA_ADMIN_OP_CREATEPARTITIONS, \
                    RD_KAFKA_ADMIN_OP_ALTERCONFIGS, \
                    RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS, \
                    RD_KAFKA_ADMIN_OP_ANY /* Must be last */ }
        struct {
                const char *setter;
                const rd_kafka_admin_op_t valid_apis[8];
        } matrix[] = {
                { "request_timeout", _all_apis },
                { "operation_timeout",
                  { RD_KAFKA_ADMIN_OP_CREATETOPICS,
                    RD_KAFKA_ADMIN_OP_DELETETOPICS,
                    RD_KAFKA_ADMIN_OP_CREATEPARTITIONS } },
                { "validate_only",
                  { RD_KAFKA_ADMIN_OP_CREATETOPICS,
                    RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
                    RD_KAFKA_ADMIN_OP_ALTERCONFIGS } },
                { "broker", _all_apis },
                { "opaque", _all_apis },
                { NULL },
        };
        int i;
        rd_kafka_AdminOptions_t *options;

        for (i = 0 ; matrix[i].setter ; i++) {
                static const rd_kafka_admin_op_t all_apis[] = _all_apis;
                const rd_kafka_admin_op_t *for_api;

                for (for_api = all_apis ; ; for_api++) {
                        rd_kafka_resp_err_t err =
                                RD_KAFKA_RESP_ERR_NO_ERROR;
                        rd_kafka_resp_err_t exp_err =
                                RD_KAFKA_RESP_ERR_NO_ERROR;
                        char errstr[512];
                        int fi;

                        options = rd_kafka_AdminOptions_new(rk, *for_api);
                        TEST_ASSERT(options,
                                    "AdminOptions_new(%d) failed",
                                    *for_api);

                        if (!strcmp(matrix[i].setter, "request_timeout"))
                                err = rd_kafka_AdminOptions_set_request_timeout(
                                        options, 1234,
                                        errstr, sizeof(errstr));
                        else if (!strcmp(matrix[i].setter,
                                         "operation_timeout"))
                                err = rd_kafka_AdminOptions_set_operation_timeout(
                                        options, 12345,
                                        errstr, sizeof(errstr));
                        else if (!strcmp(matrix[i].setter, "validate_only"))
                                err = rd_kafka_AdminOptions_set_validate_only(
                                        options, 1,
                                        errstr, sizeof(errstr));
                        else if (!strcmp(matrix[i].setter, "broker"))
                                err = rd_kafka_AdminOptions_set_broker(
                                        options, 5,
                                        errstr, sizeof(errstr));
                        else if (!strcmp(matrix[i].setter, "opaque")) {
                                rd_kafka_AdminOptions_set_opaque(
                                        options, (void *)options);
                                err = RD_KAFKA_RESP_ERR_NO_ERROR;
                        } else
                                TEST_FAIL("Invalid setter: %s",
                                          matrix[i].setter);

                        TEST_SAYL(3, "AdminOptions_set_%s on "
                                  "RD_KAFKA_ADMIN_OP_%d options "
                                  "returned %s: %s\n",
                                  matrix[i].setter,
                                  *for_api,
                                  rd_kafka_err2name(err),
                                  err ? errstr : "success");

                        /* Scan matrix valid_apis to see if this
                         * setter should be accepted or not. */
                        if (exp_err) {
                                /* An expected error is already set */
                        } else if (*for_api != RD_KAFKA_ADMIN_OP_ANY) {
                                exp_err = RD_KAFKA_RESP_ERR__INVALID_ARG;

                                for (fi = 0 ; matrix[i].valid_apis[fi] ;
                                     fi++) {
                                        if (matrix[i].valid_apis[fi] ==
                                            *for_api)
                                                exp_err =
                                                        RD_KAFKA_RESP_ERR_NO_ERROR;
                                }
                        } else {
                                exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
                        }

                        if (err != exp_err)
                                TEST_FAIL_LATER("Expected "
                                                "AdminOptions_set_%s "
                                                "for RD_KAFKA_ADMIN_OP_%d "
                                                "options to return %s, "
                                                "not %s",
                                                matrix[i].setter,
                                                *for_api,
                                                rd_kafka_err2name(exp_err),
                                                rd_kafka_err2name(err));

                        rd_kafka_AdminOptions_destroy(options);

                        if (*for_api == RD_KAFKA_ADMIN_OP_ANY)
                                break; /* This was the last one */
                }
        }

        /* Try an invalid for_api */
        options = rd_kafka_AdminOptions_new(rk, (rd_kafka_admin_op_t)1234);
        TEST_ASSERT(!options, "Expected AdminOptions_new() to fail "
                    "with an invalid for_api, didn't.");

        TEST_LATER_CHECK();
}
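/* Sketch of the typical AdminOptions lifetime, mirroring what the tests
 * above do: create options for a specific operation, configure them,
 * pass them to the request, and destroy them right after the request
 * call returns (the tests destroy options immediately after firing off
 * each request, as the request keeps its own copy of the settings). */
static void example_options_lifetime (rd_kafka_t *rk,
                                      rd_kafka_NewTopic_t **new_topics,
                                      size_t new_topic_cnt,
                                      rd_kafka_queue_t *q) {
        char errstr[512];
        rd_kafka_AdminOptions_t *options =
                rd_kafka_AdminOptions_new(rk,
                                          RD_KAFKA_ADMIN_OP_CREATETOPICS);

        rd_kafka_AdminOptions_set_request_timeout(options, 10000,
                                                  errstr, sizeof(errstr));
        rd_kafka_CreateTopics(rk, new_topics, new_topic_cnt, options, q);
        rd_kafka_AdminOptions_destroy(options);
}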