static ssize_t render_callback (const char *key, char *buf, size_t size, void *opaque) { rd_kafka_broker_t *rkb = opaque; if (!strcmp(key, "broker.name")) { char *val, *t; size_t len; rd_kafka_broker_lock(rkb); rd_strdupa(&val, rkb->rkb_nodename); rd_kafka_broker_unlock(rkb); /* Just the broker name, no port */ if ((t = strchr(val, ':'))) len = (size_t)(t-val); else len = strlen(val); if (buf) memcpy(buf, val, RD_MIN(len, size)); return len; } else { rd_kafka_conf_res_t res; size_t destsize = size; /* Try config lookup. */ res = rd_kafka_conf_get(&rkb->rkb_rk->rk_conf, key, buf, &destsize); if (res != RD_KAFKA_CONF_OK) return -1; /* Dont include \0 in returned size */ return (destsize > 0 ? destsize-1 : destsize); } }
/**
 * Initialize and start SASL authentication.
 *
 * Returns 0 on successful init and -1 on error.
 *
 * Locality: broker thread
 */
int rd_kafka_sasl_client_new (rd_kafka_transport_t *rktrans,
                              char *errstr, int errstr_size) {
        int r;
        rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
        rd_kafka_t *rk = rkb->rkb_rk;
        char *hostname, *t;
        sasl_callback_t callbacks[16] = {
                // { SASL_CB_GETOPT, (void *)rd_kafka_sasl_cb_getopt, rktrans },
                { SASL_CB_LOG, (void *)rd_kafka_sasl_cb_log, rktrans },
                { SASL_CB_AUTHNAME, (void *)rd_kafka_sasl_cb_getsimple, rktrans },
                { SASL_CB_PASS, (void *)rd_kafka_sasl_cb_getsecret, rktrans },
                { SASL_CB_ECHOPROMPT, (void *)rd_kafka_sasl_cb_chalprompt, rktrans },
                { SASL_CB_GETREALM, (void *)rd_kafka_sasl_cb_getrealm, rktrans },
                { SASL_CB_CANON_USER, (void *)rd_kafka_sasl_cb_canon, rktrans },
                { SASL_CB_LIST_END }
        };

        /* SASL_CB_USER is needed for PLAIN but breaks GSSAPI.
         * FIX: this must check the configured mechanism
         * (sasl.mechanisms), not the service name: the service name
         * is e.g. "kafka" and never equals "PLAIN", so the callback
         * was previously never installed. */
        if (!strcmp(rk->rk_conf.sasl.mechanisms, "PLAIN")) {
                int endidx;

                /* Find end of callbacks array */
                for (endidx = 0 ;
                     callbacks[endidx].id != SASL_CB_LIST_END ;
                     endidx++)
                        ;

                callbacks[endidx].id = SASL_CB_USER;
                callbacks[endidx].proc = (void *)rd_kafka_sasl_cb_getsimple;
                endidx++;
                callbacks[endidx].id = SASL_CB_LIST_END;
        }

        /* Hostname without the ":port" suffix is required by libsasl. */
        rd_strdupa(&hostname, rktrans->rktrans_rkb->rkb_nodename);
        if ((t = strchr(hostname, ':')))
                *t = '\0';  /* remove ":port" */

        rd_rkb_dbg(rkb, SECURITY, "SASL",
                   "Initializing SASL client: service name %s, "
                   "hostname %s, mechanisms %s",
                   rk->rk_conf.sasl.service_name, hostname,
                   rk->rk_conf.sasl.mechanisms);

        /* Acquire or refresh ticket if kinit is configured */
        rd_kafka_sasl_kinit_refresh(rkb);

        r = sasl_client_new(rk->rk_conf.sasl.service_name, hostname,
                            NULL, NULL, /* no local & remote IP checks */
                            callbacks, 0, &rktrans->rktrans_sasl.conn);
        if (r != SASL_OK) {
                rd_snprintf(errstr, errstr_size, "%s",
                            sasl_errstring(r, NULL, NULL));
                return -1;
        }

        if (rk->rk_conf.debug & RD_KAFKA_DBG_SECURITY) {
                const char *avail_mechs;
                /* FIX: the mechanism-list separator was a garbled
                 * string literal containing a raw newline; use a
                 * plain space separator. */
                sasl_listmech(rktrans->rktrans_sasl.conn, NULL, NULL, " ",
                              NULL, &avail_mechs, NULL, NULL);
                rd_rkb_dbg(rkb, SECURITY, "SASL",
                           "My supported SASL mechanisms: %s", avail_mechs);
        }

        rd_kafka_transport_poll_set(rktrans, POLLIN);

        do {
                const char *out;
                unsigned int outlen;
                const char *mech = NULL;

                r = sasl_client_start(rktrans->rktrans_sasl.conn,
                                      rk->rk_conf.sasl.mechanisms,
                                      NULL, &out, &outlen, &mech);

                if (r >= 0)
                        if (rd_kafka_sasl_send(rktrans, out, outlen,
                                               errstr, errstr_size))
                                return -1;
        } while (r == SASL_INTERACT);

        if (r == SASL_OK) {
                /* PLAIN is apparently done here, but we still need to
                 * make sure the PLAIN frame is sent and we get a response
                 * back (but we must not pass the response to libsasl or
                 * it will fail). */
                rktrans->rktrans_sasl.complete = 1;
                return 0;

        } else if (r != SASL_CONTINUE) {
                rd_snprintf(errstr, errstr_size,
                            "SASL handshake failed (start (%d)): %s",
                            r, sasl_errdetail(rktrans->rktrans_sasl.conn));
                return -1;
        }

        return 0;
}
/**
 * @brief Test DescribeConfigs
 *
 * Describes one existing topic, one broker, and one non-existent topic,
 * then verifies the per-resource type, name and error code in the
 * result event.
 */
static void do_test_DescribeConfigs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) {
#define MY_CONFRES_CNT 3
        char *topics[MY_CONFRES_CNT];
        rd_kafka_ConfigResource_t *configs[MY_CONFRES_CNT];
        rd_kafka_AdminOptions_t *options;
        rd_kafka_resp_err_t exp_err[MY_CONFRES_CNT];
        rd_kafka_event_t *rkev;
        rd_kafka_resp_err_t err;
        const rd_kafka_DescribeConfigs_result_t *res;
        const rd_kafka_ConfigResource_t **rconfigs;
        size_t rconfig_cnt;
        char errstr[128];
        const char *errstr2;
        int ci = 0;
        int i;
        int fails = 0;

        /*
         * Only create one topic, the others will be non-existent.
         */
        rd_strdupa(&topics[0], test_mk_topic_name("DescribeConfigs_exist", 1));
        for (i = 1 ; i < MY_CONFRES_CNT ; i++)
                rd_strdupa(&topics[i],
                           test_mk_topic_name("DescribeConfigs_notexist", 1));

        test_CreateTopics_simple(rk, NULL, topics, 1, 1, NULL);

        /*
         * ConfigResource #0: topic config, no config entries.
         */
        configs[ci] = rd_kafka_ConfigResource_new(
                RD_KAFKA_RESOURCE_TOPIC, topics[ci]);
        exp_err[ci] = RD_KAFKA_RESP_ERR_NO_ERROR;
        ci++;

        /*
         * ConfigResource #1:broker config, no config entries
         */
        configs[ci] = rd_kafka_ConfigResource_new(
                RD_KAFKA_RESOURCE_BROKER,
                tsprintf("%"PRId32, avail_brokers[0]));
        exp_err[ci] = RD_KAFKA_RESP_ERR_NO_ERROR;
        ci++;

        /*
         * ConfigResource #2: topic config, non-existent topic, no config entr.
         */
        configs[ci] = rd_kafka_ConfigResource_new(
                RD_KAFKA_RESOURCE_TOPIC, topics[ci]);
        /* FIXME: This is a bug in the broker (<v2.0.0), it returns a full
         *        response for unknown topics.
         *        https://issues.apache.org/jira/browse/KAFKA-6778 */
        if (test_broker_version < TEST_BRKVER(2,0,0,0))
                exp_err[ci] = RD_KAFKA_RESP_ERR_NO_ERROR;
        else
                exp_err[ci] = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
        ci++;

        /*
         * Timeout options
         *
         * Use the op-specific enum (rather than RD_KAFKA_ADMIN_OP_ANY)
         * for consistency with do_test_AlterConfigs and so option
         * validation is scoped to DescribeConfigs.
         */
        options = rd_kafka_AdminOptions_new(
                rk, RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS);
        err = rd_kafka_AdminOptions_set_request_timeout(options, 10000, errstr,
                                                        sizeof(errstr));
        TEST_ASSERT(!err, "%s", errstr);

        /*
         * Fire off request
         */
        rd_kafka_DescribeConfigs(rk, configs, ci, options, rkqu);

        rd_kafka_AdminOptions_destroy(options);

        /*
         * Wait for result
         */
        rkev = test_wait_admin_result(rkqu,
                                      RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT,
                                      10000+1000);

        /*
         * Extract result
         */
        res = rd_kafka_event_DescribeConfigs_result(rkev);
        TEST_ASSERT(res, "Expected DescribeConfigs result, not %s",
                    rd_kafka_event_name(rkev));

        err = rd_kafka_event_error(rkev);
        errstr2 = rd_kafka_event_error_string(rkev);
        TEST_ASSERT(!err, "Expected success, not %s: %s",
                    rd_kafka_err2name(err), errstr2);

        rconfigs = rd_kafka_DescribeConfigs_result_resources(res,
                                                             &rconfig_cnt);
        TEST_ASSERT((int)rconfig_cnt == ci,
                    "Expected %d result resources, got %"PRIusz"\n",
                    ci, rconfig_cnt);

        /*
         * Verify status per resource
         */
        for (i = 0 ; i < (int)rconfig_cnt ; i++) {
                const rd_kafka_ConfigEntry_t **entries;
                size_t entry_cnt;

                err = rd_kafka_ConfigResource_error(rconfigs[i]);
                errstr2 = rd_kafka_ConfigResource_error_string(rconfigs[i]);

                entries = rd_kafka_ConfigResource_configs(rconfigs[i],
                                                          &entry_cnt);

                TEST_SAY("ConfigResource #%d: type %s (%d), \"%s\": "
                         "%"PRIusz" ConfigEntries, error %s (%s)\n",
                         i,
                         rd_kafka_ResourceType_name(
                                 rd_kafka_ConfigResource_type(rconfigs[i])),
                         rd_kafka_ConfigResource_type(rconfigs[i]),
                         rd_kafka_ConfigResource_name(rconfigs[i]),
                         entry_cnt,
                         rd_kafka_err2name(err), errstr2 ? errstr2 : "");

                test_print_ConfigEntry_array(entries, entry_cnt, 1);

                /* Result resources must come back in request order with
                 * matching type and name. */
                if (rd_kafka_ConfigResource_type(rconfigs[i]) !=
                    rd_kafka_ConfigResource_type(configs[i]) ||
                    strcmp(rd_kafka_ConfigResource_name(rconfigs[i]),
                           rd_kafka_ConfigResource_name(configs[i]))) {
                        TEST_FAIL_LATER(
                                "ConfigResource #%d: "
                                "expected type %s name %s, "
                                "got type %s name %s",
                                i,
                                rd_kafka_ResourceType_name(rd_kafka_ConfigResource_type(configs[i])),
                                rd_kafka_ConfigResource_name(configs[i]),
                                rd_kafka_ResourceType_name(rd_kafka_ConfigResource_type(rconfigs[i])),
                                rd_kafka_ConfigResource_name(rconfigs[i]));
                        fails++;
                        continue;
                }

                if (err != exp_err[i]) {
                        TEST_FAIL_LATER("ConfigResource #%d: "
                                        "expected %s (%d), got %s (%s)",
                                        i,
                                        rd_kafka_err2name(exp_err[i]),
                                        exp_err[i],
                                        rd_kafka_err2name(err),
                                        errstr2 ? errstr2 : "");
                        fails++;
                }
        }

        TEST_ASSERT(!fails, "See %d previous failure(s)", fails);

        rd_kafka_event_destroy(rkev);

        rd_kafka_ConfigResource_destroy_array(configs, ci);

#undef MY_CONFRES_CNT
}
/**
 * @brief Test AlterConfigs
 *
 * Alters a valid topic config, a broker config (on brokers that
 * support it) and a non-existent topic config, then verifies the
 * per-resource outcome in the result event.
 */
static void do_test_AlterConfigs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) {
#define MY_CONFRES_CNT 3
        char *topics[MY_CONFRES_CNT];
        rd_kafka_ConfigResource_t *configs[MY_CONFRES_CNT];
        rd_kafka_AdminOptions_t *options;
        rd_kafka_resp_err_t exp_err[MY_CONFRES_CNT];
        rd_kafka_event_t *rkev;
        rd_kafka_resp_err_t err;
        const rd_kafka_AlterConfigs_result_t *res;
        const rd_kafka_ConfigResource_t **rconfigs;
        size_t rconfig_cnt;
        char errstr[128];
        const char *errstr2;
        int confidx = 0;
        int ti;
        int fail_cnt = 0;

        /*
         * Only create one topic, the others will be non-existent.
         */
        for (ti = 0 ; ti < MY_CONFRES_CNT ; ti++)
                rd_strdupa(&topics[ti],
                           test_mk_topic_name(__FUNCTION__, 1));

        test_CreateTopics_simple(rk, NULL, topics, 1, 1, NULL);

        /*
         * ConfigResource #0: valid topic config
         */
        configs[confidx] = rd_kafka_ConfigResource_new(
                RD_KAFKA_RESOURCE_TOPIC, topics[confidx]);
        err = rd_kafka_ConfigResource_set_config(configs[confidx],
                                                 "compression.type", "gzip");
        TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
        err = rd_kafka_ConfigResource_set_config(configs[confidx],
                                                 "flush.ms", "12345678");
        TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
        exp_err[confidx] = RD_KAFKA_RESP_ERR_NO_ERROR;
        confidx++;

        if (test_broker_version < TEST_BRKVER(1, 1, 0, 0)) {
                TEST_WARN("Skipping RESOURCE_BROKER test on unsupported "
                          "broker version\n");
        } else {
                /*
                 * ConfigResource #1: valid broker config
                 */
                configs[confidx] = rd_kafka_ConfigResource_new(
                        RD_KAFKA_RESOURCE_BROKER,
                        tsprintf("%"PRId32, avail_brokers[0]));
                err = rd_kafka_ConfigResource_set_config(
                        configs[confidx],
                        "sasl.kerberos.min.time.before.relogin", "58000");
                TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
                exp_err[confidx] = RD_KAFKA_RESP_ERR_NO_ERROR;
                confidx++;
        }

        /*
         * ConfigResource #2: valid topic config, non-existent topic
         */
        configs[confidx] = rd_kafka_ConfigResource_new(
                RD_KAFKA_RESOURCE_TOPIC, topics[confidx]);
        err = rd_kafka_ConfigResource_set_config(configs[confidx],
                                                 "compression.type", "lz4");
        TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
        err = rd_kafka_ConfigResource_set_config(
                configs[confidx], "offset.metadata.max.bytes", "12345");
        TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
        exp_err[confidx] = RD_KAFKA_RESP_ERR_UNKNOWN;
        confidx++;

        /*
         * Timeout options
         */
        options = rd_kafka_AdminOptions_new(rk,
                                            RD_KAFKA_ADMIN_OP_ALTERCONFIGS);
        err = rd_kafka_AdminOptions_set_request_timeout(options, 10000, errstr,
                                                        sizeof(errstr));
        TEST_ASSERT(!err, "%s", errstr);

        /*
         * Fire off request
         */
        rd_kafka_AlterConfigs(rk, configs, confidx, options, rkqu);

        rd_kafka_AdminOptions_destroy(options);

        /*
         * Wait for result
         */
        rkev = test_wait_admin_result(rkqu,
                                      RD_KAFKA_EVENT_ALTERCONFIGS_RESULT,
                                      10000+1000);

        /*
         * Extract result
         */
        res = rd_kafka_event_AlterConfigs_result(rkev);
        TEST_ASSERT(res, "Expected AlterConfigs result, not %s",
                    rd_kafka_event_name(rkev));

        err = rd_kafka_event_error(rkev);
        errstr2 = rd_kafka_event_error_string(rkev);
        TEST_ASSERT(!err, "Expected success, not %s: %s",
                    rd_kafka_err2name(err), errstr2);

        rconfigs = rd_kafka_AlterConfigs_result_resources(res, &rconfig_cnt);
        TEST_ASSERT((int)rconfig_cnt == confidx,
                    "Expected %d result resources, got %"PRIusz"\n",
                    confidx, rconfig_cnt);

        /*
         * Verify status per resource
         */
        for (ti = 0 ; ti < (int)rconfig_cnt ; ti++) {
                const rd_kafka_ConfigEntry_t **entries;
                size_t entry_cnt;

                err = rd_kafka_ConfigResource_error(rconfigs[ti]);
                errstr2 = rd_kafka_ConfigResource_error_string(rconfigs[ti]);

                entries = rd_kafka_ConfigResource_configs(rconfigs[ti],
                                                          &entry_cnt);

                TEST_SAY("ConfigResource #%d: type %s (%d), \"%s\": "
                         "%"PRIusz" ConfigEntries, error %s (%s)\n",
                         ti,
                         rd_kafka_ResourceType_name(
                                 rd_kafka_ConfigResource_type(rconfigs[ti])),
                         rd_kafka_ConfigResource_type(rconfigs[ti]),
                         rd_kafka_ConfigResource_name(rconfigs[ti]),
                         entry_cnt,
                         rd_kafka_err2name(err), errstr2 ? errstr2 : "");

                test_print_ConfigEntry_array(entries, entry_cnt, 1);

                /* Results must match the request order, type and name. */
                if (rd_kafka_ConfigResource_type(rconfigs[ti]) !=
                    rd_kafka_ConfigResource_type(configs[ti]) ||
                    strcmp(rd_kafka_ConfigResource_name(rconfigs[ti]),
                           rd_kafka_ConfigResource_name(configs[ti]))) {
                        TEST_FAIL_LATER(
                                "ConfigResource #%d: "
                                "expected type %s name %s, "
                                "got type %s name %s",
                                ti,
                                rd_kafka_ResourceType_name(rd_kafka_ConfigResource_type(configs[ti])),
                                rd_kafka_ConfigResource_name(configs[ti]),
                                rd_kafka_ResourceType_name(rd_kafka_ConfigResource_type(rconfigs[ti])),
                                rd_kafka_ConfigResource_name(rconfigs[ti]));
                        fail_cnt++;
                        continue;
                }

                if (err != exp_err[ti]) {
                        TEST_FAIL_LATER("ConfigResource #%d: "
                                        "expected %s (%d), got %s (%s)",
                                        ti,
                                        rd_kafka_err2name(exp_err[ti]),
                                        exp_err[ti],
                                        rd_kafka_err2name(err),
                                        errstr2 ? errstr2 : "");
                        fail_cnt++;
                }
        }

        TEST_ASSERT(!fail_cnt, "See %d previous failure(s)", fail_cnt);

        rd_kafka_event_destroy(rkev);

        rd_kafka_ConfigResource_destroy_array(configs, confidx);

#undef MY_CONFRES_CNT
}