/**
 * Serves the entire OpenSSL error queue and logs each error.
 * The last error is not logged but returned in 'errstr'.
 *
 * If 'rkb' is non-NULL broker-specific logging will be used,
 * else it will fall back on global 'rk' debugging.
 *
 * @param errstr      output buffer, also used as scratch: each queued error
 *                    is formatted into it and the previous content is logged
 *                    one iteration later, so only the final error remains.
 * @returns errstr (always), containing "No error" if the queue was empty.
 */
static char *rd_kafka_ssl_error (rd_kafka_t *rk, rd_kafka_broker_t *rkb,
                                 char *errstr, size_t errstr_size) {
        unsigned long l;
        const char *file, *data;
        int line, flags;
        int cnt = 0;

        /* Drain the thread's OpenSSL error queue, newest-last. */
        while ((l = ERR_get_error_line_data(&file, &line,
                                            &data, &flags)) != 0) {
                char buf[256];

                if (cnt++ > 0) {
                        /* Log last message: errstr still holds the error
                         * formatted in the previous iteration. */
                        if (rkb)
                                rd_rkb_log(rkb, LOG_ERR, "SSL", "%s", errstr);
                        else
                                rd_kafka_log(rk, LOG_ERR, "SSL", "%s", errstr);
                }

                ERR_error_string_n(l, buf, sizeof(buf));

                /* 'data' is only a printable string if ERR_TXT_STRING
                 * is set in 'flags'. */
                rd_snprintf(errstr, errstr_size, "%s:%d: %s: %s",
                            file, line, buf,
                            (flags & ERR_TXT_STRING) ? data : "");
        }

        if (cnt == 0)
                rd_snprintf(errstr, errstr_size, "No error");

        return errstr;
}
/**
 * @brief libsasl user/authname canonicalization callback.
 *
 * For GSSAPI the configured principal is returned, for PLAIN the
 * input name is passed through unchanged; other mechanisms fail.
 *
 * @param out_len set to the length written to \p out (0 on failure).
 * @returns SASL_OK on success, else SASL_FAIL.
 */
static RD_UNUSED int
rd_kafka_sasl_cb_canon (sasl_conn_t *conn, void *context,
                        const char *in, unsigned inlen,
                        unsigned flags, const char *user_realm,
                        char *out, unsigned out_max, unsigned *out_len) {
        rd_kafka_transport_t *rktrans = context;

        if (strstr(rktrans->rktrans_rkb->rkb_rk->rk_conf.
                   sasl.mechanisms, "GSSAPI")) {
                *out_len = rd_snprintf(out, out_max, "%s",
                                       rktrans->rktrans_rkb->rkb_rk->
                                       rk_conf.sasl.principal);
        } else if (!strcmp(rktrans->rktrans_rkb->rkb_rk->rk_conf.
                           sasl.mechanisms, "PLAIN")) {
                *out_len = rd_snprintf(out, out_max, "%.*s", inlen, in);
        } else {
                /* FIX: *out_len was previously left uninitialized on this
                 * path and then read by the debug log below (undefined
                 * behavior, and garbage reported to libsasl callers). */
                *out_len = 0;
                out = NULL;
        }

        /* FIX: guard against passing NULL to "%.*s" (UB in printf). */
        rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL",
                   "CB_CANON: flags 0x%x, \"%.*s\" @ \"%s\": "
                   "returning \"%.*s\"",
                   flags, (int)inlen, in, user_realm,
                   (int)(*out_len), out ? out : "");

        return out ? SASL_OK : SASL_FAIL;
}
/**
 * @brief Write a canonical hexdump of \p ptr (\p len bytes) to \p fp,
 *        16 bytes per row: offset, hex bytes, printable characters.
 *
 * @param name optional label printed as a header line (may be NULL).
 */
void rd_hexdump (FILE *fp, const char *name, const void *ptr, size_t len) {
        const char *p = (const char *)ptr;
        size_t of = 0;

        if (name)
                fprintf(fp, "%s hexdump (%"PRIusz" bytes):\n", name, len);

        for (of = 0 ; of < len ; of += 16) {
                char hexen[16*3+1];
                char charen[16+1];
                int hof = 0;
                int cof = 0;
                /* FIX: index was 'unsigned int' but is compared against
                 * size_t bounds; use size_t to avoid truncation/wraparound
                 * for very large buffers. */
                size_t i;

                for (i = of ; i < of + 16 && i < len ; i++) {
                        hof += rd_snprintf(hexen+hof, sizeof(hexen)-hof,
                                           "%02x ", p[i] & 0xff);
                        /* FIX: <ctype.h> functions require an unsigned char
                         * value; a plain (possibly negative) char is UB. */
                        cof += rd_snprintf(charen+cof, sizeof(charen)-cof,
                                           "%c",
                                           isprint((unsigned char)p[i]) ?
                                           p[i] : '.');
                }
                fprintf(fp, "%08zx: %-48s %-16s\n", of, hexen, charen);
        }
}
/**
 * @brief Build a test topic name from the configured prefix and \p suffix.
 *
 * A random id is embedded when \p randomized is set or the global
 * test_topic_random flag is enabled.
 *
 * @returns a thread-local static buffer: valid until the next call
 *          from the same thread.
 */
const char *test_mk_topic_name (const char *suffix, int randomized) {
        static RD_TLS char ret[128];
        int want_random = test_topic_random || randomized;

        if (!want_random)
                rd_snprintf(ret, sizeof(ret), "%s_%s",
                            test_topic_prefix, suffix);
        else
                rd_snprintf(ret, sizeof(ret), "%s_%"PRIx64"_%s",
                            test_topic_prefix, test_id_generate(), suffix);

        TEST_SAY("Using topic \"%s\"\n", ret);

        return ret;
}
/**
 * @brief Non-blocking scattered send on the transport socket.
 *
 * POSIX: single sendmsg() with MSG_DONTWAIT (and MSG_NOSIGNAL if available).
 * Windows: emulates sendmsg() by send()ing each iovec in turn.
 *
 * @returns number of bytes sent, 0 if the operation would block,
 *          or -1 on error with \p errstr written.
 */
static ssize_t
rd_kafka_transport_socket_sendmsg (rd_kafka_transport_t *rktrans,
                                   const struct msghdr *msg,
                                   char *errstr, size_t errstr_size) {
#ifndef _MSC_VER
        ssize_t r;

#ifdef sun
        /* See recvmsg() comment. Setting it here to be safe. */
        socket_errno = EAGAIN;
#endif
        r = sendmsg(rktrans->rktrans_s, msg, MSG_DONTWAIT
#ifdef MSG_NOSIGNAL
                    | MSG_NOSIGNAL
#endif
                );
        if (r == -1) {
                if (socket_errno == EAGAIN)
                        return 0;
                rd_snprintf(errstr, errstr_size, "%s", rd_strerror(errno));
        }
        return r;
#else
        int i;
        ssize_t sum = 0;

        for (i = 0; i < msg->msg_iovlen; i++) {
                ssize_t r;

                r = send(rktrans->rktrans_s,
                         msg->msg_iov[i].iov_base,
                         (int) msg->msg_iov[i].iov_len, 0);
                if (r == SOCKET_ERROR) {
                        /* Report partial progress rather than an error if
                         * some earlier iovec was already sent. */
                        if (sum > 0 || WSAGetLastError() == WSAEWOULDBLOCK)
                                return sum;
                        else {
                                rd_snprintf(errstr, errstr_size, "%s",
                                            socket_strerror(WSAGetLastError()));
                                return -1;
                        }
                }

                sum += r;
                /* Short send: stop, the remainder will be retried later. */
                if ((size_t)r < msg->msg_iov[i].iov_len)
                        break;
        }
        return sum;
#endif
}
/**
 * Format a message token
 *
 * Renders the canonical "testid=..., partition=..., msg=..." token
 * that test_msg_parse0()/verify functions later sscanf() back out.
 */
void test_msg_fmt (char *dest, size_t dest_size, uint64_t testid,
                   int32_t partition, int msgid) {
        (void)rd_snprintf(dest, dest_size,
                          "testid=%"PRIu64", partition=%"PRId32", msg=%i",
                          testid, partition, msgid);
}
/**
 * @brief Validate SASL-related configuration.
 *
 * Only the GSSAPI mechanism has anything to check: if a kinit command
 * template is configured it is test-rendered against a dummy broker so
 * template errors are caught at configure time rather than at connect time.
 *
 * @returns 0 on success, -1 on error with \p errstr written.
 */
int rd_kafka_sasl_conf_validate (rd_kafka_t *rk,
                                 char *errstr, size_t errstr_size) {

        if (strcmp(rk->rk_conf.sasl.mechanisms, "GSSAPI"))
                return 0;

        if (rk->rk_conf.sasl.kinit_cmd) {
                rd_kafka_broker_t rkb;
                char *cmd;
                char tmperr[128];

                /* Fake broker only used as render context; the nodename
                 * literal fits rkb_nodename's fixed-size array. */
                memset(&rkb, 0, sizeof(rkb));
                strcpy(rkb.rkb_nodename, "ATestBroker:9092");
                rkb.rkb_rk = rk;
                /* render_callback may lock the broker: init a real mutex. */
                mtx_init(&rkb.rkb_lock, mtx_plain);

                cmd = rd_string_render(rk->rk_conf.sasl.kinit_cmd,
                                       tmperr, sizeof(tmperr),
                                       render_callback, &rkb);

                mtx_destroy(&rkb.rkb_lock);

                if (!cmd) {
                        rd_snprintf(errstr, errstr_size,
                                    "Invalid sasl.kerberos.kinit.cmd value: %s",
                                    tmperr);
                        return -1;
                }

                rd_free(cmd);
        }

        return 0;
}
/**
 * @brief Verify that a consumed message's key matches this test run
 *        (testid, partition, msgnum) and record it in cons_msgs[].
 *
 * Fails the test on any mismatch, malformed key, or cons_msgs overflow.
 * NOTE(review): the payload is required to be empty (len == 0); all
 * verification data travels in the message key.
 */
static void verify_consumed_msg0 (const char *func, int line,
                                  uint64_t testid, int32_t partition,
                                  int msgnum,
                                  rd_kafka_message_t *rkmessage) {
        uint64_t in_testid;
        int in_part;
        int in_msgnum;
        char buf[128];

        if (rkmessage->len != 0)
                TEST_FAIL("Incoming message not NULL: %i bytes",
                          (int)rkmessage->len);

        if (rkmessage->key_len +1 >= sizeof(buf))
                TEST_FAIL("Incoming message key too large (%i): "
                          "not sourced by this test",
                          (int)rkmessage->key_len);

        /* Copy the (non NUL-terminated) key into a scannable buffer. */
        rd_snprintf(buf, sizeof(buf), "%.*s",
                    (int)rkmessage->key_len, (char *)rkmessage->key);

        if (sscanf(buf, "testid=%"SCNu64", partition=%i, msg=%i",
                   &in_testid, &in_part, &in_msgnum) != 3)
                TEST_FAIL("Incorrect key format: %s", buf);

        /* msgnum/partition of -1 act as wildcards. */
        if (testid != in_testid ||
            (partition != -1 && partition != in_part) ||
            (msgnum != -1 && msgnum != in_msgnum) ||
            (in_msgnum < 0 || in_msgnum > cons_msgs_size))
                goto fail_match;

        if (test_level > 2) {
                TEST_SAY("%s:%i: Our testid %"PRIu64", part %i (%i), "
                         "msg %i/%i did "
                         ", key's: \"%s\"\n",
                         func, line,
                         testid, (int)partition,
                         (int)rkmessage->partition,
                         msgnum, cons_msgs_size, buf);
        }

        if (cons_msgs_cnt == cons_msgs_size) {
                TEST_SAY("Too many messages in cons_msgs (%i) while reading "
                         "message key \"%s\"\n",
                         cons_msgs_cnt, buf);
                verify_consumed_msg_check();
                TEST_FAIL("See above error(s)");
        }

        cons_msgs[cons_msgs_cnt++] = in_msgnum;

        return;

fail_match:
        TEST_FAIL("%s:%i: Our testid %"PRIu64", part %i, msg %i/%i did "
                  "not match message's key: \"%s\"\n",
                  func, line,
                  testid, (int)partition, msgnum, cons_msgs_size, buf);
}
/**
 * @brief Non-blocking scattered receive on the transport socket.
 *
 * POSIX: single recvmsg() with MSG_DONTWAIT.
 * Windows: emulates recvmsg() by recv()ing into each iovec in turn.
 *
 * @returns number of bytes received, 0 if the operation would block,
 *          or -1 on error/disconnect with \p errstr written.
 */
static ssize_t
rd_kafka_transport_socket_recvmsg (rd_kafka_transport_t *rktrans,
                                   struct msghdr *msg,
                                   char *errstr, size_t errstr_size) {
#ifndef _MSC_VER
        ssize_t r;

#ifdef sun
        /* SunOS doesn't seem to set errno when recvmsg() fails
         * due to no data and MSG_DONTWAIT is set. */
        socket_errno = EAGAIN;
#endif
        r = recvmsg(rktrans->rktrans_s, msg, MSG_DONTWAIT);
        if (r == -1 && socket_errno == EAGAIN)
                return 0;
        else if (r == 0) {
                /* Receive 0 after POLLIN event means connection closed. */
                rd_snprintf(errstr, errstr_size, "Disconnected");
                return -1;
        } else if (r == -1)
                rd_snprintf(errstr, errstr_size, "%s", rd_strerror(errno));

        return r;
#else
        ssize_t sum = 0;
        int i;

        for (i = 0; i < msg->msg_iovlen; i++) {
                ssize_t r;

                r = recv(rktrans->rktrans_s,
                         msg->msg_iov[i].iov_base,
                         (int) msg->msg_iov[i].iov_len, 0);
                if (r == SOCKET_ERROR) {
                        if (WSAGetLastError() == WSAEWOULDBLOCK)
                                break;
                        rd_snprintf(errstr, errstr_size, "%s",
                                    socket_strerror(WSAGetLastError()));
                        return -1;
                }
                sum += r;
                /* Short read: no more data available right now. */
                if ((size_t)r < msg->msg_iov[i].iov_len)
                        break;
        }
        return sum;
#endif
}
/**
 * @brief Run a single test's main function, timing it and capturing its
 *        statistics output to a per-run JSON file.
 *
 * Updates test state (PASSED/FAILED, duration) under the global test lock.
 * The stats file is deleted afterwards if nothing was written to it.
 *
 * @returns the test main function's return value (0 = pass).
 */
static int run_test0 (struct run_args *run_args) {
        struct test *test = run_args->test;
        test_timing_t t_run;
        int r;
        char stats_file[256];

        /* Stats file name is unique per run via a generated id. */
        rd_snprintf(stats_file, sizeof(stats_file), "stats_%s_%"PRIu64".json",
                    test->name, test_id_generate());
        if (!(test->stats_fp = fopen(stats_file, "w+")))
                TEST_SAY("=== Failed to create stats file %s: %s ===\n",
                         stats_file, strerror(errno));

        test_curr = test;
        TEST_SAY("================= Running test %s =================\n",
                 test->name);
        if (test->stats_fp)
                TEST_SAY("==== Stats written to file %s ====\n", stats_file);
        TIMING_START(&t_run, test->name);
        test->start = t_run.ts_start;
        r = test->mainfunc(run_args->argc, run_args->argv);
        TIMING_STOP(&t_run);

        TEST_LOCK();
        test->duration = TIMING_DURATION(&t_run);
        if (r) {
                test->state = TEST_FAILED;
                /* \033[31m / \033[32m: red/green ANSI colour codes. */
                TEST_SAY("\033[31m"
                         "================= Test %s FAILED ================="
                         "\033[0m\n",
                         run_args->test->name);
        } else {
                test->state = TEST_PASSED;
                TEST_SAY("\033[32m"
                         "================= Test %s PASSED ================="
                         "\033[0m\n",
                         run_args->test->name);
        }
        TEST_UNLOCK();

        if (test->stats_fp) {
                long pos = ftell(test->stats_fp);
                fclose(test->stats_fp);
                test->stats_fp = NULL;

                /* Delete file if nothing was written */
                if (pos == 0) {
#ifndef _MSC_VER
                        unlink(stats_file);
#else
                        _unlink(stats_file);
#endif
                }
        }

        return r;
}
/**
 * Allocate a new queue and initialize it.
 *
 * @param func,line allocation site, recorded as the queue name
 *                  (full "func:line" string only in devel builds).
 */
rd_kafka_q_t *rd_kafka_q_new0 (rd_kafka_t *rk, const char *func, int line) {
        rd_kafka_q_t *rkq = rd_malloc(sizeof(*rkq));
        rd_kafka_q_init(rkq, rk);
        /* Mark as heap-allocated so destroy knows to free it. */
        rkq->rkq_flags |= RD_KAFKA_Q_F_ALLOCATED;
#if ENABLE_DEVEL
        rd_snprintf(rkq->rkq_name, sizeof(rkq->rkq_name), "%s:%d",
                    func, line);
#else
        rkq->rkq_name = func;
#endif
        return rkq;
}
/**
 * @brief Produce \p cnt messages (ids msg_base..msg_base+cnt-1) and wait
 *        for all of them to be delivered before returning.
 *
 * If \p payload is NULL a per-message payload is generated; the message
 * key always carries the canonical testid/partition/msgid token.
 *
 * NOTE(review): &remains (stack variable) is passed as msg_opaque;
 * presumably the delivery report callback decrements it — the wait loop
 * below depends on that. Verify against the configured dr_cb.
 */
void test_produce_msgs (rd_kafka_t *rk, rd_kafka_topic_t *rkt,
                        uint64_t testid, int32_t partition,
                        int msg_base, int cnt,
                        const char *payload, size_t size) {
        int msg_id;
        test_timing_t t_all;
        int remains = 0;

        TEST_SAY("Produce to %s [%"PRId32"]: messages #%d..%d\n",
                 rd_kafka_topic_name(rkt), partition, msg_base, msg_base+cnt);

        TIMING_START(&t_all, "PRODUCE");

        for (msg_id = msg_base ; msg_id < msg_base + cnt ; msg_id++) {
                char key[128];
                char buf[128];
                const char *use_payload;
                size_t use_size;

                if (payload) {
                        use_payload = payload;
                        use_size = size;
                } else {
                        test_msg_fmt(key, sizeof(key), testid, partition,
                                     msg_id);
                        rd_snprintf(buf, sizeof(buf), "data: %s", key);
                        use_payload = buf;
                        use_size = strlen(buf);
                }

                remains++;
                if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
                                     (void *)use_payload, use_size,
                                     key, strlen(key), &remains) == -1)
                        TEST_FAIL("Failed to produce message %i "
                                  "to partition %i: %s",
                                  msg_id, (int)partition,
                                  rd_kafka_err2str(rd_kafka_errno2err(errno)));
        }

        /* Wait for messages to be delivered */
        while (remains > 0 && rd_kafka_outq_len(rk) > 0)
                rd_kafka_poll(rk, 10);

        TIMING_STOP(&t_all);
}
/**
 * @brief Test 0001: create, use and destroy multiple rd_kafka_t instances
 *        in sequence, producing one message per instance, to verify that
 *        repeated client construction/destruction is leak- and crash-free.
 */
int main_0001_multiobj (int argc, char **argv) {
        int partition = RD_KAFKA_PARTITION_UA; /* random */
        int i;
        const int NUM_ITER = 10;
        const char *topic = NULL;

        TEST_SAY("Creating and destroying %i kafka instances\n", NUM_ITER);

        /* Create, use and destroy NUM_ITER kafka instances. */
        for (i = 0 ; i < NUM_ITER ; i++) {
                rd_kafka_t *rk;
                rd_kafka_topic_t *rkt;
                rd_kafka_conf_t *conf;
                rd_kafka_topic_conf_t *topic_conf;
                char msg[128];
                test_timing_t t_destroy;

                test_conf_init(&conf, &topic_conf, 30);

                /* Same topic name is reused across all iterations. */
                if (!topic)
                        topic = test_mk_topic_name("0001", 0);

                rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

                rkt = rd_kafka_topic_new(rk, topic, topic_conf);
                if (!rkt)
                        TEST_FAIL("Failed to create topic for "
                                  "rdkafka instance #%i: %s\n",
                                  i, rd_kafka_err2str(rd_kafka_errno2err(errno)));

                rd_snprintf(msg, sizeof(msg),
                            "%s test message for iteration #%i", argv[0], i);

                /* Produce a message */
                rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
                                 msg, strlen(msg), NULL, 0, NULL);

                /* Wait for it to be sent (and possibly acked) */
                rd_kafka_flush(rk, -1);

                /* Destroy topic */
                rd_kafka_topic_destroy(rkt);

                /* Destroy rdkafka instance */
                TIMING_START(&t_destroy, "rd_kafka_destroy()");
                rd_kafka_destroy(rk);
                TIMING_STOP(&t_destroy);
        }

        return 0;
}
/**
 * Set transport IO event polling based on SSL error.
 *
 * Returns -1 on permanent errors.
 *
 * Locality: broker thread
 */
static RD_INLINE int
rd_kafka_transport_ssl_io_update (rd_kafka_transport_t *rktrans, int ret,
                                  char *errstr, size_t errstr_size) {
        int serr = SSL_get_error(rktrans->rktrans_ssl, ret);
        unsigned long serr2;

        switch (serr)
        {
        case SSL_ERROR_WANT_READ:
                rd_kafka_transport_poll_set(rktrans, POLLIN);
                break;

        case SSL_ERROR_WANT_WRITE:
        case SSL_ERROR_WANT_CONNECT:
                rd_kafka_transport_poll_set(rktrans, POLLOUT);
                break;

        case SSL_ERROR_SYSCALL:
                /* FIX: this previously called SSL_get_error() a second time
                 * with the same arguments, which by definition returns the
                 * same non-zero SSL_ERROR_SYSCALL value, making the
                 * "no queued error" branch unreachable.  Per the OpenSSL
                 * docs, SSL_ERROR_SYSCALL means the error queue and errno
                 * should be consulted. */
                if (!(serr2 = ERR_get_error())) {
                        if (ret == 0)
                                /* EOF without close_notify: treat as reset. */
                                errno = ECONNRESET;
                        rd_snprintf(errstr, errstr_size,
                                    "SSL syscall error: %s",
                                    rd_strerror(errno));
                } else
                        rd_snprintf(errstr, errstr_size,
                                    "SSL syscall error number: %lu: %s",
                                    serr2, rd_strerror(errno));
                return -1;

        default:
                /* Drain and report the OpenSSL error queue. */
                rd_kafka_ssl_error(NULL, rktrans->rktrans_rkb,
                                   errstr, errstr_size);
                return -1;
        }

        return 0;
}
/** * Convert an absolute or logical offset to string. */ const char *rd_kafka_offset2str (int64_t offset) { static RD_TLS char ret[16][32]; static RD_TLS int i = 0; i = (i + 1) % 16; if (offset >= 0) rd_snprintf(ret[i], sizeof(ret[i]), "%"PRId64, offset); else if (offset == RD_KAFKA_OFFSET_BEGINNING) return "BEGINNING"; else if (offset == RD_KAFKA_OFFSET_END) return "END"; else if (offset == RD_KAFKA_OFFSET_STORED) return "STORED"; else if (offset == RD_KAFKA_OFFSET_INVALID) return "INVALID"; else if (offset <= RD_KAFKA_OFFSET_TAIL_BASE) rd_snprintf(ret[i], sizeof(ret[i]), "TAIL(%lld)", llabs(offset - RD_KAFKA_OFFSET_TAIL_BASE)); else rd_snprintf(ret[i], sizeof(ret[i]), "%"PRId64"?", offset); return ret[i]; }
/**
 * Produces \p cnt messages and returns immediately.
 * Does not wait for delivery.
 * \p msgcounterp is incremented for each produced messages and passed
 * as \p msg_opaque which is later used in test_dr_cb to decrement
 * the counter on delivery.
 */
void test_produce_msgs_nowait (rd_kafka_t *rk, rd_kafka_topic_t *rkt,
                               uint64_t testid, int32_t partition,
                               int msg_base, int cnt,
                               const char *payload, size_t size,
                               int *msgcounterp) {
        int msg_id;
        test_timing_t t_all;

        TEST_SAY("Produce to %s [%"PRId32"]: messages #%d..%d\n",
                 rd_kafka_topic_name(rkt), partition, msg_base, msg_base+cnt);

        TIMING_START(&t_all, "PRODUCE");

        for (msg_id = msg_base ; msg_id < msg_base + cnt ; msg_id++) {
                char key[128];
                char buf[128];
                const char *use_payload;
                size_t use_size;

                if (payload) {
                        use_payload = payload;
                        use_size = size;
                } else {
                        /* Generated payload embeds the same token as the
                         * key so either can be verified on consumption. */
                        test_msg_fmt(key, sizeof(key), testid, partition,
                                     msg_id);
                        rd_snprintf(buf, sizeof(buf), "%s: data", key);
                        use_payload = buf;
                        use_size = strlen(buf);
                }

                if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
                                     (void *)use_payload, use_size,
                                     key, strlen(key), msgcounterp) == -1)
                        TEST_FAIL("Failed to produce message %i "
                                  "to partition %i: %s",
                                  msg_id, (int)partition,
                                  rd_kafka_err2str(rd_kafka_errno2err(errno)));

                (*msgcounterp)++;
        }

        TIMING_STOP(&t_all);
}
/**
 * @brief Create a consumer instance for tests.
 *
 * @param group_id            optional consumer group id (NULL = none).
 * @param rebalance_cb        optional rebalance callback.
 * @param default_topic_conf  optional topic conf; ownership is passed
 *                            to the main conf object.
 * @param opaque              conf opaque passed through to callbacks.
 *
 * Fails the test on any configuration or creation error.
 */
rd_kafka_t *test_create_consumer (const char *group_id,
                                  void (*rebalance_cb) (
                                          rd_kafka_t *rk,
                                          rd_kafka_resp_err_t err,
                                          rd_kafka_topic_partition_list_t
                                          *partitions,
                                          void *opaque),
                                  rd_kafka_topic_conf_t *default_topic_conf,
                                  void *opaque) {
        rd_kafka_t *rk;
        rd_kafka_conf_t *conf;
        char errstr[512];
        char tmp[64];

        test_conf_init(&conf, NULL, 20);

        if (group_id) {
                if (rd_kafka_conf_set(conf, "group.id", group_id,
                                      errstr, sizeof(errstr)) !=
                    RD_KAFKA_CONF_OK)
                        TEST_FAIL("Conf failed: %s\n", errstr);
        }

        /* Apply the test suite's global session timeout. */
        rd_snprintf(tmp, sizeof(tmp), "%d", test_session_timeout_ms);
        if (rd_kafka_conf_set(conf, "session.timeout.ms", tmp,
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
                TEST_FAIL("Conf failed: %s\n", errstr);

        rd_kafka_conf_set_opaque(conf, opaque);

        if (rebalance_cb)
                rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);

        if (default_topic_conf)
                rd_kafka_conf_set_default_topic_conf(conf,
                                                     default_topic_conf);

        /* Create kafka instance */
        rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
        if (!rk)
                TEST_FAIL("Failed to create rdkafka instance: %s\n", errstr);

        TEST_SAY("Created    kafka instance %s\n", rd_kafka_name(rk));

        return rk;
}
/**
 * @brief Build client-final-message-without-proof
 * @remark out->ptr will be allocated and must be freed.
 */
static void
rd_kafka_sasl_scram_build_client_final_message_wo_proof (
        struct rd_kafka_sasl_scram_state *state,
        const char *snonce, rd_chariov_t *out) {
        /* "biws" is the base64 encoding of the GS2 header "n,," */
        const char *attr_c = "biws";
        size_t fixed_size = strlen("c=,r=") + strlen(attr_c);

        /*
         * client-final-message-without-proof =
         *            channel-binding "," nonce [","
         *            extensions]
         */
        out->size = fixed_size + state->cnonce.size + strlen(snonce);
        out->ptr  = rd_malloc(out->size + 1);

        rd_snprintf(out->ptr, out->size + 1, "c=%s,r=%.*s%s",
                    attr_c,
                    (int)state->cnonce.size, state->cnonce.ptr,
                    snonce);
}
/**
 * @brief Attempt to load library \p path.
 * @returns the library handle (platform dependent, thus opaque) on success,
 *          else NULL.
 *
 * On failure \p errstr is written with the platform loader's error string.
 */
rd_dl_hnd_t *rd_dl_open (const char *path, char *errstr, size_t errstr_size) {
        void *handle;
        const char *loadfunc;
#if WITH_LIBDL
        loadfunc = "dlopen()";
        handle = dlopen(path, RTLD_NOW | RTLD_LOCAL);
#elif defined(_MSC_VER)
        loadfunc = "LoadLibrary()";
        handle = (void *)LoadLibraryA(path);
#endif
        if (!handle) {
                /* rd_dl_error() returns an allocated string: free it. */
                char *dlerrstr = rd_dl_error();
                rd_snprintf(errstr, errstr_size, "%s failed: %s",
                            loadfunc, dlerrstr);
                rd_free(dlerrstr);
        }
        return (rd_dl_hnd_t *)handle;
}
/**
 * @brief look up address of \p symbol in library handle \p handle
 * @returns the function pointer on success or NULL on error.
 *
 * On failure \p errstr is written with the platform loader's error string.
 */
void *
rd_dl_sym (rd_dl_hnd_t *handle, const char *symbol,
           char *errstr, size_t errstr_size) {
        void *addr;

#if WITH_LIBDL
        addr = dlsym((void *)handle, symbol);
#elif defined(_MSC_VER)
        addr = GetProcAddress((HMODULE)handle, symbol);
#endif

        if (addr)
                return addr;

        /* Lookup failed: report the loader's error string. */
        {
                char *dlerrstr = rd_dl_error();
                rd_snprintf(errstr, errstr_size,
                            "Failed to load symbol \"%s\": %s",
                            symbol, dlerrstr);
                rd_free(dlerrstr);
        }

        return NULL;
}
/**
 * Creates and sets up kafka configuration objects.
 * Will read "test.conf" file if it exists.
 *
 * @param conf       if non-NULL, receives a new rd_kafka_conf_t with test
 *                   error/stats callbacks installed.
 * @param topic_conf if non-NULL, receives a new topic conf.
 * @param timeout    test timeout in seconds; may be overridden by the
 *                   config file, applied via test_timeout_set() if non-zero.
 */
void test_conf_init (rd_kafka_conf_t **conf, rd_kafka_topic_conf_t **topic_conf,
                     int timeout) {
        char buf[512];
        /* Config file path can be overridden via env (not on Windows). */
        const char *test_conf =
#ifndef _MSC_VER
                getenv("RDKAFKA_TEST_CONF") ? getenv("RDKAFKA_TEST_CONF") :
#endif
                "test.conf";

        if (conf) {
#ifndef _MSC_VER
                char *tmp;
#endif

                *conf = rd_kafka_conf_new();
                rd_kafka_conf_set_error_cb(*conf, test_error_cb);
                rd_kafka_conf_set_stats_cb(*conf, test_stats_cb);

#ifndef _MSC_VER
                if ((tmp = getenv("TEST_DEBUG")) && *tmp)
                        test_conf_set(*conf, "debug", tmp);
#endif

#ifdef SIGIO
                /* Quick termination */
                rd_snprintf(buf, sizeof(buf), "%i", SIGIO);
                rd_kafka_conf_set(*conf, "internal.termination.signal",
                                  buf, NULL, 0);
                signal(SIGIO, SIG_IGN);
#endif
        }

        if (topic_conf)
                *topic_conf = rd_kafka_topic_conf_new();

        /* Open and read optional local test configuration file, if any. */
        test_read_conf_file(test_conf,
                            conf ? *conf : NULL,
                            topic_conf ? *topic_conf : NULL, &timeout);

        if (timeout)
                test_timeout_set(timeout);
}
int rd_kafka_transport_poll(rd_kafka_transport_t *rktrans, int tmout) { #ifndef _MSC_VER int r; r = poll(&rktrans->rktrans_pfd, 1, tmout); if (r <= 0) return r; return rktrans->rktrans_pfd.revents; #else int r; r = WSAPoll(&rktrans->rktrans_pfd, 1, tmout); if (r == 0) { /* Workaround for broken WSAPoll() while connecting: * failed connection attempts are not indicated at all by WSAPoll() * so we need to check the socket error when Poll returns 0. * Issue #525 */ r = ECONNRESET; if (unlikely(rktrans->rktrans_rkb->rkb_state == RD_KAFKA_BROKER_STATE_CONNECT && (rd_kafka_transport_get_socket_error(rktrans, &r) == -1 || r != 0))) { char errstr[512]; errno = r; rd_snprintf(errstr, sizeof(errstr), "Connect to %s failed: %s", rd_sockaddr2str(rktrans->rktrans_rkb-> rkb_addr_last, RD_SOCKADDR2STR_F_PORT | RD_SOCKADDR2STR_F_FAMILY), socket_strerror(r)); rd_kafka_transport_connect_done(rktrans, errstr); return -1; } else return 0; } else if (r == SOCKET_ERROR) return -1; return rktrans->rktrans_pfd.revents; #endif }
/**
 * @brief Parses inbuf for SCRAM attribute \p attr (e.g., 's')
 * @returns a newly allocated copy of the value, or NULL
 *          on failure in which case an error is written to \p errstr
 *          prefixed by \p description.
 *
 * The input is a comma-separated list of "x=value" tokens; \p inbuf is
 * not NUL-terminated so all scanning is bounded by inbuf->size.
 */
static char *rd_kafka_sasl_scram_get_attr (const rd_chariov_t *inbuf,
                                           char attr,
                                           const char *description,
                                           char *errstr,
                                           size_t errstr_size) {
        size_t of = 0;

        for (of = 0 ; of < inbuf->size ; ) {
                const char *td;
                size_t len;

                /* Find next delimiter , (if any) */
                td = memchr(&inbuf->ptr[of], ',', inbuf->size - of);
                if (td)
                        len = (size_t)(td - &inbuf->ptr[of]);
                else
                        len = inbuf->size - of;

                /* Check if attr "x=" matches */
                if (inbuf->ptr[of] == attr && inbuf->size > of+1 &&
                    inbuf->ptr[of+1] == '=') {
                        char *ret;

                        of += 2; /* past = */
                        /* 'len' includes the "x=" prefix: value is len-2. */
                        ret = rd_malloc(len - 2 + 1);
                        memcpy(ret, &inbuf->ptr[of], len - 2);
                        ret[len-2] = '\0';
                        return ret;
                }

                /* Not the attr we are looking for, skip
                 * past the next delimiter and continue looking. */
                of += len+1;
        }

        rd_snprintf(errstr, errstr_size,
                    "%s: could not find attribute (%c)",
                    description, attr);
        return NULL;
}
/**
 * @brief Verify that a message payload matches this test run's
 *        testid/partition/msgnum token (msgnum/partition of -1 = wildcard).
 *
 * Fails the test on format or content mismatch.
 */
void test_verify_rkmessage0 (const char *func, int line,
                             rd_kafka_message_t *rkmessage, uint64_t testid,
                             int32_t partition, int msgnum) {
        uint64_t in_testid;
        int in_part;
        int in_msgnum;
        char buf[128];

        /* Copy the (non NUL-terminated) payload into a scannable buffer. */
        rd_snprintf(buf, sizeof(buf), "%.*s",
                    (int)rkmessage->len, (char *)rkmessage->payload);

        /* FIX: in_testid is uint64_t and must be scanned with SCNu64;
         * SCNd64 was a signed/unsigned mismatch (cf. verify_consumed_msg0()
         * and the PRIu64 used by test_msg_fmt()). */
        if (sscanf(buf, "testid=%"SCNu64", partition=%i, msg=%i",
                   &in_testid, &in_part, &in_msgnum) != 3)
                TEST_FAIL("Incorrect format: %s", buf);

        if (testid != in_testid ||
            (partition != -1 && partition != in_part) ||
            (msgnum != -1 && msgnum != in_msgnum) ||
            in_msgnum < 0)
                goto fail_match;

        if (test_level > 2) {
                TEST_SAY("%s:%i: Our testid %"PRIu64", part %i (%i), msg %i\n",
                         func, line,
                         testid, (int)partition,
                         (int)rkmessage->partition,
                         msgnum);
        }

        return;

fail_match:
        TEST_FAIL("%s:%i: Our testid %"PRIu64", part %i, msg %i did "
                  "not match message: \"%s\"\n",
                  func, line,
                  testid, (int)partition, msgnum, buf);
}
/**
 * @brief Produce one message via producev() and verify that the
 *        on_send/on_ack interceptors fired the expected number of times.
 *
 * @param exp_fail   whether producev() is expected to fail.
 * @param exp_ic_cnt expected interceptor invocation count per hook.
 */
static void do_test_produce (rd_kafka_t *rk, const char *topic,
                             int32_t partition, int msgid, int exp_fail,
                             int exp_ic_cnt) {
        rd_kafka_resp_err_t err;
        char key[16];
        struct msg_state *msg = &msgs[msgid];
        int i;

        /* Message state should be empty, no interceptors should have
         * been called yet.. */
        for (i = 0 ; i < _ON_CNT ; i++)
                TEST_ASSERT(msg->bits[i] == 0);

        msg->id = msgid;
        rd_snprintf(key, sizeof(key), "%d", msgid);

        /* Key includes the NUL terminator (strlen+1) on purpose here. */
        err = rd_kafka_producev(rk,
                                RD_KAFKA_V_TOPIC(topic),
                                RD_KAFKA_V_PARTITION(partition),
                                RD_KAFKA_V_KEY(key, strlen(key)+1),
                                RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
                                RD_KAFKA_V_OPAQUE(msg),
                                RD_KAFKA_V_END);

        /* on_send fires synchronously from producev(). */
        msg_verify_ic_cnt(msg, "on_send", msg->bits[_ON_SEND], exp_ic_cnt);

        if (err) {
                /* Failed produce: on_ack is fired immediately. */
                msg_verify_ic_cnt(msg, "on_ack", msg->bits[_ON_ACK],
                                  exp_ic_cnt);
                TEST_ASSERT(exp_fail,
                            "producev() failed: %s", rd_kafka_err2str(err));
        } else {
                /* Successful produce: on_ack fires later, on delivery. */
                msg_verify_ic_cnt(msg, "on_ack", msg->bits[_ON_ACK], 0);
                TEST_ASSERT(!exp_fail,
                            "expected produce failure for msg #%d, not %s",
                            msgid, rd_kafka_err2str(err));
        }
}
/**
 * Parse a message token
 *
 * Scans the testid/partition/msg token produced by test_msg_fmt() out of
 * \p ptr and verifies testid and (optionally) partition; the message id
 * is returned via \p msgidp.  Fails the test on any mismatch.
 */
void test_msg_parse0 (const char *func, int line,
                      uint64_t testid, const void *ptr, size_t size,
                      int32_t exp_partition, int *msgidp) {
        char buf[128];
        uint64_t in_testid;
        int in_part;

        if (!ptr)
                TEST_FAIL("%s:%i: Message has empty key\n",
                          func, line);

        /* Copy the (non NUL-terminated) key into a scannable buffer. */
        rd_snprintf(buf, sizeof(buf), "%.*s", (int)size, (char *)ptr);

        /* FIX: in_testid is uint64_t and must be scanned with SCNu64;
         * SCNd64 was a signed/unsigned mismatch (test_msg_fmt() formats
         * it with PRIu64). */
        if (sscanf(buf, "testid=%"SCNu64", partition=%i, msg=%i",
                   &in_testid, &in_part, msgidp) != 3)
                TEST_FAIL("%s:%i: Incorrect key format: %s", func, line, buf);


        if (testid != in_testid ||
            (exp_partition != -1 && exp_partition != in_part))
                TEST_FAIL("%s:%i: Our testid %"PRIu64", part %i did "
                          "not match message: \"%s\"\n",
                          func, line, testid, (int)exp_partition, buf);
}
/**
 * Prepare a toppar for using an offset file.
 *
 * Locality: rdkafka main thread
 * Locks: toppar_lock(rktp) must be held
 */
static void rd_kafka_offset_file_init (rd_kafka_toppar_t *rktp) {
        char spath[4096];
        const char *path = rktp->rktp_rkt->rkt_conf.offset_store_path;
        int64_t offset = RD_KAFKA_OFFSET_INVALID;

        /* If the configured path is a directory, derive a per-partition
         * filename inside it. */
        if (rd_kafka_path_is_dir(path)) {
                char tmpfile[1024];
                char escfile[4096];

                /* Include group.id in filename if configured. */
                if (!RD_KAFKAP_STR_IS_NULL(rktp->rktp_rkt->rkt_rk->
                                           rk_conf.group_id))
                        rd_snprintf(tmpfile, sizeof(tmpfile),
                                    "%s-%"PRId32"-%.*s.offset",
                                    rktp->rktp_rkt->rkt_topic->str,
                                    rktp->rktp_partition,
                                    RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_rk->
                                                     rk_conf.group_id));
                else
                        rd_snprintf(tmpfile, sizeof(tmpfile),
                                    "%s-%"PRId32".offset",
                                    rktp->rktp_rkt->rkt_topic->str,
                                    rktp->rktp_partition);

                /* Escape filename to make it safe. */
                mk_esc_filename(tmpfile, escfile, sizeof(escfile));

                rd_snprintf(spath, sizeof(spath), "%s%s%s",
                            path, path[strlen(path)-1] == '/' ? "" : "/",
                            escfile);

                path = spath;
        }

        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
                     "%s [%"PRId32"]: using offset file %s",
                     rktp->rktp_rkt->rkt_topic->str,
                     rktp->rktp_partition,
                     path);
        rktp->rktp_offset_path = rd_strdup(path);

        /* Set up the offset file sync interval. */
        if (rktp->rktp_rkt->rkt_conf.offset_store_sync_interval_ms > 0)
                rd_kafka_timer_start(&rktp->rktp_rkt->rkt_rk->rk_timers,
                                     &rktp->rktp_offset_sync_tmr,
                                     rktp->rktp_rkt->rkt_conf.
                                     offset_store_sync_interval_ms * 1000ll,
                                     rd_kafka_offset_sync_tmr_cb, rktp);

        if (rd_kafka_offset_file_open(rktp) != -1) {
                /* Read offset from offset file. */
                offset = rd_kafka_offset_file_read(rktp);
        }

        if (offset != RD_KAFKA_OFFSET_INVALID) {
                /* Start fetching from offset */
                rktp->rktp_stored_offset = offset;
                rktp->rktp_committed_offset = offset;
                rd_kafka_toppar_next_offset_handle(rktp, offset);
        } else {
                /* Offset was not usable: perform offset reset logic */
                rktp->rktp_committed_offset = RD_KAFKA_OFFSET_INVALID;
                rd_kafka_offset_reset(rktp, RD_KAFKA_OFFSET_INVALID,
                                      RD_KAFKA_RESP_ERR__FS,
                                      "non-readable offset file");
        }
}
/**
 * Write offset to offset file.
 *
 * Makes up to two attempts: if the first write fails the file is
 * closed and reopened before retrying.
 *
 * Locality: toppar's broker thread
 */
static rd_kafka_resp_err_t
rd_kafka_offset_file_commit (rd_kafka_toppar_t *rktp) {
        rd_kafka_itopic_t *rkt = rktp->rktp_rkt;
        int attempt;
        rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
        int64_t offset = rktp->rktp_stored_offset;

        for (attempt = 0 ; attempt < 2 ; attempt++) {
                char buf[22];
                int len;

                if (!rktp->rktp_offset_fp)
                        if (rd_kafka_offset_file_open(rktp) == -1)
                                continue;

                if (fseek(rktp->rktp_offset_fp, 0, SEEK_SET) == -1) {
                        rd_kafka_op_err(rktp->rktp_rkt->rkt_rk,
                                        RD_KAFKA_RESP_ERR__FS,
                                        "%s [%"PRId32"]: "
                                        "Seek failed on offset file %s: %s",
                                        rktp->rktp_rkt->rkt_topic->str,
                                        rktp->rktp_partition,
                                        rktp->rktp_offset_path,
                                        rd_strerror(errno));
                        err = RD_KAFKA_RESP_ERR__FS;
                        rd_kafka_offset_file_close(rktp);
                        continue;
                }

                len = rd_snprintf(buf, sizeof(buf), "%"PRId64"\n", offset);

                /* FIX: fwrite() with size==1 returns the number of bytes
                 * written; a full write returns 'len', so the previous
                 * check of '< 1' silently accepted partial writes. */
                if (fwrite(buf, 1, len, rktp->rktp_offset_fp) <
                    (size_t)len) {
                        rd_kafka_op_err(rktp->rktp_rkt->rkt_rk,
                                        RD_KAFKA_RESP_ERR__FS,
                                        "%s [%"PRId32"]: "
                                        "Failed to write offset %"PRId64" to "
                                        "offset file %s: %s",
                                        rktp->rktp_rkt->rkt_topic->str,
                                        rktp->rktp_partition,
                                        offset,
                                        rktp->rktp_offset_path,
                                        rd_strerror(errno));
                        err = RD_KAFKA_RESP_ERR__FS;
                        rd_kafka_offset_file_close(rktp);
                        continue;
                }

                /* Need to flush before truncate to preserve write ordering */
                (void)fflush(rktp->rktp_offset_fp);

                /* Truncate file */
#ifdef _MSC_VER
                /* FIX: _chsize_s() returns an errno_t (0 on success),
                 * never -1, so compare against 0. */
                if (_chsize_s(_fileno(rktp->rktp_offset_fp), len) != 0)
                        ; /* Ignore truncate failures */
#else
                if (ftruncate(fileno(rktp->rktp_offset_fp), len) == -1)
                        ; /* Ignore truncate failures */
#endif

                rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
                             "%s [%"PRId32"]: wrote offset %"PRId64" to "
                             "file %s",
                             rktp->rktp_rkt->rkt_topic->str,
                             rktp->rktp_partition, offset,
                             rktp->rktp_offset_path);

                rktp->rktp_committed_offset = offset;

                /* If sync interval is set to immediate we sync right away. */
                if (rkt->rkt_conf.offset_store_sync_interval_ms == 0)
                        rd_kafka_offset_file_sync(rktp);

                return RD_KAFKA_RESP_ERR_NO_ERROR;
        }

        return err;
}
/**
 * Initialize and start SASL authentication.
 *
 * Returns 0 on successful init and -1 on error.
 *
 * Locality: broker thread
 */
int rd_kafka_sasl_client_new (rd_kafka_transport_t *rktrans,
                              char *errstr, int errstr_size) {
        int r;
        rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
        rd_kafka_t *rk = rkb->rkb_rk;
        char *hostname, *t;
        sasl_callback_t callbacks[16] = {
                // { SASL_CB_GETOPT, (void *)rd_kafka_sasl_cb_getopt, rktrans },
                { SASL_CB_LOG, (void *)rd_kafka_sasl_cb_log, rktrans },
                { SASL_CB_AUTHNAME, (void *)rd_kafka_sasl_cb_getsimple, rktrans },
                { SASL_CB_PASS, (void *)rd_kafka_sasl_cb_getsecret, rktrans },
                { SASL_CB_ECHOPROMPT, (void *)rd_kafka_sasl_cb_chalprompt, rktrans },
                { SASL_CB_GETREALM, (void *)rd_kafka_sasl_cb_getrealm, rktrans },
                { SASL_CB_CANON_USER, (void *)rd_kafka_sasl_cb_canon, rktrans },
                { SASL_CB_LIST_END }
        };

        /* SASL_CB_USER is needed for PLAIN but breaks GSSAPI.
         * FIX: the mechanism name lives in sasl.mechanisms, not in
         * sasl.service_name (cf. rd_kafka_sasl_cb_canon() which also
         * matches mechanisms against "PLAIN"/"GSSAPI"); the previous
         * check against service_name would never enable SASL_CB_USER. */
        if (!strcmp(rk->rk_conf.sasl.mechanisms, "PLAIN")) {
                int endidx;

                /* Find end of callbacks array */
                for (endidx = 0 ;
                     callbacks[endidx].id != SASL_CB_LIST_END ; endidx++)
                        ;

                callbacks[endidx].id = SASL_CB_USER;
                callbacks[endidx].proc = (void *)rd_kafka_sasl_cb_getsimple;
                endidx++;
                callbacks[endidx].id = SASL_CB_LIST_END;
        }

        /* libsasl wants the bare hostname, strip the ":port" suffix. */
        rd_strdupa(&hostname, rktrans->rktrans_rkb->rkb_nodename);
        if ((t = strchr(hostname, ':')))
                *t = '\0';  /* remove ":port" */

        rd_rkb_dbg(rkb, SECURITY, "SASL",
                   "Initializing SASL client: service name %s, "
                   "hostname %s, mechanisms %s",
                   rk->rk_conf.sasl.service_name, hostname,
                   rk->rk_conf.sasl.mechanisms);

        /* Acquire or refresh ticket if kinit is configured */
        rd_kafka_sasl_kinit_refresh(rkb);

        r = sasl_client_new(rk->rk_conf.sasl.service_name, hostname,
                            NULL, NULL, /* no local & remote IP checks */
                            callbacks, 0, &rktrans->rktrans_sasl.conn);
        if (r != SASL_OK) {
                rd_snprintf(errstr, errstr_size, "%s",
                            sasl_errstring(r, NULL, NULL));
                return -1;
        }

        if (rk->rk_conf.debug & RD_KAFKA_DBG_SECURITY) {
                const char *avail_mechs;
                sasl_listmech(rktrans->rktrans_sasl.conn, NULL, NULL, " ",
                              NULL, &avail_mechs, NULL, NULL);
                rd_rkb_dbg(rkb, SECURITY, "SASL",
                           "My supported SASL mechanisms: %s", avail_mechs);
        }

        rd_kafka_transport_poll_set(rktrans, POLLIN);

        do {
                const char *out;
                unsigned int outlen;
                const char *mech = NULL;

                r = sasl_client_start(rktrans->rktrans_sasl.conn,
                                      rk->rk_conf.sasl.mechanisms,
                                      NULL, &out, &outlen, &mech);

                if (r >= 0)
                        if (rd_kafka_sasl_send(rktrans, out, outlen,
                                               errstr, errstr_size))
                                return -1;
        } while (r == SASL_INTERACT);

        if (r == SASL_OK) {
                /* PLAIN is apparently done here, but we still need to make
                 * sure the PLAIN frame is sent and we get a response back
                 * (but we must not pass the response to libsasl or it will
                 * fail). */
                rktrans->rktrans_sasl.complete = 1;
                return 0;

        } else if (r != SASL_CONTINUE) {
                rd_snprintf(errstr, errstr_size,
                            "SASL handshake failed (start (%d)): %s",
                            r, sasl_errdetail(rktrans->rktrans_sasl.conn));
                return -1;
        }

        return 0;
}
/**
 * Handle received frame from broker.
 *
 * Feeds the frame to sasl_client_step(), sends any produced response
 * back to the broker, and on completed authentication transitions the
 * broker connection to UP.
 *
 * @returns 0 on success/continue, -1 on error with \p errstr written.
 */
static int rd_kafka_sasl_handle_recv (rd_kafka_transport_t *rktrans,
                                      rd_kafka_buf_t *rkbuf,
                                      char *errstr, int errstr_size) {
        int r;

        rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL",
                   "Received SASL frame from broker (%"PRIdsz" bytes)",
                   rkbuf ? rkbuf->rkbuf_len : 0);

        /* Mechanisms marked complete (e.g. PLAIN) only await the broker's
         * empty acknowledgement frame: don't feed it to libsasl. */
        if (rktrans->rktrans_sasl.complete && (!rkbuf || rkbuf->rkbuf_len == 0))
                goto auth_successful;

        do {
                sasl_interact_t *interact = NULL;
                const char *out;
                unsigned int outlen;

                r = sasl_client_step(rktrans->rktrans_sasl.conn,
                                     rkbuf && rkbuf->rkbuf_len > 0 ?
                                     rkbuf->rkbuf_rbuf : NULL,
                                     rkbuf ? rkbuf->rkbuf_len : 0,
                                     &interact,
                                     &out, &outlen);

                /* The buffer is only consumed on the first step. */
                if (rkbuf) {
                        rd_kafka_buf_destroy(rkbuf);
                        rkbuf = NULL;
                }

                if (r >= 0) {
                        /* Note: outlen may be 0 here for an empty response */
                        if (rd_kafka_sasl_send(rktrans, out, outlen,
                                               errstr, errstr_size) == -1)
                                return -1;
                }

                if (r == SASL_INTERACT)
                        rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL",
                                   "SASL_INTERACT: %lu %s, %s, %s, %p",
                                   interact->id,
                                   interact->challenge,
                                   interact->prompt,
                                   interact->defresult,
                                   interact->result);

        } while (r == SASL_INTERACT);

        if (r == SASL_CONTINUE)
                return 0;  /* Wait for more data from broker */
        else if (r != SASL_OK) {
                rd_snprintf(errstr, errstr_size,
                            "SASL handshake failed (step): %s",
                            sasl_errdetail(rktrans->rktrans_sasl.conn));
                return -1;
        }

        /* Authentication successful */
auth_successful:
        if (rktrans->rktrans_rkb->rkb_rk->rk_conf.debug &
            RD_KAFKA_DBG_SECURITY) {
                const char *user, *mech, *authsrc;

                if (sasl_getprop(rktrans->rktrans_sasl.conn, SASL_USERNAME,
                                 (const void **)&user) != SASL_OK)
                        user = "******";

                if (sasl_getprop(rktrans->rktrans_sasl.conn, SASL_MECHNAME,
                                 (const void **)&mech) != SASL_OK)
                        mech = "(unknown)";

                if (sasl_getprop(rktrans->rktrans_sasl.conn, SASL_AUTHSOURCE,
                                 (const void **)&authsrc) != SASL_OK)
                        authsrc = "(unknown)";

                rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL",
                           "Authenticated as %s using %s (%s)",
                           user, mech, authsrc);
        }

        rd_kafka_broker_connect_up(rktrans->rktrans_rkb);

        return 0;
}