Example #1
void re_regfree(Reprog *prog)
{
	if (prog) {
		rd_free(prog->start);
		rd_free(prog);
	}
}
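Paired with re_regcomp from Example #3 below, the intended lifecycle is compile, use, free. A minimal sketch using only the signatures shown in this section (the pattern string is illustrative):

static void example_compile_and_free (void) {
	const char *error = NULL;
	Reprog *prog = re_regcomp("a[bc]+", 0, &error);

	if (!prog) {
		/* error points to a description such as "syntax error" */
		return;
	}

	/* ... run matches against prog ... */
	re_regfree(prog); /* frees prog->start and prog itself; NULL is a no-op */
}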
Example #2
void rd_kafka_op_destroy (rd_kafka_op_t *rko) {

	/* Decrease refcount on rkbuf to eventually rd_free the shared buffer */
	if (rko->rko_rkbuf)
		rd_kafka_buf_handle_op(rko, RD_KAFKA_RESP_ERR__DESTROY);
	else if (rko->rko_payload && rko->rko_flags & RD_KAFKA_OP_F_FREE) {
                if (rko->rko_free_cb)
                        rko->rko_free_cb(rko->rko_payload);
                else
                        rd_free(rko->rko_payload);
        }
        if (rko->rko_rkt)
                rd_kafka_topic_destroy0(rd_kafka_topic_a2s(rko->rko_rkt));
        if (rko->rko_rktp)
                rd_kafka_toppar_destroy(rko->rko_rktp);
        if (rko->rko_metadata)
                rd_kafka_metadata_destroy(rko->rko_metadata);
        if (rko->rko_replyq)
                rd_kafka_q_destroy(rko->rko_replyq);

        if (rd_atomic32_sub(&rd_kafka_op_cnt, 1) < 0)
                rd_kafka_assert(NULL, !*"rd_kafka_op_cnt < 0");

	rd_free(rko);
}
Example #3
Reprog *re_regcomp(const char *pattern, int cflags, const char **errorp)
{
	Renode *node;
	Reinst *split, *jump;
	int i;

	g.prog = rd_malloc(sizeof (Reprog));
	g.pstart = g.pend = rd_malloc(sizeof (Renode) * strlen(pattern) * 2);

	if (setjmp(g.kaboom)) {
		if (errorp) *errorp = g.error;
		rd_free(g.pstart);
		rd_free(g.prog);
		return NULL;
	}

	g.source = pattern;
	g.ncclass = 0;
	g.nsub = 1;
	for (i = 0; i < MAXSUB; ++i)
		g.sub[i] = 0;

	g.prog->flags = cflags;

	next();
	node = parsealt();
	if (g.lookahead == ')')
		die("unmatched ')'");
	if (g.lookahead != 0)
		die("syntax error");

	g.prog->nsub = g.nsub;
	g.prog->start = g.prog->end = rd_malloc((count(node) + 6) * sizeof (Reinst));

	split = emit(g.prog, I_SPLIT);
	split->x = split + 3;
	split->y = split + 1;
	emit(g.prog, I_ANYNL);
	jump = emit(g.prog, I_JUMP);
	jump->x = split;
	emit(g.prog, I_LPAR);
	compile(g.prog, node);
	emit(g.prog, I_RPAR);
	emit(g.prog, I_END);

#ifdef TEST
	dumpnode(node);
	putchar('\n');
	dumpprog(g.prog);
#endif

	rd_free(g.pstart);

	if (errorp) *errorp = NULL;
	return g.prog;
}
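For orientation, the six extra instructions behind the count(node) + 6 allocation wrap the compiled pattern in an unanchored-search prolog and a capture/accept epilog, reconstructed here from the emit calls above:

/* 0:  I_SPLIT   x -> 3 (try the pattern here), y -> 1 (or consume a char)
 * 1:  I_ANYNL   consume one character, newline included
 * 2:  I_JUMP    x -> 0 (retry the split at the next input position)
 * 3:  I_LPAR    open capture group 0
 * ... compiled pattern (up to count(node) instructions)
 *     I_RPAR    close capture group 0
 *     I_END     accept
 */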
Example #4
/**
 * Execute kinit to refresh ticket.
 *
 * Returns 0 on success, -1 on error.
 *
 * Locality: any
 */
static int rd_kafka_sasl_kinit_refresh (rd_kafka_broker_t *rkb) {
	rd_kafka_t *rk = rkb->rkb_rk;
	int r;
	char *cmd;
	char errstr[128];

	if (!rk->rk_conf.sasl.kinit_cmd ||
	    !strstr(rk->rk_conf.sasl.mechanisms, "GSSAPI"))
		return 0; /* kinit not configured */

	/* Build kinit refresh command line using string rendering and config */
	cmd = rd_string_render(rk->rk_conf.sasl.kinit_cmd,
			       errstr, sizeof(errstr),
			       render_callback, rkb);
	if (!cmd) {
		rd_rkb_log(rkb, LOG_ERR, "SASLREFRESH",
			   "Failed to construct kinit command "
			   "from sasl.kerberos.kinit.cmd template: %s",
			   errstr);
		return -1;
	}

	/* Execute kinit */
	rd_rkb_dbg(rkb, SECURITY, "SASLREFRESH",
		   "Refreshing SASL keys with command: %s", cmd);

	mtx_lock(&rd_kafka_sasl_kinit_lock);
	r = system(cmd);
	mtx_unlock(&rd_kafka_sasl_kinit_lock);

	if (r == -1) {
		rd_rkb_log(rkb, LOG_ERR, "SASLREFRESH",
			   "SASL key refresh failed: Failed to execute %s",
			   cmd);
		rd_free(cmd);
		return -1;
	} else if (WIFSIGNALED(r)) {
		rd_rkb_log(rkb, LOG_ERR, "SASLREFRESH",
			   "SASL key refresh failed: %s: received signal %d",
			   cmd, WTERMSIG(r));
		rd_free(cmd);
		return -1;
	} else if (WIFEXITED(r) && WEXITSTATUS(r) != 0) {
		rd_rkb_log(rkb, LOG_ERR, "SASLREFRESH",
			   "SASL key refresh failed: %s: exited with code %d",
			   cmd, WEXITSTATUS(r));
		rd_free(cmd);
		return -1;
	}

	rd_free(cmd);

	rd_rkb_dbg(rkb, SECURITY, "SASLREFRESH", "SASL key refreshed");
	return 0;
}
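The command template comes from the sasl.kerberos.kinit.cmd configuration property and is expanded by rd_string_render(). A minimal sketch of setting it through the public configuration API; the template shown is illustrative, not a recommended default:

#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void example_set_kinit_cmd (void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        if (rd_kafka_conf_set(conf, "sasl.kerberos.kinit.cmd",
                              "kinit -k -t \"%{sasl.kerberos.keytab}\" "
                              "%{sasl.kerberos.principal}",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
                fprintf(stderr, "%s\n", errstr);

        rd_kafka_conf_destroy(conf);
}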
Example #5
/**
 * Close and destroy a transport handle
 */
void rd_kafka_transport_close (rd_kafka_transport_t *rktrans) {
#if WITH_SSL
	if (rktrans->rktrans_ssl) {
		SSL_shutdown(rktrans->rktrans_ssl);
		SSL_free(rktrans->rktrans_ssl);
	}
#endif

#if WITH_SASL
	if (rktrans->rktrans_sasl.conn)
		sasl_dispose(&rktrans->rktrans_sasl.conn);
#endif

	if (rktrans->rktrans_recv_buf)
		rd_kafka_buf_destroy(rktrans->rktrans_recv_buf);

	if (rktrans->rktrans_s != -1) {
#ifndef _MSC_VER
		close(rktrans->rktrans_s);
#else
		closesocket(rktrans->rktrans_s);
#endif
	}

	rd_free(rktrans);
}
Example #6
void rd_kafka_queue_io_event_enable (rd_kafka_queue_t *rkqu, int fd,
				     const void *payload, size_t size) {
	rd_kafka_q_t *rkq = rkqu->rkqu_q;
	struct rd_kafka_q_io *qio;

	if (fd != -1) {
		qio = rd_malloc(sizeof(*qio) + size);
		qio->fd = fd;
		qio->size = size;
		qio->payload = (void *)(qio+1);
		memcpy(qio->payload, payload, size);
	}

	mtx_lock(&rkq->rkq_lock);
	if (rkq->rkq_qio) {
		rd_free(rkq->rkq_qio);
		rkq->rkq_qio = NULL;
	}

	if (fd != -1) {
		rkq->rkq_qio = qio;
	}

	mtx_unlock(&rkq->rkq_lock);
}
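From the application side the usual pattern is to hand the writable end of a pipe to the queue and sleep on the readable end in a poll loop. A sketch, assuming rkqu is an existing rd_kafka_queue_t handle:

#include <unistd.h>

static void example_enable_queue_io (rd_kafka_queue_t *rkqu) {
	int fds[2];

	if (pipe(fds) == -1)
		return;

	/* librdkafka writes the 1-byte payload "1" to fds[1] to signal
	 * that the queue has events to serve. */
	rd_kafka_queue_io_event_enable(rkqu, fds[1], "1", 1);

	/* ... poll()/select() on fds[0] in the application's event loop ... */

	/* Passing fd -1 disables the event again and frees the stored qio. */
}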
Example #7
/**
 * @brief Base64 decode input string \p in (of in->size bytes).
 * @returns -1 on invalid Base64, or 0 on success, in which case a
 *          newly allocated binary string is set in \p out (along with
 *          its size).
 */
static int rd_base64_decode (const rd_chariov_t *in, rd_chariov_t *out) {
        size_t asize;
        BIO *b64, *bmem;

        if (in->size == 0 || (in->size % 4) != 0)
                return -1;

        asize = (in->size * 3) / 4; /* allocation size */
        out->ptr = rd_malloc(asize+1);

        b64 = BIO_new(BIO_f_base64());
        BIO_set_flags(b64, BIO_FLAGS_BASE64_NO_NL);

        bmem = BIO_new_mem_buf(in->ptr, (int)in->size);
        bmem = BIO_push(b64, bmem);

        out->size = BIO_read(bmem, out->ptr, (int)asize+1);
        assert(out->size <= asize);
        BIO_free_all(bmem);

#if ENABLE_DEVEL
        /* Verify that decode==encode */
        {
                char *encoded = rd_base64_encode(out);
                assert(strlen(encoded) == in->size);
                assert(!strncmp(encoded, in->ptr, in->size));
                rd_free(encoded);
        }
#endif

        return 0;
}
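A minimal sketch of calling the decoder, assuming rd_chariov_t is the ptr/size pair used above ("aGVsbG8=" is Base64 for "hello"):

static void example_decode (void) {
        rd_chariov_t in = { .ptr = (char *)"aGVsbG8=", .size = 8 };
        rd_chariov_t out;

        if (rd_base64_decode(&in, &out) == 0) {
                /* out.ptr holds the 5 decoded bytes ("hello"),
                 * out.size is 5; one spare byte was allocated. */
                rd_free(out.ptr);
        }
}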
Example #8
int plot_radec_count_inbounds(plot_args_t* pargs, plotradec_t* args) {
	rd_t myrd;
	rd_t* rd = NULL;
	int i, Nrd, nib;

	rd = get_rd(args, &myrd);
	if (!rd) return -1;
	Nrd = rd_n(rd);
	// If N is specified, apply it as a max.
	if (args->nobjs)
		Nrd = MIN(Nrd, args->nobjs);
	nib = 0;
	for (i=args->firstobj; i<Nrd; i++) {
		double x,y;
		double ra = rd_getra(rd, i);
		double dec = rd_getdec(rd, i);
		if (!plotstuff_radec2xy(pargs, ra, dec, &x, &y))
			continue;
		if (!plotstuff_marker_in_bounds(pargs, x, y))
			continue;
		nib++;
	}
	if (rd != &myrd)
		rd_free(rd);
	return nib;
}
Example #9
int rd_kafka_sasl_conf_validate (rd_kafka_t *rk,
				 char *errstr, size_t errstr_size) {

	if (strcmp(rk->rk_conf.sasl.mechanisms, "GSSAPI"))
		return 0;

	if (rk->rk_conf.sasl.kinit_cmd) {
		rd_kafka_broker_t rkb;
		char *cmd;
		char tmperr[128];

		memset(&rkb, 0, sizeof(rkb));
		strcpy(rkb.rkb_nodename, "ATestBroker:9092");
		rkb.rkb_rk = rk;
		mtx_init(&rkb.rkb_lock, mtx_plain);

		cmd = rd_string_render(rk->rk_conf.sasl.kinit_cmd,
				       tmperr, sizeof(tmperr),
				       render_callback, &rkb);

		mtx_destroy(&rkb.rkb_lock);

		if (!cmd) {
			rd_snprintf(errstr, errstr_size,
				    "Invalid sasl.kerberos.kinit.cmd value: %s",
				    tmperr);
			return -1;
		}

		rd_free(cmd);
	}

	return 0;
}
Example #10
void rd_kafka_message_destroy (rd_kafka_message_t *rkmessage) {
	rd_kafka_op_t *rko;

	if (likely((rko = (rd_kafka_op_t *)rkmessage->_private) != NULL))
		rd_kafka_op_destroy(rko);
	else
		rd_free(rkmessage);
}
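This is the destructor paired with every message handed out by the consumer. A minimal sketch, assuming rk is an existing consumer handle:

static void example_consume_one (rd_kafka_t *rk) {
	rd_kafka_message_t *rkmessage = rd_kafka_consumer_poll(rk, 1000/*ms*/);

	if (!rkmessage)
		return; /* timeout */

	if (!rkmessage->err) {
		/* ... process rkmessage->payload (rkmessage->len bytes) ... */
	}

	rd_kafka_message_destroy(rkmessage);
}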
Example #11
/**
 * Destroy a queue. The refcount must be at zero.
 */
void rd_kafka_q_destroy_final (rd_kafka_q_t *rkq) {

        mtx_lock(&rkq->rkq_lock);
	if (unlikely(rkq->rkq_qio != NULL)) {
		rd_free(rkq->rkq_qio);
		rkq->rkq_qio = NULL;
	}
        rd_kafka_q_fwd_set0(rkq, NULL, 0/*no-lock*/);
        rd_kafka_q_disable0(rkq, 0/*no-lock*/);
        rd_kafka_q_purge0(rkq, 0/*no-lock*/);
	assert(!rkq->rkq_fwdq);
        mtx_unlock(&rkq->rkq_lock);
	mtx_destroy(&rkq->rkq_lock);
	cnd_destroy(&rkq->rkq_cond);

        if (rkq->rkq_flags & RD_KAFKA_Q_F_ALLOCATED)
                rd_free(rkq);
}
Example #12
/**
 * Global OpenSSL cleanup.
 *
 * NOTE: This function is never called, see rd_kafka_transport_term()
 */
static RD_UNUSED void rd_kafka_transport_ssl_term (void) {
	int i;

	ERR_free_strings();

	for (i = 0 ; i < rd_kafka_ssl_locks_cnt ; i++)
		mtx_destroy(&rd_kafka_ssl_locks[i]);

	rd_free(rd_kafka_ssl_locks);
}
Example #13
void rd_list_destroy (rd_list_t *rl, void (*free_cb) (void *)) {

	if (!free_cb)
		free_cb = rl->rl_free_cb;

        if (rl->rl_elems) {
                int i;
                if (free_cb) {
                        for (i = 0 ; i < rl->rl_cnt ; i++)
                                if (rl->rl_elems[i])
                                        free_cb(rl->rl_elems[i]);
                }

		rd_free(rl->rl_elems);
        }

	if (rl->rl_flags & RD_LIST_F_ALLOCATED)
		rd_free(rl);
}
Example #14
static void do_test_non_exist_and_partchange (void) {
	char *topic_a = rd_strdup(test_mk_topic_name("topic_a", 1));
	rd_kafka_t *rk;
	rd_kafka_conf_t *conf;
	rd_kafka_queue_t *queue;

	/**
	 * Test #1:
	 * - Subscribe to non-existing topic.
	 * - Verify empty assignment
	 * - Create topic
	 * - Verify new assignment containing topic
	 */
	TEST_SAY("#1 & #2 testing\n");
	test_conf_init(&conf, NULL, 60);

	/* Decrease metadata interval to speed up topic change discovery. */
	test_conf_set(conf, "metadata.max.age.ms", "5000");

	rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE);
	rk = test_create_consumer(test_str_id_generate_tmp(),
				  NULL, conf, NULL, NULL);
	queue = rd_kafka_queue_get_consumer(rk);

	TEST_SAY("#1: Subscribing to %s\n", topic_a);
	test_consumer_subscribe(rk, topic_a);

	/* Should not see a rebalance since no topics are matched. */
	await_no_rebalance("#1: empty", rk, queue, 10000);

	TEST_SAY("#1: creating topic %s\n", topic_a);
	test_create_topic(topic_a, 2, 1);

	await_assignment("#1: proper", rk, queue, 1,
			 topic_a, 2);


	/**
	 * Test #2 (continue with #1 consumer)
	 * - Increase the partition count
	 * - Verify updated assignment
	 */
	test_kafka_topics("--alter --topic %s --partitions 4",
			  topic_a);
	await_revoke("#2", rk, queue);

	await_assignment("#2: more partitions", rk, queue, 1,
			 topic_a, 4);

	test_consumer_close(rk);
	rd_kafka_queue_destroy(queue);
	rd_kafka_destroy(rk);

	rd_free(topic_a);
}
Example #15
/**
 * @brief Close and free authentication state
 */
static void rd_kafka_sasl_scram_close (rd_kafka_transport_t *rktrans) {
        struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state;

        if (!state)
                return;

        RD_IF_FREE(state->cnonce.ptr, rd_free);
        RD_IF_FREE(state->first_msg_bare.ptr, rd_free);
        RD_IF_FREE(state->ServerSignatureB64, rd_free);
        rd_free(state);
}
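RD_IF_FREE is librdkafka's NULL-guarded destruction helper; presumably it expands to something equivalent to the following sketch (not the verbatim definition):

#define RD_IF_FREE(PTR, FREE_FN) do {   \
                if ((PTR))              \
                        FREE_FN(PTR);   \
        } while (0)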
Example #16
void rd_kafka_queue_destroy (rd_kafka_queue_t *rkqu) {
        int do_destroy;

        mtx_lock(&rkqu->rkqu_q.rkq_lock);
        do_destroy = rkqu->rkqu_q.rkq_refcnt == 1;
        mtx_unlock(&rkqu->rkqu_q.rkq_lock);
	rd_kafka_q_destroy(&rkqu->rkqu_q);
        if (!do_destroy)
		return; /* Still references */

	rd_free(rkqu);
}
Example #17
int plot_radec_plot(const char* command, cairo_t* cairo,
				 plot_args_t* pargs, void* baton) {
	plotradec_t* args = (plotradec_t*)baton;
	// Plot it!
	rd_t myrd;
	rd_t* rd = NULL;
	//rd_t* freerd = NULL;
	int Nrd;
	int i;

	if (!pargs->wcs) {
		ERROR("plotting radec but not plot_wcs has been set.");
		return -1;
	}

	if (args->fn && dl_size(args->radecvals)) {
		ERROR("Can only plot one of rdlist filename and radec_vals");
		return -1;
	}
	if (!args->fn && !dl_size(args->radecvals)) {
		ERROR("Neither rdlist filename nor radec_vals given!");
		return -1;
	}

	plotstuff_builtin_apply(cairo, pargs);

	rd = get_rd(args, &myrd);
	if (!rd) return -1;
	Nrd = rd_n(rd);
	// If N is specified, apply it as a max.
	if (args->nobjs)
		Nrd = MIN(Nrd, args->nobjs);

	// Plot markers.
	for (i=args->firstobj; i<Nrd; i++) {
		double x,y;
		double ra = rd_getra(rd, i);
		double dec = rd_getdec(rd, i);
		if (!plotstuff_radec2xy(pargs, ra, dec, &x, &y))
			continue;
		if (!plotstuff_marker_in_bounds(pargs, x, y))
			continue;
		plotstuff_stack_marker(pargs, x-1, y-1);
	}
	plotstuff_plot_stack(pargs, cairo);

	if (rd != &myrd)
		rd_free(rd);
	//rd_free(freerd);
	return 0;
}
Example #18
/**
 * @brief Attempt to load library \p path.
 * @returns the library handle (platform dependent, thus opaque) on success,
 *          else NULL.
 */
rd_dl_hnd_t *rd_dl_open (const char *path, char *errstr, size_t errstr_size) {
        void *handle;
        const char *loadfunc;
#if WITH_LIBDL
        loadfunc = "dlopen()";
        handle = dlopen(path, RTLD_NOW | RTLD_LOCAL);
#elif defined(_MSC_VER)
        loadfunc = "LoadLibrary()";
        handle = (void *)LoadLibraryA(path);
#endif
        if (!handle) {
                char *dlerrstr = rd_dl_error();
                rd_snprintf(errstr, errstr_size, "%s failed: %s",
                            loadfunc, dlerrstr);
                rd_free(dlerrstr);
        }
        return (rd_dl_hnd_t *)handle;
}
Example #19
/**
 * Decommissions the use of an offset file for a toppar.
 * The file content will not be touched and the file will not be removed.
 */
static rd_kafka_resp_err_t rd_kafka_offset_file_term (rd_kafka_toppar_t *rktp) {
        rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;

        /* Sync offset file if the sync is intervalled (> 0) */
        if (rktp->rktp_rkt->rkt_conf.offset_store_sync_interval_ms > 0) {
                rd_kafka_offset_file_sync(rktp);
		rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
				    &rktp->rktp_offset_sync_tmr, 1/*lock*/);
	}


	rd_kafka_offset_file_close(rktp);

	rd_free(rktp->rktp_offset_path);
	rktp->rktp_offset_path = NULL;

        return err;
}
Example #20
/**
 * @brief look up address of \p symbol in library handle \p handle
 * @returns the function pointer on success or NULL on error.
 */
void *
rd_dl_sym (rd_dl_hnd_t *handle, const char *symbol,
           char *errstr, size_t errstr_size) {
        void *func;
#if WITH_LIBDL
        func = dlsym((void *)handle, symbol);
#elif defined(_MSC_VER)
        func = GetProcAddress((HMODULE)handle, symbol);
#endif
        if (!func) {
                char *dlerrstr = rd_dl_error();
                rd_snprintf(errstr, errstr_size,
                            "Failed to load symbol \"%s\": %s",
                            symbol, dlerrstr);
                rd_free(dlerrstr);
        }
        return func;
}
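A sketch of how rd_dl_open() and rd_dl_sym() compose; the plugin path and symbol name are hypothetical:

static void example_load_plugin (void) {
        char errstr[256];
        rd_dl_hnd_t *hnd = rd_dl_open("/usr/lib/plugin.so",
                                      errstr, sizeof(errstr));
        void (*plugin_init) (void);

        if (!hnd)
                return;

        /* Casting an object pointer to a function pointer is not strictly
         * ISO C, but it is the customary dlsym()/GetProcAddress() idiom. */
        plugin_init = (void (*)(void))rd_dl_sym(hnd, "plugin_init",
                                                errstr, sizeof(errstr));
        if (plugin_init)
                plugin_init();
}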
Example #21
/* @remark This test will fail if auto topic creation is enabled on the broker
 * since the client will issue a topic-creating metadata request to find
 * a new leader when the topic is removed.
 *
 * To run with trivup, do:
 * ./interactive_broker_version.py .. -conf '{"auto_create_topics":"false"}' ..
 * TESTS=0045 ./run-test.sh -k ./merged
 */
static void do_test_topic_remove (void) {
	char *topic_f = rd_strdup(test_mk_topic_name("topic_f", 1));
	char *topic_g = rd_strdup(test_mk_topic_name("topic_g", 1));
	int parts_f = 5;
	int parts_g = 9;
	rd_kafka_t *rk;
	rd_kafka_conf_t *conf;
	rd_kafka_queue_t *queue;
	rd_kafka_topic_partition_list_t *topics;
	rd_kafka_resp_err_t err;

	/**
	 * Topic removal test:
	 * - Create topic f & g
	 * - Subscribe to f & g
	 * - Verify f & g assignment
	 * - Remove topic f
	 * - Verify g assignment
	 * - Remove topic g
	 * - Verify empty assignment
	 */
	TEST_SAY("Topic removal testing\n");
	test_conf_init(&conf, NULL, 60);

	/* Decrease metadata interval to speed up topic change discovery. */
	test_conf_set(conf, "metadata.max.age.ms", "5000");

	rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE);
	rk = test_create_consumer(test_str_id_generate_tmp(),
				  NULL, conf, NULL, NULL);
	queue = rd_kafka_queue_get_consumer(rk);

	TEST_SAY("Topic removal: creating topic %s (subscribed)\n", topic_f);
	test_create_topic(topic_f, parts_f, 1);

	TEST_SAY("Topic removal: creating topic %s (subscribed)\n", topic_g);
	test_create_topic(topic_g, parts_g, 1);

	rd_sleep(1); // FIXME: do check&wait loop instead

	TEST_SAY("Topic removal: Subscribing to %s & %s\n", topic_f, topic_g);
	topics = rd_kafka_topic_partition_list_new(2);
	rd_kafka_topic_partition_list_add(topics, topic_f, RD_KAFKA_PARTITION_UA);
	rd_kafka_topic_partition_list_add(topics, topic_g, RD_KAFKA_PARTITION_UA);
	err = rd_kafka_subscribe(rk, topics);
	TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NO_ERROR,
		    "%s", rd_kafka_err2str(err));
	rd_kafka_topic_partition_list_destroy(topics);

	await_assignment("Topic removal: both topics exist", rk, queue, 2,
			 topic_f, parts_f,
			 topic_g, parts_g);

	TEST_SAY("Topic removal: removing %s\n", topic_f);
	test_kafka_topics("--delete --topic %s", topic_f);

	await_revoke("Topic removal: rebalance after topic removal", rk, queue);

	await_assignment("Topic removal: one topic exists", rk, queue, 1,
			 topic_g, parts_g);
	
	TEST_SAY("Topic removal: removing %s\n", topic_g);
	test_kafka_topics("--delete --topic %s", topic_g);

	await_revoke("Topic removal: rebalance after 2nd topic removal",
		     rk, queue);

	/* Should not see another rebalance since all topics now removed */
	await_no_rebalance("Topic removal: empty", rk, queue, 10000);

	test_consumer_close(rk);
	rd_kafka_queue_destroy(queue);
	rd_kafka_destroy(rk);

	rd_free(topic_f);
	rd_free(topic_g);
}
Example #22
/**
 * @brief Decompress MessageSet, pass the uncompressed MessageSet to
 *        the MessageSet reader.
 */
static rd_kafka_resp_err_t
rd_kafka_msgset_reader_decompress (rd_kafka_msgset_reader_t *msetr,
                                   int MsgVersion, int Attributes,
                                   int64_t Timestamp, int64_t Offset,
                                   const void *compressed,
                                   size_t compressed_size) {
        struct iovec iov = { .iov_base = NULL, .iov_len = 0 };
        rd_kafka_toppar_t *rktp = msetr->msetr_rktp;
        int codec = Attributes & RD_KAFKA_MSG_ATTR_COMPRESSION_MASK;
        rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
        rd_kafka_buf_t *rkbufz;

        switch (codec)
        {
#if WITH_ZLIB
        case RD_KAFKA_COMPRESSION_GZIP:
        {
                uint64_t outlenx = 0;

                /* Decompress Message payload */
                iov.iov_base = rd_gz_decompress(compressed, (int)compressed_size,
                                                &outlenx);
                if (unlikely(!iov.iov_base)) {
                        rd_rkb_dbg(msetr->msetr_rkb, MSG, "GZIP",
                                   "Failed to decompress Gzip "
                                   "message at offset %"PRId64
                                   " of %"PRIusz" bytes: "
                                   "ignoring message",
                                   Offset, compressed_size);
                        err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
                        goto err;
                }

                iov.iov_len = (size_t)outlenx;
        }
        break;
#endif

#if WITH_SNAPPY
        case RD_KAFKA_COMPRESSION_SNAPPY:
        {
                const char *inbuf = compressed;
                size_t inlen = compressed_size;
                int r;
                static const unsigned char snappy_java_magic[] =
                        { 0x82, 'S','N','A','P','P','Y', 0 };
                static const size_t snappy_java_hdrlen = 8+4+4;

                /* snappy-java adds its own header (SnappyCodec)
                 * which is not compatible with the official Snappy
                 * implementation.
                 *   8: magic, 4: version, 4: compatible
                 * followed by any number of chunks:
                 *   4: length
                 * ...: snappy-compressed data. */
                if (likely(inlen > snappy_java_hdrlen + 4 &&
                           !memcmp(inbuf, snappy_java_magic, 8))) {
                        /* snappy-java framing */
                        char errstr[128];

                        inbuf  = inbuf + snappy_java_hdrlen;
                        inlen -= snappy_java_hdrlen;
                        iov.iov_base = rd_kafka_snappy_java_uncompress(
                                inbuf, inlen,
                                &iov.iov_len,
                                errstr, sizeof(errstr));

                        if (unlikely(!iov.iov_base)) {
                                rd_rkb_dbg(msetr->msetr_rkb, MSG, "SNAPPY",
                                           "%s [%"PRId32"]: "
                                           "Snappy decompression for message "
                                           "at offset %"PRId64" failed: %s: "
                                           "ignoring message",
                                           rktp->rktp_rkt->rkt_topic->str,
                                           rktp->rktp_partition,
                                           Offset, errstr);
                                err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
                                goto err;
                        }


                } else {
                        /* No framing */

                        /* Acquire uncompressed length */
                        if (unlikely(!rd_kafka_snappy_uncompressed_length(
                                             inbuf, inlen, &iov.iov_len))) {
                                rd_rkb_dbg(msetr->msetr_rkb, MSG, "SNAPPY",
                                           "Failed to get length of Snappy "
                                           "compressed payload "
                                           "for message at offset %"PRId64
                                           " (%"PRIusz" bytes): "
                                           "ignoring message",
                                           Offset, inlen);
                                err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
                                goto err;
                        }

                        /* Allocate output buffer for uncompressed data */
                        iov.iov_base = rd_malloc(iov.iov_len);
                        if (unlikely(!iov.iov_base)) {
                                rd_rkb_dbg(msetr->msetr_rkb, MSG, "SNAPPY",
                                           "Failed to allocate Snappy "
                                           "decompress buffer of size %"PRIusz
                                           "for message at offset %"PRId64
                                           " (%"PRIusz" bytes): %s: "
                                           "ignoring message",
                                           iov.iov_len, Offset, inlen,
                                           rd_strerror(errno));
                                err = RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
                                goto err;
                        }

                        /* Uncompress to outbuf */
                        if (unlikely((r = rd_kafka_snappy_uncompress(
                                              inbuf, inlen, iov.iov_base)))) {
                                rd_rkb_dbg(msetr->msetr_rkb, MSG, "SNAPPY",
                                           "Failed to decompress Snappy "
                                           "payload for message at offset "
                                           "%"PRId64" (%"PRIusz" bytes): %s: "
                                           "ignoring message",
                                           Offset, inlen,
                                           rd_strerror(-r/*negative errno*/));
                                rd_free(iov.iov_base);
                                err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
                                goto err;
                        }
                }

        }
        break;
#endif

        case RD_KAFKA_COMPRESSION_LZ4:
        {
                err = rd_kafka_lz4_decompress(msetr->msetr_rkb,
                                              /* Proper HC? */
                                              MsgVersion >= 1 ? 1 : 0,
                                              Offset,
                                              /* @warning Will modify compressed
                                               *          if no proper HC */
                                              (char *)compressed,
                                              compressed_size,
                                              &iov.iov_base, &iov.iov_len);
                if (err)
                        goto err;
        }
        break;

        default:
                rd_rkb_dbg(msetr->msetr_rkb, MSG, "CODEC",
                           "%s [%"PRId32"]: Message at offset %"PRId64
                           " with unsupported "
                           "compression codec 0x%x: message ignored",
                           rktp->rktp_rkt->rkt_topic->str,
                           rktp->rktp_partition,
                           Offset, (int)codec);

                err = RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED;
                goto err;
        }


        rd_assert(iov.iov_base);

        /*
         * Decompression successful
         */

        /* Create a new buffer pointing to the uncompressed
         * allocated buffer (outbuf) and let messages keep a reference to
         * this new buffer. */
        rkbufz = rd_kafka_buf_new_shadow(iov.iov_base, iov.iov_len, rd_free);
        rkbufz->rkbuf_rkb = msetr->msetr_rkbuf->rkbuf_rkb;
        rd_kafka_broker_keep(rkbufz->rkbuf_rkb);


        /* In MsgVersion v0..1 the decompressed data contains
         * an inner MessageSet, pass it to a new MessageSet reader.
         *
         * For MsgVersion v2 the decompressed data are the list of messages.
         */

        if (MsgVersion <= 1) {
                /* Pass decompressed data (inner Messageset)
                 * to new instance of the MessageSet parser. */
                rd_kafka_msgset_reader_t inner_msetr;
                rd_kafka_msgset_reader_init(&inner_msetr,
                                            rkbufz,
                                            msetr->msetr_rktp,
                                            msetr->msetr_tver,
                                            &msetr->msetr_rkq);

                if (MsgVersion == 1) {
                        /* postproc() will convert relative to
                         * absolute offsets */
                        inner_msetr.msetr_relative_offsets = 1;
                        inner_msetr.msetr_outer.offset = Offset;

                        /* Apply single LogAppendTime timestamp for
                         * all messages. */
                        if (Attributes & RD_KAFKA_MSG_ATTR_LOG_APPEND_TIME) {
                                inner_msetr.msetr_outer.tstype =
                                        RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME;
                                inner_msetr.msetr_outer.timestamp = Timestamp;
                        }
                }

                /* Parse the inner MessageSet */
                err = rd_kafka_msgset_reader_run(&inner_msetr);


        } else {
                /* MsgVersion 2 */
                rd_kafka_buf_t *orig_rkbuf = msetr->msetr_rkbuf;

                /* Temporarily replace read buffer with uncompressed buffer */
                msetr->msetr_rkbuf = rkbufz;

                /* Read messages */
                err = rd_kafka_msgset_reader_msgs_v2(msetr);

                /* Restore original buffer */
                msetr->msetr_rkbuf = orig_rkbuf;
        }

        /* Lose our refcnt of the uncompressed rkbuf.
         * Individual messages/rko's will have their own reference. */
        rd_kafka_buf_destroy(rkbufz);

        return err;

 err:
        /* Enqueue error message:
         * Create op and push on temporary queue. */
        rd_kafka_q_op_err(&msetr->msetr_rkq, RD_KAFKA_OP_CONSUMER_ERR,
                          err, msetr->msetr_tver->version, rktp, Offset,
                          "Decompression (codec 0x%x) of message at %"PRId64
                          " of %"PRIusz" bytes failed: %s",
                          codec, Offset, compressed_size, rd_kafka_err2str(err));

        return err;

}
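For reference, the snappy-java (SnappyCodec) framing that the Snappy branch strips above is laid out as follows; this restates the in-code comment, it is not a struct the parser uses:

/* offset  0:  8 bytes  magic: 0x82 'S' 'N' 'A' 'P' 'P' 'Y' 0x00
 * offset  8:  4 bytes  version
 * offset 12:  4 bytes  compatible version
 * offset 16:  chunks, each being
 *             4 bytes  chunk length, followed by
 *             ...      Snappy-compressed data
 * (snappy_java_hdrlen = 8 + 4 + 4 = 16 bytes)
 */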



/**
 * @brief Message parser for MsgVersion v0..1
 *
 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or on single-message errors,
 *          or any other error code when the MessageSet parser should stop
 *          parsing (such as for partial Messages).
 */
static rd_kafka_resp_err_t
rd_kafka_msgset_reader_msg_v0_1 (rd_kafka_msgset_reader_t *msetr) {
        rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf;
        rd_kafka_toppar_t *rktp = msetr->msetr_rktp;
        rd_kafka_broker_t *rkb = msetr->msetr_rkb;
        struct {
                int64_t Offset;       /* MessageSet header */
                int32_t MessageSize;  /* MessageSet header */
                uint32_t Crc;
                int8_t  MagicByte;  /* MsgVersion */
                int8_t  Attributes;
                int64_t Timestamp;  /* v1 */
        } hdr; /* Message header */
        rd_kafkap_bytes_t Key;
        rd_kafkap_bytes_t Value;
        int32_t Value_len;
        rd_kafka_op_t *rko;
        size_t hdrsize = 6; /* Header size following MessageSize */
        rd_slice_t crc_slice;
        rd_kafka_msg_t *rkm;
        int relative_offsets = 0;
        const char *reloff_str = "";
        /* Only log decoding errors if protocol debugging enabled. */
        int log_decode_errors = (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug &
                                 RD_KAFKA_DBG_PROTOCOL) ? LOG_DEBUG : 0;
        size_t message_end;

        rd_kafka_buf_read_i64(rkbuf, &hdr.Offset);
        rd_kafka_buf_read_i32(rkbuf, &hdr.MessageSize);
        message_end = rd_slice_offset(&rkbuf->rkbuf_reader) + hdr.MessageSize;

        rd_kafka_buf_read_i32(rkbuf, &hdr.Crc);
        if (!rd_slice_narrow_copy_relative(&rkbuf->rkbuf_reader, &crc_slice,
                                           hdr.MessageSize - 4))
                rd_kafka_buf_check_len(rkbuf, hdr.MessageSize - 4);

        rd_kafka_buf_read_i8(rkbuf, &hdr.MagicByte);
        rd_kafka_buf_read_i8(rkbuf, &hdr.Attributes);

        if (hdr.MagicByte == 1) { /* MsgVersion */
                rd_kafka_buf_read_i64(rkbuf, &hdr.Timestamp);
                hdrsize += 8;
                /* MsgVersion 1 has relative offsets for compressed MessageSets */
                if (!(hdr.Attributes & RD_KAFKA_MSG_ATTR_COMPRESSION_MASK) &&
                    msetr->msetr_relative_offsets) {
                        relative_offsets = 1;
                        reloff_str = "relative ";
                }
        } else
                hdr.Timestamp = 0;

        /* Verify MessageSize */
        if (unlikely(hdr.MessageSize < (ssize_t)hdrsize))
                rd_kafka_buf_parse_fail(rkbuf,
                                        "Message at %soffset %"PRId64
                                        " MessageSize %"PRId32
                                        " < hdrsize %"PRIusz,
                                        reloff_str,
                                        hdr.Offset, hdr.MessageSize, hdrsize);

        /* Early check for partial messages */
        rd_kafka_buf_check_len(rkbuf, hdr.MessageSize - hdrsize);

        if (rkb->rkb_rk->rk_conf.check_crcs) {
                /* Verify CRC32 if desired. */
                uint32_t calc_crc;

                calc_crc = rd_slice_crc32(&crc_slice);
                rd_dassert(rd_slice_remains(&crc_slice) == 0);

                if (unlikely(hdr.Crc != calc_crc)) {
                        /* Propagate CRC error to application and
                         * continue with next message. */
                        rd_kafka_q_op_err(&msetr->msetr_rkq,
                                          RD_KAFKA_OP_CONSUMER_ERR,
                                          RD_KAFKA_RESP_ERR__BAD_MSG,
                                          msetr->msetr_tver->version,
                                          rktp,
                                          hdr.Offset,
                                          "Message at %soffset %"PRId64
                                          " (%"PRId32" bytes) "
                                          "failed CRC32 check "
                                          "(original 0x%"PRIx32" != "
                                          "calculated 0x%"PRIx32")",
                                          reloff_str, hdr.Offset,
                                          hdr.MessageSize, hdr.Crc, calc_crc);
                        rd_kafka_buf_skip_to(rkbuf, message_end);
                        rd_atomic64_add(&rkb->rkb_c.rx_err, 1);
                        /* Continue with next message */
                        return RD_KAFKA_RESP_ERR_NO_ERROR;
                }
        }


        /* Extract key */
        rd_kafka_buf_read_bytes(rkbuf, &Key);

        /* Extract Value */
        rd_kafka_buf_read_bytes(rkbuf, &Value);
        Value_len = RD_KAFKAP_BYTES_LEN(&Value);

        /* MessageSets may contain offsets earlier than we
         * requested (compressed MessageSets in particular),
         * drop the earlier messages.
         * Note: the inner offset may only be trusted for
         *       absolute offsets. KIP-31 introduced
         *       ApiVersion 2 that maintains relative offsets
         *       of compressed messages and the base offset
         *       in the outer message is the offset of
         *       the *LAST* message in the MessageSet.
         *       This requires us to assign offsets
         *       after all messages have been read from
         *       the messageset, and it also means
         *       we can't perform this offset check here
         *       in that case. */
        if (!relative_offsets &&
            hdr.Offset < rktp->rktp_offsets.fetch_offset)
                return RD_KAFKA_RESP_ERR_NO_ERROR; /* Continue with next msg */

        /* Handle compressed MessageSet */
        if (unlikely(hdr.Attributes & RD_KAFKA_MSG_ATTR_COMPRESSION_MASK))
                return rd_kafka_msgset_reader_decompress(
                        msetr, hdr.MagicByte, hdr.Attributes, hdr.Timestamp,
                        hdr.Offset, Value.data, Value_len);


        /* Pure uncompressed message, this is the innermost
         * handler after all compression and cascaded
         * MessageSets have been peeled off. */

        /* Create op/message container for message. */
        rko = rd_kafka_op_new_fetch_msg(&rkm, rktp, msetr->msetr_tver->version,
                                        rkbuf,
                                        hdr.Offset,
                                        (size_t)RD_KAFKAP_BYTES_LEN(&Key),
                                        RD_KAFKAP_BYTES_IS_NULL(&Key) ?
                                        NULL : Key.data,
                                        (size_t)RD_KAFKAP_BYTES_LEN(&Value),
                                        RD_KAFKAP_BYTES_IS_NULL(&Value) ?
                                        NULL : Value.data);

        /* Assign message timestamp.
         * If message was in a compressed MessageSet and the outer/wrapper
         * Message.Attribute had a LOG_APPEND_TIME set, use the
         * outer timestamp */
        if (msetr->msetr_outer.tstype == RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME) {
                rkm->rkm_timestamp = msetr->msetr_outer.timestamp;
                rkm->rkm_tstype    = msetr->msetr_outer.tstype;

        } else if (hdr.MagicByte >= 1 && hdr.Timestamp) {
                rkm->rkm_timestamp = hdr.Timestamp;
                if (hdr.Attributes & RD_KAFKA_MSG_ATTR_LOG_APPEND_TIME)
                        rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME;
                else
                        rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_CREATE_TIME;
        }

        /* Enqueue message on temporary queue */
        rd_kafka_q_enq(&msetr->msetr_rkq, rko);
        msetr->msetr_msgcnt++;

        return RD_KAFKA_RESP_ERR_NO_ERROR; /* Continue */

 err_parse:
        /* Count all parse errors as partial message errors. */
        rd_atomic64_add(&msetr->msetr_rkb->rkb_c.rx_partial, 1);
        return rkbuf->rkbuf_err;
}
Example #23
void rd_kafka_queue_destroy (rd_kafka_queue_t *rkqu) {
	rd_kafka_q_disable(rkqu->rkqu_q);
	rd_kafka_q_destroy(rkqu->rkqu_q);
	rd_free(rkqu);
}
Example #24
/**
 * @brief Test creation of partitions
 *
 *
 */
static void do_test_CreatePartitions (const char *what,
                                      rd_kafka_t *rk, rd_kafka_queue_t *useq,
                                      int op_timeout) {
        rd_kafka_queue_t *q = useq ? useq : rd_kafka_queue_new(rk);
#define MY_CRP_TOPICS_CNT 9
        char *topics[MY_CRP_TOPICS_CNT];
        rd_kafka_NewTopic_t *new_topics[MY_CRP_TOPICS_CNT];
        rd_kafka_NewPartitions_t *crp_topics[MY_CRP_TOPICS_CNT];
        rd_kafka_AdminOptions_t *options = NULL;
        /* Expected topics in metadata */
        rd_kafka_metadata_topic_t exp_mdtopics[MY_CRP_TOPICS_CNT] = {{0}};
        rd_kafka_metadata_partition_t exp_mdparts[2] = {{0}};
        int exp_mdtopic_cnt = 0;
        int i;
        char errstr[512];
        rd_kafka_resp_err_t err;
        test_timing_t timing;
        int metadata_tmout;
        int num_replicas = (int)avail_broker_cnt;

        TEST_SAY(_C_MAG "[ %s CreatePartitions with %s, op_timeout %d ]\n",
                 rd_kafka_name(rk), what, op_timeout);

        /* Set up two expected partitions with different replication sets
         * so they can be matched by the metadata checker later.
         * Even partitions use exp_mdparts[0] while odd partitions
         * use exp_mdparts[1]. */

        /* Set valid replica assignments (even, and odd (reverse) ) */
        exp_mdparts[0].replicas = rd_alloca(sizeof(*exp_mdparts[0].replicas) *
                                            num_replicas);
        exp_mdparts[1].replicas = rd_alloca(sizeof(*exp_mdparts[1].replicas) *
                                            num_replicas);
        exp_mdparts[0].replica_cnt = num_replicas;
        exp_mdparts[1].replica_cnt = num_replicas;
        for (i = 0 ; i < num_replicas ; i++) {
                exp_mdparts[0].replicas[i] = avail_brokers[i];
                exp_mdparts[1].replicas[i] = avail_brokers[num_replicas-i-1];
        }

        /**
         * Construct CreatePartitions array
         */
        for (i = 0 ; i < MY_CRP_TOPICS_CNT ; i++) {
                char *topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
                int initial_part_cnt = 1 + (i * 2);
                int new_part_cnt = 1 + (i / 2);
                int final_part_cnt = initial_part_cnt + new_part_cnt;
                int set_replicas = !(i % 2);
                int pi;

                topics[i] = topic;

                /* Topic to create with initial partition count */
                new_topics[i] = rd_kafka_NewTopic_new(topic, initial_part_cnt,
                                                      set_replicas ?
                                                      -1 : num_replicas,
                                                      NULL, 0);

                /* .. and later add more partitions to */
                crp_topics[i] = rd_kafka_NewPartitions_new(topic,
                                                           final_part_cnt,
                                                           errstr,
                                                           sizeof(errstr));

                if (set_replicas) {
                        exp_mdtopics[exp_mdtopic_cnt].partitions =
                                rd_alloca(final_part_cnt *
                                          sizeof(*exp_mdtopics[exp_mdtopic_cnt].
                                                 partitions));

                        for (pi = 0 ; pi < final_part_cnt ; pi++) {
                                const rd_kafka_metadata_partition_t *exp_mdp =
                                        &exp_mdparts[pi & 1];

                                exp_mdtopics[exp_mdtopic_cnt].
                                        partitions[pi] = *exp_mdp; /* copy */

                                exp_mdtopics[exp_mdtopic_cnt].
                                        partitions[pi].id = pi;

                                if (pi < initial_part_cnt) {
                                        /* Set replica assignment
                                         * for initial partitions */
                                        err = rd_kafka_NewTopic_set_replica_assignment(
                                                new_topics[i], pi,
                                                exp_mdp->replicas,
                                                (size_t)exp_mdp->replica_cnt,
                                                errstr, sizeof(errstr));
                                        TEST_ASSERT(!err, "NewTopic_set_replica_assignment: %s",
                                                errstr);
                                } else {
                                        /* Set replica assignment for new
                                         * partitions */
                                        err = rd_kafka_NewPartitions_set_replica_assignment(
                                                crp_topics[i],
                                                pi - initial_part_cnt,
                                                exp_mdp->replicas,
                                                (size_t)exp_mdp->replica_cnt,
                                                errstr, sizeof(errstr));
                                        TEST_ASSERT(!err, "NewPartitions_set_replica_assignment: %s",
                                                errstr);
                                }

                        }
                }

                TEST_SAY(_C_YEL "Topic %s with %d initial partitions will grow "
                         "by %d to %d total partitions with%s replicas set\n",
                         topics[i],
                         initial_part_cnt, new_part_cnt, final_part_cnt,
                         set_replicas ? "" : "out");

                exp_mdtopics[exp_mdtopic_cnt].topic = topic;
                exp_mdtopics[exp_mdtopic_cnt].partition_cnt = final_part_cnt;

                exp_mdtopic_cnt++;
        }

        if (op_timeout != -1) {
                options = rd_kafka_AdminOptions_new(
                        rk, RD_KAFKA_ADMIN_OP_ANY);

                err = rd_kafka_AdminOptions_set_operation_timeout(
                        options, op_timeout, errstr, sizeof(errstr));
                TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
        }

        /*
         * Create topics with initial partition count
         */
        TIMING_START(&timing, "CreateTopics");
        TEST_SAY("Creating topics with initial partition counts\n");
        rd_kafka_CreateTopics(rk, new_topics, MY_CRP_TOPICS_CNT,
                                    options, q);
        TIMING_ASSERT_LATER(&timing, 0, 50);

        err = test_wait_topic_admin_result(q,
                                           RD_KAFKA_EVENT_CREATETOPICS_RESULT,
                                           NULL, 15000);
        TEST_ASSERT(!err, "CreateTopics failed: %s", rd_kafka_err2str(err));

        rd_kafka_NewTopic_destroy_array(new_topics, MY_CRP_TOPICS_CNT);


        /*
         * Create new partitions
         */
        TIMING_START(&timing, "CreatePartitions");
        TEST_SAY("Creating partitions\n");
        rd_kafka_CreatePartitions(rk, crp_topics, MY_CRP_TOPICS_CNT,
                                    options, q);
        TIMING_ASSERT_LATER(&timing, 0, 50);

        err = test_wait_topic_admin_result(q,
                                           RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT,
                                           NULL, 15000);
        TEST_ASSERT(!err, "CreatePartitions failed: %s", rd_kafka_err2str(err));

        rd_kafka_NewPartitions_destroy_array(crp_topics, MY_CRP_TOPICS_CNT);


        /**
         * Verify that the expected partitions are created and the non-expected
         * are not. Allow it some time to propagate.
         */
        if (op_timeout > 0)
                metadata_tmout = op_timeout + 1000;
        else
                metadata_tmout = 10 * 1000;

        test_wait_metadata_update(rk,
                                  exp_mdtopics,
                                  exp_mdtopic_cnt,
                                  NULL, 0,
                                  metadata_tmout);

        for (i = 0 ; i < MY_CRP_TOPICS_CNT ; i++)
                rd_free(topics[i]);

        if (options)
                rd_kafka_AdminOptions_destroy(options);

        if (!useq)
                rd_kafka_queue_destroy(q);

#undef MY_CRP_TOPICS_CNT
}
Example #25
static void do_test_regex (void) {
	char *base_topic = rd_strdup(test_mk_topic_name("topic", 1));
	char *topic_b = rd_strdup(tsprintf("%s_b", base_topic));
	char *topic_c = rd_strdup(tsprintf("%s_c", base_topic));
	char *topic_d = rd_strdup(tsprintf("%s_d", base_topic));
	char *topic_e = rd_strdup(tsprintf("%s_e", base_topic));
	rd_kafka_t *rk;
	rd_kafka_conf_t *conf;
	rd_kafka_queue_t *queue;

	/**
	 * Regex test:
	 * - Create topic b
	 * - Subscribe to b & d & e
	 * - Verify b assignment
	 * - Create topic c
	 * - Verify no rebalance
	 * - Create topic d
	 * - Verify b & d assignment
	 */
	TEST_SAY("Regex testing\n");
	test_conf_init(&conf, NULL, 60);

	/* Decrease metadata interval to speed up topic change discovery. */
	test_conf_set(conf, "metadata.max.age.ms", "5000");

	rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE);
	rk = test_create_consumer(test_str_id_generate_tmp(),
				  NULL, conf, NULL, NULL);
	queue = rd_kafka_queue_get_consumer(rk);

	TEST_SAY("Regex: creating topic %s (subscribed)\n", topic_b);
	test_create_topic(topic_b, 2, 1);
	rd_sleep(1); // FIXME: do check&wait loop instead

	TEST_SAY("Regex: Subscribing to %s & %s & %s\n",
		 topic_b, topic_d, topic_e);
	test_consumer_subscribe(rk, tsprintf("^%s_[bde]$", base_topic));

	await_assignment("Regex: just one topic exists", rk, queue, 1,
			 topic_b, 2);

	TEST_SAY("Regex: creating topic %s (not subscribed)\n", topic_c);
	test_create_topic(topic_c, 4, 1);

	/* Should not see a rebalance since no topics are matched. */
	await_no_rebalance("Regex: empty", rk, queue, 10000);

	TEST_SAY("Regex: creating topic %s (subscribed)\n", topic_d);
	test_create_topic(topic_d, 1, 1);

	await_revoke("Regex: rebalance after topic creation", rk, queue);

	await_assignment("Regex: two topics exist", rk, queue, 2,
			 topic_b, 2,
			 topic_d, 1);

	test_consumer_close(rk);
	rd_kafka_queue_destroy(queue);
	rd_kafka_destroy(rk);

	rd_free(base_topic);
	rd_free(topic_b);
	rd_free(topic_c);
	rd_free(topic_d);
	rd_free(topic_e);
}
Example #26
static void do_test_CreateTopics (const char *what,
                                  rd_kafka_t *rk, rd_kafka_queue_t *useq,
                                  int op_timeout, rd_bool_t validate_only) {
        rd_kafka_queue_t *q = useq ? useq : rd_kafka_queue_new(rk);
#define MY_NEW_TOPICS_CNT 6
        char *topics[MY_NEW_TOPICS_CNT];
        rd_kafka_NewTopic_t *new_topics[MY_NEW_TOPICS_CNT];
        rd_kafka_AdminOptions_t *options = NULL;
        rd_kafka_resp_err_t exp_topicerr[MY_NEW_TOPICS_CNT] = {0};
        rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
        /* Expected topics in metadata */
        rd_kafka_metadata_topic_t exp_mdtopics[MY_NEW_TOPICS_CNT] = {{0}};
        int exp_mdtopic_cnt = 0;
        /* Not expected topics in metadata */
        rd_kafka_metadata_topic_t exp_not_mdtopics[MY_NEW_TOPICS_CNT] = {{0}};
        int exp_not_mdtopic_cnt = 0;
        int i;
        char errstr[512];
        const char *errstr2;
        rd_kafka_resp_err_t err;
        test_timing_t timing;
        rd_kafka_event_t *rkev;
        const rd_kafka_CreateTopics_result_t *res;
        const rd_kafka_topic_result_t **restopics;
        size_t restopic_cnt;
        int metadata_tmout;
        int num_replicas = (int)avail_broker_cnt;
        int32_t *replicas;

        /* Set up replicas */
        replicas = rd_alloca(sizeof(*replicas) * num_replicas);
        for (i = 0 ; i < num_replicas ; i++)
                replicas[i] = avail_brokers[i];

        TEST_SAY(_C_MAG "[ %s CreateTopics with %s, "
                 "op_timeout %d, validate_only %d ]\n",
                 rd_kafka_name(rk), what, op_timeout, validate_only);

        /**
         * Construct NewTopic array with different properties for
         * different partitions.
         */
        for (i = 0 ; i < MY_NEW_TOPICS_CNT ; i++) {
                char *topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
                int num_parts = i * 7 + 1;
                int set_config = (i & 1);
                int add_invalid_config = (i == 1);
                int set_replicas = !(i % 3);
                rd_kafka_resp_err_t this_exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;

                topics[i] = topic;
                new_topics[i] = rd_kafka_NewTopic_new(topic,
                                                      num_parts,
                                                      set_replicas ? -1 :
                                                      num_replicas,
                                                      NULL, 0);

                if (set_config) {
                        /*
                         * Add various configuration properties
                         */
                        err = rd_kafka_NewTopic_set_config(
                                new_topics[i], "compression.type", "lz4");
                        TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));

                        err = rd_kafka_NewTopic_set_config(
                                new_topics[i], "delete.retention.ms", "900");
                        TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
                }

                if (add_invalid_config) {
                        /* Add invalid config property */
                        err = rd_kafka_NewTopic_set_config(
                                new_topics[i],
                                "dummy.doesntexist",
                                "broker is verifying this");
                        TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
                        this_exp_err = RD_KAFKA_RESP_ERR_INVALID_CONFIG;
                }

                TEST_SAY("Expected result for topic #%d: %s "
                         "(set_config=%d, add_invalid_config=%d, "
                         "set_replicas=%d)\n",
                         i, rd_kafka_err2name(this_exp_err),
                         set_config, add_invalid_config, set_replicas);

                if (set_replicas) {
                        int32_t p;

                        /*
                         * Set valid replica assignments
                         */
                        for (p = 0 ; p < num_parts ; p++) {
                                err = rd_kafka_NewTopic_set_replica_assignment(
                                        new_topics[i], p,
                                        replicas, num_replicas,
                                        errstr, sizeof(errstr));
                                TEST_ASSERT(!err, "%s", errstr);
                        }
                }

                if (this_exp_err || validate_only) {
                        exp_topicerr[i] = this_exp_err;
                        exp_not_mdtopics[exp_not_mdtopic_cnt++].topic = topic;

                } else {
                        exp_mdtopics[exp_mdtopic_cnt].topic = topic;
                        exp_mdtopics[exp_mdtopic_cnt].partition_cnt =
                                num_parts;
                        exp_mdtopic_cnt++;
                }
        }

        if (op_timeout != -1 || validate_only) {
                options = rd_kafka_AdminOptions_new(
                        rk, RD_KAFKA_ADMIN_OP_CREATETOPICS);

                if (op_timeout != -1) {
                        err = rd_kafka_AdminOptions_set_operation_timeout(
                                options, op_timeout, errstr, sizeof(errstr));
                        TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
                }

                if (validate_only) {
                        err = rd_kafka_AdminOptions_set_validate_only(
                                options, validate_only, errstr, sizeof(errstr));
                        TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
                }
        }

        TIMING_START(&timing, "CreateTopics");
        TEST_SAY("Call CreateTopics\n");
        rd_kafka_CreateTopics(rk, new_topics, MY_NEW_TOPICS_CNT,
                                    options, q);
        TIMING_ASSERT_LATER(&timing, 0, 50);

        /* Poll result queue for CreateTopics result.
         * Print but otherwise ignore other event types
         * (typically generic Error events). */
        TIMING_START(&timing, "CreateTopics.queue_poll");
        do {
                rkev = rd_kafka_queue_poll(q, tmout_multip(20*1000));
                TEST_SAY("CreateTopics: got %s in %.3fms\n",
                         rd_kafka_event_name(rkev),
                         TIMING_DURATION(&timing) / 1000.0f);
                if (rd_kafka_event_error(rkev))
                        TEST_SAY("%s: %s\n",
                                 rd_kafka_event_name(rkev),
                                 rd_kafka_event_error_string(rkev));
        } while (rd_kafka_event_type(rkev) !=
                 RD_KAFKA_EVENT_CREATETOPICS_RESULT);

        /* Convert event to proper result */
        res = rd_kafka_event_CreateTopics_result(rkev);
        TEST_ASSERT(res, "expected CreateTopics_result, not %s",
                    rd_kafka_event_name(rkev));

        /* Expecting error */
        err = rd_kafka_event_error(rkev);
        errstr2 = rd_kafka_event_error_string(rkev);
        TEST_ASSERT(err == exp_err,
                    "expected CreateTopics to return %s, not %s (%s)",
                    rd_kafka_err2str(exp_err),
                    rd_kafka_err2str(err),
                    err ? errstr2 : "n/a");

        TEST_SAY("CreateTopics: returned %s (%s)\n",
                 rd_kafka_err2str(err), err ? errstr2 : "n/a");

        /* Extract topics */
        restopics = rd_kafka_CreateTopics_result_topics(res, &restopic_cnt);


        /* Scan topics for proper fields and expected failures. */
        for (i = 0 ; i < (int)restopic_cnt ; i++) {
                const rd_kafka_topic_result_t *terr = restopics[i];

                /* Verify that topic order matches our request. */
                if (strcmp(rd_kafka_topic_result_name(terr), topics[i]))
                        TEST_FAIL_LATER("Topic result order mismatch at #%d: "
                                        "expected %s, got %s",
                                        i, topics[i],
                                        rd_kafka_topic_result_name(terr));

                TEST_SAY("CreateTopics result: #%d: %s: %s: %s\n",
                         i,
                         rd_kafka_topic_result_name(terr),
                         rd_kafka_err2name(rd_kafka_topic_result_error(terr)),
                         rd_kafka_topic_result_error_string(terr));
                if (rd_kafka_topic_result_error(terr) != exp_topicerr[i])
                        TEST_FAIL_LATER(
                                "Expected %s, not %d: %s",
                                rd_kafka_err2name(exp_topicerr[i]),
                                rd_kafka_topic_result_error(terr),
                                rd_kafka_err2name(rd_kafka_topic_result_error(
                                                          terr)));
        }

        /**
         * Verify that the expected topics are created and the non-expected
         * are not. Allow it some time to propagate.
         */
        if (validate_only) {
                /* No topics should have been created, give it some time
                 * before checking. */
                rd_sleep(2);
                metadata_tmout = 5 * 1000;
        } else {
                if (op_timeout > 0)
                        metadata_tmout = op_timeout + 1000;
                else
                        metadata_tmout = 10 * 1000;
        }

        test_wait_metadata_update(rk,
                                  exp_mdtopics,
                                  exp_mdtopic_cnt,
                                  exp_not_mdtopics,
                                  exp_not_mdtopic_cnt,
                                  metadata_tmout);

        rd_kafka_event_destroy(rkev);

        for (i = 0 ; i < MY_NEW_TOPICS_CNT ; i++) {
                rd_kafka_NewTopic_destroy(new_topics[i]);
                rd_free(topics[i]);
        }

        if (options)
                rd_kafka_AdminOptions_destroy(options);

        if (!useq)
                rd_kafka_queue_destroy(q);

#undef MY_NEW_TOPICS_CNT
}
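
The poll loop above follows a reusable pattern: poll the queue, log and discard unrelated events (typically generic Error events), and stop at the expected result type. As a minimal sketch using only the public librdkafka event API (the helper name is invented for illustration), the pattern can be factored out like this:

static rd_kafka_event_t *
wait_for_event (rd_kafka_queue_t *q, rd_kafka_event_type_t want,
                int timeout_ms) {
        while (1) {
                rd_kafka_event_t *rkev = rd_kafka_queue_poll(q, timeout_ms);

                if (!rkev)
                        return NULL; /* Poll timed out */

                if (rd_kafka_event_type(rkev) == want)
                        return rkev; /* Caller owns and destroys the event */

                /* Unrelated event: drop it so it is not leaked */
                rd_kafka_event_destroy(rkev);
        }
}
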
Example #27
/**
 * @brief Test deletion of topics
 */
static void do_test_DeleteTopics (const char *what,
                                  rd_kafka_t *rk, rd_kafka_queue_t *useq,
                                  int op_timeout) {
        rd_kafka_queue_t *q = useq ? useq : rd_kafka_queue_new(rk);
        const int skip_topic_cnt = 2;
#define MY_DEL_TOPICS_CNT 9
        char *topics[MY_DEL_TOPICS_CNT];
        rd_kafka_DeleteTopic_t *del_topics[MY_DEL_TOPICS_CNT];
        rd_kafka_AdminOptions_t *options = NULL;
        rd_kafka_resp_err_t exp_topicerr[MY_DEL_TOPICS_CNT] = {0};
        rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
        /* Expected topics in metadata */
        rd_kafka_metadata_topic_t exp_mdtopics[MY_DEL_TOPICS_CNT] = {{0}};
        int exp_mdtopic_cnt = 0;
        /* Not expected topics in metadata */
        rd_kafka_metadata_topic_t exp_not_mdtopics[MY_DEL_TOPICS_CNT] = {{0}};
        int exp_not_mdtopic_cnt = 0;
        int i;
        char errstr[512];
        const char *errstr2;
        rd_kafka_resp_err_t err;
        test_timing_t timing;
        rd_kafka_event_t *rkev;
        const rd_kafka_DeleteTopics_result_t *res;
        const rd_kafka_topic_result_t **restopics;
        size_t restopic_cnt;
        int metadata_tmout;

        TEST_SAY(_C_MAG "[ %s DeleteTopics with %s, op_timeout %d ]\n",
                 rd_kafka_name(rk), what, op_timeout);

        /**
         * Construct DeleteTopic array
         */
        for (i = 0 ; i < MY_DEL_TOPICS_CNT ; i++) {
                char *topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
                int notexist_topic = i >= MY_DEL_TOPICS_CNT - skip_topic_cnt;

                topics[i] = topic;

                del_topics[i] = rd_kafka_DeleteTopic_new(topic);

                if (notexist_topic)
                        exp_topicerr[i] =
                                RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
                else {
                        exp_topicerr[i] =
                                RD_KAFKA_RESP_ERR_NO_ERROR;

                        exp_mdtopics[exp_mdtopic_cnt++].topic = topic;
                }

                exp_not_mdtopics[exp_not_mdtopic_cnt++].topic = topic;
        }

        if (op_timeout != -1) {
                options = rd_kafka_AdminOptions_new(
                        rk, RD_KAFKA_ADMIN_OP_ANY);

                err = rd_kafka_AdminOptions_set_operation_timeout(
                        options, op_timeout, errstr, sizeof(errstr));
                TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
        }


        /* Create the topics first, minus the skip count. */
        test_CreateTopics_simple(rk, NULL, topics,
                                 MY_DEL_TOPICS_CNT-skip_topic_cnt,
                                 2/*num_partitions*/,
                                 NULL);

        /* Verify that topics are reported by metadata */
        test_wait_metadata_update(rk,
                                  exp_mdtopics, exp_mdtopic_cnt,
                                  NULL, 0,
                                  15*1000);

        TIMING_START(&timing, "DeleteTopics");
        TEST_SAY("Call DeleteTopics\n");
        rd_kafka_DeleteTopics(rk, del_topics, MY_DEL_TOPICS_CNT,
                              options, q);
        TIMING_ASSERT_LATER(&timing, 0, 50);

        /* Poll result queue for DeleteTopics result.
         * Print but otherwise ignore other event types
         * (typically generic Error events). */
        TIMING_START(&timing, "DeleteTopics.queue_poll");
        while (1) {
                rkev = rd_kafka_queue_poll(q, tmout_multip(20*1000));
                TEST_SAY("DeleteTopics: got %s in %.3fms\n",
                         rd_kafka_event_name(rkev),
                         TIMING_DURATION(&timing) / 1000.0f);
                if (rd_kafka_event_error(rkev))
                        TEST_SAY("%s: %s\n",
                                 rd_kafka_event_name(rkev),
                                 rd_kafka_event_error_string(rkev));

                if (rd_kafka_event_type(rkev) ==
                    RD_KAFKA_EVENT_DELETETOPICS_RESULT)
                        break;

                rd_kafka_event_destroy(rkev);
        }

        /* Convert event to proper result */
        res = rd_kafka_event_DeleteTopics_result(rkev);
        TEST_ASSERT(res, "expected DeleteTopics_result, not %s",
                    rd_kafka_event_name(rkev));

        /* Check the top-level error against the expected error */
        err = rd_kafka_event_error(rkev);
        errstr2 = rd_kafka_event_error_string(rkev);
        TEST_ASSERT(err == exp_err,
                    "expected DeleteTopics to return %s, not %s (%s)",
                    rd_kafka_err2str(exp_err),
                    rd_kafka_err2str(err),
                    err ? errstr2 : "n/a");

        TEST_SAY("DeleteTopics: returned %s (%s)\n",
                 rd_kafka_err2str(err), err ? errstr2 : "n/a");

        /* Extract topics */
        restopics = rd_kafka_DeleteTopics_result_topics(res, &restopic_cnt);


        /* Scan topics for proper fields and expected failures. */
        for (i = 0 ; i < (int)restopic_cnt ; i++) {
                const rd_kafka_topic_result_t *terr = restopics[i];

                /* Verify that topic order matches our request. */
                if (strcmp(rd_kafka_topic_result_name(terr), topics[i]))
                        TEST_FAIL_LATER("Topic result order mismatch at #%d: "
                                        "expected %s, got %s",
                                        i, topics[i],
                                        rd_kafka_topic_result_name(terr));

                TEST_SAY("DeleteTopics result: #%d: %s: %s: %s\n",
                         i,
                         rd_kafka_topic_result_name(terr),
                         rd_kafka_err2name(rd_kafka_topic_result_error(terr)),
                         rd_kafka_topic_result_error_string(terr));
                if (rd_kafka_topic_result_error(terr) != exp_topicerr[i])
                        TEST_FAIL_LATER(
                                "Expected %s, not %d: %s",
                                rd_kafka_err2name(exp_topicerr[i]),
                                rd_kafka_topic_result_error(terr),
                                rd_kafka_err2name(rd_kafka_topic_result_error(
                                                          terr)));
        }

        /**
         * Verify that the expected topics were deleted and that the
         * non-expected topics were not. Allow some time for propagation.
         */
        if (op_timeout > 0)
                metadata_tmout = op_timeout + 1000;
        else
                metadata_tmout = 10 * 1000;

        test_wait_metadata_update(rk,
                                  NULL, 0,
                                  exp_not_mdtopics,
                                  exp_not_mdtopic_cnt,
                                  metadata_tmout);

        rd_kafka_event_destroy(rkev);

        for (i = 0 ; i < MY_DEL_TOPICS_CNT ; i++) {
                rd_kafka_DeleteTopic_destroy(del_topics[i]);
                rd_free(topics[i]);
        }

        if (options)
                rd_kafka_AdminOptions_destroy(options);

        if (!useq)
                rd_kafka_queue_destroy(q);

#undef MY_DEL_TOPICS_CNT
}
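
Stripped of the test harness, the same flow reduces to a handful of public admin API calls. A minimal sketch, assuming an already-configured rd_kafka_t handle and with error handling abbreviated (rdkafka.h and stdio.h are assumed to be included):

static void delete_one_topic (rd_kafka_t *rk, const char *topic) {
        rd_kafka_queue_t *q = rd_kafka_queue_new(rk);
        rd_kafka_DeleteTopic_t *dt = rd_kafka_DeleteTopic_new(topic);
        rd_kafka_event_t *rkev;
        const rd_kafka_DeleteTopics_result_t *res;
        const rd_kafka_topic_result_t **restopics;
        size_t cnt;

        rd_kafka_DeleteTopics(rk, &dt, 1, NULL, q);

        /* Block until an event arrives (10s timeout) */
        rkev = rd_kafka_queue_poll(q, 10*1000);
        if (rkev && rd_kafka_event_type(rkev) ==
            RD_KAFKA_EVENT_DELETETOPICS_RESULT) {
                res = rd_kafka_event_DeleteTopics_result(rkev);
                restopics = rd_kafka_DeleteTopics_result_topics(res, &cnt);
                if (cnt == 1)
                        printf("%s: %s\n",
                               rd_kafka_topic_result_name(restopics[0]),
                               rd_kafka_err2str(
                                       rd_kafka_topic_result_error(
                                               restopics[0])));
        }
        if (rkev)
                rd_kafka_event_destroy(rkev);
        rd_kafka_DeleteTopic_destroy(dt);
        rd_kafka_queue_destroy(q);
}
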
Example #28
void rd_kafka_metadata_destroy (const struct rd_kafka_metadata *metadata) {
        rd_free((void *)metadata);
}
void rd_kafka_op_destroy (rd_kafka_op_t *rko) {

	switch (rko->rko_type & ~RD_KAFKA_OP_FLAGMASK)
	{
	case RD_KAFKA_OP_FETCH:
		rd_kafka_msg_destroy(NULL, &rko->rko_u.fetch.rkm);
		/* Decrease refcount on rkbuf to eventually rd_free shared buf*/
		if (rko->rko_u.fetch.rkbuf)
			rd_kafka_buf_handle_op(rko, RD_KAFKA_RESP_ERR__DESTROY);

		break;

	case RD_KAFKA_OP_OFFSET_FETCH:
		if (rko->rko_u.offset_fetch.partitions &&
		    rko->rko_u.offset_fetch.do_free)
			rd_kafka_topic_partition_list_destroy(
				rko->rko_u.offset_fetch.partitions);
		break;

	case RD_KAFKA_OP_OFFSET_COMMIT:
		RD_IF_FREE(rko->rko_u.offset_commit.partitions,
			   rd_kafka_topic_partition_list_destroy);
                RD_IF_FREE(rko->rko_u.offset_commit.reason, rd_free);
		break;

	case RD_KAFKA_OP_SUBSCRIBE:
	case RD_KAFKA_OP_GET_SUBSCRIPTION:
		RD_IF_FREE(rko->rko_u.subscribe.topics,
			   rd_kafka_topic_partition_list_destroy);
		break;

	case RD_KAFKA_OP_ASSIGN:
	case RD_KAFKA_OP_GET_ASSIGNMENT:
		RD_IF_FREE(rko->rko_u.assign.partitions,
			   rd_kafka_topic_partition_list_destroy);
		break;

	case RD_KAFKA_OP_REBALANCE:
		RD_IF_FREE(rko->rko_u.rebalance.partitions,
			   rd_kafka_topic_partition_list_destroy);
		break;

	case RD_KAFKA_OP_NAME:
		RD_IF_FREE(rko->rko_u.name.str, rd_free);
		break;

	case RD_KAFKA_OP_ERR:
	case RD_KAFKA_OP_CONSUMER_ERR:
		RD_IF_FREE(rko->rko_u.err.errstr, rd_free);
		rd_kafka_msg_destroy(NULL, &rko->rko_u.err.rkm);
		break;

	case RD_KAFKA_OP_THROTTLE:
		RD_IF_FREE(rko->rko_u.throttle.nodename, rd_free);
		break;

	case RD_KAFKA_OP_STATS:
		RD_IF_FREE(rko->rko_u.stats.json, rd_free);
		break;

	case RD_KAFKA_OP_XMIT_RETRY:
	case RD_KAFKA_OP_XMIT_BUF:
	case RD_KAFKA_OP_RECV_BUF:
		if (rko->rko_u.xbuf.rkbuf)
			rd_kafka_buf_handle_op(rko, RD_KAFKA_RESP_ERR__DESTROY);

		RD_IF_FREE(rko->rko_u.xbuf.rkbuf, rd_kafka_buf_destroy);
		break;

	case RD_KAFKA_OP_DR:
		rd_kafka_msgq_purge(rko->rko_rk, &rko->rko_u.dr.msgq);
		if (rko->rko_u.dr.do_purge2)
			rd_kafka_msgq_purge(rko->rko_rk, &rko->rko_u.dr.msgq2);

		if (rko->rko_u.dr.s_rkt)
			rd_kafka_topic_destroy0(rko->rko_u.dr.s_rkt);
		break;

	case RD_KAFKA_OP_OFFSET_RESET:
		RD_IF_FREE(rko->rko_u.offset_reset.reason, rd_free);
		break;

        case RD_KAFKA_OP_METADATA:
                RD_IF_FREE(rko->rko_u.metadata.md, rd_kafka_metadata_destroy);
                break;

        case RD_KAFKA_OP_LOG:
                rd_free(rko->rko_u.log.str);
                break;

	default:
		break;
	}

        if (rko->rko_type & RD_KAFKA_OP_CB && rko->rko_op_cb) {
                rd_kafka_op_res_t res;
                /* Let callback clean up */
                rko->rko_err = RD_KAFKA_RESP_ERR__DESTROY;
                res = rko->rko_op_cb(rko->rko_rk, NULL, rko);
                assert(res != RD_KAFKA_OP_RES_YIELD);
        }

	RD_IF_FREE(rko->rko_rktp, rd_kafka_toppar_destroy);

	rd_kafka_replyq_destroy(&rko->rko_replyq);

#if ENABLE_DEVEL
        if (rd_atomic32_sub(&rd_kafka_op_cnt, 1) < 0)
                rd_kafka_assert(NULL, !*"rd_kafka_op_cnt < 0");
#endif

	rd_free(rko);
}
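
rd_kafka_op_destroy() above leans on RD_IF_FREE for every optional member. Its definition is not part of this excerpt; the behavior implied by the call sites is a null-guarded destructor invocation, roughly like this sketch (the actual librdkafka macro may differ in detail):

/* Call the destructor only when the pointer is non-NULL */
#define RD_IF_FREE(ptr, destructor) do {        \
                if ((ptr))                      \
                        destructor(ptr);        \
        } while (0)
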
/**
 * @brief Build client-final-message
 * @returns -1 on error.
 */
static int
rd_kafka_sasl_scram_build_client_final_message (
        rd_kafka_transport_t *rktrans,
        const rd_chariov_t *salt,
        const char *server_nonce,
        const rd_chariov_t *server_first_msg,
        int itcnt, rd_chariov_t *out) {
        struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state;
        const rd_kafka_conf_t *conf = &rktrans->rktrans_rkb->rkb_rk->rk_conf;
        rd_chariov_t SaslPassword =
                { .ptr = conf->sasl.password,
                  .size = strlen(conf->sasl.password) };
        rd_chariov_t SaltedPassword =
                { .ptr = rd_alloca(EVP_MAX_MD_SIZE) };
        rd_chariov_t ClientKey =
                { .ptr = rd_alloca(EVP_MAX_MD_SIZE) };
        rd_chariov_t ServerKey =
                { .ptr = rd_alloca(EVP_MAX_MD_SIZE) };
        rd_chariov_t StoredKey =
                { .ptr = rd_alloca(EVP_MAX_MD_SIZE) };
        rd_chariov_t AuthMessage = RD_ZERO_INIT;
        rd_chariov_t ClientSignature =
                { .ptr = rd_alloca(EVP_MAX_MD_SIZE) };
        rd_chariov_t ServerSignature =
                { .ptr = rd_alloca(EVP_MAX_MD_SIZE) };
        const rd_chariov_t ClientKeyVerbatim =
                { .ptr = "Client Key", .size = 10 };
        const rd_chariov_t ServerKeyVerbatim =
                { .ptr = "Server Key", .size = 10 };
        rd_chariov_t ClientProof =
                { .ptr = rd_alloca(EVP_MAX_MD_SIZE) };
        rd_chariov_t client_final_msg_wo_proof;
        char *ClientProofB64;
        int i;

        /* Constructing the ClientProof attribute (p):
         *
         * p = Base64-encoded ClientProof
         * SaltedPassword  := Hi(Normalize(password), salt, i)
         * ClientKey       := HMAC(SaltedPassword, "Client Key")
         * StoredKey       := H(ClientKey)
         * AuthMessage     := client-first-message-bare + "," +
         *                    server-first-message + "," +
         *                    client-final-message-without-proof
         * ClientSignature := HMAC(StoredKey, AuthMessage)
         * ClientProof     := ClientKey XOR ClientSignature
         * ServerKey       := HMAC(SaltedPassword, "Server Key")
         * ServerSignature := HMAC(ServerKey, AuthMessage)
         */

        /* SaltedPassword  := Hi(Normalize(password), salt, i) */
        if (rd_kafka_sasl_scram_Hi(
                    rktrans, &SaslPassword, salt,
                    itcnt, &SaltedPassword) == -1)
                return -1;

        /* ClientKey       := HMAC(SaltedPassword, "Client Key") */
        if (rd_kafka_sasl_scram_HMAC(
                    rktrans, &SaltedPassword, &ClientKeyVerbatim,
                    &ClientKey) == -1)
                return -1;

        /* StoredKey       := H(ClientKey) */
        if (rd_kafka_sasl_scram_H(rktrans, &ClientKey, &StoredKey) == -1)
                return -1;

        /* client-final-message-without-proof */
        rd_kafka_sasl_scram_build_client_final_message_wo_proof(
                state, server_nonce, &client_final_msg_wo_proof);

        /* AuthMessage     := client-first-message-bare + "," +
         *                    server-first-message + "," +
         *                    client-final-message-without-proof */
        AuthMessage.size =
                state->first_msg_bare.size + 1 +
                server_first_msg->size + 1 +
                client_final_msg_wo_proof.size;
        AuthMessage.ptr = rd_alloca(AuthMessage.size+1);
        rd_snprintf(AuthMessage.ptr, AuthMessage.size+1,
                    "%.*s,%.*s,%.*s",
                    (int)state->first_msg_bare.size, state->first_msg_bare.ptr,
                    (int)server_first_msg->size, server_first_msg->ptr,
                    (int)client_final_msg_wo_proof.size,
                    client_final_msg_wo_proof.ptr);

        /*
         * Calculate ServerSignature for later verification when
         * server-final-message is received.
         */

        /* ServerKey       := HMAC(SaltedPassword, "Server Key") */
        if (rd_kafka_sasl_scram_HMAC(
                    rktrans, &SaltedPassword, &ServerKeyVerbatim,
                    &ServerKey) == -1) {
                rd_free(client_final_msg_wo_proof.ptr);
                return -1;
        }

        /* ServerSignature := HMAC(ServerKey, AuthMessage) */
        if (rd_kafka_sasl_scram_HMAC(rktrans, &ServerKey,
                                     &AuthMessage, &ServerSignature) == -1) {
                rd_free(client_final_msg_wo_proof.ptr);
                return -1;
        }

        /* Store the Base64 encoded ServerSignature for quick comparison */
        state->ServerSignatureB64 = rd_base64_encode(&ServerSignature);


        /*
         * Continue with client-final-message
         */

        /* ClientSignature := HMAC(StoredKey, AuthMessage) */
        if (rd_kafka_sasl_scram_HMAC(rktrans, &StoredKey,
                                     &AuthMessage, &ClientSignature) == -1) {
                rd_free(client_final_msg_wo_proof.ptr);
                return -1;
        }

        /* ClientProof     := ClientKey XOR ClientSignature */
        assert(ClientKey.size == ClientSignature.size);
        for (i = 0 ; i < (int)ClientKey.size ; i++)
                ClientProof.ptr[i] = ClientKey.ptr[i] ^ ClientSignature.ptr[i];
        ClientProof.size = ClientKey.size;


        /* Base64 encoded ClientProof */
        ClientProofB64 = rd_base64_encode(&ClientProof);

        /* Construct client-final-message */
        out->size = client_final_msg_wo_proof.size +
                strlen(",p=") + strlen(ClientProofB64);
        out->ptr = rd_malloc(out->size + 1);

        rd_snprintf(out->ptr, out->size+1,
                    "%.*s,p=%s",
                    (int)client_final_msg_wo_proof.size,
                    client_final_msg_wo_proof.ptr,
                    ClientProofB64);
        rd_free(ClientProofB64);
        rd_free(client_final_msg_wo_proof.ptr);

        return 0;
}
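
The derivation spelled out in the comment block inside the function maps directly onto plain OpenSSL primitives: Hi() is PBKDF2 with HMAC as the PRF. Below is a self-contained sketch for SCRAM-SHA-256 that computes ClientProof without the rd_kafka_sasl_scram_* wrappers; all names are local to this example (build with: cc -c example.c -lcrypto):

#include <string.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <openssl/sha.h>

static void
scram_sha256_client_proof (const char *password,
                           const unsigned char *salt, int salt_len,
                           int iterations, const char *auth_message,
                           unsigned char proof[SHA256_DIGEST_LENGTH]) {
        unsigned char salted[SHA256_DIGEST_LENGTH];
        unsigned char client_key[SHA256_DIGEST_LENGTH];
        unsigned char stored_key[SHA256_DIGEST_LENGTH];
        unsigned char client_sig[SHA256_DIGEST_LENGTH];
        unsigned int len;
        int i;

        /* SaltedPassword := Hi(Normalize(password), salt, i)
         * (Hi is PBKDF2-HMAC per RFC 5802) */
        PKCS5_PBKDF2_HMAC(password, (int)strlen(password),
                          salt, salt_len, iterations,
                          EVP_sha256(), SHA256_DIGEST_LENGTH, salted);

        /* ClientKey := HMAC(SaltedPassword, "Client Key") */
        HMAC(EVP_sha256(), salted, SHA256_DIGEST_LENGTH,
             (const unsigned char *)"Client Key", 10, client_key, &len);

        /* StoredKey := H(ClientKey) */
        SHA256(client_key, SHA256_DIGEST_LENGTH, stored_key);

        /* ClientSignature := HMAC(StoredKey, AuthMessage) */
        HMAC(EVP_sha256(), stored_key, SHA256_DIGEST_LENGTH,
             (const unsigned char *)auth_message, strlen(auth_message),
             client_sig, &len);

        /* ClientProof := ClientKey XOR ClientSignature */
        for (i = 0 ; i < SHA256_DIGEST_LENGTH ; i++)
                proof[i] = client_key[i] ^ client_sig[i];
}
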


/**
 * @brief Handle first message from server
 *
 * Parse server response which looks something like:
 * "r=fyko+d2lbbFgONR....,s=QSXCR+Q6sek8bf92,i=4096"
 *
 * @returns -1 on error.
 */
static int
rd_kafka_sasl_scram_handle_server_first_message (rd_kafka_transport_t *rktrans,
                                                 const rd_chariov_t *in,
                                                 rd_chariov_t *out,
                                                 char *errstr,
                                                 size_t errstr_size) {
        struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state;
        char *server_nonce;
        rd_chariov_t salt_b64, salt;
        char *itcntstr;
        const char *endptr;
        int itcnt;
        char *attr_m;

        /* Mandatory future extension check */
        if ((attr_m = rd_kafka_sasl_scram_get_attr(
                     in, 'm', NULL, NULL, 0))) {
                rd_snprintf(errstr, errstr_size,
                            "Unsupported mandatory SCRAM extension");
                rd_free(attr_m);
                return -1;
        }

        /* Server nonce */
        if (!(server_nonce = rd_kafka_sasl_scram_get_attr(
                      in, 'r',
                      "Server nonce in server-first-message",
                      errstr, errstr_size)))
                return -1;

        if (strlen(server_nonce) <= state->cnonce.size ||
            strncmp(state->cnonce.ptr, server_nonce, state->cnonce.size)) {
                rd_snprintf(errstr, errstr_size,
                            "Server/client nonce mismatch in "
                            "server-first-message");
                rd_free(server_nonce);
                return -1;
        }

        /* Salt (Base64) */
        if (!(salt_b64.ptr = rd_kafka_sasl_scram_get_attr(
                      in, 's',
                      "Salt in server-first-message",
                      errstr, errstr_size))) {
                rd_free(server_nonce);
                return -1;
        }
        salt_b64.size = strlen(salt_b64.ptr);

        /* Convert Salt to binary */
        if (rd_base64_decode(&salt_b64, &salt) == -1) {
                rd_snprintf(errstr, errstr_size,
                            "Invalid Base64 Salt in server-first-message");
                rd_free(server_nonce);
                rd_free(salt_b64.ptr);
                return -1;
        }
        rd_free(salt_b64.ptr);

        /* Iteration count (as string) */
        if (!(itcntstr = rd_kafka_sasl_scram_get_attr(
                      in, 'i',
                      "Iteration count in server-first-message",
                      errstr, errstr_size))) {
                rd_free(server_nonce);
                rd_free(salt.ptr);
                return -1;
        }

        /* Iteration count (as int) */
        errno = 0;
        itcnt = (int)strtoul(itcntstr, (char **)&endptr, 10);
        if (itcntstr == endptr || *endptr != '\0' || errno != 0 ||
            itcnt > 1000000) {
                rd_snprintf(errstr, errstr_size,
                            "Invalid value (not integer or too large) "
                            "for Iteration count in server-first-message");
                rd_free(server_nonce);
                rd_free(salt.ptr);
                rd_free(itcntstr);
                return -1;
        }
        rd_free(itcntstr);

        /* Build client-final-message */
        if (rd_kafka_sasl_scram_build_client_final_message(
                    rktrans, &salt, server_nonce, in, itcnt, out) == -1) {
                rd_snprintf(errstr, errstr_size,
                            "Failed to build SCRAM client-final-message");
                rd_free(salt.ptr);
                rd_free(server_nonce);
                return -1;
        }

        rd_free(server_nonce);
        rd_free(salt.ptr);

        return 0;
}
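
The parser above relies on rd_kafka_sasl_scram_get_attr(), which is not shown in this excerpt, to pull single-letter attributes out of the comma-separated SCRAM message ("r=...,s=...,i=..."). As an illustration of the message format only (not the actual librdkafka implementation), such a lookup can be sketched as:

#include <stdlib.h>
#include <string.h>

/* Return a newly allocated copy of the value of attribute `attr`
 * ("X=value") in `msg`, or NULL if the attribute is missing. */
static char *scram_get_attr (const char *msg, size_t size, char attr) {
        const char *p = msg, *end = msg + size;

        while (p < end) {
                const char *eom = memchr(p, ',', (size_t)(end - p));
                size_t fieldlen = eom ? (size_t)(eom - p)
                                      : (size_t)(end - p);

                if (fieldlen >= 2 && p[0] == attr && p[1] == '=') {
                        char *val = malloc(fieldlen - 2 + 1);
                        memcpy(val, p + 2, fieldlen - 2);
                        val[fieldlen - 2] = '\0';
                        return val;
                }
                if (!eom)
                        break;
                p = eom + 1;
        }
        return NULL;
}
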

/**
 * @brief Handle server-final-message
 * 
 *        This is the end of authentication and the SCRAM state
 *        will be freed at the end of this function regardless of
 *        authentication outcome.
 *
 * @returns -1 on failure
 */
static int
rd_kafka_sasl_scram_handle_server_final_message (
        rd_kafka_transport_t *rktrans,
        const rd_chariov_t *in,
        char *errstr, size_t errstr_size) {
        struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state;
        char *attr_v, *attr_e;

        if ((attr_e = rd_kafka_sasl_scram_get_attr(
                            in, 'e', "server-error in server-final-message",
                            errstr, errstr_size))) {
                /* Authentication failed */

                rd_snprintf(errstr, errstr_size,
                            "SASL SCRAM authentication failed: "
                            "broker responded with %s",
                            attr_e);
                rd_free(attr_e);
                return -1;

        } else if ((attr_v = rd_kafka_sasl_scram_get_attr(
                     in, 'v', "verifier in server-final-message",
                     errstr, errstr_size))) {
                const rd_kafka_conf_t *conf;

                /* Authentication successful on server,
                 * but we need to verify the ServerSignature too. */
                rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY | RD_KAFKA_DBG_BROKER,
                           "SCRAMAUTH",
                           "SASL SCRAM authentication successful on server: "
                           "verifying ServerSignature");

                if (strcmp(attr_v, state->ServerSignatureB64)) {
                        rd_snprintf(errstr, errstr_size,
                                    "SASL SCRAM authentication failed: "
                                    "ServerSignature mismatch "
                                    "(server's %s != ours %s)",
                                    attr_v, state->ServerSignatureB64);
                        rd_free(attr_v);
                        return -1;
                }
                rd_free(attr_v);

                conf = &rktrans->rktrans_rkb->rkb_rk->rk_conf;

                rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY | RD_KAFKA_DBG_BROKER,
                           "SCRAMAUTH",
                           "Authenticated as %s using %s",
                           conf->sasl.username,
                           conf->sasl.mechanisms);

                rd_kafka_sasl_auth_done(rktrans);
                return 0;

        } else {
                rd_snprintf(errstr, errstr_size,
                            "SASL SCRAM authentication failed: "
                            "no verifier or server-error returned from broker");
                return -1;
        }
}



/**
 * @brief Build client-first-message
 */
static void
rd_kafka_sasl_scram_build_client_first_message (
        rd_kafka_transport_t *rktrans,
        rd_chariov_t *out) {
        char *sasl_username;
        struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state;
        const rd_kafka_conf_t *conf = &rktrans->rktrans_rkb->rkb_rk->rk_conf;

        rd_kafka_sasl_scram_generate_nonce(&state->cnonce);

        sasl_username = rd_kafka_sasl_safe_string(conf->sasl.username);

        out->size = strlen("n,,n=,r=") + strlen(sasl_username) +
                state->cnonce.size;
        out->ptr = rd_malloc(out->size+1);

        rd_snprintf(out->ptr, out->size+1,
                    "n,,n=%s,r=%.*s",
                    sasl_username,
                    (int)state->cnonce.size, state->cnonce.ptr);
        rd_free(sasl_username);

        /* Save client-first-message-bare (skip gs2-header) */
        state->first_msg_bare.size = out->size-3;
        state->first_msg_bare.ptr  = rd_memdup(out->ptr+3,
                                               state->first_msg_bare.size);
}
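
As a worked example: with sasl.username "alice" and a generated client nonce "abc123", the rd_snprintf() above produces the client-first-message "n,,n=alice,r=abc123", and the saved client-first-message-bare is "n=alice,r=abc123", i.e. everything after the three-byte gs2-header "n,," (hence the out->ptr+3 and out->size-3).
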



/**
 * @brief SASL SCRAM client state machine
 * @returns -1 on failure (errstr set), else 0.
 */
static int rd_kafka_sasl_scram_fsm (rd_kafka_transport_t *rktrans,
                                    const rd_chariov_t *in,
                                    char *errstr, size_t errstr_size) {
        static const char *state_names[] = {
                "client-first-message",
                "server-first-message",
                "client-final-message",
        };
        struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state;
        rd_chariov_t out = RD_ZERO_INIT;
        int r = -1;
        rd_ts_t ts_start = rd_clock();
        int prev_state = state->state;

        rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASLSCRAM",
                   "SASL SCRAM client in state %s",
                   state_names[state->state]);

        switch (state->state)
        {
        case RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FIRST_MESSAGE:
                rd_dassert(!in); /* Not expecting any server-input */

                rd_kafka_sasl_scram_build_client_first_message(rktrans, &out);
                state->state = RD_KAFKA_SASL_SCRAM_STATE_SERVER_FIRST_MESSAGE;
                break;


        case RD_KAFKA_SASL_SCRAM_STATE_SERVER_FIRST_MESSAGE:
                rd_dassert(in); /* Requires server-input */

                if (rd_kafka_sasl_scram_handle_server_first_message(
                             rktrans, in, &out, errstr, errstr_size) == -1)
                        return -1;

                state->state = RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FINAL_MESSAGE;
                break;

        case RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FINAL_MESSAGE:
                rd_dassert(in);  /* Requires server-input */

                r = rd_kafka_sasl_scram_handle_server_final_message(
                        rktrans, in, errstr, errstr_size);
                break;
        }

        if (out.ptr) {
                r = rd_kafka_sasl_send(rktrans, out.ptr, (int)out.size,
                                       errstr, errstr_size);
                rd_free(out.ptr);
        }

        ts_start = (rd_clock() - ts_start) / 1000;
        if (ts_start >= 100)
                rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SCRAM",
                           "SASL SCRAM state %s handled in %"PRId64"ms",
                           state_names[prev_state], ts_start);


        return r;
}


/**
 * @brief Handle received frame from broker.
 */
static int rd_kafka_sasl_scram_recv (rd_kafka_transport_t *rktrans,
                                     const void *buf, size_t size,
                                     char *errstr, size_t errstr_size) {
        const rd_chariov_t in = { .ptr = (char *)buf, .size = size };
        return rd_kafka_sasl_scram_fsm(rktrans, &in, errstr, errstr_size);
}


/**
 * @brief Initialize and start SASL SCRAM (builtin) authentication.
 *
 * Returns 0 on successful init and -1 on error.
 *
 * @locality broker thread
 */
static int rd_kafka_sasl_scram_client_new (rd_kafka_transport_t *rktrans,
                                           const char *hostname,
                                           char *errstr, size_t errstr_size) {
        struct rd_kafka_sasl_scram_state *state;

        state = rd_calloc(1, sizeof(*state));
        state->state = RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FIRST_MESSAGE;
        rktrans->rktrans_sasl.state = state;

        /* Kick off the FSM */
        return rd_kafka_sasl_scram_fsm(rktrans, NULL, errstr, errstr_size);
}



/**
 * @brief Validate SCRAM config and look up the hash function
 */
static int rd_kafka_sasl_scram_conf_validate (rd_kafka_t *rk,
                                              char *errstr,
                                              size_t errstr_size) {
        const char *mech = rk->rk_conf.sasl.mechanisms;

        if (!rk->rk_conf.sasl.username || !rk->rk_conf.sasl.password) {
                rd_snprintf(errstr, errstr_size,
                            "sasl.username and sasl.password must be set");
                return -1;
        }

        if (!strcmp(mech, "SCRAM-SHA-1")) {
                rk->rk_conf.sasl.scram_evp = EVP_sha1();
                rk->rk_conf.sasl.scram_H = SHA1;
                rk->rk_conf.sasl.scram_H_size = SHA_DIGEST_LENGTH;
        } else if (!strcmp(mech, "SCRAM-SHA-256")) {
                rk->rk_conf.sasl.scram_evp = EVP_sha256();
                rk->rk_conf.sasl.scram_H = SHA256;
                rk->rk_conf.sasl.scram_H_size = SHA256_DIGEST_LENGTH;
        } else if (!strcmp(mech, "SCRAM-SHA-512")) {
                rk->rk_conf.sasl.scram_evp = EVP_sha512();
                rk->rk_conf.sasl.scram_H = SHA512;
                rk->rk_conf.sasl.scram_H_size = SHA512_DIGEST_LENGTH;
        } else {
                rd_snprintf(errstr, errstr_size,
                            "Unsupported hash function: %s "
                            "(try SCRAM-SHA-512)",
                            mech);
                return -1;
        }

        return 0;
}
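
Selecting one of the mechanisms validated above is done through standard client configuration. A minimal sketch (broker address and credentials are placeholders, and return-value checks against RD_KAFKA_CONF_OK are omitted for brevity):

static rd_kafka_conf_t *make_scram_conf (void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        rd_kafka_conf_set(conf, "security.protocol", "sasl_ssl",
                          errstr, sizeof(errstr));
        rd_kafka_conf_set(conf, "sasl.mechanisms", "SCRAM-SHA-256",
                          errstr, sizeof(errstr));
        rd_kafka_conf_set(conf, "sasl.username", "alice",
                          errstr, sizeof(errstr));
        rd_kafka_conf_set(conf, "sasl.password", "alice-secret",
                          errstr, sizeof(errstr));

        return conf;
}
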




const struct rd_kafka_sasl_provider rd_kafka_sasl_scram_provider = {
        .name          = "SCRAM (builtin)",
        .client_new    = rd_kafka_sasl_scram_client_new,
        .recv          = rd_kafka_sasl_scram_recv,
        .close         = rd_kafka_sasl_scram_close,
        .conf_validate = rd_kafka_sasl_scram_conf_validate,
};