Example 1
/**
 * Take action when the offset for a toppar becomes unusable.
 *
 * Locality: toppar handler thread
 * Locks: toppar_lock() MUST be held
 */
void rd_kafka_offset_reset (rd_kafka_toppar_t *rktp, int64_t err_offset,
			    rd_kafka_resp_err_t err, const char *reason) {
	int64_t offset = RD_KAFKA_OFFSET_INVALID;
	rd_kafka_op_t *rko;

        /* Enqueue op for toppar handler thread if we're on the wrong thread. */
        if (!thrd_is_current(rktp->rktp_rkt->rkt_rk->rk_thread)) {
                rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_CALLBACK);
                rko->rko_op_cb = rd_kafka_offset_reset_op_cb;
                rko->rko_rktp = rd_kafka_toppar_keep(rktp);
                rko->rko_err = err;
                rko->rko_offset = err_offset;
                rko->rko_flags |= RD_KAFKA_OP_F_FREE;
                rko->rko_payload = rd_strdup(reason);
                rko->rko_len = strlen(reason);
                rd_kafka_q_enq(&rktp->rktp_ops, rko);
                return;
        }

	if (err_offset == RD_KAFKA_OFFSET_INVALID || err)
		offset = rktp->rktp_rkt->rkt_conf.auto_offset_reset;
	else
		offset = err_offset;

	if (offset == RD_KAFKA_OFFSET_INVALID) {
		/* Error, auto.offset.reset tells us to error out. */
		rko = rd_kafka_op_new(RD_KAFKA_OP_CONSUMER_ERR);

		rko->rko_err                 = err;
		rko->rko_rkmessage.offset    = err_offset;
		rko->rko_rkmessage.partition = rktp->rktp_partition;
		rko->rko_payload             = rd_strdup(reason);
		rko->rko_len                 = strlen(rko->rko_payload);
		rko->rko_flags              |= RD_KAFKA_OP_F_FREE;
                rko->rko_rktp                = rd_kafka_toppar_keep(rktp);

		rd_kafka_q_enq(&rktp->rktp_fetchq, rko);
                rd_kafka_toppar_set_fetch_state(
			rktp, RD_KAFKA_TOPPAR_FETCH_NONE);

	} else {
		/* Query logical offset */
		rktp->rktp_query_offset = offset;
                rd_kafka_toppar_set_fetch_state(
			rktp, RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY);
	}

	rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
		     "%s [%"PRId32"]: offset reset (at offset %s) "
		     "to %s: %s: %s",
		     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
		     rd_kafka_offset2str(err_offset),
                     rd_kafka_offset2str(offset),
                     reason, rd_kafka_err2str(err));

	if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY)
		rd_kafka_toppar_offset_request(rktp, rktp->rktp_query_offset, 0);
}
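
The CALLBACK op above points at rd_kafka_offset_reset_op_cb(), which is not shown. A hedged sketch of what that callback plausibly looks like, assuming the op callback signature from Example 2, the rd_kafka_toppar_lock()/unlock() helpers, and a plain-pointer rko_rktp:

/* Hypothetical sketch of the op callback dispatched on the toppar handler
 * thread; field names follow the enqueue code above, everything else is
 * assumed. */
static void rd_kafka_offset_reset_op_cb (rd_kafka_t *rk, rd_kafka_op_t *rko) {
        rd_kafka_toppar_t *rktp = rko->rko_rktp; /* assumed plain pointer */

        rd_kafka_toppar_lock(rktp);
        rd_kafka_offset_reset(rktp, rko->rko_offset,
                              rko->rko_err, rko->rko_payload);
        rd_kafka_toppar_unlock(rktp);
}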
Example 2
/**
 * Commit a list of offsets asynchronously. Response will be queued on 'replyq'.
 * Optional 'op_cb' will be set on the requesting op.
 */
rd_kafka_resp_err_t
rd_kafka_commit0 (rd_kafka_t *rk,
                  const rd_kafka_topic_partition_list_t *offsets,
                  rd_kafka_q_t *replyq, void (*op_cb) (rd_kafka_t *,
                                                       rd_kafka_op_t *)) {
        rd_kafka_cgrp_t *rkcg;
        rd_kafka_op_t *rko;

        if (!(rkcg = rd_kafka_cgrp_get(rk)))
                return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;

        rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_COMMIT);
        rko->rko_op_cb = op_cb;
        rko->rko_replyq = replyq;
        if (replyq)
                rd_kafka_q_keep(rko->rko_replyq);

        if (offsets)
                rd_kafka_op_payload_set(
                        rko, rd_kafka_topic_partition_list_copy(offsets),
                        (void *)rd_kafka_topic_partition_list_destroy);

        rd_kafka_q_enq(&rkcg->rkcg_ops, rko);

        return RD_KAFKA_RESP_ERR_NO_ERROR;
}
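
A hedged caller-side sketch for this version of rd_kafka_commit0(): the my_commit_async name is hypothetical, and routing the reply to &rk->rk_rep (the main reply queue, as used in Example 9) is an illustrative choice rather than the library's actual wrapper.

/* Hypothetical wrapper: commit 'offsets' asynchronously with no op_cb and
 * have the resulting op delivered to the main reply queue. */
static rd_kafka_resp_err_t my_commit_async (rd_kafka_t *rk,
                                            const rd_kafka_topic_partition_list_t *offsets) {
        return rd_kafka_commit0(rk, offsets, &rk->rk_rep, NULL);
}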
Example 3
/**
 * Commit a list of offsets asynchronously. Response will be queued on 'replyq'.
 * Optional \p cb will be set on the requesting op.
 *
 * Makes a copy of \p offsets (may be NULL for current assignment)
 */
static rd_kafka_resp_err_t
rd_kafka_commit0 (rd_kafka_t *rk,
                  const rd_kafka_topic_partition_list_t *offsets,
		  rd_kafka_toppar_t *rktp,
                  rd_kafka_replyq_t replyq,
		  void (*cb) (rd_kafka_t *rk,
			      rd_kafka_resp_err_t err,
			      rd_kafka_topic_partition_list_t *offsets,
			      void *opaque),
		  void *opaque) {
        rd_kafka_cgrp_t *rkcg;
        rd_kafka_op_t *rko;

        if (!(rkcg = rd_kafka_cgrp_get(rk)))
                return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;

        rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_COMMIT);
	rko->rko_replyq = replyq;
	rko->rko_u.offset_commit.cb = cb;
	rko->rko_u.offset_commit.opaque = opaque;
	if (rktp)
		rko->rko_rktp = rd_kafka_toppar_keep(rktp);

        if (offsets)
		rko->rko_u.offset_commit.partitions =
                        rd_kafka_topic_partition_list_copy(offsets);

        rd_kafka_q_enq(rkcg->rkcg_ops, rko);

        return RD_KAFKA_RESP_ERR_NO_ERROR;
}
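
A hedged caller-side sketch for this newer rd_kafka_commit0(): a fire-and-forget commit of the current assignment. RD_KAFKA_NO_REPLYQ is assumed to be the code base's empty reply-queue helper; the function name is hypothetical.

/* Hypothetical wrapper: commit whatever is currently assigned and ignore
 * the outcome (no reply queue, no callback). */
static rd_kafka_resp_err_t commit_current_assignment (rd_kafka_t *rk) {
        return rd_kafka_commit0(rk, NULL /* current assignment */,
                                NULL /* no toppar */,
                                RD_KAFKA_NO_REPLYQ /* assumed helper */,
                                NULL /* no cb */, NULL /* opaque */);
}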
Example 4
/**
 * Enqueue ERR__THROTTLE op, if desired.
 */
void rd_kafka_op_throttle_time (rd_kafka_broker_t *rkb,
				rd_kafka_q_t *rkq,
				int throttle_time) {
	rd_kafka_op_t *rko;

	rd_avg_add(&rkb->rkb_avg_throttle, throttle_time);

	if (!rkb->rkb_rk->rk_conf.quota_support)
		return;

	/* We send throttle events when:
	 *  - throttle_time > 0
	 *  - throttle_time == 0 and last throttle_time > 0
	 */
	if (!throttle_time && !rd_atomic32_get(&rkb->rkb_rk->rk_last_throttle))
		return;

	rd_atomic32_set(&rkb->rkb_rk->rk_last_throttle, throttle_time);

	rko = rd_kafka_op_new(RD_KAFKA_OP_THROTTLE);
	rko->rko_nodename      = rd_strdup(rkb->rkb_nodename);
	rko->rko_flags        |= RD_KAFKA_OP_F_FREE; /* free nodename */
	rko->rko_nodeid        = rkb->rkb_nodeid;
	rko->rko_throttle_time = throttle_time;
	rd_kafka_q_enq(rkq, rko);
}
Example 5
/**
 * Take action when the offset for a toppar becomes unusable.
 * NOTE: toppar_lock(rktp) must be held
 */
void rd_kafka_offset_reset (rd_kafka_toppar_t *rktp, int64_t err_offset,
			    rd_kafka_resp_err_t err, const char *reason) {
	int64_t offset = RD_KAFKA_OFFSET_ERROR;
	rd_kafka_op_t *rko;
	int64_t offset_reset = rktp->rktp_rkt->rkt_conf.auto_offset_reset;

	if (offset_reset == RD_KAFKA_OFFSET_END ||
	    offset_reset == RD_KAFKA_OFFSET_BEGINNING ||
            offset_reset <= RD_KAFKA_OFFSET_TAIL_BASE) {
		offset = rktp->rktp_rkt->rkt_conf.auto_offset_reset;
		rktp->rktp_query_offset = offset;
		rktp->rktp_fetch_state = RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY;

	} else if (offset_reset == RD_KAFKA_OFFSET_ERROR) {
		rko = rd_kafka_op_new(RD_KAFKA_OP_ERR);

		rko->rko_err                 = err;
		rko->rko_rkmessage.offset    = err_offset;
		rko->rko_rkmessage.rkt       = rktp->rktp_rkt;
		rko->rko_rkmessage.partition = rktp->rktp_partition;
		rko->rko_payload             = strdup(reason);
		rko->rko_len                 = strlen(rko->rko_payload);
		rko->rko_flags              |= RD_KAFKA_OP_F_FREE;
                rd_kafka_topic_keep(rko->rko_rkmessage.rkt);

		rd_kafka_q_enq(&rktp->rktp_fetchq, rko);
		rktp->rktp_fetch_state = RD_KAFKA_TOPPAR_FETCH_NONE;
	}

	rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
		     "%s [%"PRId32"]: offset reset (at offset %"PRId64") "
		     "to %"PRId64": %s: %s",
		     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
		     err_offset, offset, reason, rd_kafka_err2str(err));
}
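Example 6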
/**
 * @brief Create new callback op for type \p type
 */
rd_kafka_op_t *rd_kafka_op_new_cb (rd_kafka_t *rk,
                                   rd_kafka_op_type_t type,
                                   rd_kafka_op_cb_t *cb) {
        rd_kafka_op_t *rko;
        rko = rd_kafka_op_new(type | RD_KAFKA_OP_CB);
        rko->rko_op_cb = cb;
        rko->rko_rk = rk;
        return rko;
}
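
A hedged sketch of using rd_kafka_op_new_cb(): build a callback op and enqueue it so the thread serving the destination queue runs the callback. The my_wakeup_cb/enqueue_wakeup names are hypothetical, and RD_KAFKA_OP_CALLBACK is assumed to still exist as a generic op type in this version.

/* Hypothetical callback, executed by whichever thread serves the queue. */
static void my_wakeup_cb (rd_kafka_t *rk, rd_kafka_op_t *rko) {
        /* ... react to being woken up ... */
}

/* Create the callback op and enqueue it on the destination queue. */
static void enqueue_wakeup (rd_kafka_t *rk, rd_kafka_q_t *rkq) {
        rd_kafka_op_t *rko = rd_kafka_op_new_cb(rk, RD_KAFKA_OP_CALLBACK,
                                                my_wakeup_cb);
        rd_kafka_q_enq(rkq, rko);
}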
Example 7
/**
 * Creates a reply op based on 'rko_orig'.
 * If 'rko_orig' has rko_op_cb set, the reply op will be of type
 * RD_KAFKA_OP_CALLBACK, else the reply type will be the original rko_type
 * ORed with RD_KAFKA_OP_REPLY.
 */
rd_kafka_op_t *rd_kafka_op_new_reply (rd_kafka_op_t *rko_orig) {
        rd_kafka_op_t *rko;

        rko = rd_kafka_op_new(rko_orig->rko_op_cb ?
                              RD_KAFKA_OP_CALLBACK :
                              (rko_orig->rko_type | RD_KAFKA_OP_REPLY));
        rko->rko_op_cb = rko_orig->rko_op_cb;
        rko->rko_version = rko_orig->rko_version;

        return rko;
}
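Example 8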
/**
 * Creates a reply op based on 'rko_orig'.
 * If 'rko_orig' has rko_op_cb set, the reply op type will be ORed with
 * RD_KAFKA_OP_CB, else the reply type will be the original rko_type ORed
 * with RD_KAFKA_OP_REPLY.
 */
rd_kafka_op_t *rd_kafka_op_new_reply (rd_kafka_op_t *rko_orig,
				      rd_kafka_resp_err_t err) {
        rd_kafka_op_t *rko;

        rko = rd_kafka_op_new(rko_orig->rko_type |
			      (rko_orig->rko_op_cb ?
			       RD_KAFKA_OP_CB : RD_KAFKA_OP_REPLY));
	rd_kafka_op_get_reply_version(rko, rko_orig);
	rko->rko_op_cb   = rko_orig->rko_op_cb;
	rko->rko_err     = err;
	if (rko_orig->rko_rktp)
		rko->rko_rktp = rd_kafka_toppar_keep(
			rd_kafka_toppar_s2i(rko_orig->rko_rktp));

        return rko;
}
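
A hedged handler-side sketch for this rd_kafka_op_new_reply(): answer an incoming op with an error reply on the reply queue recorded on the original op. rd_kafka_replyq_enq() is assumed to be the code base's enqueue helper for rd_kafka_replyq_t; a real handler would also check that a reply queue was actually set.

/* Hypothetical error-reply path in a request handler. */
static void reply_with_error (rd_kafka_op_t *rko_orig, rd_kafka_resp_err_t err) {
        rd_kafka_op_t *reply = rd_kafka_op_new_reply(rko_orig, err);

        /* Assumed helper: enqueue on the original op's reply queue. */
        rd_kafka_replyq_enq(&rko_orig->rko_replyq, reply, 0);
}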
Example 9
/**
 * Trigger offset_commit_cb op, if configured.
 *
 */
void rd_kafka_offset_commit_cb_op (rd_kafka_t *rk,
				   rd_kafka_resp_err_t err,
				   const rd_kafka_topic_partition_list_t *offsets) {
	rd_kafka_op_t *rko;

        if (!rk->rk_conf.offset_commit_cb)
		return;

	rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_COMMIT|RD_KAFKA_OP_REPLY);
	rko->rko_err = err;
	rd_kafka_assert(NULL, offsets->cnt > 0);
        rd_kafka_op_payload_set(rko,
				rd_kafka_topic_partition_list_copy(offsets),
                                (void *)rd_kafka_topic_partition_list_destroy);
	rd_kafka_q_enq(&rk->rk_rep, rko);
}
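
The op built above is what ultimately drives the application's offset-commit callback. A hedged application-side sketch using the public configuration API (the callback body and names are illustrative; check rdkafka.h for the exact prototype):

#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Application callback invoked once a commit has completed (or failed). */
static void my_offset_commit_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err,
                                 rd_kafka_topic_partition_list_t *offsets,
                                 void *opaque) {
        fprintf(stderr, "Offset commit result: %s\n", rd_kafka_err2str(err));
}

/* Register the callback on the conf object before creating the handle. */
static void setup_commit_cb (rd_kafka_conf_t *conf) {
        rd_kafka_conf_set_offset_commit_cb(conf, my_offset_commit_cb);
}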
Example 10
/**
 * Enqueue offset_commit_cb op, if configured.
 *
 */
void rd_kafka_offset_commit_cb_op (rd_kafka_t *rk,
				   rd_kafka_resp_err_t err,
				   const rd_kafka_topic_partition_list_t *offsets) {
	rd_kafka_op_t *rko;

        if (!(rk->rk_conf.enabled_events & RD_KAFKA_EVENT_OFFSET_COMMIT))
		return;

	rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_COMMIT|RD_KAFKA_OP_REPLY);
	rko->rko_err = err;
	rko->rko_u.offset_commit.cb = rk->rk_conf.offset_commit_cb; /* may be NULL */
	rko->rko_u.offset_commit.opaque = rk->rk_conf.opaque;
	if (offsets)
		rko->rko_u.offset_commit.partitions =
			rd_kafka_topic_partition_list_copy(offsets);
	rd_kafka_q_enq(rk->rk_rep, rko);
}
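Example 11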
rd_kafka_resp_err_t
rd_kafka_assign (rd_kafka_t *rk,
                 const rd_kafka_topic_partition_list_t *partitions) {
        rd_kafka_op_t *rko;
        rd_kafka_cgrp_t *rkcg;

        if (!(rkcg = rd_kafka_cgrp_get(rk)))
                return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;

        rko = rd_kafka_op_new(RD_KAFKA_OP_ASSIGN);
	if (partitions)
		rko->rko_u.assign.partitions =
                        rd_kafka_topic_partition_list_copy(partitions);

        return rd_kafka_op_err_destroy(
                rd_kafka_op_req(rkcg->rkcg_ops, rko, RD_POLL_INFINITE));
}
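
A hedged application-side sketch of calling rd_kafka_assign() with an explicit assignment, using the public topic-partition-list helpers; the topic name and partition number are placeholders.

/* Manually assign partition 0 of "mytopic"; rd_kafka_assign() copies the
 * list onto the internal op, so it can be destroyed right away. */
static rd_kafka_resp_err_t assign_partition_0 (rd_kafka_t *rk) {
        rd_kafka_resp_err_t err;
        rd_kafka_topic_partition_list_t *parts =
                rd_kafka_topic_partition_list_new(1);

        rd_kafka_topic_partition_list_add(parts, "mytopic", 0);
        err = rd_kafka_assign(rk, parts);
        rd_kafka_topic_partition_list_destroy(parts);

        return err;
}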
Example 12
/**
 * Reply to 'rko_orig' using err, payload and len if a replyq is set up,
 * else do nothing.
 *
 * Returns 0 if 'rko_orig' did not have a replyq and nothing was enqueued,
 * else 1.
 */
int rd_kafka_op_reply (rd_kafka_op_t *rko_orig,
                       rd_kafka_resp_err_t err,
                       void *payload, size_t len, void (*free_cb) (void *)) {
        rd_kafka_op_t *rko;

        if (!rko_orig->rko_replyq)
                return 0;

        rko = rd_kafka_op_new(rko_orig->rko_type);
        rko->rko_err     = err;
        rko->rko_payload = payload;
        rko->rko_len     = len;
        rko->rko_free_cb = free_cb;
	if (free_cb)
		rko->rko_flags |= RD_KAFKA_OP_F_FREE;
        rko->rko_version = rko_orig->rko_version;

        return rd_kafka_q_enq(rko_orig->rko_replyq, rko);
}
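Example 13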
/**
 * Propagate an error event to the application on a specific queue.
 * \p optype should be RD_KAFKA_OP_ERR for generic errors and
 * RD_KAFKA_OP_CONSUMER_ERR for consumer errors.
 */
void rd_kafka_q_op_err (rd_kafka_q_t *rkq, rd_kafka_op_type_t optype,
                        rd_kafka_resp_err_t err, int32_t version,
			rd_kafka_toppar_t *rktp, int64_t offset,
                        const char *fmt, ...) {
	va_list ap;
	char buf[2048];
	rd_kafka_op_t *rko;

	va_start(ap, fmt);
	rd_vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);

	rko = rd_kafka_op_new(optype);
	rko->rko_version = version;
	rko->rko_err = err;
	rko->rko_u.err.offset = offset;
	rko->rko_u.err.errstr = rd_strdup(buf);
	if (rktp)
		rko->rko_rktp = rd_kafka_toppar_keep(rktp);

	rd_kafka_q_enq(rkq, rko);
}
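
A hedged sketch of how rd_kafka_q_op_err() might be used by a fetch-reply handler to surface a partition error; the report_fetch_error wrapper is hypothetical, while the argument order follows the signature above.

/* Hypothetical helper: propagate a fetch error for a partition to 'rkq'. */
static void report_fetch_error (rd_kafka_q_t *rkq, rd_kafka_toppar_t *rktp,
                                rd_kafka_resp_err_t err, int32_t version,
                                int64_t offset) {
        rd_kafka_q_op_err(rkq, RD_KAFKA_OP_CONSUMER_ERR, err, version,
                          rktp, offset,
                          "Fetch from partition %"PRId32" failed: %s",
                          rktp->rktp_partition, rd_kafka_err2str(err));
}

Example 14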
rd_kafka_resp_err_t
rd_kafka_subscribe (rd_kafka_t *rk,
                    const rd_kafka_topic_partition_list_t *topics) {

        rd_kafka_op_t *rko;
        rd_kafka_cgrp_t *rkcg;

        if (!(rkcg = rd_kafka_cgrp_get(rk)))
                return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;

        /* Validate topics */
        if (topics->cnt == 0 ||
            rd_kafka_topic_partition_list_sum(topics,
                                              _invalid_topic_cb, NULL) > 0)
                return RD_KAFKA_RESP_ERR__INVALID_ARG;

        rko = rd_kafka_op_new(RD_KAFKA_OP_SUBSCRIBE);
	rko->rko_u.subscribe.topics = rd_kafka_topic_partition_list_copy(topics);

        return rd_kafka_op_err_destroy(
                rd_kafka_op_req(rkcg->rkcg_ops, rko, RD_POLL_INFINITE));
}
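
A hedged application-side sketch of rd_kafka_subscribe(): build a topic list and hand it off (the list is copied onto the internal op, as shown above). Topic names are placeholders; the partition field is not used for subscriptions, so RD_KAFKA_PARTITION_UA is passed.

/* Subscribe to two topics; the consumer group assigns partitions later. */
static rd_kafka_resp_err_t subscribe_topics (rd_kafka_t *rk) {
        rd_kafka_resp_err_t err;
        rd_kafka_topic_partition_list_t *topics =
                rd_kafka_topic_partition_list_new(2);

        rd_kafka_topic_partition_list_add(topics, "logs",
                                          RD_KAFKA_PARTITION_UA);
        rd_kafka_topic_partition_list_add(topics, "metrics",
                                          RD_KAFKA_PARTITION_UA);
        err = rd_kafka_subscribe(rk, topics);
        rd_kafka_topic_partition_list_destroy(topics);

        return err;
}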
Example 15
/**
 * Enqueue op for app. Convenience function
 */
void rd_kafka_op_app (rd_kafka_q_t *rkq, rd_kafka_op_type_t type,
                      int op_flags, rd_kafka_toppar_t *rktp,
                      rd_kafka_resp_err_t err,
                      void *payload, size_t len,
                      void (*free_cb) (void *)) {
        rd_kafka_op_t *rko;

        rko = rd_kafka_op_new(type);
        if (rktp) {
                rko->rko_rktp = rd_kafka_toppar_keep(rktp);
                rko->rko_version = rktp->rktp_fetch_version;
                rko->rko_rkmessage.partition = rktp->rktp_partition;
        }

        rko->rko_err     = err;
        rko->rko_payload = payload;
        rko->rko_len     = len;
        rko->rko_flags  |= op_flags;
        rko->rko_free_cb = free_cb;

        rd_kafka_q_enq(rkq, rko);
}
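Example 16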
/**
 * @brief Creates a new RD_KAFKA_OP_FETCH op and sets up the
 *        embedded message according to the parameters.
 *
 * @param rkmp will be set to the embedded rkm in the rko (for convenience)
 * @param offset may be updated later if it is a relative offset.
 */
rd_kafka_op_t *
rd_kafka_op_new_fetch_msg (rd_kafka_msg_t **rkmp,
                           rd_kafka_toppar_t *rktp,
                           int32_t version,
                           rd_kafka_buf_t *rkbuf,
                           int64_t offset,
                           size_t key_len, const void *key,
                           size_t val_len, const void *val) {
        rd_kafka_msg_t *rkm;
        rd_kafka_op_t *rko;

        rko = rd_kafka_op_new(RD_KAFKA_OP_FETCH);
        rko->rko_rktp    = rd_kafka_toppar_keep(rktp);
        rko->rko_version = version;
        rkm   = &rko->rko_u.fetch.rkm;
        *rkmp = rkm;

        /* Since all the ops share the same payload buffer, a refcount is
         * held on the rkbuf to make sure consume_cb() has been called for
         * each of these ops before the rkbuf and its backing memory
         * buffers are freed. */
        rko->rko_u.fetch.rkbuf = rkbuf;
        rd_kafka_buf_keep(rkbuf);

        rkm->rkm_offset    = offset;

        rkm->rkm_key       = (void *)key;
        rkm->rkm_key_len   = key_len;

        rkm->rkm_payload   = (void *)val;
        rkm->rkm_len       = val_len;
        rko->rko_len       = (int32_t)rkm->rkm_len;

        rkm->rkm_partition = rktp->rktp_partition;

        return rko;
}
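Example 17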
/**
 * Enqueue ERR__THROTTLE op, if desired.
 */
void rd_kafka_op_throttle_time (rd_kafka_broker_t *rkb,
				rd_kafka_q_t *rkq,
				int throttle_time) {
	rd_kafka_op_t *rko;

	rd_avg_add(&rkb->rkb_avg_throttle, throttle_time);

	/* We send throttle events when:
	 *  - throttle_time > 0
	 *  - throttle_time == 0 and last throttle_time > 0
	 */
	if (!rkb->rkb_rk->rk_conf.throttle_cb ||
	    (!throttle_time && !rd_atomic32_get(&rkb->rkb_rk->rk_last_throttle)))
		return;

	rd_atomic32_set(&rkb->rkb_rk->rk_last_throttle, throttle_time);

	rko = rd_kafka_op_new(RD_KAFKA_OP_THROTTLE);
        rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_HIGH);
	rko->rko_u.throttle.nodename = rd_strdup(rkb->rkb_nodename);
	rko->rko_u.throttle.nodeid   = rkb->rkb_nodeid;
	rko->rko_u.throttle.throttle_time = throttle_time;
	rd_kafka_q_enq(rkq, rko);
}
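
The throttle op above is what eventually reaches the application's throttle callback. A hedged application-side sketch using the public configuration API (names and output are illustrative):

#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Application callback invoked when broker throttling is reported. */
static void my_throttle_cb (rd_kafka_t *rk, const char *broker_name,
                            int32_t broker_id, int throttle_time_ms,
                            void *opaque) {
        fprintf(stderr, "Throttled %d ms by %s (broker %d)\n",
                throttle_time_ms, broker_name, (int)broker_id);
}

/* Register the callback on the conf object before creating the handle. */
static void setup_throttle_cb (rd_kafka_conf_t *conf) {
        rd_kafka_conf_set_throttle_cb(conf, my_throttle_cb);
}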
Example 18
/**
 * Send an op back to the application.
 *
 * Locality: Kafka thread
 */
void rd_kafka_op_app_reply (rd_kafka_q_t *rkq,
                            rd_kafka_op_type_t type,
                            rd_kafka_resp_err_t err,
                            int32_t version,
                            void *payload, size_t len) {
	rd_kafka_op_t *rko;

        rko = rd_kafka_op_new(type);

	if (err && !payload) {
		/* Provide a human-readable error string if none was provided. */

                payload = rd_strdup(rd_kafka_err2str(err));
		len = strlen(payload);
	}

        rko->rko_flags      |= RD_KAFKA_OP_F_FREE;
        rko->rko_version     = version;
	rko->rko_payload     = payload;
	rko->rko_len         = len;
	rko->rko_err         = err;

	rd_kafka_q_enq(rkq, rko);
}
Example 19
rd_kafka_resp_err_t
rd_kafka_metadata (rd_kafka_t *rk, int all_topics,
                   rd_kafka_topic_t *only_rkt,
                   const struct rd_kafka_metadata **metadatap,
                   int timeout_ms) {
        rd_kafka_q_t *rkq;
        rd_kafka_broker_t *rkb;
        rd_kafka_op_t *rko;
	rd_ts_t ts_end = rd_timeout_init(timeout_ms);
        rd_list_t topics;

        /* Query any broker that is up; if none are up, pick the first one
         * in the hope that it comes up before the timeout expires. */
        rkb = rd_kafka_broker_any_usable(rk, timeout_ms, 1,
                                         "application metadata request");
	if (!rkb)
		return RD_KAFKA_RESP_ERR__TRANSPORT;

        rkq = rd_kafka_q_new(rk);

        rd_list_init(&topics, 0, rd_free);
        if (!all_topics) {
                if (only_rkt)
                        rd_list_add(&topics,
                                    rd_strdup(rd_kafka_topic_a2i(only_rkt)->
                                              rkt_topic->str));
                else
                        rd_kafka_local_topics_to_list(rkb->rkb_rk, &topics);
        }

        /* Async: request metadata */
        rko = rd_kafka_op_new(RD_KAFKA_OP_METADATA);
        rd_kafka_op_set_replyq(rko, rkq, 0);
        rko->rko_u.metadata.force = 1; /* Force metadata request regardless
                                        * of outstanding metadata requests. */
        rd_kafka_MetadataRequest(rkb, &topics, "application requested", rko);

        rd_list_destroy(&topics);
        rd_kafka_broker_destroy(rkb);

        /* Wait for reply (or timeout) */
        rko = rd_kafka_q_pop(rkq, rd_timeout_remains(ts_end), 0);

        rd_kafka_q_destroy_owner(rkq);

        /* Timeout */
        if (!rko)
                return RD_KAFKA_RESP_ERR__TIMED_OUT;

        /* Error */
        if (rko->rko_err) {
                rd_kafka_resp_err_t err = rko->rko_err;
                rd_kafka_op_destroy(rko);
                return err;
        }

        /* Reply: pass metadata pointer to the application, which now owns it */
        rd_kafka_assert(rk, rko->rko_u.metadata.md);
        *metadatap = rko->rko_u.metadata.md;
        rko->rko_u.metadata.md = NULL;
        rd_kafka_op_destroy(rko);

        return RD_KAFKA_RESP_ERR_NO_ERROR;
}
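
A hedged application-side sketch of the rd_kafka_metadata() call shown above: fetch metadata for all topics, use it, then free it with rd_kafka_metadata_destroy(). The timeout and printed field are illustrative.

#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Fetch cluster metadata with a 5 second timeout and print the broker
 * count; the caller owns the returned metadata object. */
static void dump_broker_count (rd_kafka_t *rk) {
        const struct rd_kafka_metadata *md;
        rd_kafka_resp_err_t err;

        err = rd_kafka_metadata(rk, 1 /* all topics */, NULL, &md, 5000);
        if (err) {
                fprintf(stderr, "Metadata request failed: %s\n",
                        rd_kafka_err2str(err));
                return;
        }

        fprintf(stderr, "Cluster has %d broker(s)\n", md->broker_cnt);
        rd_kafka_metadata_destroy(md);
}

Example 20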
/**
 * Send simple type-only request to queue, wait for response.
 */
rd_kafka_op_t *rd_kafka_op_req2 (rd_kafka_q_t *destq, rd_kafka_op_type_t type) {
        rd_kafka_op_t *rko;

        rko = rd_kafka_op_new(type);
        return rd_kafka_op_req(destq, rko, RD_POLL_INFINITE);
}
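
A hedged caller-side sketch for rd_kafka_op_req2(): send a type-only op and block for the reply, then free it. The request_terminate name is hypothetical and RD_KAFKA_OP_TERMINATE is assumed to be an available op type.

/* Hypothetical blocking request/response round trip. */
static rd_kafka_resp_err_t request_terminate (rd_kafka_q_t *destq) {
        rd_kafka_op_t *reply = rd_kafka_op_req2(destq, RD_KAFKA_OP_TERMINATE);
        rd_kafka_resp_err_t err = reply ? reply->rko_err :
                RD_KAFKA_RESP_ERR__TIMED_OUT;

        if (reply)
                rd_kafka_op_destroy(reply);

        return err;
}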