Example #1
/**
 * Take action when the offset for a toppar becomes unusable.
 *
 * Locality: toppar handler thread
 * Locks: toppar_lock() MUST be held
 */
void rd_kafka_offset_reset (rd_kafka_toppar_t *rktp, int64_t err_offset,
			    rd_kafka_resp_err_t err, const char *reason) {
	int64_t offset = RD_KAFKA_OFFSET_INVALID;
	rd_kafka_op_t *rko;

        /* Enqueue op for toppar handler thread if we're on the wrong thread. */
        if (!thrd_is_current(rktp->rktp_rkt->rkt_rk->rk_thread)) {
                rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_CALLBACK);
                rko->rko_op_cb = rd_kafka_offset_reset_op_cb;
                rko->rko_rktp = rd_kafka_toppar_keep(rktp);
                rko->rko_err = err;
                rko->rko_offset = err_offset;
                rko->rko_flags |= RD_KAFKA_OP_F_FREE;
                rko->rko_payload = rd_strdup(reason);
                rko->rko_len = strlen(reason);
                rd_kafka_q_enq(&rktp->rktp_ops, rko);
                return;
        }

	if (err_offset == RD_KAFKA_OFFSET_INVALID || err)
		offset = rktp->rktp_rkt->rkt_conf.auto_offset_reset;
	else
		offset = err_offset;

	if (offset == RD_KAFKA_OFFSET_INVALID) {
		/* Error, auto.offset.reset tells us to error out. */
		rko = rd_kafka_op_new(RD_KAFKA_OP_CONSUMER_ERR);

		rko->rko_err                 = err;
		rko->rko_rkmessage.offset    = err_offset;
		rko->rko_rkmessage.partition = rktp->rktp_partition;
		rko->rko_payload             = rd_strdup(reason);
		rko->rko_len                 = strlen(rko->rko_payload);
		rko->rko_flags              |= RD_KAFKA_OP_F_FREE;
                rko->rko_rktp                = rd_kafka_toppar_keep(rktp);

		rd_kafka_q_enq(&rktp->rktp_fetchq, rko);
                rd_kafka_toppar_set_fetch_state(
			rktp, RD_KAFKA_TOPPAR_FETCH_NONE);

	} else {
		/* Query logical offset */
		rktp->rktp_query_offset = offset;
                rd_kafka_toppar_set_fetch_state(
			rktp, RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY);
	}

	rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
		     "%s [%"PRId32"]: offset reset (at offset %s) "
		     "to %s: %s: %s",
		     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
		     rd_kafka_offset2str(err_offset),
                     rd_kafka_offset2str(offset),
                     reason, rd_kafka_err2str(err));

	if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY)
		rd_kafka_toppar_offset_request(rktp, rktp->rktp_query_offset, 0);
}
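A minimal usage sketch for the function above: a caller that hits an out-of-range offset while fetching could hand the partition over to the reset logic. The wrapper function name and the chosen error code are illustrative; rd_kafka_toppar_lock()/rd_kafka_toppar_unlock() are assumed to be librdkafka's toppar lock helpers, per the locking note in the header comment.

/* Hypothetical caller (not part of librdkafka): reset the toppar's
 * offset after a fetch failed with an out-of-range offset.
 * Assumes librdkafka's internal headers are included. */
static void example_handle_offset_out_of_range (rd_kafka_toppar_t *rktp,
                                                int64_t bad_offset) {
        rd_kafka_toppar_lock(rktp);   /* toppar_lock() MUST be held */
        rd_kafka_offset_reset(rktp, bad_offset,
                              RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE,
                              "fetch failed: offset out of range");
        rd_kafka_toppar_unlock(rktp);
}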
Example #2
/**
 * Commit a list of offsets asynchronously. Response will be queued on 'replyq'.
 * Optional \p cb will be set on requesting op.
 *
 * Makes a copy of \p offsets (may be NULL for current assignment)
 */
static rd_kafka_resp_err_t
rd_kafka_commit0 (rd_kafka_t *rk,
                  const rd_kafka_topic_partition_list_t *offsets,
		  rd_kafka_toppar_t *rktp,
                  rd_kafka_replyq_t replyq,
		  void (*cb) (rd_kafka_t *rk,
			      rd_kafka_resp_err_t err,
			      rd_kafka_topic_partition_list_t *offsets,
			      void *opaque),
		  void *opaque) {
        rd_kafka_cgrp_t *rkcg;
        rd_kafka_op_t *rko;

        if (!(rkcg = rd_kafka_cgrp_get(rk)))
                return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;

        rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_COMMIT);
	rko->rko_replyq = replyq;
	rko->rko_u.offset_commit.cb = cb;
	rko->rko_u.offset_commit.opaque = opaque;
	if (rktp)
		rko->rko_rktp = rd_kafka_toppar_keep(rktp);

        if (offsets)
		rko->rko_u.offset_commit.partitions =
                        rd_kafka_topic_partition_list_copy(offsets);

        rd_kafka_q_enq(rkcg->rkcg_ops, rko);

        return RD_KAFKA_RESP_ERR_NO_ERROR;
}
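As a usage sketch, a caller that only wants a fire-and-forget asynchronous commit of the current assignment could pass NULL offsets, no toppar and no callback. The wrapper name is illustrative, and RD_KAFKA_NO_REPLYQ is assumed to be the codebase's "no reply queue" helper.

/* Hypothetical caller: asynchronously commit the current assignment,
 * discarding the reply (assumes RD_KAFKA_NO_REPLYQ is available). */
static rd_kafka_resp_err_t
example_commit_current_assignment (rd_kafka_t *rk) {
        return rd_kafka_commit0(rk,
                                NULL,               /* current assignment */
                                NULL,               /* no specific toppar */
                                RD_KAFKA_NO_REPLYQ, /* no reply wanted */
                                NULL,               /* no commit callback */
                                NULL);              /* no opaque */
}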
/**
 * Creates a reply op based on 'rko_orig'.
 * If 'rko_orig' has rko_op_cb set, the reply type is the original
 * rko_type OR:ed with RD_KAFKA_OP_CB, else it is the original rko_type
 * OR:ed with RD_KAFKA_OP_REPLY.
 */
rd_kafka_op_t *rd_kafka_op_new_reply (rd_kafka_op_t *rko_orig,
				      rd_kafka_resp_err_t err) {
        rd_kafka_op_t *rko;

        rko = rd_kafka_op_new(rko_orig->rko_type |
			      (rko_orig->rko_op_cb ?
			       RD_KAFKA_OP_CB : RD_KAFKA_OP_REPLY));
	rd_kafka_op_get_reply_version(rko, rko_orig);
	rko->rko_op_cb   = rko_orig->rko_op_cb;
	rko->rko_err     = err;
	if (rko_orig->rko_rktp)
		rko->rko_rktp = rd_kafka_toppar_keep(
			rd_kafka_toppar_s2i(rko_orig->rko_rktp));

        return rko;
}
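A sketch of the typical pattern around rd_kafka_op_new_reply(): a handler finishes its work, builds the reply and sends it back on the originating op's reply queue. The handler name is illustrative, and rd_kafka_replyq_enq() is assumed to be the codebase's reply-queue enqueue helper.

/* Hypothetical handler: reply "no error" to the op that requested work.
 * Assumes rko_orig->rko_replyq was set by the requester and that
 * rd_kafka_replyq_enq(replyq, rko, version) exists in this codebase. */
static void example_reply_success (rd_kafka_op_t *rko_orig) {
        rd_kafka_op_t *reply =
                rd_kafka_op_new_reply(rko_orig, RD_KAFKA_RESP_ERR_NO_ERROR);
        rd_kafka_replyq_enq(&rko_orig->rko_replyq, reply, 0);
}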
/**
 * Propagate an error event to the application on a specific queue.
 * \p optype should be RD_KAFKA_OP_ERR for generic errors and
 * RD_KAFKA_OP_CONSUMER_ERR for consumer errors.
 */
void rd_kafka_q_op_err (rd_kafka_q_t *rkq, rd_kafka_op_type_t optype,
                        rd_kafka_resp_err_t err, int32_t version,
			rd_kafka_toppar_t *rktp, int64_t offset,
                        const char *fmt, ...) {
	va_list ap;
	char buf[2048];
	rd_kafka_op_t *rko;

	va_start(ap, fmt);
	rd_vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);

	rko = rd_kafka_op_new(optype);
	rko->rko_version = version;
	rko->rko_err = err;
	rko->rko_u.err.offset = offset;
	rko->rko_u.err.errstr = rd_strdup(buf);
	if (rktp)
		rko->rko_rktp = rd_kafka_toppar_keep(rktp);

	rd_kafka_q_enq(rkq, rko);
}
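For illustration, a caller that wants to surface a consumer error for a specific partition could invoke it as below; the target queue, error code and broker id are placeholders.

/* Hypothetical call site: propagate a fetch timeout for a partition.
 * rkq is whichever queue should deliver the error (e.g. the partition's
 * fetch queue); broker_id is a placeholder value. */
static void example_propagate_fetch_timeout (rd_kafka_q_t *rkq,
                                             rd_kafka_toppar_t *rktp,
                                             int32_t broker_id) {
        rd_kafka_q_op_err(rkq, RD_KAFKA_OP_CONSUMER_ERR,
                          RD_KAFKA_RESP_ERR__TIMED_OUT,
                          rktp->rktp_fetch_version, rktp,
                          RD_KAFKA_OFFSET_INVALID,
                          "Fetch from broker %"PRId32" timed out",
                          broker_id);
}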
Example #5
/**
 * Enqueue op for app. Convenience function
 */
void rd_kafka_op_app (rd_kafka_q_t *rkq, rd_kafka_op_type_t type,
                      int op_flags, rd_kafka_toppar_t *rktp,
                      rd_kafka_resp_err_t err,
                      void *payload, size_t len,
                      void (*free_cb) (void *)) {
        rd_kafka_op_t *rko;

        rko = rd_kafka_op_new(type);
        if (rktp) {
                rko->rko_rktp = rd_kafka_toppar_keep(rktp);
                rko->rko_version = rktp->rktp_fetch_version;
                rko->rko_rkmessage.partition = rktp->rktp_partition;
        }

        rko->rko_err     = err;
        rko->rko_payload = payload;
        rko->rko_len     = len;
        rko->rko_flags  |= op_flags;
        rko->rko_free_cb = free_cb;

        rd_kafka_q_enq(rkq, rko);
}
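As a usage sketch, handing an error string to the application could look like this; the queue is whichever application-facing queue applies, and it is assumed that the op destructor releases the payload through free_cb when RD_KAFKA_OP_F_FREE is set.

/* Hypothetical caller: push a transport error string to an
 * application-facing queue. Assumes the op destructor frees the
 * payload via free_cb when RD_KAFKA_OP_F_FREE is set. */
static void example_notify_transport_error (rd_kafka_q_t *rkq) {
        char *errstr = rd_strdup("Broker connection down");
        rd_kafka_op_app(rkq, RD_KAFKA_OP_ERR, RD_KAFKA_OP_F_FREE,
                        NULL /* no toppar */,
                        RD_KAFKA_RESP_ERR__TRANSPORT,
                        errstr, strlen(errstr), rd_free);
}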
/**
 * @brief Creates a new RD_KAFKA_OP_FETCH op and sets up the
 *        embedded message according to the parameters.
 *
 * @param rkmp will be set to the embedded rkm in the rko (for convenience)
 * @param offset may be updated later if it is a relative offset.
 */
rd_kafka_op_t *
rd_kafka_op_new_fetch_msg (rd_kafka_msg_t **rkmp,
                           rd_kafka_toppar_t *rktp,
                           int32_t version,
                           rd_kafka_buf_t *rkbuf,
                           int64_t offset,
                           size_t key_len, const void *key,
                           size_t val_len, const void *val) {
        rd_kafka_msg_t *rkm;
        rd_kafka_op_t *rko;

        rko = rd_kafka_op_new(RD_KAFKA_OP_FETCH);
        rko->rko_rktp    = rd_kafka_toppar_keep(rktp);
        rko->rko_version = version;
        rkm   = &rko->rko_u.fetch.rkm;
        *rkmp = rkm;

        /* Since all these ops share the same payload buffer, a refcount
         * is held on the rkbuf to make sure consume_cb() has been called
         * for each of them before the rkbuf and its backing memory
         * buffers are freed. */
        rko->rko_u.fetch.rkbuf = rkbuf;
        rd_kafka_buf_keep(rkbuf);

        rkm->rkm_offset    = offset;

        rkm->rkm_key       = (void *)key;
        rkm->rkm_key_len   = key_len;

        rkm->rkm_payload   = (void *)val;
        rkm->rkm_len       = val_len;
        rko->rko_len       = (int32_t)rkm->rkm_len;

        rkm->rkm_partition = rktp->rktp_partition;

        return rko;
}
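To show where this constructor fits, here is a sketch of a fetch-response parser step: one op per decoded message, enqueued on the partition's fetch queue. The queue pointer and decoded fields are placeholders; the real parser also deals with compression, relative offsets (hence the rkmp out-parameter) and message versions.

/* Hypothetical parser step: build an op for one decoded message and
 * enqueue it on the partition's fetch queue (fetchq is a placeholder
 * for whatever queue the caller targets). */
static void example_enqueue_fetched_msg (rd_kafka_toppar_t *rktp,
                                         rd_kafka_q_t *fetchq,
                                         int32_t version,
                                         rd_kafka_buf_t *rkbuf,
                                         int64_t offset,
                                         const void *key, size_t key_len,
                                         const void *val, size_t val_len) {
        rd_kafka_msg_t *rkm;
        rd_kafka_op_t *rko =
                rd_kafka_op_new_fetch_msg(&rkm, rktp, version, rkbuf,
                                          offset,
                                          key_len, key,
                                          val_len, val);
        /* rkm could be adjusted here, e.g. to resolve relative offsets. */
        rd_kafka_q_enq(fetchq, rko);
}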