Example #1
static rd_kafka_message_t *
rd_kafka_message_setup (rd_kafka_op_t *rko, rd_kafka_message_t *rkmessage) {
	rd_kafka_itopic_t *rkt;
	rd_kafka_toppar_t *rktp = NULL;

	if (rko->rko_type == RD_KAFKA_OP_DR) {
		rkt = rd_kafka_topic_s2i(rko->rko_u.dr.s_rkt);
	} else {
		if (rko->rko_rktp) {
			rktp = rd_kafka_toppar_s2i(rko->rko_rktp);
			rkt = rktp->rktp_rkt;
		} else
			rkt = NULL;

		rkmessage->_private = rko;
	}


	if (!rkmessage->rkt && rkt)
		rkmessage->rkt = rd_kafka_topic_keep_a(rkt);

	if (rktp)
		rkmessage->partition = rktp->rktp_partition;

	if (!rkmessage->err)
		rkmessage->err = rko->rko_err;

	return rkmessage;
}
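
The function is static, so any caller lives in the same translation unit. A minimal, hedged sketch of the usual pairing: expose the message view embedded in every op (the helper name below is hypothetical, not librdkafka API).

/* Hypothetical helper in the same translation unit: hand the op's
 * embedded message, with topic/partition/error filled in, to the
 * application layer. */
static rd_kafka_message_t *example_msg_from_op (rd_kafka_op_t *rko) {
	/* rko_rkmessage is the message view embedded in the op;
	 * setup fills in topic, partition and error from the op. */
	return rd_kafka_message_setup(rko, &rko->rko_rkmessage);
}
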
Example #2
static void rd_kafka_offset_reset_op_cb (rd_kafka_t *rk, rd_kafka_op_t *rko) {
	rd_kafka_toppar_t *rktp = rd_kafka_toppar_s2i(rko->rko_rktp);
	rd_kafka_toppar_lock(rktp);
	rd_kafka_offset_reset(rktp, rko->rko_offset,
			      rko->rko_err, rko->rko_payload);
	rd_kafka_toppar_unlock(rktp);
}
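
The callback simply wraps rd_kafka_offset_reset() in the toppar lock. A hedged sketch of a direct caller honoring the same locking discipline (helper name and arguments are illustrative only):

/* Hypothetical helper mirroring the op callback above; the point is
 * that rd_kafka_offset_reset() expects the toppar lock to be held. */
static void example_handle_offset_error (rd_kafka_toppar_t *rktp,
					 int64_t failed_offset) {
	rd_kafka_toppar_lock(rktp);
	rd_kafka_offset_reset(rktp, failed_offset,
			      RD_KAFKA_RESP_ERR__OFFSET_OUT_OF_RANGE,
			      "example: offset out of range");
	rd_kafka_toppar_unlock(rktp);
}
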
/**
 * Creates a reply op based on 'rko_orig'.
 * If 'rko_orig' has rko_op_cb set, the reply op type will be ORed with
 * RD_KAFKA_OP_CB, otherwise the reply type will be the original rko_type
 * ORed with RD_KAFKA_OP_REPLY.
 */
rd_kafka_op_t *rd_kafka_op_new_reply (rd_kafka_op_t *rko_orig,
				      rd_kafka_resp_err_t err) {
        rd_kafka_op_t *rko;

        rko = rd_kafka_op_new(rko_orig->rko_type |
			      (rko_orig->rko_op_cb ?
			       RD_KAFKA_OP_CB : RD_KAFKA_OP_REPLY));
	rd_kafka_op_get_reply_version(rko, rko_orig);
	rko->rko_op_cb   = rko_orig->rko_op_cb;
	rko->rko_err     = err;
	if (rko_orig->rko_rktp)
		rko->rko_rktp = rd_kafka_toppar_keep(
			rd_kafka_toppar_s2i(rko_orig->rko_rktp));

        return rko;
}
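
A hedged sketch of the usual call pattern, assuming the internal rd_kafka_replyq_enq() helper: build the reply from the original op, then post it on that op's reply queue (the wrapper name is ours, not librdkafka API).

/* Hypothetical caller: acknowledge an incoming op with a success reply. */
static void example_reply_ok (rd_kafka_op_t *rko_orig) {
	rd_kafka_op_t *rko_reply =
		rd_kafka_op_new_reply(rko_orig, RD_KAFKA_RESP_ERR_NO_ERROR);
	/* The reply carries the original's version and toppar reference;
	 * deliver it to whoever is waiting on the original op. */
	rd_kafka_replyq_enq(&rko_orig->rko_replyq, rko_reply, 0);
}
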
Example #4
/**
 * Filters out outdated ops.
 */
static RD_INLINE rd_kafka_op_t *rd_kafka_op_filter (rd_kafka_q_t *rkq,
                                                    rd_kafka_op_t *rko) {
        if (unlikely(!rko))
                return NULL;

        if (unlikely(rko->rko_version && rko->rko_rktp &&
                     rko->rko_version <
                     rd_atomic32_get(&rd_kafka_toppar_s2i(rko->rko_rktp)->
                                     rktp_version))) {
		rd_kafka_q_deq0(rkq, rko);
                rd_kafka_op_destroy(rko);
                return NULL;
        }

        return rko;
}
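
A sketch of how a pop path might apply the filter (it is file-local, so this would live in the same translation unit; the helper name is hypothetical). A NULL from the filter means either the head was outdated, and has already been dequeued and destroyed, or the queue is empty.

/* Hypothetical peek helper: retry while the filter discards outdated
 * heads. A non-NULL result is still on the queue (the filter only
 * dequeues ops it destroys). Caller must hold rkq_lock. */
static rd_kafka_op_t *example_peek_current (rd_kafka_q_t *rkq) {
	rd_kafka_op_t *rko;
	while (!(rko = rd_kafka_op_filter(rkq, TAILQ_FIRST(&rkq->rkq_q))) &&
	       !TAILQ_EMPTY(&rkq->rkq_q))
		; /* outdated head discarded; examine the new head */
	return rko; /* NULL if the queue is empty */
}
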
Example #5
rd_kafka_message_t *rd_kafka_message_get (rd_kafka_op_t *rko) {
	rd_kafka_message_t *rkmessage;

	if (rko) {
		rkmessage = &rko->rko_rkmessage;
		rkmessage->_private = rko;

		if (!rkmessage->rkt && rko->rko_rktp)
			rkmessage->rkt =
				rd_kafka_topic_keep_a(
					rd_kafka_toppar_s2i(rko->rko_rktp)->
					rktp_rkt);
	} else
                rkmessage = rd_kafka_message_new();

	return rkmessage;
}
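
Typical use on the consume path, sketched below: the returned message aliases the op (via _private), so destroying the message releases the op as well. The delivery helper is hypothetical.

/* Sketch: convert a fetched op into the application-visible message.
 * rd_kafka_message_destroy() follows _private back to the op. */
static void example_deliver (rd_kafka_op_t *rko) {
	rd_kafka_message_t *rkmessage = rd_kafka_message_get(rko);
	printf("offset %"PRId64", err %s\n", rkmessage->offset,
	       rd_kafka_err2str(rkmessage->err));
	rd_kafka_message_destroy(rkmessage); /* destroys the op too */
}
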
/**
 * @brief Store offset for fetched message.
 */
void rd_kafka_op_offset_store (rd_kafka_t *rk, rd_kafka_op_t *rko,
			       const rd_kafka_message_t *rkmessage) {
	rd_kafka_toppar_t *rktp;

	if (unlikely(rko->rko_type != RD_KAFKA_OP_FETCH || rko->rko_err))
		return;

	rktp = rd_kafka_toppar_s2i(rko->rko_rktp);

	if (unlikely(!rk))
		rk = rktp->rktp_rkt->rkt_rk;

	rd_kafka_toppar_lock(rktp);
	rktp->rktp_app_offset = rkmessage->offset+1;
	if (rk->rk_conf.enable_auto_offset_store)
		rd_kafka_offset_store0(rktp, rkmessage->offset+1, 0/*no lock*/);
	rd_kafka_toppar_unlock(rktp);
}
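
In context, this runs right after a fetched message has been handed to the application; passing rk as NULL is allowed since it is recovered from the toppar. A hedged sketch (the call-site helper is ours):

/* Hypothetical call site after delivering a fetch op's message: */
static void example_after_delivery (rd_kafka_op_t *rko) {
	/* Records rkmessage->offset+1 as the app position and, if
	 * enable.auto.offset.store is set, stages it for commit. */
	rd_kafka_op_offset_store(NULL, rko, &rko->rko_rkmessage);
}
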
Example #7
/**
 * Called when a broker commit is done.
 *
 * Locality: toppar handler thread
 * Locks: none
 */
static void
rd_kafka_offset_broker_commit_cb (rd_kafka_t *rk,
				  rd_kafka_resp_err_t err,
				  rd_kafka_topic_partition_list_t *offsets,
				  void *opaque) {
        shptr_rd_kafka_toppar_t *s_rktp;
        rd_kafka_toppar_t *rktp;

        if (!(s_rktp = rd_kafka_topic_partition_list_get_toppar(rk,
                                                                offsets, 0))) {
		rd_kafka_dbg(rk, TOPIC, "OFFSETCOMMIT",
			     "No local partition found for %s [%"PRId32"] "
			     "while parsing OffsetCommit response "
			     "(offset %"PRId64", error \"%s\")",
			     offsets->elems[0].topic,
			     offsets->elems[0].partition,
			     offsets->elems[0].offset,
			     rd_kafka_err2str(offsets->elems[0].err));
                return;
        }

        rktp = rd_kafka_toppar_s2i(s_rktp);

        if (!err)
                err = offsets->elems[0].err;

	rd_kafka_toppar_offset_commit_result(rktp, err, offsets);

        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
                     "%s [%"PRId32"]: offset %"PRId64" committed: %s",
                     rktp->rktp_rkt->rkt_topic->str,
                     rktp->rktp_partition, offsets->elems[0].offset,
                     rd_kafka_err2str(err));

        rktp->rktp_committing_offset = 0;

        rd_kafka_toppar_lock(rktp);
        if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_OFFSET_STORE_STOPPING)
                rd_kafka_offset_store_term(rktp, err);
        rd_kafka_toppar_unlock(rktp);

        rd_kafka_toppar_destroy(s_rktp);
}
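
The same callback shape is what the public async commit API accepts; a minimal application-side sketch using rd_kafka_commit_queue() from rdkafka.h (the callback body here is illustrative):

#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void my_commit_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err,
			  rd_kafka_topic_partition_list_t *offsets,
			  void *opaque) {
	fprintf(stderr, "commit result: %s\n", rd_kafka_err2str(err));
}

/* e.g.: rd_kafka_commit_queue(rk, offsets, rkqu, my_commit_cb, NULL);
 * where rkqu selects the queue the result is served on (see rdkafka.h
 * for the exact queue semantics). */
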
Example #8
/**
 * Store offset.
 * Typically called from application code.
 *
 * NOTE: Must not be called while holding any locks.
 */
rd_kafka_resp_err_t rd_kafka_offset_store (rd_kafka_topic_t *app_rkt,
					   int32_t partition, int64_t offset) {
        rd_kafka_itopic_t *rkt = rd_kafka_topic_a2i(app_rkt);
	shptr_rd_kafka_toppar_t *s_rktp;

	/* Find toppar */
	rd_kafka_topic_rdlock(rkt);
	if (!(s_rktp = rd_kafka_toppar_get(rkt, partition, 0/*!ua_on_miss*/))) {
		rd_kafka_topic_rdunlock(rkt);
		return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
	}
	rd_kafka_topic_rdunlock(rkt);

	rd_kafka_offset_store0(rd_kafka_toppar_s2i(s_rktp), offset+1,
                               1/*lock*/);

	rd_kafka_toppar_destroy(s_rktp);

	return RD_KAFKA_RESP_ERR_NO_ERROR;
}
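
Application-side usage with enable.auto.offset.store=false, sketched with the public API. Note that the +1 is applied internally, so the application passes the offset of the message it just finished processing (the wrapper function is hypothetical):

#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void example_store_processed (rd_kafka_message_t *rkmessage) {
	rd_kafka_resp_err_t err =
		rd_kafka_offset_store(rkmessage->rkt, rkmessage->partition,
				      rkmessage->offset);
	if (err)
		fprintf(stderr, "offset_store: %s\n", rd_kafka_err2str(err));
}
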
Example #9
/**
 * Purge all entries from a queue with an rktp version smaller than `version`.
 * This shaves off the head of the queue, up until the first rko with
 * a non-matching rktp or version.
 */
void rd_kafka_q_purge_toppar_version (rd_kafka_q_t *rkq,
                                      rd_kafka_toppar_t *rktp, int version) {
	rd_kafka_op_t *rko, *next;
	TAILQ_HEAD(, rd_kafka_op_s) tmpq = TAILQ_HEAD_INITIALIZER(tmpq);
        int32_t cnt = 0;
        int64_t size = 0;

	mtx_lock(&rkq->rkq_lock);

	if (rkq->rkq_fwdq) {
		rd_kafka_q_purge_toppar_version(rkq->rkq_fwdq, rktp, version);
		mtx_unlock(&rkq->rkq_lock);
		return;
	}

        /* Move ops to temporary queue and then destroy them from there
         * without locks to avoid lock-ordering problems in op_destroy() */
        while ((rko = TAILQ_FIRST(&rkq->rkq_q)) && rko->rko_rktp &&
               rd_kafka_toppar_s2i(rko->rko_rktp) == rktp &&
               rko->rko_version < version) {
                TAILQ_REMOVE(&rkq->rkq_q, rko, rko_link);
                TAILQ_INSERT_TAIL(&tmpq, rko, rko_link);
                cnt++;
                size += rko->rko_len;
        }


        rkq->rkq_qlen -= cnt;
        rkq->rkq_qsize -= size;
	mtx_unlock(&rkq->rkq_lock);

	next = TAILQ_FIRST(&tmpq);
	while ((rko = next)) {
		next = TAILQ_NEXT(next, rko_link);
		rd_kafka_op_destroy(rko);
	}
}
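
A hedged sketch of the typical caller (e.g. on seek): bump the toppar's version first, then purge everything older from its fetch queue. The helper name is ours; rd_atomic32_add() returning the new value is assumed from librdkafka's rdatomic.h.

/* Hypothetical version-barrier helper, not librdkafka API: */
static void example_version_barrier (rd_kafka_toppar_t *rktp) {
	int version = rd_atomic32_add(&rktp->rktp_version, 1);
	rd_kafka_q_purge_toppar_version(rktp->rktp_fetchq, rktp, version);
}
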
void rd_kafka_op_print (FILE *fp, const char *prefix, rd_kafka_op_t *rko) {
	fprintf(fp,
		"%s((rd_kafka_op_t*)%p)\n"
		"%s Type: %s (0x%x), Version: %"PRId32"\n",
		prefix, rko,
		prefix, rd_kafka_op2str(rko->rko_type), rko->rko_type,
		rko->rko_version);
	if (rko->rko_err)
		fprintf(fp, "%s Error: %s\n",
			prefix, rd_kafka_err2str(rko->rko_err));
	if (rko->rko_replyq.q)
		fprintf(fp, "%s Replyq %p v%d (%s)\n",
			prefix, rko->rko_replyq.q, rko->rko_replyq.version,
#if ENABLE_DEVEL
			rko->rko_replyq._id
#else
			""
#endif
			);
	if (rko->rko_rktp) {
		rd_kafka_toppar_t *rktp = rd_kafka_toppar_s2i(rko->rko_rktp);
		fprintf(fp, "%s ((rd_kafka_toppar_t*)%p) "
			"%s [%"PRId32"] v%d (shptr %p)\n",
			prefix, rktp, rktp->rktp_rkt->rkt_topic->str,
			rktp->rktp_partition,
			rd_atomic32_get(&rktp->rktp_version), rko->rko_rktp);
	}

	switch (rko->rko_type & ~RD_KAFKA_OP_FLAGMASK)
	{
	case RD_KAFKA_OP_FETCH:
		fprintf(fp,  "%s Offset: %"PRId64"\n",
			prefix, rko->rko_u.fetch.rkm.rkm_offset);
		break;
	case RD_KAFKA_OP_CONSUMER_ERR:
		fprintf(fp,  "%s Offset: %"PRId64"\n",
			prefix, rko->rko_u.err.offset);
		/* FALLTHRU */
	case RD_KAFKA_OP_ERR:
		fprintf(fp, "%s Reason: %s\n", prefix, rko->rko_u.err.errstr);
		break;
	case RD_KAFKA_OP_DR:
		fprintf(fp, "%s %"PRId32" messages on %s\n", prefix,
			rd_atomic32_get(&rko->rko_u.dr.msgq.rkmq_msg_cnt),
			rko->rko_u.dr.s_rkt ?
			rd_kafka_topic_s2i(rko->rko_u.dr.s_rkt)->
			rkt_topic->str : "(n/a)");
		break;
	case RD_KAFKA_OP_OFFSET_COMMIT:
		fprintf(fp, "%s Callback: %p (opaque %p)\n",
			prefix, rko->rko_u.offset_commit.cb,
			rko->rko_u.offset_commit.opaque);
		fprintf(fp, "%s %d partitions\n",
			prefix,
			rko->rko_u.offset_commit.partitions ?
			rko->rko_u.offset_commit.partitions->cnt : 0);
		break;

        case RD_KAFKA_OP_LOG:
                fprintf(fp, "%s Log: %%%d %s: %s\n",
                        prefix, rko->rko_u.log.level,
                        rko->rko_u.log.fac,
                        rko->rko_u.log.str);
                break;

	default:
		break;
	}
}
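
Usage is a one-liner, handy from an ad-hoc trace point or a debugger (the prefix string is arbitrary):

rd_kafka_op_print(stderr, "OPDUMP: ", rko);
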
Example #11
int rd_kafka_q_serve_rkmessages (rd_kafka_q_t *rkq, int timeout_ms,
                                 rd_kafka_message_t **rkmessages,
                                 size_t rkmessages_size) {
	unsigned int cnt = 0;
        TAILQ_HEAD(, rd_kafka_op_s) tmpq = TAILQ_HEAD_INITIALIZER(tmpq);
        rd_kafka_op_t *rko, *next;
        rd_kafka_t *rk = rkq->rkq_rk;

	mtx_lock(&rkq->rkq_lock);
	if (rkq->rkq_fwdq) {
                rd_kafka_q_t *fwdq = rkq->rkq_fwdq;
                rd_kafka_q_keep(fwdq);
                /* Since the q_pop may block we need to release the parent
                 * queue's lock. */
                mtx_unlock(&rkq->rkq_lock);
		cnt = rd_kafka_q_serve_rkmessages(fwdq, timeout_ms,
						  rkmessages, rkmessages_size);
                rd_kafka_q_destroy(fwdq);
		return cnt;
	}
        mtx_unlock(&rkq->rkq_lock);

	while (cnt < rkmessages_size) {

                mtx_lock(&rkq->rkq_lock);

		while (!(rko = TAILQ_FIRST(&rkq->rkq_q))) {
			if (cnd_timedwait_ms(&rkq->rkq_cond, &rkq->rkq_lock,
                                             timeout_ms) == thrd_timedout)
				break;
		}

		if (!rko) {
                        mtx_unlock(&rkq->rkq_lock);
			break; /* Timed out */
                }

		rd_kafka_q_deq0(rkq, rko);

                mtx_unlock(&rkq->rkq_lock);

		if (rd_kafka_op_version_outdated(rko, 0)) {
                        /* Outdated op, put on discard queue */
                        TAILQ_INSERT_TAIL(&tmpq, rko, rko_link);
                        continue;
                }

                /* Serve callbacks */
                if (rd_kafka_poll_cb(rk, rko, _Q_CB_CONSUMER, NULL)) {
                        /* Callback served, rko is done, put on discard queue */
                        TAILQ_INSERT_TAIL(&tmpq, rko, rko_link);
                        continue;
                }

		/* Auto-store the offset for later commit, if enabled. */
		if (!rko->rko_err && rko->rko_type == RD_KAFKA_OP_FETCH) {
                        rd_kafka_toppar_t *rktp;
                        rktp = rd_kafka_toppar_s2i(rko->rko_rktp);
			rd_kafka_toppar_lock(rktp);
			rktp->rktp_app_offset = rko->rko_u.fetch.rkm.rkm_offset+1;
                        if (rktp->rktp_cgrp &&
			    rk->rk_conf.enable_auto_offset_store)
                                rd_kafka_offset_store0(rktp,
						       rktp->rktp_app_offset,
                                                       0/* no lock */);
			rd_kafka_toppar_unlock(rktp);
                }

		/* Get rkmessage from rko and append to array. */
		rkmessages[cnt++] = rd_kafka_message_get(rko);
	}

        /* Discard outdated and already-handled ops */
        next = TAILQ_FIRST(&tmpq);
        while (next) {
                rko = next;
                next = TAILQ_NEXT(next, rko_link);
                rd_kafka_op_destroy(rko);
        }


	return cnt;
}
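
This function is the engine behind the public batch consume call; an application-side sketch using rd_kafka_consume_batch() from rdkafka.h (the wrapper function is hypothetical):

#include <librdkafka/rdkafka.h>

static void example_batch (rd_kafka_topic_t *rkt, int32_t partition) {
	rd_kafka_message_t *msgs[100];
	ssize_t i, n = rd_kafka_consume_batch(rkt, partition, 1000 /*ms*/,
					      msgs, 100);
	for (i = 0; i < n; i++) {
		/* ... process msgs[i] ... */
		rd_kafka_message_destroy(msgs[i]);
	}
}
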