/**
 * @brief Set up a MessageSet reader but don't start reading messages.
 */
static void
rd_kafka_msgset_reader_init (rd_kafka_msgset_reader_t *msetr,
                             rd_kafka_buf_t *rkbuf,
                             rd_kafka_toppar_t *rktp,
                             const struct rd_kafka_toppar_ver *tver,
                             rd_kafka_q_t *par_rkq) {

        memset(msetr, 0, sizeof(*msetr));

        msetr->msetr_rkb        = rkbuf->rkbuf_rkb;
        msetr->msetr_rktp       = rktp;
        msetr->msetr_tver       = tver;
        msetr->msetr_rkbuf      = rkbuf;

        /* All parsed messages are put on this temporary op
         * queue first and then moved in one go to the real op queue. */
        rd_kafka_q_init(&msetr->msetr_rkq, msetr->msetr_rkb->rkb_rk);

        /* Make sure enqueued ops get the correct serve/opaque reflecting the
         * original queue. */
        msetr->msetr_rkq.rkq_serve  = par_rkq->rkq_serve;
        msetr->msetr_rkq.rkq_opaque = par_rkq->rkq_opaque;

        /* Keep a (non-refcounted) reference to the parent queue that
         * the messages and events in msetr_rkq will be moved to when
         * parsing is done. */
        msetr->msetr_par_rkq = par_rkq;
}
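The reader is initialized with a private op queue (msetr_rkq) precisely so that a partially parsed MessageSet never leaks onto the application-visible queue. Below is a minimal sketch of the hand-off this init function prepares for; the finish helper and the use of rd_kafka_q_concat() are illustrative assumptions, not taken from this listing.

/* Illustrative finish step: once parsing succeeds, move everything
 * collected on the reader's private queue onto the parent queue in
 * one go, then tear the private queue down.
 * rd_kafka_q_concat() is assumed to append the source queue's ops
 * to the destination queue. */
static void
rd_kafka_msgset_reader_finish_sketch (rd_kafka_msgset_reader_t *msetr) {
        rd_kafka_q_concat(msetr->msetr_par_rkq, &msetr->msetr_rkq);
        rd_kafka_q_destroy(&msetr->msetr_rkq);
}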
Example #2
/**
 * Allocate a new queue and initialize it.
 */
rd_kafka_q_t *rd_kafka_q_new0 (rd_kafka_t *rk, const char *func, int line) {
        rd_kafka_q_t *rkq = rd_malloc(sizeof(*rkq));
        rd_kafka_q_init(rkq, rk);
        rkq->rkq_flags |= RD_KAFKA_Q_F_ALLOCATED;
#if ENABLE_DEVEL
        /* Devel builds: rkq_name is a fixed-size buffer here (note the
         * sizeof), so record the allocation site "function:line". */
        rd_snprintf(rkq->rkq_name, sizeof(rkq->rkq_name), "%s:%d", func, line);
#else
        /* Non-devel builds: rkq_name is a plain pointer, so just
         * reference the caller's function name. */
        rkq->rkq_name = func;
#endif
        return rkq;
}
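In the revision that provides rd_kafka_q_new0(), callers do not normally pass func and line by hand; a thin wrapper macro captures the allocation site instead (compare Example #5, where rd_kafka_q_new() is still a plain function). The exact macro in the real tree may differ; the form below is an illustrative sketch.

/* Sketch of the call-site wrapper: every queue allocated through
 * rd_kafka_q_new() is named "function:line" in ENABLE_DEVEL builds. */
#define rd_kafka_q_new(rk)  rd_kafka_q_new0(rk, __FUNCTION__, __LINE__)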
Example #3
rd_kafka_queue_t *rd_kafka_queue_new (rd_kafka_t *rk) {
	rd_kafka_queue_t *rkqu;

	rkqu = rd_calloc(1, sizeof(*rkqu));

	rd_kafka_q_init(&rkqu->rkqu_q, rk);

        rkqu->rkqu_rk = rk;

	return rkqu;
}
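rd_kafka_queue_new() is part of the public API, so applications only ever see the opaque rd_kafka_queue_t handle wrapping the rd_kafka_q_t initialized above. A minimal usage sketch (configuration and error handling trimmed):

#include <librdkafka/rdkafka.h>

static void queue_usage_sketch (void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        rd_kafka_t *rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf,
                                      errstr, sizeof(errstr));

        /* Application-owned queue backed by the embedded rd_kafka_q_t. */
        rd_kafka_queue_t *rkqu = rd_kafka_queue_new(rk);

        /* ... redirect events or consumed messages to rkqu and poll it ... */

        rd_kafka_queue_destroy(rkqu);
        rd_kafka_destroy(rk);
}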
Example #4
/**
 * Pop all available ops from a queue and call the provided 
 * callback for each op.
 * `max_cnt` limits the number of ops served, 0 = no limit.
 *
 * Returns the number of ops served.
 *
 * Locality: any thread.
 */
int rd_kafka_q_serve (rd_kafka_q_t *rkq, int timeout_ms,
                      int max_cnt, int cb_type,
                      int (*callback) (rd_kafka_t *rk, rd_kafka_op_t *rko,
                                       int cb_type, void *opaque),
                      void *opaque) {
        rd_kafka_t *rk = rkq->rkq_rk;
	rd_kafka_op_t *rko;
	rd_kafka_q_t localq;
        int cnt = 0;
        int handled = 0;

	mtx_lock(&rkq->rkq_lock);

        rd_dassert(TAILQ_EMPTY(&rkq->rkq_q) || rkq->rkq_qlen > 0);
	if (rkq->rkq_fwdq) {
                rd_kafka_q_t *fwdq = rkq->rkq_fwdq;
                int ret;
                rd_kafka_q_keep(fwdq);
                /* Since serving the forwarded queue may block we need to
                 * release this queue's lock first. */
                mtx_unlock(&rkq->rkq_lock);
		ret = rd_kafka_q_serve(fwdq, timeout_ms, max_cnt,
                                       cb_type, callback, opaque);
                rd_kafka_q_destroy(fwdq);
		return ret;
	}

	if (timeout_ms == RD_POLL_INFINITE)
		timeout_ms = INT_MAX;

	/* Wait for op */
	while (!(rko = TAILQ_FIRST(&rkq->rkq_q)) && timeout_ms != 0) {
		if (cnd_timedwait_ms(&rkq->rkq_cond,
				     &rkq->rkq_lock,
				     timeout_ms) != thrd_success)
			break;

		timeout_ms = 0;
	}

	if (!rko) {
		mtx_unlock(&rkq->rkq_lock);
		return 0;
	}

        /* Move up to `max_cnt` ops (all if 0) to a local queue so the
         * callbacks can be dispatched without holding the queue lock. */
	rd_kafka_q_init(&localq, rkq->rkq_rk);
	rd_kafka_q_move_cnt(&localq, rkq, max_cnt == 0 ? -1/*all*/ : max_cnt,
			    0/*no-locks*/);

        mtx_unlock(&rkq->rkq_lock);

        rd_kafka_yield_thread = 0;

	/* Call callback for each op */
        while ((rko = TAILQ_FIRST(&localq.rkq_q))) {
		handled += callback(rk, rko, cb_type, opaque);
		rd_kafka_q_deq0(&localq, rko);
		rd_kafka_op_destroy(rko);
                cnt++;

                if (unlikely(rd_kafka_yield_thread)) {
                        /* Callback called rd_kafka_yield(), we must
                         * stop our callback dispatching and put the
                         * ops in localq back on the original queue head. */
                        if (!TAILQ_EMPTY(&localq.rkq_q))
                                rd_kafka_q_prepend(rkq, &localq);
                        break;
                }
	}

        /* Make sure no op was left unhandled, e.g. a consumer op
         * that ended up on the global queue. */
        rd_kafka_assert(NULL, handled == cnt);

	rd_kafka_q_destroy(&localq);

	return cnt;
}
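A sketch of how a caller might drive rd_kafka_q_serve(): the callback must match the (rk, rko, cb_type, opaque) signature above and should return non-zero for each op it handles, since the function asserts handled == cnt. Note that rd_kafka_q_serve() destroys each op after the callback returns, so the callback must not destroy rko itself. The cb_type value and the helper names below are illustrative assumptions.

/* Illustrative callback: count served ops on behalf of the caller.
 * Returning 1 marks the op as handled, keeping the handled == cnt
 * assertion in rd_kafka_q_serve() satisfied. */
static int serve_cb_sketch (rd_kafka_t *rk, rd_kafka_op_t *rko,
                            int cb_type, void *opaque) {
        int *served = opaque;
        (void)rk; (void)rko; (void)cb_type; /* unused in this sketch */
        (*served)++;
        return 1; /* handled: rd_kafka_q_serve() destroys rko afterwards */
}

/* Serve at most 16 ops, waiting up to 100 ms for the first one. */
static void serve_some_sketch (rd_kafka_q_t *rkq) {
        int served = 0;
        rd_kafka_q_serve(rkq, 100/*timeout_ms*/, 16/*max_cnt*/,
                         0/*cb_type: illustrative*/, serve_cb_sketch, &served);
}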
Example #5
/**
 * Allocate a new queue and initialize it.
 */
rd_kafka_q_t *rd_kafka_q_new (rd_kafka_t *rk) {
        rd_kafka_q_t *rkq = rd_malloc(sizeof(*rkq));
        rd_kafka_q_init(rkq, rk);
        rkq->rkq_flags |= RD_KAFKA_Q_F_ALLOCATED;
        return rkq;
}
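For completeness: the RD_KAFKA_Q_F_ALLOCATED flag set by both constructors is what distinguishes heap-allocated queues (Examples #2 and #5) from embedded ones such as localq in Example #4. A hedged lifecycle sketch, assuming the final rd_kafka_q_destroy() also frees the queue structure when that flag is set:

static void q_lifecycle_sketch (rd_kafka_t *rk) {
        rd_kafka_q_t *rkq = rd_kafka_q_new(rk);

        /* ... enqueue ops and drain them with rd_kafka_q_serve() ... */

        /* Drop our reference; because RD_KAFKA_Q_F_ALLOCATED was set at
         * creation, the final destroy is assumed to free rkq as well. */
        rd_kafka_q_destroy(rkq);
}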