Example #1
/**
 * Enqueue ERR__THROTTLE op, if desired.
 */
void rd_kafka_op_throttle_time (rd_kafka_broker_t *rkb,
				rd_kafka_q_t *rkq,
				int throttle_time) {
	rd_kafka_op_t *rko;

	rd_avg_add(&rkb->rkb_avg_throttle, throttle_time);

	if (!rkb->rkb_rk->rk_conf.quota_support)
		return;

	/* We send throttle events when:
	 *  - throttle_time > 0
	 *  - throttle_time == 0 and last throttle_time > 0
	 */
	if (!throttle_time && !rd_atomic32_get(&rkb->rkb_rk->rk_last_throttle))
		return;

	rd_atomic32_set(&rkb->rkb_rk->rk_last_throttle, throttle_time);

	rko = rd_kafka_op_new(RD_KAFKA_OP_THROTTLE);
	rko->rko_nodename      = rd_strdup(rkb->rkb_nodename);
	rko->rko_flags        |= RD_KAFKA_OP_F_FREE; /* free nodename */
	rko->rko_nodeid        = rkb->rkb_nodeid;
	rko->rko_throttle_time = throttle_time;
	rd_kafka_q_enq(rkq, rko);
}
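A brief orientation note (not verbatim librdkafka code): the typical caller is a broker thread that has just parsed a ThrottleTime field (in milliseconds) from a Fetch or Produce response and reports it on the queue the application polls. The variable names in this sketch are assumptions.

/* Sketch of a call site in a broker-thread response handler.
 * 'Throttle_Time' is assumed to have been parsed from the response and
 * 'rkq' to be the application-facing reply queue. */
rd_kafka_op_throttle_time(rkb, rkq, (int)Throttle_Time);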
Example #2
/**
 * Dispatch timers.
 * Will block for up to 'timeout_us' microseconds before returning.
 */
void rd_kafka_timers_run (rd_kafka_timers_t *rkts, int timeout_us) {
	rd_ts_t now = rd_clock();
	rd_ts_t end = now + timeout_us;

        rd_kafka_timers_lock(rkts);

	while (!rd_atomic32_get(&rkts->rkts_rk->rk_terminate) && now <= end) {
		int64_t sleeptime;
		rd_kafka_timer_t *rtmr;

		if (timeout_us != RD_POLL_NOWAIT) {
			sleeptime = rd_kafka_timers_next(rkts,
							 timeout_us,
							 0/*no-lock*/);

			if (sleeptime > 0) {
				cnd_timedwait_ms(&rkts->rkts_cond,
						 &rkts->rkts_lock,
						 (int)(sleeptime / 1000));

			}
		}

		now = rd_clock();

		while ((rtmr = TAILQ_FIRST(&rkts->rkts_timers)) &&
		       rtmr->rtmr_next <= now) {

			rd_kafka_timer_unschedule(rkts, rtmr);
                        rd_kafka_timers_unlock(rkts);

			rtmr->rtmr_callback(rkts, rtmr->rtmr_arg);

                        rd_kafka_timers_lock(rkts);
			/* Restart timer, unless it has been stopped, or
			 * already rescheduled (start()ed) from the callback. */
			if (rd_kafka_timer_started(rtmr) &&
			    !rd_kafka_timer_scheduled(rtmr))
				rd_kafka_timer_schedule(rkts, rtmr, 0);
		}

		if (timeout_us == RD_POLL_NOWAIT) {
			/* Only iterate once, even if rd_clock doesn't change */
			break;
		}
	}

	rd_kafka_timers_unlock(rkts);
}
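For context, a hedged sketch of scheduling a timer that this dispatch loop will fire: the callback prototype matches the rtmr_callback(rkts, arg) invocation above, while the rd_kafka_timer_start() signature (interval in microseconds) is an assumption about the internal timer API.

/* Sketch: a periodic timer fired from rd_kafka_timers_run().
 * The rd_kafka_timer_start() signature is assumed; 'my_tmr' must stay
 * valid for as long as the timer is scheduled. */
static void my_tick (rd_kafka_timers_t *rkts, void *arg) {
        rd_kafka_t *rk = arg;
        rd_kafka_dbg(rk, GENERIC, "TICK", "periodic timer fired");
}

static rd_kafka_timer_t my_tmr;

/* ... during initialization: */
rd_kafka_timer_start(&rk->rk_timers, &my_tmr,
                     1000 * 1000 /* 1s, in microseconds */,
                     my_tick, rk);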
Example #3
/**
 * Filters out outdated ops.
 */
static RD_INLINE rd_kafka_op_t *rd_kafka_op_filter (rd_kafka_q_t *rkq,
                                                   rd_kafka_op_t *rko) {
        if (unlikely(!rko))
                return NULL;

        if (unlikely(rko->rko_version && rko->rko_rktp &&
                     rko->rko_version <
                     rd_atomic32_get(&rd_kafka_toppar_s2i(rko->rko_rktp)->
                                     rktp_version))) {
		rd_kafka_q_deq0(rkq, rko);
                rd_kafka_op_destroy(rko);
                return NULL;
        }

        return rko;
}
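For orientation, a hedged sketch of the surrounding pop loop (the loop itself is an assumption, not verbatim librdkafka code): the filter is applied to the queue head under rkq->rkq_lock, and a NULL result means the head op was outdated (already dequeued and destroyed) or the queue is empty.

/* Sketch: pop loop holding rkq->rkq_lock (loop structure assumed).
 * Retry immediately if an outdated op was discarded but more remain;
 * otherwise wait for new ops or a timeout. */
while (!(rko = rd_kafka_op_filter(rkq, TAILQ_FIRST(&rkq->rkq_q)))) {
        if (TAILQ_EMPTY(&rkq->rkq_q) &&
            cnd_timedwait_ms(&rkq->rkq_cond, &rkq->rkq_lock,
                             timeout_ms) == thrd_timedout)
                break; /* timed out with nothing to serve */
}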
/**
 * Enqueue ERR__THROTTLE op, if desired.
 */
void rd_kafka_op_throttle_time (rd_kafka_broker_t *rkb,
				rd_kafka_q_t *rkq,
				int throttle_time) {
	rd_kafka_op_t *rko;

	rd_avg_add(&rkb->rkb_avg_throttle, throttle_time);

	/* We send throttle events when:
	 *  - throttle_time > 0
	 *  - throttle_time == 0 and last throttle_time > 0
	 */
	if (!rkb->rkb_rk->rk_conf.throttle_cb ||
	    (!throttle_time && !rd_atomic32_get(&rkb->rkb_rk->rk_last_throttle)))
		return;

	rd_atomic32_set(&rkb->rkb_rk->rk_last_throttle, throttle_time);

	rko = rd_kafka_op_new(RD_KAFKA_OP_THROTTLE);
        rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_HIGH);
	rko->rko_u.throttle.nodename = rd_strdup(rkb->rkb_nodename);
	rko->rko_u.throttle.nodeid   = rkb->rkb_nodeid;
	rko->rko_u.throttle.throttle_time = throttle_time;
	rd_kafka_q_enq(rkq, rko);
}
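On the application side this op is only enqueued when a throttle callback has been configured (note the rk_conf.throttle_cb check above). A minimal sketch using the public API, with the callback served from the normal poll loop:

#include <inttypes.h>
#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Called from rd_kafka_poll() when a THROTTLE op is served. */
static void my_throttle_cb (rd_kafka_t *rk, const char *broker_name,
                            int32_t broker_id, int throttle_time_ms,
                            void *opaque) {
        fprintf(stderr, "Throttled %d ms by %s (%"PRId32")\n",
                throttle_time_ms, broker_name, broker_id);
}

/* ... during configuration: */
rd_kafka_conf_set_throttle_cb(conf, my_throttle_cb);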
void rd_kafka_op_print (FILE *fp, const char *prefix, rd_kafka_op_t *rko) {
	fprintf(fp,
		"%s((rd_kafka_op_t*)%p)\n"
		"%s Type: %s (0x%x), Version: %"PRId32"\n",
		prefix, rko,
		prefix, rd_kafka_op2str(rko->rko_type), rko->rko_type,
		rko->rko_version);
	if (rko->rko_err)
		fprintf(fp, "%s Error: %s\n",
			prefix, rd_kafka_err2str(rko->rko_err));
	if (rko->rko_replyq.q)
		fprintf(fp, "%s Replyq %p v%d (%s)\n",
			prefix, rko->rko_replyq.q, rko->rko_replyq.version,
#if ENABLE_DEVEL
			rko->rko_replyq._id
#else
			""
#endif
			);
	if (rko->rko_rktp) {
		rd_kafka_toppar_t *rktp = rd_kafka_toppar_s2i(rko->rko_rktp);
		fprintf(fp, "%s ((rd_kafka_toppar_t*)%p) "
			"%s [%"PRId32"] v%d (shptr %p)\n",
			prefix, rktp, rktp->rktp_rkt->rkt_topic->str,
			rktp->rktp_partition,
			rd_atomic32_get(&rktp->rktp_version), rko->rko_rktp);
	}

	switch (rko->rko_type & ~RD_KAFKA_OP_FLAGMASK)
	{
	case RD_KAFKA_OP_FETCH:
		fprintf(fp,  "%s Offset: %"PRId64"\n",
			prefix, rko->rko_u.fetch.rkm.rkm_offset);
		break;
	case RD_KAFKA_OP_CONSUMER_ERR:
		fprintf(fp,  "%s Offset: %"PRId64"\n",
			prefix, rko->rko_u.err.offset);
		/* FALLTHRU */
	case RD_KAFKA_OP_ERR:
		fprintf(fp, "%s Reason: %s\n", prefix, rko->rko_u.err.errstr);
		break;
	case RD_KAFKA_OP_DR:
		fprintf(fp, "%s %"PRId32" messages on %s\n", prefix,
			rd_atomic32_get(&rko->rko_u.dr.msgq.rkmq_msg_cnt),
			rko->rko_u.dr.s_rkt ?
			rd_kafka_topic_s2i(rko->rko_u.dr.s_rkt)->
			rkt_topic->str : "(n/a)");
		break;
	case RD_KAFKA_OP_OFFSET_COMMIT:
		fprintf(fp, "%s Callback: %p (opaque %p)\n",
			prefix, rko->rko_u.offset_commit.cb,
			rko->rko_u.offset_commit.opaque);
		fprintf(fp, "%s %d partitions\n",
			prefix,
			rko->rko_u.offset_commit.partitions ?
			rko->rko_u.offset_commit.partitions->cnt : 0);
		break;

        case RD_KAFKA_OP_LOG:
                fprintf(fp, "%s Log: %%%d %s: %s\n",
                        prefix, rko->rko_u.log.level,
                        rko->rko_u.log.fac,
                        rko->rko_u.log.str);
                break;

	default:
		break;
	}
}
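A minimal usage sketch: dump an op to stderr while debugging; the prefix is prepended to every output line.

/* Sketch: print an op's contents for debugging. */
rd_kafka_op_print(stderr, "OPDUMP: ", rko);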
/**
 * @brief Sockem connect, called from **internal librdkafka thread** through
 *        librdkafka's connect_cb
 */
static int connect_cb (struct test *test, sockem_t *skm, const char *id) {
        if (rd_atomic32_get(&refuse_connect) > 0)
                return -1;
        else
                return 0;
}
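A hedged sketch of how a test might wire this up; 'test_curr' and 'test_socket_enable()' are assumptions about the test harness and are only implied by the 'struct test' parameter above.

/* Sketch (test-harness helpers assumed): route sockets through sockem,
 * register the callback, then toggle refusal from the test body. */
test_curr->connect_cb = connect_cb;  /* 'test_curr': assumed pointer to the running test */
test_socket_enable(conf);            /* assumed helper enabling sockem for this conf */

rd_atomic32_set(&refuse_connect, 1); /* subsequent connect attempts return -1 */
/* ... provoke broker reconnects here ... */
rd_atomic32_set(&refuse_connect, 0); /* allow connections again */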
Example #7
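/**
 * Populate 'rkmessages' (of size 'rkmessages_size') with messages from
 * 'rkq', blocking up to 'timeout_ms' whenever the queue is empty.
 * Outdated ops are discarded, callback-type ops are served in place and
 * consumed offsets are stored when auto offset store is enabled.
 *
 * Returns the number of messages written to 'rkmessages'.
 */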
int rd_kafka_q_serve_rkmessages (rd_kafka_q_t *rkq, int timeout_ms,
                                 rd_kafka_message_t **rkmessages,
                                 size_t rkmessages_size) {
	unsigned int cnt = 0;
        TAILQ_HEAD(, rd_kafka_op_s) tmpq = TAILQ_HEAD_INITIALIZER(tmpq);
        rd_kafka_op_t *rko, *next;
        rd_kafka_t *rk = rkq->rkq_rk;

	mtx_lock(&rkq->rkq_lock);
	if (rkq->rkq_fwdq) {
                rd_kafka_q_t *fwdq = rkq->rkq_fwdq;
                rd_kafka_q_keep(fwdq);
                /* Since serving the forwarded queue may block we need to
                 * release the parent queue's lock. */
                mtx_unlock(&rkq->rkq_lock);
		cnt = rd_kafka_q_serve_rkmessages(fwdq, timeout_ms,
						  rkmessages, rkmessages_size);
                rd_kafka_q_destroy(fwdq);
		return cnt;
	}
        mtx_unlock(&rkq->rkq_lock);

	while (cnt < rkmessages_size) {

                mtx_lock(&rkq->rkq_lock);

		while (!(rko = TAILQ_FIRST(&rkq->rkq_q))) {
			if (cnd_timedwait_ms(&rkq->rkq_cond, &rkq->rkq_lock,
                                             timeout_ms) == thrd_timedout)
				break;
		}

		if (!rko) {
                        mtx_unlock(&rkq->rkq_lock);
			break; /* Timed out */
                }

		rd_kafka_q_deq0(rkq, rko);

                mtx_unlock(&rkq->rkq_lock);

                if (rko->rko_version && rko->rko_rktp &&
                    rko->rko_version <
                    rd_atomic32_get(&rd_kafka_toppar_s2i(rko->rko_rktp)->
                                    rktp_version)) {
                        /* Outdated op, put on discard queue */
                        TAILQ_INSERT_TAIL(&tmpq, rko, rko_link);
                        continue;
                }

                /* Serve callbacks */
                if (rd_kafka_poll_cb(rk, rko, _Q_CB_CONSUMER, NULL)) {
                        /* Callback served, rko is done, put on discard queue */
                        TAILQ_INSERT_TAIL(&tmpq, rko, rko_link);
                        continue;
                }

		/* Auto-commit offset, if enabled. */
		if (!rko->rko_err) {
                        rd_kafka_toppar_t *rktp;
                        rktp = rd_kafka_toppar_s2i(rko->rko_rktp);
			rd_kafka_toppar_lock(rktp);
			rktp->rktp_app_offset = rko->rko_offset+1;
                        if (rktp->rktp_cgrp &&
			    rk->rk_conf.enable_auto_offset_store)
                                rd_kafka_offset_store0(rktp,
                                                       rko->rko_offset+1,
                                                       0/* no lock */);
			rd_kafka_toppar_unlock(rktp);
                }

		/* Get rkmessage from rko and append to array. */
		rkmessages[cnt++] = rd_kafka_message_get(rko);
	}

        /* Discard non-desired and already handled ops */
        next = TAILQ_FIRST(&tmpq);
        while (next) {
                rko = next;
                next = TAILQ_NEXT(next, rko_link);
                rd_kafka_op_destroy(rko);
        }


	return cnt;
}
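On the application side, the legacy (simple consumer) batch API ends up in a serve loop like this one. A minimal sketch, with error handling omitted and 'rkt'/'partition' assumed to be set up elsewhere:

/* Sketch: consume up to 1000 messages in one call. */
rd_kafka_message_t *msgs[1000];
ssize_t n, i;

rd_kafka_consume_start(rkt, partition, RD_KAFKA_OFFSET_BEGINNING);

n = rd_kafka_consume_batch(rkt, partition, 1000 /* timeout_ms */,
                           msgs, sizeof(msgs) / sizeof(*msgs));
for (i = 0; i < n; i++) {
        /* msgs[i]->err != 0 marks a per-message error or event */
        rd_kafka_message_destroy(msgs[i]);
}

rd_kafka_consume_stop(rkt, partition);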