static void rd_kafka_offset_reset_op_cb (rd_kafka_t *rk, rd_kafka_op_t *rko) { rd_kafka_toppar_t *rktp = rd_kafka_toppar_s2i(rko->rko_rktp); rd_kafka_toppar_lock(rktp); rd_kafka_offset_reset(rktp, rko->rko_offset, rko->rko_err, rko->rko_payload); rd_kafka_toppar_unlock(rktp); }
void rd_kafka_offset_query_tmr_cb (rd_kafka_timers_t *rkts, void *arg) { rd_kafka_toppar_t *rktp = arg; rd_kafka_toppar_lock(rktp); rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", "Topic %s [%"PRId32"]: timed offset query for %s", rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, rd_kafka_offset2str(rktp->rktp_query_offset)); rd_kafka_toppar_offset_request(rktp, rktp->rktp_query_offset, 0); rd_kafka_toppar_unlock(rktp); }
/** * Offset file sync timer callback */ static void rd_kafka_offset_file_sync_tmr_cb (rd_kafka_t *rk, void *arg) { rd_kafka_toppar_t *rktp = arg; rd_kafka_toppar_lock(rktp); if (rktp->rktp_offset_fd != -1) { rd_kafka_dbg(rk, TOPIC, "SYNC", "%s [%"PRId32"]: offset file sync", rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition); fsync(rktp->rktp_offset_fd); } rd_kafka_toppar_unlock(rktp); }
/** * Offset file commit timer callback. */ static void rd_kafka_offset_file_commit_tmr_cb (rd_kafka_t *rk, void *arg) { rd_kafka_toppar_t *rktp = arg; rd_kafka_toppar_lock(rktp); rd_kafka_dbg(rk, TOPIC, "OFFSET", "%s [%"PRId32"]: periodic commit: " "stored offset %"PRId64" > commited offset %"PRId64" ?", rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, rktp->rktp_stored_offset, rktp->rktp_commited_offset); if (rktp->rktp_stored_offset > rktp->rktp_commited_offset) rd_kafka_offset_file_commit(rktp, rktp->rktp_stored_offset); rd_kafka_toppar_unlock(rktp); }
/** * @brief Store offset for fetched message. */ void rd_kafka_op_offset_store (rd_kafka_t *rk, rd_kafka_op_t *rko, const rd_kafka_message_t *rkmessage) { rd_kafka_toppar_t *rktp; if (unlikely(rko->rko_type != RD_KAFKA_OP_FETCH || rko->rko_err)) return; rktp = rd_kafka_toppar_s2i(rko->rko_rktp); if (unlikely(!rk)) rk = rktp->rktp_rkt->rkt_rk; rd_kafka_toppar_lock(rktp); rktp->rktp_app_offset = rkmessage->offset+1; if (rk->rk_conf.enable_auto_offset_store) rd_kafka_offset_store0(rktp, rkmessage->offset+1, 0/*no lock*/); rd_kafka_toppar_unlock(rktp); }
/** * Called when a broker commit is done. * * Locality: toppar handler thread * Locks: none */ static void rd_kafka_offset_broker_commit_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque) { shptr_rd_kafka_toppar_t *s_rktp; rd_kafka_toppar_t *rktp; if (!(s_rktp = rd_kafka_topic_partition_list_get_toppar(rk, offsets, 0))) { rd_kafka_dbg(rk, TOPIC, "OFFSETCOMMIT", "No local partition found for %s [%"PRId32"] " "while parsing OffsetCommit response " "(offset %"PRId64", error \"%s\")", offsets->elems[0].topic, offsets->elems[0].partition, offsets->elems[0].offset, rd_kafka_err2str(offsets->elems[0].err)); return; } rktp = rd_kafka_toppar_s2i(s_rktp); if (!err) err = offsets->elems[0].err; rd_kafka_toppar_offset_commit_result(rktp, err, offsets); rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", "%s [%"PRId32"]: offset %"PRId64" committed: %s", rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, offsets->elems[0].offset, rd_kafka_err2str(err)); rktp->rktp_committing_offset = 0; rd_kafka_toppar_lock(rktp); if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_OFFSET_STORE_STOPPING) rd_kafka_offset_store_term(rktp, err); rd_kafka_toppar_unlock(rktp); rd_kafka_toppar_destroy(s_rktp); }
int rd_kafka_q_serve_rkmessages (rd_kafka_q_t *rkq, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size) { unsigned int cnt = 0; TAILQ_HEAD(, rd_kafka_op_s) tmpq = TAILQ_HEAD_INITIALIZER(tmpq); rd_kafka_op_t *rko, *next; rd_kafka_t *rk = rkq->rkq_rk; mtx_lock(&rkq->rkq_lock); if (rkq->rkq_fwdq) { rd_kafka_q_t *fwdq = rkq->rkq_fwdq; rd_kafka_q_keep(fwdq); /* Since the q_pop may block we need to release the parent * queue's lock. */ mtx_unlock(&rkq->rkq_lock); cnt = rd_kafka_q_serve_rkmessages(fwdq, timeout_ms, rkmessages, rkmessages_size); rd_kafka_q_destroy(fwdq); return cnt; } mtx_unlock(&rkq->rkq_lock); while (cnt < rkmessages_size) { mtx_lock(&rkq->rkq_lock); while (!(rko = TAILQ_FIRST(&rkq->rkq_q))) { if (cnd_timedwait_ms(&rkq->rkq_cond, &rkq->rkq_lock, timeout_ms) == thrd_timedout) break; } if (!rko) { mtx_unlock(&rkq->rkq_lock); break; /* Timed out */ } rd_kafka_q_deq0(rkq, rko); mtx_unlock(&rkq->rkq_lock); if (rd_kafka_op_version_outdated(rko, 0)) { /* Outdated op, put on discard queue */ TAILQ_INSERT_TAIL(&tmpq, rko, rko_link); continue; } /* Serve callbacks */ if (rd_kafka_poll_cb(rk, rko, _Q_CB_CONSUMER, NULL)) { /* Callback served, rko is done, put on discard queue */ TAILQ_INSERT_TAIL(&tmpq, rko, rko_link); continue; } /* Auto-commit offset, if enabled. */ if (!rko->rko_err && rko->rko_type == RD_KAFKA_OP_FETCH) { rd_kafka_toppar_t *rktp; rktp = rd_kafka_toppar_s2i(rko->rko_rktp); rd_kafka_toppar_lock(rktp); rktp->rktp_app_offset = rko->rko_u.fetch.rkm.rkm_offset+1; if (rktp->rktp_cgrp && rk->rk_conf.enable_auto_offset_store) rd_kafka_offset_store0(rktp, rktp->rktp_app_offset, 0/* no lock */); rd_kafka_toppar_unlock(rktp); } /* Get rkmessage from rko and append to array. */ rkmessages[cnt++] = rd_kafka_message_get(rko); } /* Discard non-desired and already handled ops */ next = TAILQ_FIRST(&tmpq); while (next) { rko = next; next = TAILQ_NEXT(next, rko_link); rd_kafka_op_destroy(rko); } return cnt; }