/**
 * Pop an op from a queue.
 *
 * Waits up to \p timeout_ms for an op whose version passes
 * rd_kafka_op_filter(); outdated ops are filtered out (and consumed by
 * the filter) before a candidate is dequeued.
 *
 * If the queue is forwarded (rkq_fwdq set) the pop is delegated to the
 * forward queue with the parent lock released, holding a temporary
 * reference on the forward queue for the duration of the call.
 *
 * @returns the dequeued op, or NULL on timeout.
 *
 * Locality: any thread.
 */
rd_kafka_op_t *rd_kafka_q_pop (rd_kafka_q_t *rkq, int timeout_ms,
                               int32_t version) {
        rd_kafka_op_t *rko;

        if (timeout_ms == RD_POLL_INFINITE)
                timeout_ms = INT_MAX;

        mtx_lock(&rkq->rkq_lock);

        if (!rkq->rkq_fwdq) {
                do {
                        /* Filter out outdated ops */
                        while ((rko = TAILQ_FIRST(&rkq->rkq_q)) &&
                               !(rko = rd_kafka_op_filter(rkq, rko)))
                                ;

                        if (rko) {
                                /* Proper versioned op */
                                rd_kafka_q_deq0(rkq, rko);
                                break;
                        }

                        /* No op, wait for one */
                        rd_ts_t pre = rd_clock();
                        if (cnd_timedwait_ms(&rkq->rkq_cond,
                                             &rkq->rkq_lock,
                                             timeout_ms) == thrd_timedout) {
                                mtx_unlock(&rkq->rkq_lock);
                                return NULL;
                        }

                        /* Remove spent time from the remaining timeout.
                         * FIX: divide the 64-bit microsecond delta down to
                         * milliseconds BEFORE narrowing to int. The original
                         * `(int) (rd_clock()-pre) / 1000` cast first, which
                         * overflows (UB) once the elapsed time exceeds
                         * INT_MAX microseconds (~36 minutes). */
                        timeout_ms -= (int) ((rd_clock()-pre) / 1000);
                        if (timeout_ms < 0)
                                timeout_ms = RD_POLL_NOWAIT;

                } while (timeout_ms != RD_POLL_NOWAIT);

                mtx_unlock(&rkq->rkq_lock);
        } else {
                rd_kafka_q_t *fwdq = rkq->rkq_fwdq;
                rd_kafka_q_keep(fwdq);
                /* Since the q_pop may block we need to release the parent
                 * queue's lock. */
                mtx_unlock(&rkq->rkq_lock);
                rko = rd_kafka_q_pop(fwdq, timeout_ms, version);
                rd_kafka_q_destroy(fwdq);
        }

        return rko;
}
/**
 * Helper: block for a single op on \p rkq and translate it to an error
 * code. The op itself is consumed (destroyed).
 *
 * @returns the op's rko_err, or RD_KAFKA_RESP_ERR__TIMED_OUT if no op
 *          arrived within \p timeout_ms.
 */
rd_kafka_resp_err_t rd_kafka_q_wait_result (rd_kafka_q_t *rkq,
                                            int timeout_ms) {
        rd_kafka_op_t *rko = rd_kafka_q_pop(rkq, timeout_ms, 0);
        rd_kafka_resp_err_t err;

        if (!rko)
                return RD_KAFKA_RESP_ERR__TIMED_OUT;

        err = rko->rko_err;
        rd_kafka_op_destroy(rko);

        return err;
}
/**
 * @brief Send request to queue, wait for response.
 *
 * The reply queue \p recvq is attached to \p rko so the handler knows
 * where to respond.
 *
 * @returns the reply op (NULL on timeout), or NULL if \p destq is
 *          disabled (enqueue refused).
 */
rd_kafka_op_t *rd_kafka_op_req0 (rd_kafka_q_t *destq, rd_kafka_q_t *recvq,
                                 rd_kafka_op_t *rko, int timeout_ms) {
        /* Indicate to destination where to send reply. */
        rd_kafka_op_set_replyq(rko, recvq, NULL);

        /* Enqueue op; a disabled destination queue refuses it. */
        if (!rd_kafka_q_enq(destq, rko))
                return NULL;

        /* Wait for reply; may be NULL on timeout. */
        return rd_kafka_q_pop(recvq, timeout_ms, 0);
}
/**
 * Send request to queue, wait for response.
 *
 * Attaches \p recvq as the op's reply queue (taking an extra queue
 * reference for the replyq pointer), enqueues \p rko on \p destq and
 * blocks up to \p timeout_ms for the reply.
 *
 * @returns the reply op, or NULL on timeout.
 *
 * NOTE(review): if \p recvq is NULL (the `if (recvq)` guard suggests
 * callers may pass NULL) the final rd_kafka_q_pop() receives a NULL
 * queue — confirm callers never combine recvq==NULL with this wait.
 */
rd_kafka_op_t *rd_kafka_op_req0 (rd_kafka_q_t *destq, rd_kafka_q_t *recvq,
                                 rd_kafka_op_t *rko, int timeout_ms) {
        rd_kafka_op_t *reply;

        /* Indicate to destination where to send reply. */
        rko->rko_replyq = recvq;
        if (recvq)
                /* Hold a reference on behalf of rko_replyq; presumably
                 * released by whoever consumes/replies to the op. */
                rd_kafka_q_keep(rko->rko_replyq);

        /* Enqueue op */
        rd_kafka_q_enq(destq, rko);

        /* Wait for reply */
        reply = rd_kafka_q_pop(recvq, timeout_ms, 0);

        /* May be NULL for timeout */
        return reply;
}
/**
 * Commit offsets with the reply delivered on \p rkqu (if given) or,
 * when \p rkqu is NULL, synchronously on an internal temporary queue.
 *
 * @returns RD_KAFKA_RESP_ERR__UNKNOWN_GROUP if no consumer group is
 *          configured, otherwise the result of the commit (for the
 *          synchronous path) or of enqueuing it (asynchronous path).
 */
rd_kafka_resp_err_t
rd_kafka_commit_queue (rd_kafka_t *rk,
                       const rd_kafka_topic_partition_list_t *offsets,
                       rd_kafka_queue_t *rkqu,
                       void (*cb) (rd_kafka_t *rk,
                                   rd_kafka_resp_err_t err,
                                   rd_kafka_topic_partition_list_t *offsets,
                                   void *opaque),
                       void *opaque) {
        rd_kafka_q_t *repq;
        rd_kafka_op_t *rko;
        rd_kafka_resp_err_t err;

        if (!rd_kafka_cgrp_get(rk))
                return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;

        /* Reply goes to the application's queue, or to a temporary one
         * we wait on ourselves. */
        repq = rkqu ? rkqu->rkqu_q : rd_kafka_q_new(rk);

        err = rd_kafka_commit0(rk, offsets, NULL,
                               RD_KAFKA_REPLYQ(repq, 0),
                               cb, opaque);

        if (rkqu)
                /* Application-supplied queue: reply is delivered there,
                 * nothing more to do here. */
                return err;

        /* Temporary queue: wait for the reply ourselves. */
        rko = rd_kafka_q_pop(repq, RD_POLL_INFINITE, 0);
        if (!rko) {
                err = RD_KAFKA_RESP_ERR__TIMED_OUT;
        } else {
                err = rko->rko_err;
                rd_kafka_op_handle_std(rk, rko);
                rd_kafka_op_destroy(rko);
        }
        rd_kafka_q_destroy(repq);

        return err;
}
/**
 * Commit offsets, asynchronously (reply routed to the cgrp ops queue)
 * or synchronously via a temporary queue that is waited on here.
 *
 * NOTE: 'offsets' may be NULL, see official documentation.
 *
 * @returns RD_KAFKA_RESP_ERR__UNKNOWN_GROUP if no consumer group is
 *          configured; RD_KAFKA_RESP_ERR_NO_ERROR for async commits;
 *          otherwise the error carried by the commit reply op.
 */
rd_kafka_resp_err_t
rd_kafka_commit (rd_kafka_t *rk,
                 const rd_kafka_topic_partition_list_t *offsets, int async) {
        rd_kafka_cgrp_t *rkcg;
        rd_kafka_resp_err_t err;
        rd_kafka_q_t *tmpq = NULL;

        if (!(rkcg = rd_kafka_cgrp_get(rk)))
                return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;

        if (!async)
                tmpq = rd_kafka_q_new(rk);

        err = rd_kafka_commit0(rk, offsets,
                               async ? &rkcg->rkcg_ops : tmpq, NULL);

        if (!async) {
                rd_kafka_op_t *rko =
                        rd_kafka_q_pop(tmpq, RD_POLL_INFINITE, 0);

                /* FIX: guard against a NULL reply instead of dereferencing
                 * unconditionally (rd_kafka_q_pop() may return NULL);
                 * mirrors the handling in rd_kafka_commit_queue(). Also
                 * ensures tmpq is destroyed on that path (was leaked). */
                if (!rko) {
                        err = RD_KAFKA_RESP_ERR__TIMED_OUT;
                } else {
                        err = rko->rko_err;

                        /* Enqueue offset_commit_cb if configured */
                        if (rko->rko_payload /* offset list */) {
                                rd_kafka_offset_commit_cb_op(
                                        rk, rko->rko_err,
                                        (rd_kafka_topic_partition_list_t *)
                                        rko->rko_payload);
                                rko->rko_payload = NULL;
                        }

                        rd_kafka_op_destroy(rko);
                }

                rd_kafka_q_destroy(tmpq);
        } else {
                err = RD_KAFKA_RESP_ERR_NO_ERROR;
        }

        return err;
}
/**
 * Request cluster metadata synchronously.
 *
 * @param rk         client instance.
 * @param all_topics if non-zero, the topic list sent with the request is
 *                   left empty; otherwise it contains \p only_rkt's topic
 *                   name (if given) or the locally known topics.
 * @param only_rkt   optional single topic to restrict the request to.
 * @param metadatap  on success, set to the metadata object; the md pointer
 *                   is detached from the op, so the caller owns it.
 * @param timeout_ms total time budget (covers both broker lookup and the
 *                   wait for the reply, tracked via ts_end).
 *
 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success,
 *          RD_KAFKA_RESP_ERR__TRANSPORT if no usable broker was found,
 *          RD_KAFKA_RESP_ERR__TIMED_OUT if no reply arrived in time,
 *          or the error carried by the reply op.
 */
rd_kafka_resp_err_t
rd_kafka_metadata (rd_kafka_t *rk, int all_topics,
                   rd_kafka_topic_t *only_rkt,
                   const struct rd_kafka_metadata **metadatap,
                   int timeout_ms) {
        rd_kafka_q_t *rkq;
        rd_kafka_broker_t *rkb;
        rd_kafka_op_t *rko;
        rd_ts_t ts_end = rd_timeout_init(timeout_ms);
        rd_list_t topics;

        /* Query any broker that is up, and if none are up pick the first one,
         * if we're lucky it will be up before the timeout */
        rkb = rd_kafka_broker_any_usable(rk, timeout_ms, 1,
                                         "application metadata request");
        if (!rkb)
                return RD_KAFKA_RESP_ERR__TRANSPORT;

        rkq = rd_kafka_q_new(rk);

        /* Build the topic-name list (owns strdup'd strings via rd_free). */
        rd_list_init(&topics, 0, rd_free);
        if (!all_topics) {
                if (only_rkt)
                        rd_list_add(&topics,
                                    rd_strdup(rd_kafka_topic_a2i(only_rkt)->
                                              rkt_topic->str));
                else
                        rd_kafka_local_topics_to_list(rkb->rkb_rk, &topics);
        }

        /* Async: request metadata */
        rko = rd_kafka_op_new(RD_KAFKA_OP_METADATA);
        rd_kafka_op_set_replyq(rko, rkq, 0);
        rko->rko_u.metadata.force = 1; /* Force metadata request regardless
                                        * of outstanding metadata requests. */
        rd_kafka_MetadataRequest(rkb, &topics, "application requested", rko);

        /* The request holds what it needs; release our local refs. */
        rd_list_destroy(&topics);
        rd_kafka_broker_destroy(rkb);

        /* Wait for reply (or timeout) */
        rko = rd_kafka_q_pop(rkq, rd_timeout_remains(ts_end), 0);

        rd_kafka_q_destroy_owner(rkq);

        /* Timeout */
        if (!rko)
                return RD_KAFKA_RESP_ERR__TIMED_OUT;

        /* Error */
        if (rko->rko_err) {
                rd_kafka_resp_err_t err = rko->rko_err;
                rd_kafka_op_destroy(rko);
                return err;
        }

        /* Reply: pass metadata pointer to application who now owns it*/
        rd_kafka_assert(rk, rko->rko_u.metadata.md);
        *metadatap = rko->rko_u.metadata.md;
        rko->rko_u.metadata.md = NULL; /* detach so op destroy won't free it */
        rd_kafka_op_destroy(rko);

        return RD_KAFKA_RESP_ERR_NO_ERROR;
}