/**
 * @brief Destroy an op and release every resource it owns.
 *
 * Legacy (pre-union) variant: payload, topic, toppar, metadata and replyq
 * are carried in dedicated rko_* fields rather than a per-type union.
 * Teardown order matters: the shared buffer / payload is released first,
 * then the individual refcounted references, then the op itself.
 */
void rd_kafka_op_destroy (rd_kafka_op_t *rko) {

        /* Decrease refcount on rkbuf to eventually rd_free the shared buffer*/
        if (rko->rko_rkbuf)
                rd_kafka_buf_handle_op(rko, RD_KAFKA_RESP_ERR__DESTROY);
        else if (rko->rko_payload && rko->rko_flags & RD_KAFKA_OP_F_FREE) {
                /* Op owns its payload (F_FREE): release through the op's
                 * custom free callback when set, else plain rd_free(). */
                if (rko->rko_free_cb)
                        rko->rko_free_cb(rko->rko_payload);
                else
                        rd_free(rko->rko_payload);
        }

        /* Drop the reference held on the app-topic (converted to its
         * shared-ptr form first). */
        if (rko->rko_rkt)
                rd_kafka_topic_destroy0(rd_kafka_topic_a2s(rko->rko_rkt));

        /* Drop toppar, metadata and reply-queue references, if any. */
        if (rko->rko_rktp)
                rd_kafka_toppar_destroy(rko->rko_rktp);

        if (rko->rko_metadata)
                rd_kafka_metadata_destroy(rko->rko_metadata);

        if (rko->rko_replyq)
                rd_kafka_q_destroy(rko->rko_replyq);

        /* Global outstanding-op counter must never go negative:
         * a negative value indicates a double-destroy somewhere. */
        if (rd_atomic32_sub(&rd_kafka_op_cnt, 1) < 0)
                rd_kafka_assert(NULL, !*"rd_kafka_op_cnt < 0");

        rd_free(rko);
}
/**
 * @brief Handle a Metadata response message.
 *
 * @param topics are the requested topics (may be NULL)
 *
 * The metadata will be marshalled into 'struct rd_kafka_metadata*' structs.
 *
 * The marshalled metadata is returned in \p *mdp, (NULL on error).
 *
 * @returns an error code on parse failure, else NO_ERROR.
 *
 * @locality rdkafka main thread
 */
rd_kafka_resp_err_t rd_kafka_parse_Metadata (rd_kafka_broker_t *rkb,
                                             rd_kafka_buf_t *request,
                                             rd_kafka_buf_t *rkbuf,
                                             struct rd_kafka_metadata **mdp) {
        rd_kafka_t *rk = rkb->rkb_rk;
        int i, j, k;
        rd_tmpabuf_t tbuf;  /* single backing buffer for all md allocations */
        struct rd_kafka_metadata *md;
        size_t rkb_namelen;
        /* Referenced by the rd_kafka_buf_read_*() macros on decode failure. */
        const int log_decode_errors = LOG_ERR;
        rd_list_t *missing_topics = NULL;
        const rd_list_t *requested_topics = request->rkbuf_u.Metadata.topics;
        int all_topics = request->rkbuf_u.Metadata.all_topics;
        /* NOTE(review): 'reason' is not referenced in the visible portion of
         * this function — presumably used in the (not shown) epilogue or
         * debug logging; confirm before removing. */
        const char *reason = request->rkbuf_u.Metadata.reason ?
                request->rkbuf_u.Metadata.reason : "(no reason)";
        int ApiVersion = request->rkbuf_reqhdr.ApiVersion;
        rd_kafkap_str_t cluster_id = RD_ZERO_INIT;
        int32_t controller_id = -1;
        rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
        /* NOTE(review): 'broadcast_changes' is never set in the visible
         * portion — likely consumed past this chunk; verify. */
        int broadcast_changes = 0;

        rd_kafka_assert(NULL, thrd_is_current(rk->rk_thread));

        /* Remove topics from missing_topics as they are seen in Metadata. */
        if (requested_topics)
                missing_topics = rd_list_copy(requested_topics,
                                              rd_list_string_copy, NULL);

        rd_kafka_broker_lock(rkb);
        rkb_namelen = strlen(rkb->rkb_name)+1;
        /* We assume that the marshalled representation is
         * no more than 4 times larger than the wire representation. */
        rd_tmpabuf_new(&tbuf,
                       sizeof(*md) + rkb_namelen + (rkbuf->rkbuf_totlen * 4),
                       0/*dont assert on fail*/);

        if (!(md = rd_tmpabuf_alloc(&tbuf, sizeof(*md)))) {
                /* NOTE(review): broker lock is still held here; the 'err'
                 * label (beyond this chunk) presumably unlocks — confirm. */
                err = RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
                goto err;
        }

        md->orig_broker_id = rkb->rkb_nodeid;
        md->orig_broker_name = rd_tmpabuf_write(&tbuf,
                                                rkb->rkb_name, rkb_namelen);
        rd_kafka_broker_unlock(rkb);

        /* Read Brokers */
        rd_kafka_buf_read_i32a(rkbuf, md->broker_cnt);
        if (md->broker_cnt > RD_KAFKAP_BROKERS_MAX)
                rd_kafka_buf_parse_fail(rkbuf, "Broker_cnt %i > BROKERS_MAX %i",
                                        md->broker_cnt, RD_KAFKAP_BROKERS_MAX);

        if (!(md->brokers = rd_tmpabuf_alloc(&tbuf, md->broker_cnt *
                                             sizeof(*md->brokers))))
                rd_kafka_buf_parse_fail(rkbuf,
                                        "%d brokers: tmpabuf memory shortage",
                                        md->broker_cnt);

        for (i = 0 ; i < md->broker_cnt ; i++) {
                rd_kafka_buf_read_i32a(rkbuf, md->brokers[i].id);
                rd_kafka_buf_read_str_tmpabuf(rkbuf, &tbuf,
                                              md->brokers[i].host);
                rd_kafka_buf_read_i32a(rkbuf, md->brokers[i].port);

                if (ApiVersion >= 1) {
                        /* Rack id (v1+): read and discarded. */
                        rd_kafkap_str_t rack;
                        rd_kafka_buf_read_str(rkbuf, &rack);
                }
        }

        if (ApiVersion >= 2)
                rd_kafka_buf_read_str(rkbuf, &cluster_id);

        if (ApiVersion >= 1) {
                rd_kafka_buf_read_i32(rkbuf, &controller_id);
                rd_rkb_dbg(rkb, METADATA, "METADATA",
                           "ClusterId: %.*s, ControllerId: %"PRId32,
                           RD_KAFKAP_STR_PR(&cluster_id), controller_id);
        }

        /* Read TopicMetadata */
        rd_kafka_buf_read_i32a(rkbuf, md->topic_cnt);
        rd_rkb_dbg(rkb, METADATA, "METADATA", "%i brokers, %i topics",
                   md->broker_cnt, md->topic_cnt);

        if (md->topic_cnt > RD_KAFKAP_TOPICS_MAX)
                rd_kafka_buf_parse_fail(rkbuf, "TopicMetadata_cnt %"PRId32
                                        " > TOPICS_MAX %i",
                                        md->topic_cnt, RD_KAFKAP_TOPICS_MAX);

        if (!(md->topics = rd_tmpabuf_alloc(&tbuf,
                                            md->topic_cnt *
                                            sizeof(*md->topics))))
                rd_kafka_buf_parse_fail(rkbuf,
                                        "%d topics: tmpabuf memory shortage",
                                        md->topic_cnt);

        for (i = 0 ; i < md->topic_cnt ; i++) {
                rd_kafka_buf_read_i16a(rkbuf, md->topics[i].err);
                rd_kafka_buf_read_str_tmpabuf(rkbuf, &tbuf,
                                              md->topics[i].topic);
                if (ApiVersion >= 1) {
                        /* is_internal flag (v1+): read and discarded. */
                        int8_t is_internal;
                        rd_kafka_buf_read_i8(rkbuf, &is_internal);
                }

                /* PartitionMetadata */
                rd_kafka_buf_read_i32a(rkbuf, md->topics[i].partition_cnt);
                if (md->topics[i].partition_cnt > RD_KAFKAP_PARTITIONS_MAX)
                        rd_kafka_buf_parse_fail(rkbuf,
                                                "TopicMetadata[%i]."
                                                "PartitionMetadata_cnt %i "
                                                "> PARTITIONS_MAX %i",
                                                i, md->topics[i].partition_cnt,
                                                RD_KAFKAP_PARTITIONS_MAX);

                if (!(md->topics[i].partitions =
                      rd_tmpabuf_alloc(&tbuf,
                                       md->topics[i].partition_cnt *
                                       sizeof(*md->topics[i].partitions))))
                        rd_kafka_buf_parse_fail(rkbuf,
                                                "%s: %d partitions: "
                                                "tmpabuf memory shortage",
                                                md->topics[i].topic,
                                                md->topics[i].partition_cnt);

                for (j = 0 ; j < md->topics[i].partition_cnt ; j++) {
                        rd_kafka_buf_read_i16a(rkbuf,
                                               md->topics[i].partitions[j].err);
                        rd_kafka_buf_read_i32a(rkbuf,
                                               md->topics[i].partitions[j].id);
                        rd_kafka_buf_read_i32a(rkbuf,
                                               md->topics[i].partitions[j].
                                               leader);

                        /* Replicas */
                        rd_kafka_buf_read_i32a(rkbuf,
                                               md->topics[i].partitions[j].
                                               replica_cnt);
                        if (md->topics[i].partitions[j].replica_cnt >
                            RD_KAFKAP_BROKERS_MAX)
                                rd_kafka_buf_parse_fail(rkbuf,
                                                        "TopicMetadata[%i]."
                                                        "PartitionMetadata[%i]."
                                                        "Replica_cnt "
                                                        "%i > BROKERS_MAX %i",
                                                        i, j,
                                                        md->topics[i].
                                                        partitions[j].
                                                        replica_cnt,
                                                        RD_KAFKAP_BROKERS_MAX);

                        if (!(md->topics[i].partitions[j].replicas =
                              rd_tmpabuf_alloc(&tbuf,
                                               md->topics[i].
                                               partitions[j].replica_cnt *
                                               sizeof(*md->topics[i].
                                                      partitions[j].replicas))))
                                rd_kafka_buf_parse_fail(
                                        rkbuf,
                                        "%s [%"PRId32"]: %d replicas: "
                                        "tmpabuf memory shortage",
                                        md->topics[i].topic,
                                        md->topics[i].partitions[j].id,
                                        md->topics[i].partitions[j].replica_cnt);

                        for (k = 0 ;
                             k < md->topics[i].partitions[j].replica_cnt; k++)
                                rd_kafka_buf_read_i32a(rkbuf,
                                                       md->topics[i].
                                                       partitions[j].
                                                       replicas[k]);

                        /* Isrs */
                        rd_kafka_buf_read_i32a(rkbuf,
                                               md->topics[i].partitions[j].
                                               isr_cnt);
                        if (md->topics[i].partitions[j].isr_cnt >
                            RD_KAFKAP_BROKERS_MAX)
                                rd_kafka_buf_parse_fail(rkbuf,
                                                        "TopicMetadata[%i]."
                                                        "PartitionMetadata[%i]."
                                                        "Isr_cnt "
                                                        "%i > BROKERS_MAX %i",
                                                        i, j,
                                                        md->topics[i].
                                                        partitions[j].isr_cnt,
                                                        RD_KAFKAP_BROKERS_MAX);

                        if (!(md->topics[i].partitions[j].isrs =
                              rd_tmpabuf_alloc(&tbuf,
                                               md->topics[i].
                                               partitions[j].isr_cnt *
                                               sizeof(*md->topics[i].
                                                      partitions[j].isrs))))
                                rd_kafka_buf_parse_fail(
                                        rkbuf,
                                        "%s [%"PRId32"]: %d isrs: "
                                        "tmpabuf memory shortage",
                                        md->topics[i].topic,
                                        md->topics[i].partitions[j].id,
                                        md->topics[i].partitions[j].isr_cnt);

                        for (k = 0 ;
                             k < md->topics[i].partitions[j].isr_cnt; k++)
                                rd_kafka_buf_read_i32a(rkbuf,
                                                       md->topics[i].
                                                       partitions[j].isrs[k]);
                }

                /* Sort partitions by partition id */
                qsort(md->topics[i].partitions,
                      md->topics[i].partition_cnt,
                      sizeof(*md->topics[i].partitions),
                      rd_kafka_metadata_partition_id_cmp);
        }

        /* Entire Metadata response now parsed without errors:
         * update our internal state according to the response. */

        /* Avoid metadata updates when we're terminating. */
        if (rd_kafka_terminating(rkb->rkb_rk)) {
                err = RD_KAFKA_RESP_ERR__DESTROY;
                goto done;
        }

        if (md->broker_cnt == 0 && md->topic_cnt == 0) {
                rd_rkb_dbg(rkb, METADATA, "METADATA",
                           "No brokers or topics in metadata: should retry");
                err = RD_KAFKA_RESP_ERR__PARTIAL;
                goto err;
        }

        /* Update our list of brokers. */
        for (i = 0 ; i < md->broker_cnt ; i++) {
                rd_rkb_dbg(rkb, METADATA, "METADATA",
                           " Broker #%i/%i: %s:%i NodeId %"PRId32,
                           i, md->broker_cnt,
                           md->brokers[i].host,
                           md->brokers[i].port,
                           md->brokers[i].id);
                rd_kafka_broker_update(rkb->rkb_rk, rkb->rkb_proto,
                                       &md->brokers[i]);
        }

        /* Update partition count and leader for each topic we know about */
        for (i = 0 ; i < md->topic_cnt ; i++) {
                rd_kafka_metadata_topic_t *mdt = &md->topics[i];
                rd_rkb_dbg(rkb, METADATA, "METADATA",
                           " Topic #%i/%i: %s with %i partitions%s%s",
                           i, md->topic_cnt, mdt->topic,
                           mdt->partition_cnt,
                           mdt->err ? ": " : "",
                           mdt->err ? rd_kafka_err2str(mdt->err) : "");

                /* Ignore topics in blacklist */
                if (rkb->rkb_rk->rk_conf.topic_blacklist &&
                    rd_kafka_pattern_match(rkb->rkb_rk->rk_conf.topic_blacklist,
                                           mdt->topic)) {
                        rd_rkb_dbg(rkb, TOPIC, "BLACKLIST",
                                   "Ignoring blacklisted topic \"%s\" "
                                   "in metadata", mdt->topic);
                        continue;
                }

                /* Ignore metadata completely for temporary errors. (issue #513)
                 * LEADER_NOT_AVAILABLE: Broker is rebalancing */
                if (mdt->err == RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE &&
                    mdt->partition_cnt == 0) {
                        rd_rkb_dbg(rkb, TOPIC, "METADATA",
                                   "Temporary error in metadata reply for "
                                   "topic %s (PartCnt %i): %s: ignoring",
                                   mdt->topic, mdt->partition_cnt,
                                   rd_kafka_err2str(mdt->err));
                        /* This topic is no longer "missing": it was seen,
                         * just unusable for now. */
                        if (missing_topics)
                                rd_list_free_cb(
                                        missing_topics,
                                        rd_list_remove_cmp(missing_topics,
                                                           mdt->topic,
                                                           (void *)strcmp));
                        continue;
                }

                /* Update local topic & partition state based on metadata */
                rd_kafka_topic_metadata_update2(rkb, mdt);

                if (requested_topics) {
                        rd_list_free_cb(missing_topics,
                                        rd_list_remove_cmp(missing_topics,
                                                           mdt->topic,
                                                           (void*)strcmp));
                        if (!all_topics) {
                                rd_kafka_wrlock(rk);
                                rd_kafka_metadata_cache_topic_update(rk, mdt);
                                rd_kafka_wrunlock(rk);
                        }
                }
        }

        /* Requested topics not seen in metadata? Propagate to topic code. */
        if (missing_topics) {
                char *topic;
                rd_rkb_dbg(rkb, TOPIC, "METADATA",
                           "%d/%d requested topic(s) seen in metadata",
                           rd_list_cnt(requested_topics) -
                           rd_list_cnt(missing_topics),
                           rd_list_cnt(requested_topics));
                for (i = 0 ; i < rd_list_cnt(missing_topics) ; i++)
                        rd_rkb_dbg(rkb, TOPIC, "METADATA", "wanted %s",
                                   (char *)(missing_topics->rl_elems[i]));
                RD_LIST_FOREACH(topic, missing_topics, i) {
                        shptr_rd_kafka_itopic_t *s_rkt;
                        /* Mark the topic as non-existent so waiting
                         * application threads are unblocked. */
                        s_rkt = rd_kafka_topic_find(rkb->rkb_rk, topic,
                                                    1/*lock*/);
                        if (s_rkt) {
                                rd_kafka_topic_metadata_none(
                                        rd_kafka_topic_s2i(s_rkt));
                                rd_kafka_topic_destroy0(s_rkt);
                        }
                }
        }
        /* NOTE(review): this chunk ends here; the 'err:'/'done:' labels,
         * list/cache cleanup and the function's return statement are in the
         * portion of the file not shown. */
/**
 * @brief Destroy an op and release all type-specific resources it owns.
 *
 * The op type (flag bits masked off) selects which union fields need
 * cleanup.  If the op carries an op callback it is invoked one final time
 * with rko_err set to RD_KAFKA_RESP_ERR__DESTROY so it can clean up its
 * own state before the op memory is freed.
 */
void rd_kafka_op_destroy (rd_kafka_op_t *rko) {

        switch (rko->rko_type & ~RD_KAFKA_OP_FLAGMASK)
        {
        case RD_KAFKA_OP_FETCH:
                rd_kafka_msg_destroy(NULL, &rko->rko_u.fetch.rkm);
                /* Decrease refcount on rkbuf to eventually rd_free shared buf*/
                if (rko->rko_u.fetch.rkbuf)
                        rd_kafka_buf_handle_op(rko, RD_KAFKA_RESP_ERR__DESTROY);
                break;

        case RD_KAFKA_OP_OFFSET_FETCH:
                if (rko->rko_u.offset_fetch.partitions &&
                    rko->rko_u.offset_fetch.do_free)
                        rd_kafka_topic_partition_list_destroy(
                                rko->rko_u.offset_fetch.partitions);
                break;

        case RD_KAFKA_OP_OFFSET_COMMIT:
                RD_IF_FREE(rko->rko_u.offset_commit.partitions,
                           rd_kafka_topic_partition_list_destroy);
                RD_IF_FREE(rko->rko_u.offset_commit.reason, rd_free);
                break;

        case RD_KAFKA_OP_SUBSCRIBE:
        case RD_KAFKA_OP_GET_SUBSCRIPTION:
                RD_IF_FREE(rko->rko_u.subscribe.topics,
                           rd_kafka_topic_partition_list_destroy);
                break;

        case RD_KAFKA_OP_ASSIGN:
        case RD_KAFKA_OP_GET_ASSIGNMENT:
                RD_IF_FREE(rko->rko_u.assign.partitions,
                           rd_kafka_topic_partition_list_destroy);
                break;

        case RD_KAFKA_OP_REBALANCE:
                RD_IF_FREE(rko->rko_u.rebalance.partitions,
                           rd_kafka_topic_partition_list_destroy);
                break;

        case RD_KAFKA_OP_NAME:
                RD_IF_FREE(rko->rko_u.name.str, rd_free);
                break;

        case RD_KAFKA_OP_ERR:
        case RD_KAFKA_OP_CONSUMER_ERR:
                RD_IF_FREE(rko->rko_u.err.errstr, rd_free);
                rd_kafka_msg_destroy(NULL, &rko->rko_u.err.rkm);
                break; /* FIX: removed duplicated (unreachable) 'break;' */

        case RD_KAFKA_OP_THROTTLE:
                RD_IF_FREE(rko->rko_u.throttle.nodename, rd_free);
                break;

        case RD_KAFKA_OP_STATS:
                RD_IF_FREE(rko->rko_u.stats.json, rd_free);
                break;

        case RD_KAFKA_OP_XMIT_RETRY:
        case RD_KAFKA_OP_XMIT_BUF:
        case RD_KAFKA_OP_RECV_BUF:
                /* FIX: the buffer was previously released twice: first via
                 * rd_kafka_buf_handle_op() and then again via
                 * RD_IF_FREE(.., rd_kafka_buf_destroy), a double release.
                 * Hand it off exactly once, consistent with the FETCH case. */
                if (rko->rko_u.xbuf.rkbuf)
                        rd_kafka_buf_handle_op(rko, RD_KAFKA_RESP_ERR__DESTROY);
                break;

        case RD_KAFKA_OP_DR:
                /* Delivery-report op: purge any messages still queued. */
                rd_kafka_msgq_purge(rko->rko_rk, &rko->rko_u.dr.msgq);
                if (rko->rko_u.dr.do_purge2)
                        rd_kafka_msgq_purge(rko->rko_rk, &rko->rko_u.dr.msgq2);

                if (rko->rko_u.dr.s_rkt)
                        rd_kafka_topic_destroy0(rko->rko_u.dr.s_rkt);
                break;

        case RD_KAFKA_OP_OFFSET_RESET:
                RD_IF_FREE(rko->rko_u.offset_reset.reason, rd_free);
                break;

        case RD_KAFKA_OP_METADATA:
                RD_IF_FREE(rko->rko_u.metadata.md, rd_kafka_metadata_destroy);
                break;

        case RD_KAFKA_OP_LOG:
                rd_free(rko->rko_u.log.str);
                break;

        default:
                break;
        }

        /* '&' binds tighter than '&&' so this was already correct, but the
         * parentheses make the intent explicit. */
        if ((rko->rko_type & RD_KAFKA_OP_CB) && rko->rko_op_cb) {
                rd_kafka_op_res_t res;
                /* Let callback clean up */
                rko->rko_err = RD_KAFKA_RESP_ERR__DESTROY;
                res = rko->rko_op_cb(rko->rko_rk, NULL, rko);
                assert(res != RD_KAFKA_OP_RES_YIELD);
        }

        RD_IF_FREE(rko->rko_rktp, rd_kafka_toppar_destroy);

        rd_kafka_replyq_destroy(&rko->rko_replyq);

#if ENABLE_DEVEL
        /* Global outstanding-op counter: going negative means a
         * double-destroy. */
        if (rd_atomic32_sub(&rd_kafka_op_cnt, 1) < 0)
                rd_kafka_assert(NULL, !*"rd_kafka_op_cnt < 0");
#endif

        rd_free(rko);
}