/**
 * @brief Called prior to parsing the ProduceResponse; used to inject
 *        request-timeout errors into the first N responses.
 *
 * @param rk       Client instance (unused directly, part of interceptor sig).
 * @param brokerid Broker the response came from.
 * @param msgseq   Message sequence number of the (first message in the) batch.
 * @param err      The actual response error (prior to injection).
 *
 * @returns the possibly rewritten error code to use for this response.
 *
 * @locality an internal rdkafka thread
 */
static rd_kafka_resp_err_t handle_ProduceResponse (rd_kafka_t *rk,
                                                   int32_t brokerid,
                                                   uint64_t msgseq,
                                                   rd_kafka_resp_err_t err) {
        rd_kafka_resp_err_t new_err = err;
        int n;

        if (err == RD_KAFKA_RESP_ERR__RETRY)
                return err; /* Skip internal retries, such as triggered by
                             * rd_kafka_broker_bufq_purge_by_toppar() */

        n = rd_atomic32_add(&state.produce_cnt, 1);

        /* Let the first N ProduceRequests fail with request timeout.
         * Do allow the first request through. */
        if (n > 1 && n <= state.initial_fail_batch_cnt) {
                if (err)
                        TEST_WARN("First %d ProduceRequests should not "
                                  "have failed, this is #%d with error %s for "
                                  "brokerid %"PRId32" and msgseq %"PRIu64"\n",
                                  state.initial_fail_batch_cnt, n,
                                  rd_kafka_err2name(err), brokerid, msgseq);
                assert(!err &&
                       *"First N ProduceRequests should not have failed");
                new_err = RD_KAFKA_RESP_ERR__TIMED_OUT;
        }

        /* FIXED: msgseq is uint64_t, so use PRIu64 (was PRId64, which is
         * undefined behavior for an unsigned argument). */
        TEST_SAY("handle_ProduceResponse(broker %"PRId32
                 ", MsgSeq %"PRIu64", Error %s) -> new Error %s\n",
                 brokerid, msgseq,
                 rd_kafka_err2name(err),
                 rd_kafka_err2name(new_err));

        return new_err;
}
/**
 * @brief Allocate a new, zeroed op of type \p type and bump the
 *        global op counter.
 *
 * @returns the newly allocated op; caller assumes ownership.
 */
rd_kafka_op_t *rd_kafka_op_new (rd_kafka_op_type_t type) {
        rd_kafka_op_t *new_op = rd_calloc(1, sizeof(*new_op));

        new_op->rko_type = type;

        /* Track total live op allocations. */
        rd_atomic32_add(&rd_kafka_op_cnt, 1);

        return new_op;
}
/**
 * @brief Allocate a new op of type \p type, sized to hold only the
 *        union member that \p type uses (smaller allocation than the
 *        full rd_kafka_op_t for most types).
 *
 * @param source Source-location string, recorded on the op in devel
 *               builds only.
 * @param type   Op type, possibly OR:ed with RD_KAFKA_OP_FLAGMASK flags.
 *
 * @returns the newly allocated op; caller assumes ownership.
 */
rd_kafka_op_t *rd_kafka_op_new0 (const char *source, rd_kafka_op_type_t type) {
        rd_kafka_op_t *rko;
        /* Per-type payload sizes: entries not listed (or listed as 0)
         * need no rko_u payload at all. */
        static const size_t op2size[RD_KAFKA_OP__END] = {
                [RD_KAFKA_OP_FETCH] = sizeof(rko->rko_u.fetch),
                [RD_KAFKA_OP_ERR] = sizeof(rko->rko_u.err),
                [RD_KAFKA_OP_CONSUMER_ERR] = sizeof(rko->rko_u.err),
                [RD_KAFKA_OP_DR] = sizeof(rko->rko_u.dr),
                [RD_KAFKA_OP_STATS] = sizeof(rko->rko_u.stats),
                [RD_KAFKA_OP_OFFSET_COMMIT] = sizeof(rko->rko_u.offset_commit),
                [RD_KAFKA_OP_NODE_UPDATE] = sizeof(rko->rko_u.node),
                [RD_KAFKA_OP_XMIT_BUF] = sizeof(rko->rko_u.xbuf),
                [RD_KAFKA_OP_RECV_BUF] = sizeof(rko->rko_u.xbuf),
                [RD_KAFKA_OP_XMIT_RETRY] = sizeof(rko->rko_u.xbuf),
                [RD_KAFKA_OP_FETCH_START] = sizeof(rko->rko_u.fetch_start),
                [RD_KAFKA_OP_FETCH_STOP] = 0,
                [RD_KAFKA_OP_SEEK] = sizeof(rko->rko_u.fetch_start),
                [RD_KAFKA_OP_PAUSE] = sizeof(rko->rko_u.pause),
                [RD_KAFKA_OP_OFFSET_FETCH] = sizeof(rko->rko_u.offset_fetch),
                [RD_KAFKA_OP_PARTITION_JOIN] = 0,
                [RD_KAFKA_OP_PARTITION_LEAVE] = 0,
                [RD_KAFKA_OP_REBALANCE] = sizeof(rko->rko_u.rebalance),
                [RD_KAFKA_OP_TERMINATE] = 0,
                [RD_KAFKA_OP_COORD_QUERY] = 0,
                [RD_KAFKA_OP_SUBSCRIBE] = sizeof(rko->rko_u.subscribe),
                [RD_KAFKA_OP_ASSIGN] = sizeof(rko->rko_u.assign),
                [RD_KAFKA_OP_GET_SUBSCRIPTION] = sizeof(rko->rko_u.subscribe),
                [RD_KAFKA_OP_GET_ASSIGNMENT] = sizeof(rko->rko_u.assign),
                [RD_KAFKA_OP_THROTTLE] = sizeof(rko->rko_u.throttle),
                [RD_KAFKA_OP_NAME] = sizeof(rko->rko_u.name),
                [RD_KAFKA_OP_OFFSET_RESET] = sizeof(rko->rko_u.offset_reset),
                [RD_KAFKA_OP_METADATA] = sizeof(rko->rko_u.metadata),
                [RD_KAFKA_OP_LOG] = sizeof(rko->rko_u.log),
                [RD_KAFKA_OP_WAKEUP] = 0,
        };
        /* Strip flag bits before indexing the size table. */
        size_t tsize = op2size[type & ~RD_KAFKA_OP_FLAGMASK];

        /* Allocate the op header plus only the payload this type needs. */
        rko = rd_calloc(1, sizeof(*rko)-sizeof(rko->rko_u)+tsize);
        rko->rko_type = type;

#if ENABLE_DEVEL
        rko->rko_source = source;
#endif
        /* FIXED: count ops unconditionally (was inside ENABLE_DEVEL),
         * keeping rd_kafka_op_cnt consistent with rd_kafka_op_new(). */
        rd_atomic32_add(&rd_kafka_op_cnt, 1);

        return rko;
}