void dr_cb (RdKafka::Message &message) {
    // Delivery-report callback: librdkafka invokes this once per produced
    // message with the final delivery status for that message.
    // NOTE(review): assumes payload() is non-null whenever len() > 0 — the
    // usual librdkafka contract; verify for zero-length messages.
    const std::string payloadText(static_cast<const char*>(message.payload()),
                                  message.len());

    if (!message.err()) {
      // Broker acknowledged the message: report success and count the ack.
      successString("producer_send_success",
                    message.topic_name(),
                    (int)message.partition(),
                    message.offset(),
                    message.key(),
                    payloadText);
      state.producer.numAcked++;
    } else {
      // Delivery failed permanently: count the failure and report details.
      state.producer.numErr++;
      errorString("producer_send_error", message.errstr(),
                  message.topic_name(),
                  message.key(),
                  payloadText);
    }
  }
Example #2 (score: 0)
    /**
     * Fetch the next consumed Kafka message as an engine row.
     *
     * Builds a row matching:
     *   EXPORT KafkaMessage := RECORD
     *       UNSIGNED4   partitionNum;
     *       UNSIGNED8   offset;
     *       STRING      message;
     *   END;
     *
     * @return  Pointer to a finalized row, or NULL when there is nothing
     *          (more) to read: record cap reached, partition EOF/unknown,
     *          or all retry attempts timed out.
     * @throws  IException on unknown topic or any unexpected consumer error.
     */
    const void* KafkaStreamedDataset::nextRow()
    {
        const void* result = NULL;
        __int32 maxAttempts = 10;   //!< Maximum number of tries if local queue is full
        __int32 timeoutWait = 100;  //!< Amount of time to wait between retries
        __int32 attemptNum = 0;

        // Stop once the caller-imposed record cap is reached (<= 0 means unlimited)
        if (maxRecords <= 0 || consumedRecCount < maxRecords)
        {
            RdKafka::Message* messageObjPtr = NULL;
            bool messageConsumed = false;

            while (!messageConsumed && shouldRead && attemptNum < maxAttempts)
            {
                messageObjPtr = consumerPtr->getOneMessage(); // messageObjPtr must be deleted when we are through with it

                if (messageObjPtr)
                {
                    try
                    {
                        switch (messageObjPtr->err())
                        {
                            case RdKafka::ERR_NO_ERROR:
                                {
                                    RtlDynamicRowBuilder rowBuilder(resultAllocator);
                                    unsigned len = sizeof(__int32) + sizeof(__int64) + sizeof(size32_t) + messageObjPtr->len();
                                    byte* row = rowBuilder.ensureCapacity(len, NULL);

                                    // Fixed-width fields first, then the length-prefixed payload
                                    *(__int32*)(row) = messageObjPtr->partition();
                                    *(__int64*)(row + sizeof(__int32)) = messageObjPtr->offset();
                                    *(size32_t*)(row + sizeof(__int32) + sizeof(__int64)) = messageObjPtr->len();
                                    memcpy(row + sizeof(__int32) + sizeof(__int64) + sizeof(size32_t), messageObjPtr->payload(), messageObjPtr->len());

                                    result = rowBuilder.finalizeRowClear(len);

                                    lastMsgOffset = messageObjPtr->offset();
                                    ++consumedRecCount;

                                    // Give opportunity for consumer to pull in any additional messages
                                    consumerPtr->handle()->poll(0);

                                    // Mark as loaded so we don't retry
                                    messageConsumed = true;
                                }
                                break;

                            case RdKafka::ERR__TIMED_OUT:
                                // No new messages arrived and we timed out waiting;
                                // count the attempt so we eventually give up
                                ++attemptNum;
                                consumerPtr->handle()->poll(timeoutWait);
                                break;

                            case RdKafka::ERR__PARTITION_EOF:
                                // We reached the end of the messages in the partition
                                if (traceLevel > 4)
                                {
                                    DBGLOG("Kafka: EOF reading message from partition %d", messageObjPtr->partition());
                                }
                                shouldRead = false;
                                break;

                            case RdKafka::ERR__UNKNOWN_PARTITION:
                                // Unknown partition; don't throw an error here because
                                // in some configurations (e.g. more Thor slaves than
                                // partitions) not all consumers will have a partition
                                // to read
                                if (traceLevel > 4)
                                {
                                    DBGLOG("Kafka: Unknown partition while trying to read");
                                }
                                shouldRead = false;
                                break;

                            case RdKafka::ERR__UNKNOWN_TOPIC:
                            default:
                                // Unknown topic, or any error code not handled above.
                                // Without a default case, unexpected errors used to fall
                                // through silently and spin the loop without counting an
                                // attempt; surface them to the caller instead.
                                throw MakeStringException(-1, "Kafka: Error while reading message: '%s'", messageObjPtr->errstr().c_str());
                        }
                    }
                    catch (...)
                    {
                        // Ensure the message is released even when we rethrow
                        delete(messageObjPtr);
                        throw;
                    }

                    delete(messageObjPtr);
                    messageObjPtr = NULL;
                }
            }
        }

        return result;
    }