Example #1
uint64_t S3KeyReader::read(char* buf, uint64_t count) {
    uint64_t fileLen = this->offsetMgr.getKeySize();
    uint64_t readLen = 0;

    do {
        // no more data left to transfer, we are done with this file
        if (this->transferredKeyLen >= fileLen) {
            return 0;
        }

        ChunkBuffer& buffer = chunkBuffers[this->curReadingChunk % this->numOfChunks];

        readLen = buffer.read(buf, count);

        CHECK_OR_DIE_MSG(!this->sharedError, "%s", this->sharedErrorMessage.c_str());

        this->transferredKeyLen += readLen;

        if (readLen < count) {
            this->curReadingChunk++;
            CHECK_OR_DIE_MSG(!buffer.isError(), "%s", "Error occurs while downloading, skip");
        }

        // readLen == 0 means the current chunk is exhausted: either the download thread
        // has finished it, or the chunk size happens to be an exact multiple of get()'s
        // buffer size. Loop again to read from the next chunk.
    } while (readLen == 0);

    return readLen;
}
Example #2
void S3KeyReader::open(const ReaderParams& params) {
    this->sharedError = false;
    this->sharedErrorMessage.clear();

    this->numOfChunks = params.getNumOfChunks();
    CHECK_OR_DIE_MSG(this->numOfChunks > 0, "%s", "numOfChunks must not be zero");

    this->region = params.getRegion();
    this->credential = params.getCred();

    this->offsetMgr.setKeySize(params.getKeySize());
    this->offsetMgr.setChunkSize(params.getChunkSize());

    CHECK_OR_DIE_MSG(params.getChunkSize() > 0, "%s", "chunk size must be greater than zero");

    for (uint64_t i = 0; i < this->numOfChunks; i++) {
        // When the vector reallocates its storage, it copies the ChunkBuffer objects,
        // so chunkData must be initialized only after all copies are done.
        this->chunkBuffers.push_back(ChunkBuffer(params.getKeyUrl(), *this));
    }

    for (uint64_t i = 0; i < this->numOfChunks; i++) {
        this->chunkBuffers[i].init();
        this->chunkBuffers[i].setS3interface(this->s3interface);

        pthread_t thread;
        pthread_create(&thread, NULL, DownloadThreadFunc, &this->chunkBuffers[i]);
        this->threads.push_back(thread);
    }

    return;
}
Example #3
// The copy constructor copies all members, so chunkData must not be initialized before the
// copies happen; otherwise, when the objects are stored in a vector, the same buffer would
// be freed twice.
void ChunkBuffer::init() {
    CHECK_OR_DIE_MSG(chunkData == NULL, "%s", "Error: reinitializing chunkBuffer.");

    chunkData = new char[offsetMgr.getChunkSize()];
    CHECK_OR_DIE_MSG(chunkData != NULL, "%s", "Failed to allocate buffer, not enough memory?");

    pthread_mutex_init(&this->statusMutex, NULL);
    pthread_cond_init(&this->statusCondVar, NULL);
}
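
The double-free hazard the comments in Examples #2 and #3 warn about is easy to reproduce in isolation. The following is a minimal sketch (not taken from the source) of why an object that owns a raw pointer must not allocate it before being pushed into a vector: the default copy constructor duplicates the pointer, and each destructor would then free the same block.

#include <cstddef>
#include <vector>

struct Holder {
    char* data;

    Holder() : data(NULL) {}
    ~Holder() { delete[] data; }  // every copy would delete the same pointer

    void init(size_t n) { data = new char[n]; }
};

int main() {
    std::vector<Holder> holders;

    // Safe order, mirroring S3KeyReader::open(): push all elements first
    // (their data pointers are still NULL, so copies are harmless), then
    // allocate once the vector will no longer reallocate and copy.
    for (int i = 0; i < 4; i++) {
        holders.push_back(Holder());
    }
    for (size_t i = 0; i < holders.size(); i++) {
        holders[i].init(1024);
    }
    return 0;
}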
Example #4
uint64_t CompressWriter::write(const char* buf, uint64_t count) {
    if (buf == NULL || count == 0) {
        return 0;
    }

    // We assume each data block from the upper layer is smaller than the gzip chunk buffer.
    CHECK_OR_DIE_MSG(count < S3_ZIP_COMPRESS_CHUNKSIZE,
                     "Data size %" PRIu64 " is larger than S3_ZIP_COMPRESS_CHUNKSIZE", count);

    this->zstream.next_in = (Byte*)buf;
    this->zstream.avail_in = count;

    int status = deflate(&this->zstream, Z_NO_FLUSH);
    if (status < 0 && status != Z_BUF_ERROR) {
        deflateEnd(&this->zstream);
        CHECK_OR_DIE_MSG(false, "Failed to compress data: %d, %s", status, this->zstream.msg);
    }

    this->flush();

    return count;
}
Example #5
void InitRemoteLog() {
    if (loginited) {
        return;
    }

    s3ext_logsock_udp = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
    CHECK_OR_DIE_MSG(s3ext_logsock_udp != -1, "Failed to create socket: %s", strerror(errno));

    memset(&s3ext_logserveraddr, 0, sizeof(struct sockaddr_in));
    s3ext_logserveraddr.sin_family = AF_INET;
    s3ext_logserveraddr.sin_port = htons(s3ext_logserverport);
    inet_aton(s3ext_logserverhost.c_str(), &s3ext_logserveraddr.sin_addr);

    loginited = true;
}
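
For context, here is a sketch of how the socket and address prepared above would typically be used. SendRemoteLog() is hypothetical and not part of the source; the assumption is that each formatted log line is sent as a fire-and-forget UDP datagram using the globals initialized by InitRemoteLog().

#include <string.h>
#include <sys/socket.h>

// Hypothetical helper, assuming the globals set up in Example #5.
static void SendRemoteLog(const char* line) {
    if (!loginited) {
        InitRemoteLog();
    }

    // UDP is fire-and-forget: remote logging should never block the query,
    // so the return value is deliberately ignored here.
    sendto(s3ext_logsock_udp, line, strlen(line), 0,
           (struct sockaddr*)&s3ext_logserveraddr, sizeof(s3ext_logserveraddr));
}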
Example #6
// A return value smaller than len means the chunk is EMPTY.
// That is why [1] below checks whether leftLen is larger than *or equal to* len: it leaves room
// for ret to be 0, which is smaller than len. Otherwise callers would never know when to move on
// to the next buffer.
uint64_t ChunkBuffer::read(char* buf, uint64_t len) {
    // QueryCancelPending stops s3_import(). This check is unnecessary if s3_import() calls
    // ChunkBuffer::read() only once per invocation; otherwise (as downstreamReader->read() used
    // to do for the decompression feature), the first call sets the buffer to ReadyToFill and the
    // second call hangs.
    CHECK_OR_DIE_MSG(!QueryCancelPending, "%s", "ChunkBuffer reading is interrupted by GPDB");

    pthread_mutex_lock(&this->statusMutex);
    while (this->status != ReadyToRead) {
        pthread_cond_wait(&this->statusCondVar, &this->statusMutex);
    }

    // Error is shared between all chunks.
    if (this->isError()) {
        pthread_mutex_unlock(&this->statusMutex);
        // Don't throw here. Other chunks will set the shared error message,
        // it will be handled by S3KeyReader.
        return 0;
    }

    uint64_t leftLen = this->chunkDataSize - this->curChunkOffset;
    uint64_t lenToRead = std::min(len, leftLen);

    if (lenToRead != 0) {
        memcpy(buf, this->chunkData + this->curChunkOffset, lenToRead);
    }

    if (len <= leftLen) {                   // [1]
        this->curChunkOffset += lenToRead;  // not empty
    } else {                                // empty, reset everything
        this->curChunkOffset = 0;

        if (!this->isEOF()) {
            this->status = ReadyToFill;

            Range range = this->offsetMgr.getNextOffset();
            this->curFileOffset = range.offset;
            this->chunkDataSize = range.length;

            pthread_cond_signal(&this->statusCondVar);
        }
    }

    pthread_mutex_unlock(&this->statusMutex);

    return lenToRead;
}
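
read() above is the consumer half of a one-slot producer/consumer handshake: the download thread fills the buffer while its status is ReadyToFill, and read() drains it once the status is ReadyToRead. Below is a self-contained sketch of that handshake (not taken from the source; the filler side is an assumption based on the states used above), stripped down to a single slot and a single filler thread.

#include <pthread.h>
#include <stdio.h>

enum SlotStatus { ReadyToFill, ReadyToRead };

struct Slot {
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    SlotStatus status;
    int value;
};

void* FillerThreadFunc(void* arg) {
    Slot* slot = (Slot*)arg;
    for (int i = 0; i < 3; i++) {
        pthread_mutex_lock(&slot->mutex);
        while (slot->status != ReadyToFill) {
            pthread_cond_wait(&slot->cond, &slot->mutex);
        }
        slot->value = i;  // "download" one chunk into the slot
        slot->status = ReadyToRead;
        pthread_cond_signal(&slot->cond);
        pthread_mutex_unlock(&slot->mutex);
    }
    return NULL;
}

int main() {
    Slot slot;
    pthread_mutex_init(&slot.mutex, NULL);
    pthread_cond_init(&slot.cond, NULL);
    slot.status = ReadyToFill;

    pthread_t thread;
    pthread_create(&thread, NULL, FillerThreadFunc, &slot);

    for (int i = 0; i < 3; i++) {
        pthread_mutex_lock(&slot.mutex);
        while (slot.status != ReadyToRead) {
            pthread_cond_wait(&slot.cond, &slot.mutex);
        }
        printf("read chunk %d\n", slot.value);  // consume the slot
        slot.status = ReadyToFill;              // hand it back to the filler
        pthread_cond_signal(&slot.cond);
        pthread_mutex_unlock(&slot.mutex);
    }

    pthread_join(thread, NULL);
    return 0;
}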
Example #7
void S3CommonReader::open(const ReaderParams &params) {
    this->keyReader.setS3interface(s3service);
    S3CompressionType compressionType =
        s3service->checkCompressionType(params.getKeyUrl(), params.getRegion(), params.getCred());
    switch (compressionType) {
        case S3_COMPRESSION_GZIP:
            this->upstreamReader = &this->decompressReader;
            this->decompressReader.setReader(&this->keyReader);
            break;
        case S3_COMPRESSION_PLAIN:
            this->upstreamReader = &this->keyReader;
            break;
        default:
            CHECK_OR_DIE_MSG(false, "%s", "unknown file type");
    }
    this->upstreamReader->open(params);
}
Example #8
void CompressWriter::open(const WriterParams& params) {
    this->zstream.zalloc = Z_NULL;
    this->zstream.zfree = Z_NULL;
    this->zstream.opaque = Z_NULL;

    // With S3_DEFLATE_WINDOWSBITS, deflate generates a gzip stream with a header and trailer.
    int ret = deflateInit2(&this->zstream, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
                           S3_DEFLATE_WINDOWSBITS, 8, Z_DEFAULT_STRATEGY);

    // Initialize these here so the stream is ready for both write() and close().
    this->zstream.next_in = NULL;
    this->zstream.avail_in = 0;
    this->zstream.next_out = (Byte*)this->out;
    this->zstream.avail_out = S3_ZIP_COMPRESS_CHUNKSIZE;

    CHECK_OR_DIE_MSG(ret == Z_OK, "Failed to initialize zlib library: %s", this->zstream.msg);

    this->writer->open(params);
    this->isClosed = false;
}
Example #9
void CompressWriter::close() {
    if (this->isClosed) {
        return;
    }

    int status;
    do {
        status = deflate(&this->zstream, Z_FINISH);
        this->flush();
    } while (status == Z_OK);

    deflateEnd(&this->zstream);

    if (status != Z_STREAM_END) {
        CHECK_OR_DIE_MSG(false, "Failed to finish data compression: %d, %s", status,
                         this->zstream.msg);
    }

    S3DEBUG("Compression finished: Z_STREAM_END.");

    this->writer->close();
    this->isClosed = true;
}
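
Taken together, Examples #4, #8 and #9 are the standard zlib pattern: deflateInit2() with gzip window bits, deflate(Z_NO_FLUSH) per data block, and a deflate(Z_FINISH) loop at the end. Below is a minimal standalone sketch of that sequence (not from the source; it assumes S3_DEFLATE_WINDOWSBITS selects the gzip wrapper, i.e. 15 + 16).

#include <stdio.h>
#include <string.h>
#include <zlib.h>

// Compress one in-memory buffer to a gzip stream written to `out`.
int GzipBuffer(const char* in, size_t inLen, FILE* out) {
    z_stream zs;
    memset(&zs, 0, sizeof(zs));

    // 15 + 16 window bits: deflate emits a gzip header and trailer.
    if (deflateInit2(&zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 15 + 16, 8,
                     Z_DEFAULT_STRATEGY) != Z_OK) {
        return -1;
    }

    unsigned char chunk[16384];
    zs.next_in = (Bytef*)in;
    zs.avail_in = inLen;

    int status;
    do {
        zs.next_out = chunk;
        zs.avail_out = sizeof(chunk);
        // Z_FINISH is enough for a single buffer; a streaming writer would use
        // Z_NO_FLUSH per block and Z_FINISH only in close(), as above.
        status = deflate(&zs, Z_FINISH);
        fwrite(chunk, 1, sizeof(chunk) - zs.avail_out, out);
    } while (status == Z_OK);

    deflateEnd(&zs);
    return (status == Z_STREAM_END) ? 0 : -1;
}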