Example #1
void S3KeyWriter::checkQueryCancelSignal() {
    if (S3QueryIsAbortInProgress() && !this->uploadId.empty()) {
        // unlock first to avoid a deadlock, since other upload threads may hold the lock
        pthread_mutex_unlock(&this->mutex);

        // wait for all threads to complete
        for (size_t i = 0; i < threadList.size(); i++) {
            pthread_join(threadList[i], NULL);
        }
        this->threadList.clear();

        // re-lock to avoid a double unlock, since other code paths may unlock it later
        pthread_mutex_lock(&this->mutex);

        S3DEBUG("Start aborting multipart uploading (uploadID: %s, %lu parts uploaded)",
                this->uploadId.c_str(), this->etagList.size());
        this->s3Interface->abortUpload(this->params.getS3Url(), this->uploadId);
        S3DEBUG("Finished aborting multipart uploading (uploadID: %s)", this->uploadId.c_str());

        this->etagList.clear();
        this->uploadId.clear();

        S3_DIE(S3QueryAbort, "Uploading is interrupted");
    }
}
Example #2
void* DownloadThreadFunc(void* data) {
    ChunkBuffer* buffer = static_cast<ChunkBuffer*>(data);

    uint64_t filledSize = 0;
    S3DEBUG("Downloading thread starts");
    do {
        if (QueryCancelPending) {
            S3INFO("Downloading thread is interrupted by GPDB");

            // the error is shared among all chunks, so all of them will stop.
            buffer->setSharedError(true, "Downloading thread is interrupted by GPDB");

            // wake up ChunkBuffer::read() in case it is blocked, for instance when the status is
            // not ReadyToRead and read() is waiting on the status condition variable.
            buffer->setStatus(ReadyToRead);
            pthread_cond_signal(buffer->getStatCond());

            return NULL;
        }

        filledSize = buffer->fill();

        if (filledSize != 0) {
            if (buffer->isError()) {
                S3DEBUG("Failed to fill downloading buffer");
                break;
            } else {
                S3DEBUG("Size of filled data is %" PRIu64, filledSize);
            }
        }
    } while (!buffer->isEOF());
    S3DEBUG("Downloading thread ended");
    return NULL;
}
Example #3
void S3KeyWriter::completeKeyWriting() {
    // flush any data remaining in the buffer
    this->flushBuffer();

    // wait for all threads to complete
    for (size_t i = 0; i < threadList.size(); i++) {
        pthread_join(threadList[i], NULL);
    }
    this->threadList.clear();

    this->checkQueryCancelSignal();

    vector<string> etags;
    // collect the ETags in part-number order; equivalent to
    // transform(etagList.begin(), etagList.end(), back_inserter(etags),
    //           [](const std::pair<const uint64_t, string>& p) { return p.second; });
    etags.reserve(etagList.size());

    for (map<uint64_t, string>::iterator i = etagList.begin(); i != etagList.end(); i++) {
        etags.push_back(i->second);
    }

    if (!this->etagList.empty() && !this->uploadId.empty()) {
        this->s3Interface->completeMultiPart(this->params.getS3Url(), this->uploadId, etags);
    }

    S3DEBUG("Segment %d has finished uploading \"%s\"", s3ext_segid,
            this->params.getS3Url().getFullUrlForCurl().c_str());

    this->buffer.clear();
    this->etagList.clear();
    this->uploadId.clear();
}
Example #4
uint64_t S3BucketReader::read(char* buf, uint64_t count) {
    S3_CHECK_OR_DIE(this->upstreamReader != NULL, S3RuntimeError, "upstreamReader is NULL");
    uint64_t readCount = 0;
    while (true) {
        if (this->needNewReader) {
            if (this->keyIndex >= this->keyList.contents.size()) {
                S3DEBUG("Read finished for segment: %d", s3ext_segid);
                return 0;
            }
            BucketContent& key = this->getNextKey();

            this->upstreamReader->open(constructReaderParams(key));
            this->needNewReader = false;

            // skip the header line unless this is the first file
            if (hasHeader && !this->isFirstFile) {
                readCount = readWithoutHeaderLine(buf, count);
                if (readCount != 0) {
                    return readCount;
                }
            }
        }

        readCount = this->upstreamReader->read(buf, count);
        if (readCount != 0) {
            return readCount;
        }

        // Finished one file, continue to next
        this->upstreamReader->close();
        this->needNewReader = true;
        this->isFirstFile = false;
    }
}
Example #5
// returning uint64_t(-1) means error
uint64_t ChunkBuffer::fill() {
    UniqueLock statusLock(&this->statusMutex);

    while (this->status != ReadyToFill) {
        pthread_cond_wait(&this->statusCondVar, &this->statusMutex);
    }

    if (S3QueryIsAbortInProgress() || this->isError()) {
        this->setSharedError(true);
        this->status = ReadyToRead;
        pthread_cond_signal(&this->statusCondVar);
        return -1;
    }

    uint64_t offset = this->curFileOffset;
    uint64_t leftLen = this->chunkDataSize;

    uint64_t readLen = 0;

    if (leftLen != 0) {
        try {
            readLen = this->s3Interface->fetchData(offset, this->chunkData, leftLen, this->s3Url);
            if (readLen != leftLen) {
                S3DEBUG("Failed to fetch expected data from S3");
                this->setSharedError(true, S3PartialResponseError(leftLen, readLen));
            } else {
                S3DEBUG("Got %" PRIu64 " bytes from S3", readLen);
            }
        } catch (S3Exception& e) {
            S3DEBUG("Failed to fetch expected data from S3");
            this->setSharedError(true);
        }
    }

    if (offset + leftLen >= offsetMgr.getKeySize()) {
        readLen = 0;  // Nothing to read, EOF
        S3DEBUG("Reached the end of file");
        this->eof = true;
    }

    this->status = ReadyToRead;
    pthread_cond_signal(&this->statusCondVar);

    return (this->isError()) ? -1 : readLen;
}
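Example #5 relies on a scope-based UniqueLock guard instead of the explicit pthread_mutex_lock/unlock pairs used in Example #6, so every return path (including the early return of -1 on abort) releases statusMutex automatically. A minimal sketch of such a guard, assuming it only wraps a pthread_mutex_t* (the actual class in the codebase may differ), could look like this:

class UniqueLock {
   public:
    explicit UniqueLock(pthread_mutex_t* m) : mutex(m) {
        pthread_mutex_lock(this->mutex);  // acquire on construction
    }
    ~UniqueLock() {
        pthread_mutex_unlock(this->mutex);  // release on every exit path
    }

   private:
    pthread_mutex_t* mutex;

    // non-copyable: the guard owns the lock for the duration of its scope
    UniqueLock(const UniqueLock&);
    UniqueLock& operator=(const UniqueLock&);
};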
Example #6
// returning uint64_t(-1) means error
uint64_t ChunkBuffer::fill() {
    pthread_mutex_lock(&this->statusMutex);
    while (this->status != ReadyToFill) {
        pthread_cond_wait(&this->statusCondVar, &this->statusMutex);
    }

    uint64_t offset = this->curFileOffset;
    uint64_t leftLen = this->chunkDataSize;

    uint64_t readLen = 0;

    if (leftLen != 0) {
        try {
            readLen = this->s3interface->fetchData(
                offset, this->chunkData, leftLen, this->sourceUrl,
                this->sharedKeyReader.getRegion(), this->sharedKeyReader.getCredential());
        } catch (std::exception& e) {
            S3DEBUG("Failed to fetch expected data from S3");
            this->setSharedError(true, e.what());
        }

        if (readLen != leftLen) {
            S3DEBUG("Failed to fetch expected data from S3");
            this->setSharedError(true, "Failed to fetch expected data from S3");
        } else {
            S3DEBUG("Got %" PRIu64 " bytes from S3", readLen);
        }
    }

    if (offset + leftLen >= offsetMgr.getKeySize()) {
        readLen = 0;  // Nothing to read, EOF
        S3DEBUG("Reached the end of file");
        this->eof = true;
    }

    this->status = ReadyToRead;
    pthread_cond_signal(&this->statusCondVar);
    pthread_mutex_unlock(&this->statusMutex);

    return (this->isError()) ? -1 : readLen;
}
Example #7
void S3KeyWriter::open(const S3Params& params) {
    this->params = params;

    S3_CHECK_OR_DIE(this->s3Interface != NULL, S3RuntimeError, "s3Interface must not be NULL");
    S3_CHECK_OR_DIE(this->params.getChunkSize() > 0, S3RuntimeError, "chunkSize must not be zero");

    buffer.reserve(this->params.getChunkSize());

    this->uploadId = this->s3Interface->getUploadId(this->params.getS3Url());
    S3_CHECK_OR_DIE(!this->uploadId.empty(), S3RuntimeError, "Failed to get upload id");

    S3DEBUG("key: %s, upload id: %s", this->params.getS3Url().getFullUrlForCurl().c_str(),
            this->uploadId.c_str());
}
Example #8
S3Params S3BucketReader::constructReaderParams(BucketContent& key) {
    // encode the key name but leave the "/"
    // "/encoded_path/encoded_name"
    string keyEncoded = UriEncode(key.getName());
    FindAndReplace(keyEncoded, "%2F", "/");

    S3Params readerParams = this->params.setPrefix(keyEncoded);

    readerParams.setKeySize(key.getSize());

    S3DEBUG("key: %s, size: %" PRIu64, readerParams.getS3Url().getFullUrlForCurl().c_str(),
            readerParams.getKeySize());
    return readerParams;
}
Example #9
void* S3KeyWriter::UploadThreadFunc(void* data) {
    MaskThreadSignals();

    ThreadParams* params = (ThreadParams*)data;
    S3KeyWriter* writer = params->keyWriter;

    try {
        S3DEBUG("Upload thread start: %" PRIX64 ", part number: %" PRIu64 ", data size: %zu",
                (uint64_t) pthread_self(), params->currentNumber, params->data.size());
        string etag = writer->s3Interface->uploadPartOfData(
            params->data, writer->params.getS3Url(), params->currentNumber, writer->uploadId);

        // when the UniqueLock destructs, it automatically unlocks the mutex.
        UniqueLock threadLock(&writer->mutex);

        // etag is empty if the query is cancelled by the user.
        if (!etag.empty()) {
            writer->etagList[params->currentNumber] = etag;
        }
        writer->activeThreads--;
        pthread_cond_broadcast(&writer->cv);
        S3DEBUG("Upload part finish: %" PRIX64 ", eTag: %s, part number: %" PRIu64, (uint64_t) pthread_self(),
                etag.c_str(), params->currentNumber);
    } catch (S3Exception& e) {
        S3ERROR("Upload thread error: %s", e.getMessage().c_str());
        UniqueLock exceptLock(&writer->exceptionMutex);
        writer->sharedError = true;
        writer->sharedException = std::current_exception();

        // notify flushBuffer(); otherwise it would block while waiting to create a new thread.
        writer->activeThreads--;
        pthread_cond_broadcast(&writer->cv);
    }

    delete params;
    return NULL;
}
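The ThreadParams structure handed to UploadThreadFunc is not shown here; judging only from the members it accesses (keyWriter, currentNumber, data), a plausible shape is sketched below. The element type of data is an assumption, and the real definition may carry more fields.

struct ThreadParams {
    S3KeyWriter* keyWriter;      // owning writer: provides s3Interface, mutex, cv, etagList
    uint64_t currentNumber;      // part number of this chunk in the multipart upload
    std::vector<uint8_t> data;   // bytes to upload for this part (element type assumed)
};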
Example #10
S3Params S3BucketReader::constructReaderParams(BucketContent& key) {
    S3Params readerParams = this->params;

    // encode the key name but leave the "/"
    // "/encoded_path/encoded_name"
    string keyEncoded = uri_encode(key.getName());
    find_replace(keyEncoded, "%2F", "/");

    readerParams.setKeyUrl(this->getKeyURL(keyEncoded));
    readerParams.setRegion(this->region);
    readerParams.setKeySize(key.getSize());

    S3DEBUG("key: %s, size: %" PRIu64, readerParams.getKeyUrl().c_str(), readerParams.getKeySize());
    return readerParams;
}
Example #11
void CompressWriter::close() {
    if (this->isClosed) {
        return;
    }

    int status;
    do {
        status = deflate(&this->zstream, Z_FINISH);
        this->flush();
    } while (status == Z_OK);

    deflateEnd(&this->zstream);

    if (status != Z_STREAM_END) {
        CHECK_OR_DIE_MSG(false, "Failed to finish data compression: %d, %s", status,
                         this->zstream.msg);
    }

    S3DEBUG("Compression finished: Z_STREAM_END.");

    this->writer->close();
    this->isClosed = true;
}
Example #12
void CompressWriter::close() {
    if (this->isClosed) {
        return;
    }

    int status;
    do {
        status = deflate(&this->zstream, Z_FINISH);
        this->flush();
    } while (status == Z_OK);

    deflateEnd(&this->zstream);

    if (status != Z_STREAM_END) {
        S3_CHECK_OR_DIE(false, S3RuntimeError,
                        string("Failed to compress data: ") +
                            std::to_string((unsigned long long)status) + ", " + this->zstream.msg);
    }

    S3DEBUG("Compression finished: Z_STREAM_END.");

    this->writer->close();
    this->isClosed = true;
}
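Both close() variants above drive deflate() with Z_FINISH and call flush() after every step until zlib reports Z_STREAM_END. The flush() helper itself is not shown; a minimal sketch, assuming the writer keeps its zlib output buffer in a member named out of size S3_ZIP_COMPRESS_CHUNKSIZE and that the downstream writer exposes write(buf, len) (all of these names are assumptions for illustration), could be:

void CompressWriter::flush() {
    // bytes deflate() has produced since the output buffer was last reset
    uint64_t produced = S3_ZIP_COMPRESS_CHUNKSIZE - this->zstream.avail_out;
    if (produced > 0) {
        this->writer->write(this->out, produced);

        // hand the whole buffer back to zlib for the next deflate() call
        this->zstream.next_out = (Bytef*)this->out;
        this->zstream.avail_out = S3_ZIP_COMPRESS_CHUNKSIZE;
    }
}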