GpgME::KeyListResult Kleo::QGpgMEKeyListJob::exec( const QStringList & pats, bool secretOnly, std::vector<GpgME::Key> & keys ) { setup( pats, secretOnly ); // The communication channel between gpgme and gpgsm is limited in // the number of patterns that can be transported, but they won't // say how many, so we need to find out ourselves if we get a // LINE_TOO_LONG error back... // We could of course just feed them single patterns, and that would // probably be easier, but the performance penalty would currently // be noticeable. for (;;) { keys.clear(); mResult = attemptSyncKeyListing( keys ); if ( !mResult.error() || mResult.error().code() != GPG_ERR_LINE_TOO_LONG ) return mResult; // got LINE_TOO_LONG, try a smaller chunksize: setChunkSize( chunkSize()/2 ); if ( chunkSize() < 1 ) // chunks smaller than one can't exist -> return the error. return mResult; kdDebug(5150) << "QGpgMEKeyListJob::exec(): retrying keylisting with chunksize " << chunkSize() << endl; } kdFatal(5150) << "QGpgMEKeyListJob::exec(): Oops, this is not supposed to happen!" << endl; return GpgME::KeyListResult(); }
void PropagateUploadFileQNAM::slotUploadProgress(qint64 sent, qint64 total) { // Completion is signaled with sent=0, total=0; ignore that case to avoid // accidentally resetting the progress to zero. // finishedSignal() is bound to be emitted soon anyway. // See https://bugreports.qt.io/browse/QTBUG-44782. if (sent == 0 && total == 0) { return; } int progressChunk = _currentChunk + _startChunk - 1; if (progressChunk >= _chunkCount) progressChunk = _currentChunk - 1; // amount is the number of bytes already sent by all the other chunks, // not including this one. // FIXME: this assumes all chunks have the same size, which is true only if the last chunk // has not been finished (which should not happen because the last chunk is sent sequentially) quint64 amount = progressChunk * chunkSize(); sender()->setProperty("byteWritten", sent); if (_jobs.count() > 1) { amount -= (_jobs.count() -1) * chunkSize(); foreach (QObject *j, _jobs) { amount += j->property("byteWritten").toULongLong(); }
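A simplified model of the arithmetic above: chunks that have completed contribute a full chunkSize() each, while jobs still in flight contribute only the bytes they have reported so far. The sketch below is illustrative, not the ownCloud implementation; the names and sizes are made up.

#include <cstdint>
#include <iostream>
#include <vector>

// Bytes transferred so far: every finished chunk counts in full, every job
// still in flight counts only what it has reported via byteWritten.
// Assumes all chunks except the final one are exactly chunkSize bytes long.
std::uint64_t totalSent(std::uint64_t completedChunks,
                        std::uint64_t chunkSize,
                        const std::vector<std::uint64_t>& inFlightBytes)
{
    std::uint64_t amount = completedChunks * chunkSize;   // fully uploaded chunks
    for (std::uint64_t written : inFlightBytes)           // partial progress of running jobs
        amount += written;
    return amount;
}

int main()
{
    const std::uint64_t MiB = 1024 * 1024;
    // Example: 3 chunks of 10 MiB already done, two jobs in flight with 4 MiB and 1 MiB sent.
    std::cout << totalSent(3, 10 * MiB, {4 * MiB, 1 * MiB}) / MiB << " MiB sent\n";   // 35 MiB
    return 0;
}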
GpgME::Error Kleo::QGpgMEKeyListJob::start( const QStringList & pats, bool secretOnly ) { setup( pats, secretOnly ); hookupContextToEventLoopInteractor(); connect( QGpgME::EventLoopInteractor::instance(), SIGNAL(nextKeyEventSignal(GpgME::Context*,const GpgME::Key&)), SLOT(slotNextKeyEvent(GpgME::Context*,const GpgME::Key&)) ); // The communication channel between gpgme and gpgsm is limited in // the number of patterns that can be transported, but they won't // say how many, so we need to find out ourselves if we get a // LINE_TOO_LONG error back... // We could of course just feed them single patterns, and that would // probably be easier, but the performance penalty would currently // be noticeable. while ( const GpgME::Error err = mCtx->startKeyListing( patterns(), mSecretOnly ) ) { if ( err.code() == GPG_ERR_LINE_TOO_LONG ) { setChunkSize( chunkSize()/2 ); if ( chunkSize() >= 1 ) { kdDebug(5150) << "QGpgMEKeyListJob::start(): retrying keylisting with chunksize " << chunkSize() << endl; continue; } } deleteLater(); mResult = GpgME::KeyListResult( 0, err ); return err; } mResult = GpgME::KeyListResult( 0, 0 ); return 0; }
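Both keylisting entry points above share the same recovery pattern: on GPG_ERR_LINE_TOO_LONG the pattern list is re-sent in chunks half the previous size, until the call succeeds or the chunk size drops below one. A minimal standalone sketch of that pattern follows; the attemptListing callback and the error constant are illustrative stand-ins, not the GpgME API.

#include <cstddef>
#include <functional>
#include <iostream>

// Hypothetical error code standing in for GPG_ERR_LINE_TOO_LONG.
constexpr int ERR_LINE_TOO_LONG = 1;

// Retry a chunked operation, halving the chunk size whenever the backend
// reports that the request line was too long. Returns the last error (0 = ok).
int listWithShrinkingChunks(std::size_t chunkSize,
                            const std::function<int(std::size_t)>& attemptListing)
{
    for (;;) {
        const int err = attemptListing(chunkSize);
        if (err != ERR_LINE_TOO_LONG)
            return err;                 // success, or an error we cannot fix by re-chunking
        chunkSize /= 2;                 // request line too long: try smaller chunks
        if (chunkSize < 1)
            return err;                 // cannot shrink further, give up
        std::cout << "retrying keylisting with chunksize " << chunkSize << "\n";
    }
}

int main()
{
    // Example: pretend the backend accepts at most 4 patterns per request.
    return listWithShrinkingChunks(16, [](std::size_t n) {
        return n > 4 ? ERR_LINE_TOO_LONG : 0;
    });
}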
void PropagateUploadFileQNAM::start() { if (_propagator->_abortRequested.fetchAndAddRelaxed(0)) return; _file = new QFile(_propagator->_localDir + _item._file, this); if (!_file->open(QIODevice::ReadOnly)) { done(SyncFileItem::NormalError, _file->errorString()); delete _file; return; } quint64 fileSize = _file->size(); _chunkCount = std::ceil(fileSize/double(chunkSize())); _startChunk = 0; _transferId = qrand() ^ _item._modtime ^ (_item._size << 16); const SyncJournalDb::UploadInfo progressInfo = _propagator->_journal->getUploadInfo(_item._file); if (progressInfo._valid && Utility::qDateTimeToTime_t(progressInfo._modtime) == _item._modtime ) { _startChunk = progressInfo._chunk; _transferId = progressInfo._transferid; qDebug() << Q_FUNC_INFO << _item._file << ": Resuming from chunk " << _startChunk; } _currentChunk = 0; _duration.start(); _propagator->_activeJobs++; emit progress(_item, 0); emitReady(); this->startNextChunk(); }
void PropagateUploadFileQNAM::slotStartUpload(const QByteArray& transmissionChecksumType, const QByteArray& transmissionChecksum) { _transmissionChecksum = transmissionChecksum; _transmissionChecksumType = transmissionChecksumType; if (_item->_contentChecksum.isEmpty() && _item->_contentChecksumType.isEmpty()) { // If the _contentChecksum was not set, reuse the transmission checksum as the content checksum. _item->_contentChecksum = transmissionChecksum; _item->_contentChecksumType = transmissionChecksumType; } const QString fullFilePath = _propagator->getFilePath(_item->_file); if (!FileSystem::fileExists(fullFilePath)) { done(SyncFileItem::SoftError, tr("File Removed")); return; } _stopWatch.addLapTime(QLatin1String("TransmissionChecksum")); time_t prevModtime = _item->_modtime; // the _item value was set in PropagateUploadFileQNAM::start() // but a potential checksum calculation could have taken some time during which the file could // have been changed again, so better check again here. _item->_modtime = FileSystem::getModTime(fullFilePath); if( prevModtime != _item->_modtime ) { _propagator->_anotherSyncNeeded = true; done(SyncFileItem::SoftError, tr("Local file changed during syncing. It will be resumed.")); return; } quint64 fileSize = FileSystem::getSize(fullFilePath); _item->_size = fileSize; // But skip the file if the mtime is too close to 'now'! // That usually indicates a file that is still being changed // or not yet fully copied to the destination. if (fileIsStillChanging(*_item)) { _propagator->_anotherSyncNeeded = true; done(SyncFileItem::SoftError, tr("Local file changed during sync.")); return; } _chunkCount = std::ceil(fileSize/double(chunkSize())); _startChunk = 0; _transferId = qrand() ^ _item->_modtime ^ (_item->_size << 16); const SyncJournalDb::UploadInfo progressInfo = _propagator->_journal->getUploadInfo(_item->_file); if (progressInfo._valid && Utility::qDateTimeToTime_t(progressInfo._modtime) == _item->_modtime ) { _startChunk = progressInfo._chunk; _transferId = progressInfo._transferid; qDebug() << Q_FUNC_INFO << _item->_file << ": Resuming from chunk " << _startChunk; } _currentChunk = 0; _duration.start(); emit progress(*_item, 0); this->startNextChunk(); }
virtual qint64 readData(char* data, qint64 maxlen) { maxlen = qMin(maxlen, chunkSize() - _read); if (maxlen == 0) return 0; qint64 ret = _file->read(data, maxlen); _read += ret; return ret; }
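readData() above never hands out more than the unread remainder of the current chunk window. The same clamping idea in a plain standard-library sketch; the WindowedReader class is a hypothetical stand-in, not ownCloud's UploadDevice.

#include <algorithm>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>

// Hands out at most `windowSize` bytes starting at `windowStart`, no matter how
// much the caller asks for, mirroring the clamp readData() applies above.
class WindowedReader {
public:
    WindowedReader(const std::string& path, std::streamoff windowStart, std::streamsize windowSize)
        : _in(path, std::ios::binary), _size(windowSize), _read(0)
    {
        _in.seekg(windowStart);
    }

    std::streamsize read(char* data, std::streamsize maxlen)
    {
        maxlen = std::min(maxlen, _size - _read);   // never read past the window
        if (maxlen <= 0)
            return 0;
        _in.read(data, maxlen);
        const std::streamsize got = _in.gcount();
        _read += got;
        return got;
    }

    bool atEnd() const { return _read >= _size || _in.eof(); }

private:
    std::ifstream _in;
    std::streamsize _size;
    std::streamsize _read;
};

int main()
{
    // Illustrative use: read the second 4 KiB window of a local file.
    WindowedReader reader("example.bin", 4096, 4096);
    std::vector<char> buf(64 * 1024);
    std::cout << "read " << reader.read(buf.data(), std::streamsize(buf.size())) << " bytes\n";
    return 0;
}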
UploadDevice::UploadDevice(QIODevice *file, qint64 start, qint64 size, BandwidthManager *bwm) : QIODevice(file), _file(file), _read(0), _size(size), _start(start), _bandwidthManager(bwm), _bandwidthQuota(0), _readWithProgress(0), _bandwidthLimited(false), _choked(false) { qDebug() << Q_FUNC_INFO << start << size << chunkSize(); _bandwidthManager->registerUploadDevice(this); _file = QPointer<QIODevice>(file); }
BSONObj SettingsType::toBSON() const { BSONObjBuilder builder; if (_key) builder.append(key(), getKey()); if (_chunkSize) builder.append(chunkSize(), getChunkSize()); if (_balancerStopped) builder.append(balancerStopped(), getBalancerStopped()); if (_secondaryThrottle) { builder.append(deprecated_secondaryThrottle(), getSecondaryThrottle()); } if (_migrationWriteConcern) { builder.append(migrationWriteConcern(), getMigrationWriteConcern().toBSON()); } if (_waitForDelete) builder.append(waitForDelete(), getWaitForDelete()); return builder.obj(); }
void PropagateUploadFileQNAM::start() { if (_propagator->_abortRequested.fetchAndAddRelaxed(0)) return; _file = new QFile(_propagator->getFilePath(_item._file), this); if (!_file->open(QIODevice::ReadOnly)) { done(SyncFileItem::NormalError, _file->errorString()); delete _file; return; } // Update the mtime and size, it might have changed since discovery. _item._modtime = FileSystem::getModTime(_file->fileName()); quint64 fileSize = _file->size(); _item._size = fileSize; // But skip the file if the mtime is too close to 'now'! // That usually indicates a file that is still being changed // or not yet fully copied to the destination. QDateTime modtime = Utility::qDateTimeFromTime_t(_item._modtime); if (modtime.msecsTo(QDateTime::currentDateTime()) < minFileAgeForUpload) { _propagator->_anotherSyncNeeded = true; done(SyncFileItem::SoftError, tr("Local file changed during sync.")); delete _file; return; } _chunkCount = std::ceil(fileSize/double(chunkSize())); _startChunk = 0; _transferId = qrand() ^ _item._modtime ^ (_item._size << 16); const SyncJournalDb::UploadInfo progressInfo = _propagator->_journal->getUploadInfo(_item._file); if (progressInfo._valid && Utility::qDateTimeToTime_t(progressInfo._modtime) == _item._modtime ) { _startChunk = progressInfo._chunk; _transferId = progressInfo._transferid; qDebug() << Q_FUNC_INFO << _item._file << ": Resuming from chunk " << _startChunk; } _currentChunk = 0; _duration.start(); emit progress(_item, 0); this->startNextChunk(); }
int WaveFile::open(const char *path) { int status = RiffFile::open(path); if(status != 0) return status; char s[5]; FOURCC2STR(formatCode(), s); printf("type = %s\n", s); if(formatCode() != STR2FOURCC("WAVE")) { return 1; // error } // Jump into the "fmt " chunk if(push(STR2FOURCC("fmt "))) { memset(&mHeader, 0, sizeof(mHeader)); UInt32 fmtSize = chunkSize(); if(RiffFile::readData(&mHeader, fmtSize) != fmtSize) { return 1; // error } assert(mHeader.formatTag == 1); // only uncompressed files are supported // Some writers put a bad block align and/or avgBytesPerSecond value. // Let's recalculate it. mHeader.bytesPerFrame = mHeader.bitsPerSample / 8 * mHeader.numChannels; mHeader.averageBytesPerSecond = mHeader.bytesPerFrame * mHeader.samplesPerSecond; } // Jump out of the "fmt " chunk pop(); // Jump into the "data" chunk if(!push(STR2FOURCC("data"))) { return 1; // error, couldn't find the data chunk } return 0; // success }
void PropagateUploadFileQNAM::start() { if (_propagator->_abortRequested.fetchAndAddRelaxed(0)) { return; } const QString fullFilePath(_propagator->getFilePath(_item._file)); if (!FileSystem::fileExists(fullFilePath)) { done(SyncFileItem::SoftError, tr("File Removed")); return; } // Update the mtime and size, it might have changed since discovery. _item._modtime = FileSystem::getModTime(fullFilePath); quint64 fileSize = FileSystem::getSize(fullFilePath); _item._size = fileSize; // But skip the file if the mtime is too close to 'now'! // That usually indicates a file that is still being changed // or not yet fully copied to the destination. if (fileIsStillChanging(_item)) { _propagator->_anotherSyncNeeded = true; done(SyncFileItem::SoftError, tr("Local file changed during sync.")); return; } _chunkCount = std::ceil(fileSize/double(chunkSize())); _startChunk = 0; _transferId = qrand() ^ _item._modtime ^ (_item._size << 16); const SyncJournalDb::UploadInfo progressInfo = _propagator->_journal->getUploadInfo(_item._file); if (progressInfo._valid && Utility::qDateTimeToTime_t(progressInfo._modtime) == _item._modtime ) { _startChunk = progressInfo._chunk; _transferId = progressInfo._transferid; qDebug() << Q_FUNC_INFO << _item._file << ": Resuming from chunk " << _startChunk; } _currentChunk = 0; _duration.start(); emit progress(_item, 0); this->startNextChunk(); }
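The start()/slotStartUpload() variants above all derive the chunk count as ceil(fileSize / chunkSize) and then optionally resume from the chunk index recorded in the sync journal. A small worked example of that arithmetic, with an assumed 10 MiB chunk size:

#include <cmath>
#include <cstdint>
#include <iostream>

int main()
{
    const std::uint64_t MiB = 1024 * 1024;
    const std::uint64_t chunkSize = 10 * MiB;   // assumed value; the real chunk size is configurable
    const std::uint64_t fileSize = 25 * MiB;

    // Same formula as _chunkCount = std::ceil(fileSize / double(chunkSize())):
    const int chunkCount = int(std::ceil(fileSize / double(chunkSize)));   // 3 chunks (10 + 10 + 5 MiB)

    // Resuming: if the sync journal recorded that 2 chunks were already uploaded
    // for the same mtime, only chunkCount - startChunk uploads remain.
    const int startChunk = 2;   // would come from SyncJournalDb::UploadInfo
    std::cout << chunkCount << " chunks total, " << (chunkCount - startChunk) << " left to upload\n";
    return 0;
}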
//! Write the image (internal) void SpectrumRecorder::useBuffer() { // libvlc_media_player_get_length/time if(mCurrentRow < mMaxHeight) { mExtr.extract(mBuffer, chunkSize(), mChannels); mExtr.normalize(255); for(int i = 0; i < mWindowSize / 2; ++i) { int val = mExtr.spectrum()[i]; if(val > 255) val = 255; mOut.setPixel(i, mCurrentRow, qRgb(0, val, 0)); } mCurrentRow++; } else { sequenceEnds(); } }
void PropagateUploadFileQNAM::startNextChunk() { if (_propagator->_abortRequested.fetchAndAddRelaxed(0)) return; if (! _jobs.isEmpty() && _currentChunk + _startChunk >= _chunkCount - 1) { // Don't do parallel upload of chunk if this might be the last chunk because the server cannot handle that // https://github.com/owncloud/core/issues/11106 // We return now and when the _jobs are finished we will proceed with the last chunk return; } quint64 fileSize = _item._size; QMap<QByteArray, QByteArray> headers; headers["OC-Total-Length"] = QByteArray::number(fileSize); headers["OC-Async"] = "1"; headers["Content-Type"] = "application/octet-stream"; headers["X-OC-Mtime"] = QByteArray::number(qint64(_item._modtime)); if (!_item._etag.isEmpty() && _item._etag != "empty_etag" && _item._instruction != CSYNC_INSTRUCTION_NEW // On new files never send an If-Match ) { // We add quotes because the owncloud server always adds quotes around the etag, and // csync_owncloud.c's owncloud_file_id always strips the quotes. headers["If-Match"] = '"' + _item._etag + '"'; } QString path = _item._file; UploadDevice *device = 0; if (_chunkCount > 1) { int sendingChunk = (_currentChunk + _startChunk) % _chunkCount; // XOR with chunk size to make sure everything goes well if the chunk size changes between runs uint transid = _transferId ^ chunkSize(); path += QString("-chunking-%1-%2-%3").arg(transid).arg(_chunkCount).arg(sendingChunk); headers["OC-Chunked"] = "1"; int currentChunkSize = chunkSize(); if (sendingChunk == _chunkCount - 1) { // last chunk currentChunkSize = (fileSize % chunkSize()); if( currentChunkSize == 0 ) { // if the last chunk pretends to be 0, it's actually the full chunk size. currentChunkSize = chunkSize(); } } device = new UploadDevice(_file, chunkSize() * quint64(sendingChunk), currentChunkSize, &_propagator->_bandwidthManager); } else { device = new UploadDevice(_file, 0, fileSize, &_propagator->_bandwidthManager); } bool isOpen = true; if (!device->isOpen()) { isOpen = device->open(QIODevice::ReadOnly); } if( isOpen ) { PUTFileJob* job = new PUTFileJob(AccountManager::instance()->account(), _propagator->_remoteFolder + path, device, headers, _currentChunk); _jobs.append(job); job->setTimeout(_propagator->httpTimeout() * 1000); connect(job, SIGNAL(finishedSignal()), this, SLOT(slotPutFinished())); connect(job, SIGNAL(uploadProgress(qint64,qint64)), this, SLOT(slotUploadProgress(qint64,qint64))); connect(job, SIGNAL(uploadProgress(qint64,qint64)), device, SLOT(slotJobUploadProgress(qint64,qint64))); connect(job, SIGNAL(destroyed(QObject*)), this, SLOT(slotJobDestroyed(QObject*))); job->start(); _propagator->_activeJobs++; _currentChunk++; QByteArray env = qgetenv("OWNCLOUD_PARALLEL_CHUNK"); bool parallelChunkUpload = env=="true" || env =="1"; if (_currentChunk + _startChunk >= _chunkCount - 1) { // Don't do parallel upload of chunk if this might be the last chunk because the server cannot handle that // https://github.com/owncloud/core/issues/11106 parallelChunkUpload = false; } if (parallelChunkUpload && (_propagator->_activeJobs < _propagator->maximumActiveJob()) && _currentChunk < _chunkCount ) { startNextChunk(); } if (!parallelChunkUpload || _chunkCount - _currentChunk <= 0) { emitReady(); } } else {
void PropagateUploadFileQNAM::startNextChunk() { if (_propagator->_abortRequested.fetchAndAddRelaxed(0)) return; if (! _jobs.isEmpty() && _currentChunk + _startChunk >= _chunkCount - 1) { // Don't do parallel upload of chunk if this might be the last chunk because the server cannot handle that // https://github.com/owncloud/core/issues/11106 // We return now and when the _jobs are finished we will proceed with the last chunk // NOTE: Some other parts of the code, such as slotUploadProgress, also assume that the last chunk // is sent last. return; } quint64 fileSize = _item._size; QMap<QByteArray, QByteArray> headers; headers["OC-Total-Length"] = QByteArray::number(fileSize); headers["OC-Async"] = "1"; headers["OC-Chunk-Size"]= QByteArray::number(quint64(chunkSize())); headers["Content-Type"] = "application/octet-stream"; headers["X-OC-Mtime"] = QByteArray::number(qint64(_item._modtime)); if (!_item._etag.isEmpty() && _item._etag != "empty_etag" && _item._instruction != CSYNC_INSTRUCTION_NEW // On new files never send an If-Match ) { // We add quotes because the owncloud server always adds quotes around the etag, and // csync_owncloud.c's owncloud_file_id always strips the quotes. headers["If-Match"] = '"' + _item._etag + '"'; } QString path = _item._file; UploadDevice *device = new UploadDevice(&_propagator->_bandwidthManager); qint64 chunkStart = 0; qint64 currentChunkSize = fileSize; if (_chunkCount > 1) { int sendingChunk = (_currentChunk + _startChunk) % _chunkCount; // XOR with chunk size to make sure everything goes well if the chunk size changes between runs uint transid = _transferId ^ chunkSize(); path += QString("-chunking-%1-%2-%3").arg(transid).arg(_chunkCount).arg(sendingChunk); headers["OC-Chunked"] = "1"; chunkStart = chunkSize() * quint64(sendingChunk); currentChunkSize = chunkSize(); if (sendingChunk == _chunkCount - 1) { // last chunk currentChunkSize = (fileSize % chunkSize()); if( currentChunkSize == 0 ) { // if the last chunk pretends to be 0, it's actually the full chunk size. currentChunkSize = chunkSize(); } } } if (! device->prepareAndOpen(_propagator->getFilePath(_item._file), chunkStart, currentChunkSize)) { qDebug() << "ERR: Could not prepare upload device: " << device->errorString(); // Soft error because this is likely caused by the user modifying his files while syncing abortWithError( SyncFileItem::SoftError, device->errorString() ); delete device; return; } // job takes ownership of device via a QScopedPointer. Job deletes itself when finishing.
PUTFileJob* job = new PUTFileJob(_propagator->account(), _propagator->_remoteFolder + path, device, headers, _currentChunk); _jobs.append(job); connect(job, SIGNAL(finishedSignal()), this, SLOT(slotPutFinished())); connect(job, SIGNAL(uploadProgress(qint64,qint64)), this, SLOT(slotUploadProgress(qint64,qint64))); connect(job, SIGNAL(uploadProgress(qint64,qint64)), device, SLOT(slotJobUploadProgress(qint64,qint64))); connect(job, SIGNAL(destroyed(QObject*)), this, SLOT(slotJobDestroyed(QObject*))); job->start(); _propagator->_activeJobs++; _currentChunk++; bool parallelChunkUpload = true; QByteArray env = qgetenv("OWNCLOUD_PARALLEL_CHUNK"); if (!env.isEmpty()) { parallelChunkUpload = env != "false" && env != "0"; } else { auto version = _propagator->account()->serverVersion(); auto components = version.split('.'); int versionNum = (components.value(0).toInt() << 16) + (components.value(1).toInt() << 8) + components.value(2).toInt(); if (versionNum < 0x080003) { // Disable parallel chunk upload for servers older than 8.0.3 to avoid too many // internal server errors (#2743, #2938) parallelChunkUpload = false; } } if (_currentChunk + _startChunk >= _chunkCount - 1) { // Don't do parallel upload of chunk if this might be the last chunk because the server cannot handle that // https://github.com/owncloud/core/issues/11106 parallelChunkUpload = false; } if (parallelChunkUpload && (_propagator->_activeJobs < _propagator->maximumActiveJob()) && _currentChunk < _chunkCount ) { startNextChunk(); } if (!parallelChunkUpload || _chunkCount - _currentChunk <= 0) { emit ready(); } }
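The version gate above packs a dotted server version into one comparable integer, one byte per component, and compares it against 0x080003 (i.e. 8.0.3). A standalone sketch of that encoding, without the Account class:

#include <iostream>
#include <sstream>
#include <string>

// Encode "major.minor.patch" as (major << 16) | (minor << 8) | patch, mirroring
// the comparison against 0x080003 in the snippet above.
int versionToInt(const std::string& version)
{
    std::istringstream in(version);
    int major = 0, minor = 0, patch = 0;
    char dot = 0;
    in >> major >> dot >> minor >> dot >> patch;
    return (major << 16) | (minor << 8) | patch;
}

int main()
{
    std::cout << std::boolalpha
              << (versionToInt("8.0.2") >= 0x080003) << "\n"    // false: parallel upload disabled
              << (versionToInt("8.1.0") >= 0x080003) << "\n";   // true
    return 0;
}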
UInt32 WaveFile::getNumberOfFrames() { return chunkSize() / mHeader.bytesPerFrame; }
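getNumberOfFrames() divides the size of the data chunk by the frame size that open() recomputes from the format header. Worked through for a common case (16-bit stereo PCM at 44.1 kHz); the concrete numbers are only an example:

#include <cstdint>
#include <iostream>

int main()
{
    // Values as they would appear in the "fmt " chunk of a 16-bit stereo file.
    const std::uint32_t bitsPerSample = 16;
    const std::uint32_t numChannels = 2;
    const std::uint32_t samplesPerSecond = 44100;

    // Same recalculation WaveFile::open() performs when the header values are unreliable:
    const std::uint32_t bytesPerFrame = bitsPerSample / 8 * numChannels;            // 4 bytes
    const std::uint32_t averageBytesPerSecond = bytesPerFrame * samplesPerSecond;   // 176400

    // A "data" chunk of 1764000 bytes therefore holds 441000 frames, i.e. 10 seconds of audio.
    const std::uint32_t dataChunkSize = 1764000;
    std::cout << dataChunkSize / bytesPerFrame << " frames, "
              << averageBytesPerSecond << " bytes per second\n";
    return 0;
}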
void PropagateUploadFileQNAM::startNextChunk() { if (_propagator->_abortRequested.fetchAndAddRelaxed(0)) return; if (! _jobs.isEmpty() && _currentChunk + _startChunk >= _chunkCount - 1) { // Don't do parallel upload of chunk if this might be the last chunk because the server cannot handle that // https://github.com/owncloud/core/issues/11106 // We return now and when the _jobs are finished we will proceed with the last chunk // NOTE: Some other parts of the code such as slotUploadProgress also assume that the last chunk // is sent last. return; } quint64 fileSize = _item->_size; QMap<QByteArray, QByteArray> headers; headers["OC-Total-Length"] = QByteArray::number(fileSize); headers["OC-Async"] = "1"; headers["OC-Chunk-Size"]= QByteArray::number(quint64(chunkSize())); headers["Content-Type"] = "application/octet-stream"; headers["X-OC-Mtime"] = QByteArray::number(qint64(_item->_modtime)); if(_item->_file.contains(".sys.admin#recall#")) { // This is a file recall triggered by the admin. Note: the // recall list file created by the admin and downloaded by the // client (.sys.admin#recall#) also falls into this category // (although users are not supposed to mess with it) // We use a special tag header so that the server may decide to store this file away in some admin stage area // And not directly in the user's area (which would trigger redownloads etc). headers["OC-Tag"] = ".sys.admin#recall#"; } if (!_item->_etag.isEmpty() && _item->_etag != "empty_etag" && _item->_instruction != CSYNC_INSTRUCTION_NEW // On new files never send an If-Match && _item->_instruction != CSYNC_INSTRUCTION_TYPE_CHANGE && !_deleteExisting ) { // We add quotes because the owncloud server always adds quotes around the etag, and // csync_owncloud.c's owncloud_file_id always strips the quotes. headers["If-Match"] = '"' + _item->_etag + '"'; } QString path = _item->_file; UploadDevice *device = new UploadDevice(&_propagator->_bandwidthManager); qint64 chunkStart = 0; qint64 currentChunkSize = fileSize; bool isFinalChunk = false; if (_chunkCount > 1) { int sendingChunk = (_currentChunk + _startChunk) % _chunkCount; // XOR with chunk size to make sure everything goes well if the chunk size changes between runs uint transid = _transferId ^ chunkSize(); qDebug() << "Upload chunk" << sendingChunk << "of" << _chunkCount << "transferid(remote)=" << transid; path += QString("-chunking-%1-%2-%3").arg(transid).arg(_chunkCount).arg(sendingChunk); headers["OC-Chunked"] = "1"; chunkStart = chunkSize() * quint64(sendingChunk); currentChunkSize = chunkSize(); if (sendingChunk == _chunkCount - 1) { // last chunk currentChunkSize = (fileSize % chunkSize()); if( currentChunkSize == 0 ) { // if the last chunk pretends to be 0, it's actually the full chunk size. currentChunkSize = chunkSize(); } isFinalChunk = true; } } else { // if there's only one chunk, it's the final one isFinalChunk = true; } if (isFinalChunk && !_transmissionChecksumType.isEmpty()) { headers[checkSumHeaderC] = makeChecksumHeader( _transmissionChecksumType, _transmissionChecksum); } const QString fileName = _propagator->getFilePath(_item->_file); if (! device->prepareAndOpen(fileName, chunkStart, currentChunkSize)) { qDebug() << "ERR: Could not prepare upload device: " << device->errorString(); // If the file is currently locked, we want to retry the sync // when it becomes available again.
if (FileSystem::isFileLocked(fileName)) { emit _propagator->seenLockedFile(fileName); } // Soft error because this is likely caused by the user modifying his files while syncing abortWithError( SyncFileItem::SoftError, device->errorString() ); delete device; return; } // job takes ownership of device via a QScopedPointer. Job deletes itself when finishing PUTFileJob* job = new PUTFileJob(_propagator->account(), _propagator->_remoteFolder + path, device, headers, _currentChunk); _jobs.append(job); connect(job, SIGNAL(finishedSignal()), this, SLOT(slotPutFinished())); connect(job, SIGNAL(uploadProgress(qint64,qint64)), this, SLOT(slotUploadProgress(qint64,qint64))); connect(job, SIGNAL(uploadProgress(qint64,qint64)), device, SLOT(slotJobUploadProgress(qint64,qint64))); connect(job, SIGNAL(destroyed(QObject*)), this, SLOT(slotJobDestroyed(QObject*))); job->start(); _propagator->_activeJobList.append(this); _currentChunk++; bool parallelChunkUpload = true; QByteArray env = qgetenv("OWNCLOUD_PARALLEL_CHUNK"); if (!env.isEmpty()) { parallelChunkUpload = env != "false" && env != "0"; } else { int versionNum = _propagator->account()->serverVersionInt(); if (versionNum < 0x080003) { // Disable parallel chunk upload for servers older than 8.0.3 to avoid too many // internal server errors (#2743, #2938) parallelChunkUpload = false; } } if (_currentChunk + _startChunk >= _chunkCount - 1) { // Don't do parallel upload of chunk if this might be the last chunk because the server cannot handle that // https://github.com/owncloud/core/issues/11106 parallelChunkUpload = false; } if (parallelChunkUpload && (_propagator->_activeJobList.count() < _propagator->maximumActiveJob()) && _currentChunk < _chunkCount ) { startNextChunk(); } if (!parallelChunkUpload || _chunkCount - _currentChunk <= 0) { emit ready(); } }
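The chunked branches above share one easy-to-miss rule: fileSize % chunkSize() is the final chunk's size, except when the file divides evenly, in which case the remainder is 0 and the final chunk is a full one. A standalone sketch of that rule, plus the transferId-XOR used to build the chunk path (all values illustrative):

#include <cstdint>
#include <iostream>

// Size of chunk `index` out of `chunkCount` chunks of a `fileSize`-byte file.
std::uint64_t sizeOfChunk(std::uint64_t fileSize, std::uint64_t chunkSize,
                          int index, int chunkCount)
{
    if (index != chunkCount - 1)
        return chunkSize;                              // every chunk but the last is full
    const std::uint64_t remainder = fileSize % chunkSize;
    return remainder == 0 ? chunkSize : remainder;     // an even split still ends with a full chunk
}

int main()
{
    const std::uint64_t MiB = 1024 * 1024;
    std::cout << sizeOfChunk(25 * MiB, 10 * MiB, 2, 3) / MiB << " MiB\n";   // 5 MiB
    std::cout << sizeOfChunk(30 * MiB, 10 * MiB, 2, 3) / MiB << " MiB\n";   // 10 MiB

    // The chunk path mixes the transfer id with the chunk size so that a chunk size
    // changed between runs cannot collide with stale partial uploads on the server.
    const unsigned transferId = 0x1234abcd;            // illustrative value
    const unsigned transid = transferId ^ unsigned(10 * MiB);
    std::cout << "file.dat-chunking-" << transid << "-3-2\n";
    return 0;
}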
int Diff::execute() { Options sourceOptions; { sourceOptions.add<std::string>("filename", m_sourceFile); sourceOptions.add<bool>("debug", isDebug()); sourceOptions.add<boost::uint32_t>("verbose", getVerboseLevel()); } boost::scoped_ptr<Stage> source(AppSupport::makeReader(sourceOptions)); source->initialize(); boost::uint32_t chunkSize(source->getNumPoints()); if (m_chunkSize) chunkSize = m_chunkSize; PointBuffer source_data(source->getSchema(), chunkSize); boost::scoped_ptr<StageSequentialIterator> source_iter(source->createSequentialIterator(source_data)); ptree errors; Options candidateOptions; { candidateOptions.add<std::string>("filename", m_candidateFile); candidateOptions.add<bool>("debug", isDebug()); candidateOptions.add<boost::uint32_t>("verbose", getVerboseLevel()); } boost::scoped_ptr<Stage> candidate(AppSupport::makeReader(candidateOptions)); candidate->initialize(); PointBuffer candidate_data(candidate->getSchema(), chunkSize); boost::scoped_ptr<StageSequentialIterator> candidate_iter(candidate->createSequentialIterator(candidate_data)); if (candidate->getNumPoints() != source->getNumPoints()) { std::ostringstream oss; oss << "Source and candidate files do not have the same point count"; errors.put<std::string>("count.error", oss.str()); errors.put<boost::uint32_t>("count.candidate" , candidate->getNumPoints()); errors.put<boost::uint32_t>("count.source" , source->getNumPoints()); } pdal::Metadata source_metadata = source->collectMetadata(); pdal::Metadata candidate_metadata = candidate->collectMetadata(); if (source_metadata != candidate_metadata) { std::ostringstream oss; oss << "Source and candidate files do not have the same metadata count"; errors.put<std::string>("metadata.error", oss.str()); errors.put_child("metadata.source", source_metadata.toPTree()); errors.put_child("metadata.candidate", candidate_metadata.toPTree()); } Schema const& candidate_schema = candidate_data.getSchema(); Schema const& source_schema = source_data.getSchema(); if (! ( candidate_schema == source_schema)) { std::ostringstream oss; oss << "Source and candidate files do not have the same schema"; errors.put<std::string>("schema.error", oss.str()); errors.put_child("schema.source", source_schema.toPTree()); errors.put_child("schema.candidate", candidate_schema.toPTree()); } if (errors.size()) { write_json(std::cout, errors); return 1; } else { // If we made it this far with no errors, now we'll // check the points. checkPoints(source_iter.get(), source_data, candidate_iter.get(), candidate_data, errors); if (errors.size()) { write_json(std::cout, errors); return 1; } } return 0; }
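checkPoints() (not shown above) walks both readers chunkSize points at a time and compares the buffers. The loop below sketches that shape without the PDAL iterator API; the Point record and the read callbacks are hypothetical stand-ins.

#include <algorithm>
#include <cstddef>
#include <functional>
#include <vector>

// Hypothetical point record; the real tool compares dimension by dimension.
struct Point {
    double x, y, z;
    bool operator==(const Point& o) const { return x == o.x && y == o.y && z == o.z; }
};

// Compare two sources chunk by chunk. Each callback fills its buffer with up to
// buffer.size() points and returns how many it produced (0 at end of stream).
bool buffersMatch(std::size_t chunkSize,
                  const std::function<std::size_t(std::vector<Point>&)>& readSource,
                  const std::function<std::size_t(std::vector<Point>&)>& readCandidate)
{
    std::vector<Point> a(chunkSize), b(chunkSize);
    for (;;) {
        const std::size_t na = readSource(a);
        const std::size_t nb = readCandidate(b);
        if (na != nb)
            return false;            // streams ended at different lengths
        if (na == 0)
            return true;             // both exhausted without a mismatch
        for (std::size_t i = 0; i < na; ++i)
            if (!(a[i] == b[i]))
                return false;
    }
}

int main()
{
    // Trivial usage: compare two identical in-memory "streams" of two points each.
    std::vector<Point> data = {{1, 2, 3}, {4, 5, 6}};
    std::size_t posA = 0, posB = 0;
    auto readFrom = [&data](std::size_t& pos) {
        return [&data, &pos](std::vector<Point>& buf) -> std::size_t {
            const std::size_t n = std::min(buf.size(), data.size() - pos);
            std::copy(data.begin() + pos, data.begin() + pos + n, buf.begin());
            pos += n;
            return n;
        };
    };
    return buffersMatch(16, readFrom(posA), readFrom(posB)) ? 0 : 1;
}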
virtual bool atEnd() const { return _read >= chunkSize() || _file->atEnd(); }