void PropagateRemoteMove::slotMoveJobFinished() { propagator()->_activeJobList.removeOne(this); ASSERT(_job); QNetworkReply::NetworkError err = _job->reply()->error(); _item->_httpErrorCode = _job->reply()->attribute(QNetworkRequest::HttpStatusCodeAttribute).toInt(); _item->_responseTimeStamp = _job->responseTimestamp(); _item->_requestId = _job->requestId(); if (err != QNetworkReply::NoError) { SyncFileItem::Status status = classifyError(err, _item->_httpErrorCode, &propagator()->_anotherSyncNeeded); done(status, _job->errorString()); return; } if (_item->_httpErrorCode != 201) { // Normally we expect "201 Created" // If it is not the case, it might be because of a proxy or gateway intercepting the request, so we must // throw an error. done(SyncFileItem::NormalError, tr("Wrong HTTP code returned by server. Expected 201, but received \"%1 %2\".") .arg(_item->_httpErrorCode) .arg(_job->reply()->attribute(QNetworkRequest::HttpReasonPhraseAttribute).toString())); return; } finalize(); }
// Entry point of the chunked ("NG") upload: decide whether a previous upload
// of the same file version can be resumed, whether stale server chunks must be
// removed first, or whether to start from scratch.
void PropagateUploadFileNG::doStartUpload()
{
    propagator()->_activeJobList.append(this);

    const SyncJournalDb::UploadInfo progressInfo = propagator()->_journal->getUploadInfo(_item->_file);
    if (progressInfo._valid && progressInfo._modtime == _item->_modtime) {
        // Stored upload info matches this file version: list the chunks already
        // present on the server (PROPFIND) so the upload can be resumed.
        _transferId = progressInfo._transferid;
        auto url = chunkUrl();
        auto job = new LsColJob(propagator()->account(), url, this);
        _jobs.append(job);
        job->setProperties(QList<QByteArray>() << "resourcetype"
                                               << "getcontentlength");
        connect(job, &LsColJob::finishedWithoutError, this, &PropagateUploadFileNG::slotPropfindFinished);
        connect(job, &LsColJob::finishedWithError,
            this, &PropagateUploadFileNG::slotPropfindFinishedWithError);
        connect(job, &QObject::destroyed, this, &PropagateUploadFileCommon::slotJobDestroyed);
        connect(job, &LsColJob::directoryListingIterated,
            this, &PropagateUploadFileNG::slotPropfindIterate);
        job->start();
        return;
    } else if (progressInfo._valid) {
        // The upload info is stale. remove the stale chunks on the server
        _transferId = progressInfo._transferid;
        // Fire and forget. Any error will be ignored.
        (new DeleteJob(propagator()->account(), chunkUrl(), this))->start();
        // startNewUpload will reset the _transferId and the UploadInfo in the db.
    }

    startNewUpload();
}
// Begin a brand-new chunked upload: pick a fresh transfer id, persist the
// upload state to the journal (so a later run can resume), and create the
// remote chunk collection via MKCOL.
void PropagateUploadFileNG::startNewUpload()
{
    ASSERT(propagator()->_activeJobList.count(this) == 1);
    // Mix modtime, size and file-name hash into the transfer id; the size
    // component makes a resumed upload with more server data than the local
    // file size practically impossible (checked in slotPropfindFinished).
    _transferId = qrand() ^ _item->_modtime ^ (_item->_size << 16) ^ qHash(_item->_file);
    _sent = 0;
    _currentChunk = 0;

    propagator()->reportProgress(*_item, 0);

    // Persist the upload info so an interrupted transfer can be resumed later.
    SyncJournalDb::UploadInfo pi;
    pi._valid = true;
    pi._transferid = _transferId;
    pi._modtime = _item->_modtime;
    pi._contentChecksum = _item->_checksumHeader;
    propagator()->_journal->setUploadInfo(_item->_file, pi);
    propagator()->_journal->commit("Upload info");

    QMap<QByteArray, QByteArray> headers;
    headers["OC-Total-Length"] = QByteArray::number(_item->_size);
    auto job = new MkColJob(propagator()->account(), chunkUrl(), headers, this);

    connect(job, SIGNAL(finished(QNetworkReply::NetworkError)),
        this, SLOT(slotMkColFinished(QNetworkReply::NetworkError)));
    connect(job, &QObject::destroyed, this, &PropagateUploadFileCommon::slotJobDestroyed);
    job->start();
}
void PropagateRemoteMkdir::success() { // save the file id already so we can detect rename or remove SyncJournalFileRecord record = _item->toSyncJournalFileRecordWithInode(propagator()->_localDir + _item->destination()); if (!propagator()->_journal->setFileRecord(record)) { done(SyncFileItem::FatalError, tr("Error writing metadata to the database")); return; } done(SyncFileItem::Success); }
void PropagateRemoteMkdir::slotStartMkcolJob() { if (propagator()->_abortRequested.fetchAndAddRelaxed(0)) return; qCDebug(lcPropagateRemoteMkdir) << _item->_file; _job = new MkColJob(propagator()->account(), propagator()->_remoteFolder + _item->_file, this); connect(_job, SIGNAL(finished(QNetworkReply::NetworkError)), this, SLOT(slotMkcolJobFinished())); _job->start(); }
// Called when the PROPFIND that lists already-uploaded chunks fails. Fatal
// errors abort the item; anything else falls back to a fresh upload.
void PropagateUploadFileNG::slotPropfindFinishedWithError()
{
    auto job = qobject_cast<LsColJob *>(sender());
    slotJobDestroyed(job); // remove it from the _jobs list
    QNetworkReply::NetworkError err = job->reply()->error();
    auto httpErrorCode = job->reply()->attribute(QNetworkRequest::HttpStatusCodeAttribute).toInt();
    auto status = classifyError(err, httpErrorCode, &propagator()->_anotherSyncNeeded);
    if (status == SyncFileItem::FatalError) {
        propagator()->_activeJobList.removeOne(this);
        abortWithError(status, job->errorStringParsingBody());
        return;
    }
    // Non-fatal: give up on resuming and upload from scratch. Note that this
    // path deliberately keeps `this` in the propagator's active job list.
    startNewUpload();
}
void BDSimulator::step() { last_reactions_.clear(); { BDPropagator propagator(*model_, *world_, *rng(), dt(), last_reactions_); while (propagator()) { ; // do nothing here } } set_t(t() + dt()); num_steps_++; }
// Evaluate the MKCOL result. A 405 ("already exists") counts as success;
// transport errors and unexpected status codes finish the job with an error.
// If the server did not send an OC-FileId header, fall back to a PROPFIND to
// obtain the etag and file id.
void PropagateRemoteMkdir::slotMkcolJobFinished()
{
    propagator()->_activeJobList.removeOne(this);
    ASSERT(_job);

    QNetworkReply::NetworkError err = _job->reply()->error();
    _item->_httpErrorCode = _job->reply()->attribute(QNetworkRequest::HttpStatusCodeAttribute).toInt();

    if (_item->_httpErrorCode == 405) {
        // This happens when the directory already exists. Nothing to do.
    } else if (err != QNetworkReply::NoError) {
        SyncFileItem::Status status = classifyError(err, _item->_httpErrorCode,
            &propagator()->_anotherSyncNeeded);
        done(status, _job->errorString());
        return;
    } else if (_item->_httpErrorCode != 201) {
        // Normally we expect "201 Created"
        // If it is not the case, it might be because of a proxy or gateway intercepting the request, so we must
        // throw an error.
        done(SyncFileItem::NormalError,
            tr("Wrong HTTP code returned by server. Expected 201, but received \"%1 %2\".")
                .arg(_item->_httpErrorCode)
                .arg(_job->reply()->attribute(QNetworkRequest::HttpReasonPhraseAttribute).toString()));
        return;
    }

    _item->_responseTimeStamp = _job->responseTimestamp();
    _item->_fileId = _job->reply()->rawHeader("OC-FileId");

    if (_item->_fileId.isEmpty()) {
        // Owncloud 7.0.0 and before did not have a header with the file id.
        // (https://github.com/owncloud/core/issues/9000)
        // So we must get the file id using a PROPFIND
        // This is required so that we can detect moves even if the folder is renamed on the server
        // while files are still uploading
        propagator()->_activeJobList.append(this);
        auto propfindJob = new PropfindJob(_job->account(), _job->path(), this);
        propfindJob->setProperties(QList<QByteArray>() << "getetag"
                                                       << "http://owncloud.org/ns:id");
        QObject::connect(propfindJob, &PropfindJob::result, this, &PropagateRemoteMkdir::propfindResult);
        QObject::connect(propfindJob, &PropfindJob::finishedWithError, this, &PropagateRemoteMkdir::propfindError);
        propfindJob->start();
        // Hand ownership of the slot over to the PROPFIND follow-up.
        _job = propfindJob;
        return;
    }
    success();
}
// Handle completion of the MKCOL that created the remote chunk collection.
// Anything but a clean "201 Created" aborts the upload.
void PropagateUploadFileNG::slotMkColFinished(QNetworkReply::NetworkError)
{
    propagator()->_activeJobList.removeOne(this);
    auto finishedJob = qobject_cast<MkColJob *>(sender());
    slotJobDestroyed(finishedJob); // drop it from the _jobs list

    const QNetworkReply::NetworkError networkError = finishedJob->reply()->error();
    _item->_httpErrorCode =
        finishedJob->reply()->attribute(QNetworkRequest::HttpStatusCodeAttribute).toInt();

    const bool created = (networkError == QNetworkReply::NoError) && (_item->_httpErrorCode == 201);
    if (!created) {
        const SyncFileItem::Status status =
            classifyError(networkError, _item->_httpErrorCode, &propagator()->_anotherSyncNeeded);
        abortWithError(status, finishedJob->errorStringParsingBody());
        return;
    }

    startNextChunk();
}
void PropagateUploadFileNG::slotDeleteJobFinished() { auto job = qobject_cast<DeleteJob *>(sender()); ASSERT(job); _jobs.remove(_jobs.indexOf(job)); QNetworkReply::NetworkError err = job->reply()->error(); if (err != QNetworkReply::NoError && err != QNetworkReply::ContentNotFoundError) { const int httpStatus = job->reply()->attribute(QNetworkRequest::HttpStatusCodeAttribute).toInt(); SyncFileItem::Status status = classifyError(err, httpStatus); if (status == SyncFileItem::FatalError) { abortWithError(status, job->errorString()); return; } else { qCWarning(lcPropagateUpload) << "DeleteJob errored out" << job->errorString() << job->reply()->url(); _removeJobError = true; // Let the other jobs finish } } if (_jobs.isEmpty()) { propagator()->_activeJobList.removeOne(this); if (_removeJobError) { // There was an error removing some files, just start over startNewUpload(); } else { startNextChunk(); } } }
/**
 * For delete or remove, check that we are not removing from a shared directory.
 * If we are, try to restore the file
 *
 * Return true if the problem is handled.
 */
bool PropagateItemJob::checkForProblemsWithShared(int httpStatusCode, const QString &msg)
{
    PropagateItemJob *newJob = NULL;

    // A 403 inside a shared directory means the server refused the change;
    // schedule a restore job that brings the local tree back in line.
    if (httpStatusCode == 403 && propagator()->isInSharedDirectory(_item->_file)) {
        if (!_item->isDirectory()) {
            SyncFileItemPtr downloadItem(new SyncFileItem(*_item));
            if (downloadItem->_instruction == CSYNC_INSTRUCTION_NEW
                || downloadItem->_instruction == CSYNC_INSTRUCTION_TYPE_CHANGE) {
                // don't try to recover pushing new files
                return false;
            } else if (downloadItem->_instruction == CSYNC_INSTRUCTION_SYNC) {
                // we modified the file locally, just create a conflict then
                downloadItem->_instruction = CSYNC_INSTRUCTION_CONFLICT;

                // HACK to avoid continuation: See task #1448: We do not know the _modtime from the
                // server, at this point, so just set the current one. (rather than the one locally)
                downloadItem->_modtime = Utility::qDateTimeToTime_t(QDateTime::currentDateTimeUtc());
            } else {
                // the file was removed or renamed, just recover the old one
                downloadItem->_instruction = CSYNC_INSTRUCTION_SYNC;
            }
            downloadItem->_direction = SyncFileItem::Down;
            newJob = new PropagateDownloadFile(propagator(), downloadItem);
        } else {
            // Directories are harder to recover.
            // But just re-create the directory, next sync will be able to recover the files
            SyncFileItemPtr mkdirItem(new SyncFileItem(*_item));
            mkdirItem->_instruction = CSYNC_INSTRUCTION_NEW;
            mkdirItem->_direction = SyncFileItem::Down;
            newJob = new PropagateLocalMkdir(propagator(), mkdirItem);
            // Also remove the inodes and fileid from the db so no further renames are tried for
            // this item.
            propagator()->_journal->avoidRenamesOnNextSync(_item->_file);
            propagator()->_anotherSyncNeeded = true;
        }
        if (newJob) {
            newJob->setRestoreJobMsg(msg);
            _restoreJob.reset(newJob);
            connect(_restoreJob.data(), &PropagatorJob::finished,
                this, &PropagateItemJob::slotRestoreJobFinished);
            QMetaObject::invokeMethod(newJob, "start");
        }
        return true;
    }
    return false;
}
// The PROPFIND of the chunk collection succeeded: compute how much contiguous
// data is already on the server, delete any chunks past a "hole", then resume
// uploading from the first missing chunk.
void PropagateUploadFileNG::slotPropfindFinished()
{
    auto job = qobject_cast<LsColJob *>(sender());
    slotJobDestroyed(job); // remove it from the _jobs list
    propagator()->_activeJobList.removeOne(this);

    _currentChunk = 0;
    _sent = 0;
    // Only the contiguous run of chunks starting at index 0 counts as sent.
    while (_serverChunks.contains(_currentChunk)) {
        _sent += _serverChunks[_currentChunk].size;
        _serverChunks.remove(_currentChunk);
        ++_currentChunk;
    }

    if (_sent > _item->_size) {
        // Normally this can't happen because the size is xor'ed with the transfer id, and it is
        // therefore impossible that there is more data on the server than on the file.
        qCCritical(lcPropagateUpload) << "Inconsistency while resuming " << _item->_file
                                      << ": the size on the server (" << _sent
                                      << ") is bigger than the size of the file ("
                                      << _item->_size << ")";
        startNewUpload();
        return;
    }

    qCInfo(lcPropagateUpload) << "Resuming " << _item->_file << " from chunk "
                              << _currentChunk << "; sent =" << _sent;

    if (!_serverChunks.isEmpty()) {
        qCInfo(lcPropagateUpload) << "To Delete" << _serverChunks.keys();
        propagator()->_activeJobList.append(this);
        _removeJobError = false;

        // Make sure that if there is a "hole" and then a few more chunks, on the server
        // we should remove the later chunks. Otherwise when we do dynamic chunk sizing, we may end up
        // with corruptions if there are too many chunks, or if we abort and there are still stale chunks.
        for (auto it = _serverChunks.begin(); it != _serverChunks.end(); ++it) {
            auto job = new DeleteJob(propagator()->account(),
                Utility::concatUrlPath(chunkUrl(), it->originalName), this);
            QObject::connect(job, &DeleteJob::finishedSignal,
                this, &PropagateUploadFileNG::slotDeleteJobFinished);
            _jobs.append(job);
            job->start();
        }
        _serverChunks.clear();
        return;
    }

    startNextChunk();
}
// Defensive cleanup on destruction.
PropagateItemJob::~PropagateItemJob()
{
    // Every job is expected to remove itself from _activeJobList when it
    // finishes, so this should be a no-op. It is a safety net: a buggy job,
    // or one deleted before its network signals arrive, would otherwise leave
    // a dangling pointer in the list and risk a crash.
    if (auto owner = propagator())
        owner->_activeJobList.removeAll(this);
}
void PropagateRemoteMkdir::start() { if (propagator()->_abortRequested.fetchAndAddRelaxed(0)) return; qCDebug(lcPropagateRemoteMkdir) << _item->_file; propagator()->_activeJobList.append(this); if (!_deleteExisting) { return slotStartMkcolJob(); } _job = new DeleteJob(propagator()->account(), propagator()->_remoteFolder + _item->_file, this); connect(_job, SIGNAL(finishedSignal()), SLOT(slotStartMkcolJob())); _job->start(); }
// Dispose of this advisor: first cancel the view subscription it holds
// (on the shared y view when idx == -1, otherwise on x[idx]), then run the
// base-class disposal against the council.
forceinline void
ChannelBool<View>::IndexAdvisor::dispose(Space& home, Council<A>& c) {
  ChannelBool<View>& p = static_cast<ChannelBool<View>&>(propagator());
  if (idx == -1)
    p.y.cancel(home,*this);
  else {
    p.x[idx].cancel(home,*this);
  }
  Advisor::dispose(home,c);
}
// Taint-analysis test fixture: objA is tainted by source(), the taint is
// propagated into objB, and objA is then filtered before both reach sink().
// NOTE(review): source/propagator/filter/sink are presumably supplied by the
// analysis harness; the expected report set depends on its configuration.
int filterTaint() {
  char* objA = source();
  char objB;
  propagator(objA, &objB);
  filter(objA);          // sanitizes objA only
  sink(objA);
  sink(objB);            // objB received taint via propagator() and was never filtered
  free(objA);
  return 0;
}
void PropagateRemoteMkdir::propfindResult(const QVariantMap &result) { propagator()->_activeJobList.removeOne(this); if (result.contains("getetag")) { _item->_etag = result["getetag"].toByteArray(); } if (result.contains("id")) { _item->_fileId = result["id"].toByteArray(); } success(); }
// Relay per-chunk upload progress to the propagator.
void PropagateUploadFileNG::slotUploadProgress(qint64 sent, qint64 total)
{
    // Qt signals completion with sent == 0 and total == 0; reporting that
    // would wrongly reset the progress, and finishedSignal() follows shortly
    // anyway. See https://bugreports.qt.io/browse/QTBUG-44782.
    if (sent == 0 && total == 0)
        return;

    propagator()->reportProgress(*_item, _sent + sent - total);
}
// NOTE(review): this appears to be an older variant of
// PropagateUploadFileNG::doStartUpload() (string-based connect()s, modtime
// converted via Utility::qDateTimeToTime_t). The chunk is truncated in this
// excerpt: the trailing `else if` branch continues beyond what is visible.
void PropagateUploadFileNG::doStartUpload()
{
    propagator()->_activeJobList.append(this);
    const SyncJournalDb::UploadInfo progressInfo = propagator()->_journal->getUploadInfo(_item->_file);
    if (progressInfo._valid && Utility::qDateTimeToTime_t(progressInfo._modtime) == _item->_modtime) {
        // Resume: list the chunks already present on the server.
        _transferId = progressInfo._transferid;
        auto url = chunkUrl();
        auto job = new LsColJob(propagator()->account(), url, this);
        _jobs.append(job);
        job->setProperties(QList<QByteArray>() << "resourcetype"
                                               << "getcontentlength");
        connect(job, SIGNAL(finishedWithoutError()), this, SLOT(slotPropfindFinished()));
        connect(job, SIGNAL(finishedWithError(QNetworkReply *)),
            this, SLOT(slotPropfindFinishedWithError()));
        connect(job, SIGNAL(destroyed(QObject *)), this, SLOT(slotJobDestroyed(QObject *)));
        connect(job, SIGNAL(directoryListingIterated(QString, QMap<QString, QString>)),
            this, SLOT(slotPropfindIterate(QString, QMap<QString, QString>)));
        job->start();
        return;
    } else if (progressInfo._valid) {
/*
 * Set event for a constraint: schedule every live, not-yet-scheduled
 * propagator of `c` whose wake condition matches event `e`.
 */
static void solver_event(cons_t c, event_t e)
{
    sym_t sym = c->sym;
    prop_t props = propagator(c);

    for (size_t i = 0; i < sym->propinfo_len; i++)
    {
        prop_t prop = props + i;
        /* Skip dead or already-queued propagators. */
        if (iskilled(prop) || isscheduled(prop))
            continue;
        if (shouldwake(prop, e))
            schedule(prop);
    }
}
void PropagateUploadFileNG::slotMoveJobFinished() { propagator()->_activeJobList.removeOne(this); auto job = qobject_cast<MoveJob *>(sender()); slotJobDestroyed(job); // remove it from the _jobs list QNetworkReply::NetworkError err = job->reply()->error(); _item->_httpErrorCode = job->reply()->attribute(QNetworkRequest::HttpStatusCodeAttribute).toInt(); if (err != QNetworkReply::NoError) { commonErrorHandling(job); return; } if (_item->_httpErrorCode != 201 && _item->_httpErrorCode != 204) { abortWithError(SyncFileItem::NormalError, tr("Unexpected return code from server (%1)").arg(_item->_httpErrorCode)); return; } QByteArray fid = job->reply()->rawHeader("OC-FileID"); if (fid.isEmpty()) { qCWarning(lcPropagateUpload) << "Server did not return a OC-FileID" << _item->_file; abortWithError(SyncFileItem::NormalError, tr("Missing File ID from server")); return; } else { // the old file id should only be empty for new files uploaded if (!_item->_fileId.isEmpty() && _item->_fileId != fid) { qCWarning(lcPropagateUpload) << "File ID changed!" << _item->_fileId << fid; } _item->_fileId = fid; } _item->_etag = getEtagFromReply(job->reply()); ; if (_item->_etag.isEmpty()) { qCWarning(lcPropagateUpload) << "Server did not return an ETAG" << _item->_file; abortWithError(SyncFileItem::NormalError, tr("Missing ETag from server")); return; } _item->_responseTimeStamp = job->responseTimestamp(); finalize(); }
void PropagateRemoteMove::finalize() { SyncJournalFileRecord oldRecord; propagator()->_journal->getFileRecord(_item->_originalFile, &oldRecord); // if reading from db failed still continue hoping that deleteFileRecord // reopens the db successfully. // The db is only queried to transfer the content checksum from the old // to the new record. It is not a problem to skip it here. propagator()->_journal->deleteFileRecord(_item->_originalFile); SyncFileItem newItem(*_item); newItem._type = _item->_type; if (oldRecord.isValid()) { newItem._checksumHeader = oldRecord._checksumHeader; if (newItem._size != oldRecord._fileSize) { qCWarning(lcPropagateRemoteMove) << "File sizes differ on server vs sync journal: " << newItem._size << oldRecord._fileSize; // the server might have claimed a different size, we take the old one from the DB newItem._size = oldRecord._fileSize; } } if (!propagator()->updateMetadata(newItem)) { done(SyncFileItem::FatalError, tr("Error writing metadata to the database")); return; } if (_item->isDirectory()) { propagator()->_renamedDirectories.insert(_item->_file, _item->_renameTarget); if (!adjustSelectiveSync(propagator()->_journal, _item->_file, _item->_renameTarget)) { done(SyncFileItem::FatalError, tr("Error writing metadata to the database")); return; } } propagator()->_journal->commit("Remote Rename"); done(SyncFileItem::Success); }
void PropagateNoContraction(const glslang::TIntermediate& intermediate)
{
    // First, traverses the AST, records symbols with their defining operations
    // and collects the initial set of precise symbols (symbol nodes that marked
    // as 'noContraction') and precise return nodes.
    auto mappings_and_precise_objects =
        getSymbolToDefinitionMappingAndPreciseSymbolIDs(intermediate);

    // The mapping of symbol node IDs to their defining nodes. This enables us
    // to get the defining node directly from a given symbol ID without
    // traversing the tree again.
    NodeMapping& symbol_definition_mapping = std::get<0>(mappings_and_precise_objects);

    // The mapping of object nodes to their accesschains recorded.
    AccessChainMapping& accesschain_mapping = std::get<1>(mappings_and_precise_objects);

    // The initial set of 'precise' objects which are represented as the
    // accesschain toward them.
    ObjectAccesschainSet& precise_object_accesschains = std::get<2>(mappings_and_precise_objects);

    // The set of 'precise' return nodes.
    ReturnBranchNodeSet& precise_return_nodes = std::get<3>(mappings_and_precise_objects);

    // Second, uses the initial set of precise objects as a worklist, pops an
    // accesschain, extracts the symbol ID from it. Then:
    //  1) Check the assignee object, see if it is 'precise' object node or
    //     contains 'precise' object. Obtain the incremental accesschain from the
    //     assignee node to its nested 'precise' node (if any).
    //  2) If the assignee object node is 'precise' or it contains 'precise'
    //     objects, traverses the right side of the assignment operation
    //     expression to mark arithmetic operations as 'noContraction' and update
    //     'precise' accesschain worklist with new found object nodes.
    // Repeat above steps until the worklist is empty.
    TNoContractionAssigneeCheckingTraverser checker(accesschain_mapping);
    TNoContractionPropagator propagator(&precise_object_accesschains, accesschain_mapping);

    // We have two initial precise worklists to handle:
    //  1) precise return nodes
    //  2) precise object accesschains
    // We should process the precise return nodes first and the involved
    // objects in the return expression should be added to the precise object
    // accesschain set.
    while (!precise_return_nodes.empty()) {
        glslang::TIntermBranch* precise_return_node = *precise_return_nodes.begin();
        propagator.propagateNoContractionInReturnNode(precise_return_node);
        precise_return_nodes.erase(precise_return_node);
    }

    while (!precise_object_accesschains.empty()) {
        // Get the accesschain of a precise object from the worklist.
        ObjectAccessChain precise_object_accesschain = *precise_object_accesschains.begin();
        // Get the symbol id from the accesschain.
        ObjectAccessChain symbol_id = getFrontElement(precise_object_accesschain);
        // Get all the defining nodes of that symbol ID.
        std::pair<NodeMapping::iterator, NodeMapping::iterator> range =
            symbol_definition_mapping.equal_range(symbol_id);
        // Visits all the assignment nodes of that symbol ID and
        //  1) Check if the assignee node is 'precise' or contains 'precise'
        //     objects.
        //  2) Propagate the 'precise' to the top layer object nodes
        //     in the right side of the assignment operation, update the 'precise'
        //     worklist with new accesschains representing the new 'precise'
        //     objects, and mark arithmetic operations as 'noContraction'.
        for (NodeMapping::iterator defining_node_iter = range.first;
             defining_node_iter != range.second; defining_node_iter++) {
            TIntermOperator* defining_node = defining_node_iter->second;
            // Check the assignee node.
            auto checker_result = checker.getPrecisenessAndRemainedAccessChain(
                defining_node, precise_object_accesschain);
            bool& contain_precise = std::get<0>(checker_result);
            ObjectAccessChain& remained_accesschain = std::get<1>(checker_result);
            // If the assignee node is 'precise' or contains 'precise', propagate the
            // 'precise' to the right. Otherwise just skip this assignment node.
            if (contain_precise) {
                propagator.propagateNoContractionInOneExpression(defining_node, remained_accesschain);
            }
        }
        // Remove the last processed 'precise' object from the worklist.
        precise_object_accesschains.erase(precise_object_accesschain);
    }
}
// Finish this propagation job: record the final status on the item, adjust it
// for restorations and pending aborts, update the error blacklist, log, emit
// the completion signals, and abort the whole propagation on fatal errors.
void PropagateItemJob::done(SyncFileItem::Status statusArg, const QString &errorString)
{
    _item->_status = statusArg;
    _state = Finished;

    if (_item->_isRestoration) {
        // A restoration that succeeded (or merely conflicted) is reported as
        // "Restoration"; a failed one keeps its status but appends the reason.
        if (_item->_status == SyncFileItem::Success || _item->_status == SyncFileItem::Conflict) {
            _item->_status = SyncFileItem::Restoration;
        } else {
            _item->_errorString += tr("; Restoration Failed: %1").arg(errorString);
        }
    } else {
        if (_item->_errorString.isEmpty()) {
            _item->_errorString = errorString;
        }
    }

    if (propagator()->_abortRequested.fetchAndAddRelaxed(0)
        && (_item->_status == SyncFileItem::NormalError
               || _item->_status == SyncFileItem::FatalError)) {
        // an abort request is ongoing. Change the status to Soft-Error
        _item->_status = SyncFileItem::SoftError;
    }

    // Blacklist handling
    switch (_item->_status) {
    case SyncFileItem::SoftError:
    case SyncFileItem::FatalError:
    case SyncFileItem::NormalError:
    case SyncFileItem::DetailError:
        // Check the blacklist, possibly adjusting the item (including its status)
        blacklistUpdate(propagator()->_journal, *_item);
        break;
    case SyncFileItem::Success:
    case SyncFileItem::Restoration:
        if (_item->_hasBlacklistEntry) {
            // wipe blacklist entry.
            propagator()->_journal->wipeErrorBlacklistEntry(_item->_file);
            // remove a blacklist entry in case the file was moved.
            if (_item->_originalFile != _item->_file) {
                propagator()->_journal->wipeErrorBlacklistEntry(_item->_originalFile);
            }
        }
        break;
    case SyncFileItem::Conflict:
    case SyncFileItem::FileIgnored:
    case SyncFileItem::NoStatus:
    case SyncFileItem::BlacklistedError:
        // nothing
        break;
    }

    if (_item->hasErrorStatus())
        qCWarning(lcPropagator) << "Could not complete propagation of" << _item->destination()
                                << "by" << this << "with status" << _item->_status
                                << "and error:" << _item->_errorString;
    else
        qCInfo(lcPropagator) << "Completed propagation of" << _item->destination()
                             << "by" << this << "with status" << _item->_status;

    emit propagator()->itemCompleted(_item);
    emit finished(_item->_status);

    if (_item->_status == SyncFileItem::FatalError) {
        // Abort all remaining jobs.
        propagator()->abort();
    }
}
// Statically check satisfiability of the asserted formula: run the
// logic-dependent preprocessing pipeline (ITE expansion, purification,
// Ackermannization, booleanization, top-level propagation, rescaling),
// then CNFize and hand the result to the SAT solver.
void OpenSMTContext::staticCheckSAT( )
{
  if ( config.verbosity > 1 )
    cerr << "# OpenSMTContext::Statically Checking" << endl;

  // Retrieve the formula
  Enode * formula = egraph.getUncheckedAssertions( );

  if ( config.dump_formula != 0 )
    egraph.dumpToFile( "original.smt2", formula );

  if ( formula == NULL )
    opensmt_error( "formula undefined" );

  if ( config.logic == UNDEF )
    opensmt_error( "unable to determine logic" );

  // Removes ITEs if there is any
  if ( egraph.hasItes( ) )
  {
    ExpandITEs expander( egraph, config );
    formula = expander.doit( formula );

    if ( config.dump_formula != 0 )
      egraph.dumpToFile( "ite_expanded.smt2", formula );
  }

  // Gather interface terms for DTC
  if ( ( config.logic == QF_UFIDL
      || config.logic == QF_UFLRA )
      // Don't use with DTC of course
      && config.sat_lazy_dtc == 1
      // Don't use when dumping interpolants
      && config.sat_dump_rnd_inter == 0 )
  {
    Purify purifier( egraph, config );
    purifier.doit( formula );
  }

  // Ackermanize away functional symbols
  if ( ( config.logic == QF_UFIDL
      || config.logic == QF_UFLRA )
      // Don't use with DTC of course
      && config.sat_lazy_dtc == 0
      // Don't use when dumping interpolants
      && config.sat_dump_rnd_inter == 0 )
  {
    Ackermanize ackermanizer( egraph, config );
    formula = ackermanizer.doit( formula );

    if ( config.dump_formula != 0 )
      egraph.dumpToFile( "ackermanized.smt2", formula );
  }

  // Artificially create a boolean
  // abstraction, if necessary
  if ( config.logic == QF_BV )
  {
    BVBooleanize booleanizer( egraph, config );
    formula = booleanizer.doit( formula );
  }

  if ( config.dump_formula != 0 )
    egraph.dumpToFile( "prepropagated.smt2", formula );

  // Top-Level Propagator. It also canonizes atoms
  TopLevelProp propagator( egraph, config );
  // Only if sat_dump_rnd_inter is not set
  if ( config.sat_dump_rnd_inter == 0 )
    formula = propagator.doit( formula );

  if ( config.dump_formula != 0 )
    egraph.dumpToFile( "propagated.smt2", formula );

  AXDiffPreproc2 axdiffpreproc( egraph, sstore, config );
  if ( config.logic == QF_AX
    || config.logic == QF_AXDIFF )
  {
    formula = axdiffpreproc.doit( formula );
    if ( config.dump_formula != 0 )
      egraph.dumpToFile( "axdiffpreproc.smt2", formula );
  }

  // Convert RDL into IDL, also compute if GMP is needed
  if ( config.logic == QF_RDL )
  {
    DLRescale rescaler( egraph, config );
    rescaler.doit( formula );
  }

  // For static checking, make sure that if DTC is used
  // then incrementality is enabled
  if ( ( config.logic == QF_UFIDL
      || config.logic == QF_UFLRA )
      && config.sat_lazy_dtc != 0 )
  {
    config.incremental = 1;
    config.sat_polarity_mode = 4;
  }

  if ( config.dump_formula != 0 )
    egraph.dumpToFile( "presolve.smt2", formula );

  // Solve only if not simplified already
  if ( formula->isTrue( ) )
  {
    state = l_True;
  }
  else if ( formula->isFalse( ) )
  {
    state = l_False;
  }
  else
  {
    assert(egraph.isInitialized());
    // Initialize theory solvers
    // egraph.initializeTheorySolvers( &solver );

    // Compute polarities
    egraph.computePolarities( formula );

    // CNFize the input formula and feed clauses to the solver
    state = cnfizer.cnfizeAndGiveToSolver( formula );

    // Solve
    if ( state == l_Undef )
    {
      state = solver.smtSolve( config.sat_preprocess_booleans != 0
                            || config.sat_preprocess_theory != 0 );
    }

    // If computation has been stopped, return undef
    if ( opensmt::stop )
      state = l_Undef;
  }
}
// Statically check satisfiability in interpolation mode: preprocess each
// assertion partition separately (ITE expansion, canonization, AX
// preprocessing, coloring), then CNFize all partitions and solve.
void OpenSMTContext::staticCheckSATInterp( )
{
  assert( config.produce_inter != 0 );

  // From now on coloring for new enodes
  // is automatically computed based on
  // the colors of the arguments. Unless
  // forced otherwise ...
  egraph.setAutomaticColoring( );
  // Propagate ABcommon terms if
  // tagged as Alocal/Blocal
  egraph.maximizeColors( );

  if ( config.verbosity > 1 )
    cerr << "# OpenSMTContext::Statically Checking" << endl;

  if ( config.logic == UNDEF )
    opensmt_error( "unable to determine logic" );

  if ( config.logic == QF_UFIDL
    || config.logic == QF_UFLRA )
  {
    if ( config.sat_lazy_dtc == 0 )
      opensmt_warning( "Overriding option sat_lazy_dtc" );
    config.sat_lazy_dtc = 1;
    config.incremental = 1;
    config.sat_polarity_mode = 4;
  }

  // Gather partitions
  vector< Enode * > assertions;
  for ( ;; )
  {
    // Get partition
    Enode * formula = egraph.getNextAssertion( );
    if ( formula == NULL ) break;
    assertions.push_back( formula );
  }

  // Purifier for DTC
  if ( config.logic == QF_UFIDL
    || config.logic == QF_UFLRA )
  {
    Purify purifier( egraph, config );
    purifier.doit( assertions );
  }

  // Ite expander
  ExpandITEs expander( egraph, config );
  // Top-Level Propagator. It also canonizes atoms
  TopLevelProp propagator( egraph, config );
  // Initialize theory solvers
  egraph.initializeTheorySolvers( &solver );
  // Initialize AXDIFF preprocessor
  AXDiffPreproc axdiffpreproc( egraph, sstore, config );

  for ( size_t in = 0 ; in < assertions.size( ) ; in ++ )
  {
    // Get formula
    Enode * formula = assertions[ in ];
    // const ipartitions_t partition = SETBIT( in + 1 );
    ipartitions_t partition = 0;
    setbit( partition, in + 1 );
    assert( in != 0 || formula != NULL );
    // Remove ites
    formula = expander.doit( formula );
    // Canonize atoms
    formula = propagator.doit( formula );
    // Preprocessing for AX
    if ( config.logic == QF_AX
      || config.logic == QF_AXDIFF )
    {
      formula = axdiffpreproc.doit( formula, partition );
    }
    // Some predicates may have been introduced
    // by the last steps. Color them if they are
    // not colored already
    egraph.finalizeColors( formula, partition );

    if ( config.dump_formula != 0 )
    {
      char buf[ 32 ];
      sprintf( buf, "presolve_%ld.smt2", in + 1 );
      egraph.dumpToFile( buf, formula );
    }
    // Restore
    assertions[ in ] = formula;
  }

  //
  // Now give to solver
  //
  for ( size_t in = 0 ; in < assertions.size( ) ; in ++ )
  {
    // const ipartitions_t partition = SETBIT( in + 1 );
    ipartitions_t partition = 0;
    setbit( partition, in + 1 );
    // Get partition
    Enode * formula = assertions[ in ];
    // CNFize the input formula and feed clauses to the solver
    state = cnfizer.cnfizeAndGiveToSolver( formula, partition );
  }

  // Solve
  if ( state == l_Undef )
  {
    if ( config.sat_preprocess_booleans != 0
      || config.sat_preprocess_theory != 0 )
      opensmt_warning( "not using SMT-preprocessing with interpolation" );
    state = solver.smtSolve( false );
  }

  // If computation has been stopped, return undef
  if ( opensmt::stop )
    state = l_Undef;
}
// Uploads the next chunk of the file, or — once all bytes have been sent —
// finalizes the upload by MOVE-ing the assembled chunks to the destination
// path. Reschedules itself from slotPutFinished() until done.
void PropagateUploadFileNG::startNextChunk()
{
    // Bail out quietly if the sync run is being aborted.
    if (propagator()->_abortRequested.fetchAndAddRelaxed(0))
        return;

    quint64 fileSize = _item->_size;
    ENFORCE(fileSize >= _sent, "Sent data exceeds file size");

    // prevent situation that chunk size is bigger than the remaining bytes to send
    _currentChunkSize = qMin(propagator()->_chunkSize, fileSize - _sent);

    if (_currentChunkSize == 0) {
        // Everything has been transferred: finish with a MOVE of the
        // server-side ".file" aggregate to the real destination path.
        Q_ASSERT(_jobs.isEmpty()); // There should be no running job anymore
        _finished = true;

        QString destination = QDir::cleanPath(propagator()->account()->url().path() + QLatin1Char('/')
            + propagator()->account()->davPath() + propagator()->_remoteFolder + _item->_file);

        auto headers = PropagateUploadFileCommon::headers();

        // "If-Match" applies to the source, but we are interested in comparing
        // the etag of the destination, so translate it into a WebDAV "If"
        // header conditioned on the destination resource.
        auto ifMatch = headers.take("If-Match");
        if (!ifMatch.isEmpty()) {
            headers["If"] = "<" + destination.toUtf8() + "> ([" + ifMatch + "])";
        }
        if (!_transmissionChecksumHeader.isEmpty()) {
            qCInfo(lcPropagateUpload) << destination << _transmissionChecksumHeader;
            headers[checkSumHeaderC] = _transmissionChecksumHeader;
        }
        headers["OC-Total-Length"] = QByteArray::number(fileSize);

        auto job = new MoveJob(propagator()->account(), Utility::concatUrlPath(chunkUrl(), "/.file"),
            destination, headers, this);
        _jobs.append(job);
        connect(job, &MoveJob::finishedSignal, this, &PropagateUploadFileNG::slotMoveJobFinished);
        connect(job, &QObject::destroyed, this, &PropagateUploadFileCommon::slotJobDestroyed);
        propagator()->_activeJobList.append(this);
        job->start();
        return;
    }

    // More data to send: prepare a device that reads the next chunk window
    // [_sent, _sent + _currentChunkSize) from the local file.
    auto device = new UploadDevice(&propagator()->_bandwidthManager);
    const QString fileName = propagator()->getFilePath(_item->_file);

    if (!device->prepareAndOpen(fileName, _sent, _currentChunkSize)) {
        qCWarning(lcPropagateUpload) << "Could not prepare upload device: " << device->errorString();

        // If the file is currently locked, we want to retry the sync
        // when it becomes available again.
        if (FileSystem::isFileLocked(fileName)) {
            emit propagator()->seenLockedFile(fileName);
        }
        // Soft error because this is likely caused by the user modifying his files while syncing
        abortWithError(SyncFileItem::SoftError, device->errorString());
        return;
    }

    QMap<QByteArray, QByteArray> headers;
    headers["OC-Chunk-Offset"] = QByteArray::number(_sent);

    // Advance the byte counter before the PUT completes; slotPutFinished()
    // relies on _sent to decide whether the upload is finished.
    _sent += _currentChunkSize;
    QUrl url = chunkUrl(_currentChunk);

    // job takes ownership of device via a QScopedPointer. Job deletes itself when finishing
    PUTFileJob *job = new PUTFileJob(propagator()->account(), url, device, headers, _currentChunk, this);
    _jobs.append(job);
    connect(job, &PUTFileJob::finishedSignal, this, &PropagateUploadFileNG::slotPutFinished);
    connect(job, &PUTFileJob::uploadProgress, this, &PropagateUploadFileNG::slotUploadProgress);
    connect(job, &PUTFileJob::uploadProgress, device, &UploadDevice::slotJobUploadProgress);
    connect(job, &QObject::destroyed, this, &PropagateUploadFileCommon::slotJobDestroyed);
    job->start();
    propagator()->_activeJobList.append(this);
    _currentChunk++;
}
void PropagateRemoteMkdir::propfindError() { // ignore the PROPFIND error propagator()->_activeJobList.removeOne(this); done(SyncFileItem::Success); }
// Handles completion of one chunk PUT: on error delegates to the common
// error handling; on success optionally retunes the dynamic chunk size,
// re-validates the local file, clears blacklist/error state for the item,
// and schedules the next chunk (or the finalizing MOVE) via startNextChunk().
void PropagateUploadFileNG::slotPutFinished()
{
    PUTFileJob *job = qobject_cast<PUTFileJob *>(sender());
    ASSERT(job);

    slotJobDestroyed(job); // remove it from the _jobs list

    propagator()->_activeJobList.removeOne(this);

    if (_finished) {
        // We have sent the finished signal already. We don't need to handle any remaining jobs
        return;
    }

    QNetworkReply::NetworkError err = job->reply()->error();
    if (err != QNetworkReply::NoError) {
        _item->_httpErrorCode = job->reply()->attribute(QNetworkRequest::HttpStatusCodeAttribute).toInt();
        commonErrorHandling(job);
        return;
    }

    ENFORCE(_sent <= _item->_size, "can't send more than size");

    // Adjust the chunk size for the time taken.
    //
    // Dynamic chunk sizing is enabled if the server configured a
    // target duration for each chunk upload.
    double targetDuration = propagator()->syncOptions()._targetChunkUploadDuration;
    if (targetDuration > 0) {
        double uploadTime = job->msSinceStart() + 1; // add one to avoid div-by-zero
        // Extrapolate: the size that would have taken targetDuration at the
        // observed transfer rate.
        auto predictedGoodSize = static_cast<quint64>(
            _currentChunkSize / uploadTime * targetDuration);

        // The whole targeting is heuristic. The predictedGoodSize will fluctuate
        // quite a bit because of external factors (like available bandwidth)
        // and internal factors (like number of parallel uploads).
        //
        // We use an exponential moving average here as a cheap way of smoothing
        // the chunk sizes a bit.
        quint64 targetSize = (propagator()->_chunkSize + predictedGoodSize) / 2;

        // Adjust the dynamic chunk size _chunkSize used for sizing of the item's chunks to be send
        propagator()->_chunkSize = qBound(
            propagator()->syncOptions()._minChunkSize,
            targetSize,
            propagator()->syncOptions()._maxChunkSize);

        qCInfo(lcPropagateUpload) << "Chunked upload of" << _currentChunkSize << "bytes took" << uploadTime
                                  << "ms, desired is" << targetDuration << "ms, expected good chunk size is"
                                  << predictedGoodSize << "bytes and nudged next chunk size to "
                                  << propagator()->_chunkSize << "bytes";
    }

    // All bytes sent means the next step is the finalizing MOVE.
    bool finished = _sent == _item->_size;

    // Check if the file still exists
    const QString fullFilePath(propagator()->getFilePath(_item->_file));
    if (!FileSystem::fileExists(fullFilePath)) {
        if (!finished) {
            abortWithError(SyncFileItem::SoftError, tr("The local file was removed during sync."));
            return;
        } else {
            // File vanished after the last chunk: let a follow-up sync
            // reconcile the deletion.
            propagator()->_anotherSyncNeeded = true;
        }
    }

    // Check whether the file changed since discovery.
    if (!FileSystem::verifyFileUnchanged(fullFilePath, _item->_size, _item->_modtime)) {
        propagator()->_anotherSyncNeeded = true;
        if (!finished) {
            abortWithError(SyncFileItem::SoftError, tr("Local file changed during sync."));
            return;
        }
    }

    if (!finished) {
        // Deletes an existing blacklist entry on successful chunk upload
        if (_item->_hasBlacklistEntry) {
            propagator()->_journal->wipeErrorBlacklistEntry(_item->_file);
            _item->_hasBlacklistEntry = false;
        }

        // Reset the error count on successful chunk upload
        auto uploadInfo = propagator()->_journal->getUploadInfo(_item->_file);
        uploadInfo._errorCount = 0;
        propagator()->_journal->setUploadInfo(_item->_file, uploadInfo);
        propagator()->_journal->commit("Upload info");
    }
    startNextChunk();
}
void PropagateRemoteMove::start() { if (propagator()->_abortRequested.fetchAndAddRelaxed(0)) return; QString origin = propagator()->adjustRenamedPath(_item->_file); qCDebug(lcPropagateRemoteMove) << origin << _item->_renameTarget; QString targetFile(propagator()->getFilePath(_item->_renameTarget)); if (origin == _item->_renameTarget) { // The parent has been renamed already so there is nothing more to do. finalize(); return; } QString remoteSource = propagator()->_remoteFolder + origin; QString remoteDestination = QDir::cleanPath(propagator()->account()->davUrl().path() + propagator()->_remoteFolder + _item->_renameTarget); auto &vfs = propagator()->syncOptions()._vfs; auto itype = _item->_type; ASSERT(itype != ItemTypeVirtualFileDownload && itype != ItemTypeVirtualFileDehydration); if (vfs->mode() == Vfs::WithSuffix && itype != ItemTypeDirectory) { const auto suffix = vfs->fileSuffix(); bool sourceHadSuffix = remoteSource.endsWith(suffix); bool destinationHadSuffix = remoteDestination.endsWith(suffix); // Remote source and destination definitely shouldn't have the suffix if (sourceHadSuffix) remoteSource.chop(suffix.size()); if (destinationHadSuffix) remoteDestination.chop(suffix.size()); QString folderTarget = _item->_renameTarget; // Users can rename the file *and at the same time* add or remove the vfs // suffix. That's a complicated case where a remote rename plus a local hydration // change is requested. We don't currently deal with that. Instead, the rename // is propagated and the local vfs suffix change is reverted. // The discovery would still set up _renameTarget without the changed // suffix, since that's what must be propagated to the remote but the local // file may have a different name. folderTargetAlt will contain this potential // name. 
QString folderTargetAlt = folderTarget; if (itype == ItemTypeFile) { ASSERT(!sourceHadSuffix && !destinationHadSuffix); // If foo -> bar.owncloud, the rename target will be "bar" folderTargetAlt = folderTarget + suffix; } else if (itype == ItemTypeVirtualFile) { ASSERT(sourceHadSuffix && destinationHadSuffix); // If foo.owncloud -> bar, the rename target will be "bar.owncloud" folderTargetAlt.chop(suffix.size()); } QString localTarget = propagator()->getFilePath(folderTarget); QString localTargetAlt = propagator()->getFilePath(folderTargetAlt); // If the expected target doesn't exist but a file with different hydration // state does, rename the local file to bring it in line with what the discovery // has set up. if (!FileSystem::fileExists(localTarget) && FileSystem::fileExists(localTargetAlt)) { QString error; if (!FileSystem::uncheckedRenameReplace(localTargetAlt, localTarget, &error)) { done(SyncFileItem::NormalError, tr("Could not rename %1 to %2, error: %3") .arg(folderTargetAlt, folderTarget, error)); return; } qCInfo(lcPropagateRemoteMove) << "Suffix vfs required local rename of" << folderTargetAlt << "to" << folderTarget; } } qCDebug(lcPropagateRemoteMove) << remoteSource << remoteDestination; _job = new MoveJob(propagator()->account(), remoteSource, remoteDestination, this); connect(_job.data(), &MoveJob::finishedSignal, this, &PropagateRemoteMove::slotMoveJobFinished); propagator()->_activeJobList.append(this); _job->start(); }