void ShareManager::applyAndClose()
{
    // std::cerr << "ShareManager::close(): updating!" << std::endl;
    updateFlags();
    close();
}
boolean TempSensor::updateTemperatureValue()
{
    boolean result = false;
    if (DS18B20->reset()) {
        isPresent = true;
        result = true;
        DS18B20->select(romCode);
        DS18B20->write(0xBE); // Command: read scratchpad
        byte scratchpad[8];
        for (int i = 0; i < 8; i++) {
            scratchpad[i] = DS18B20->read();
        }
        // The temperature is a signed 16-bit value in 1/16 degC units;
        // use int16_t so negative temperatures are handled correctly.
        int16_t raw = (scratchpad[1] << 8) | scratchpad[0];
        temperatureValue = (float)raw / 16.0;
    } else {
        isPresent = false;
        result = false;
    }
    isConvertCalled = false;
    updateFlags();
    return result;
}
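// Worked example of the 1/16 degC encoding used above: scratchpad bytes
// 0x91, 0x01 give raw = 0x0191 = 401, so 401 / 16.0 = 25.0625 degC. A reading
// of 0xFF5E yields int16_t(-162) / 16.0 = -10.125 degC, which is why the
// signed cast matters: as an unsigned int, 0xFF5E / 16.0 would come out as
// roughly 4085.9 degC.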
boolean TempSensor::setResolution(Resolution& resolutionConfig)
{
    boolean result = false;
    resolution = &resolutionConfig;
    if (DS18B20->reset()) {
        isPresent = true;
        result = true;
        DS18B20->select(romCode);
        DS18B20->write(0x4E);                        // Write scratchpad (3 bytes follow)
        DS18B20->write(0);                           // 1st byte - TH alarm register
        DS18B20->write(0);                           // 2nd byte - TL alarm register
        DS18B20->write(resolutionConfig.configByte); // 3rd byte - configuration register (defines resolution)
        DS18B20->write(0x48);                        // Copy scratchpad to EEPROM
        isResolutionConfigured = true;
    } else {
        isPresent = false;
        isResolutionConfigured = false;
        result = false;
    }
    updateFlags();
    return result;
}
void EventManager::removeDelegate(EventDelegate *target)
{
    FZ_ASSERT(target != NULL, "Target argument must be non-NULL.");

    // INVALIDATE
    if(invalidateDelegate(target))
        updateFlags();
}
Internal8085::Internal8085(QObject *parent) :
    QObject(parent),
    upAddressLimit(0x3000),
    lowAddressLimit(0x2000)
{
    instructionTypeMapping();
    byteCntMapping();

    for (int address = lowAddressLimit; address < upAddressLimit; ++address) {
        //qDebug() << address << "\n";
        cells[address - lowAddressLimit].resize(8);
    }

    A.resize(8);
    B.resize(8);
    C.resize(8);
    D.resize(8);
    E.resize(8);
    F.resize(8);
    H.resize(8);
    L.resize(8);
    PC.resize(16);
    SP.resize(16);

    rIndexMap[tr("B")] = 0;
    rIndexMap[tr("C")] = 1;
    rIndexMap[tr("D")] = 2;
    rIndexMap[tr("E")] = 3;
    rIndexMap[tr("H")] = 4;
    rIndexMap[tr("L")] = 5;
    rIndexMap[tr("M")] = 6;
    rIndexMap[tr("A")] = 7;

    rpIndexMap[tr("B")] = 0;
    rpIndexMap[tr("D")] = 1;
    rpIndexMap[tr("H")] = 2;
    rpIndexMap[tr("SP")] = 3;

    rpsIndexMap[tr("B")] = 0;
    rpsIndexMap[tr("D")] = 1;

    rpwIndexMap[tr("B")] = 0;
    rpwIndexMap[tr("D")] = 1;
    rpwIndexMap[tr("H")] = 2;
    rpwIndexMap[tr("PSW")] = 3;

    regs[0] = &B;
    regs[1] = &C;
    regs[2] = &D;
    regs[3] = &E;
    regs[4] = &H;
    regs[5] = &L;
    regs[6] = NULL;
    regs[7] = &A;
    regs[8] = &temp;

    connect(this, SIGNAL(accumulatorChanged()), this, SLOT(updateFlags()));
}
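// The accumulatorChanged() -> updateFlags() connection above suggests a slot
// that recomputes the 8085 status flags from A. A minimal sketch, assuming the
// registers are bit vectors indexed LSB-first (so A[7] is the sign bit) and F
// uses the 8085 layout S Z x AC x P x CY from bit 7 down to bit 0; the real
// slot may differ:
void Internal8085::updateFlags()
{
    bool zero = true;
    int ones = 0;
    for (int i = 0; i < 8; ++i) {
        if (A[i]) {
            zero = false;
            ++ones;
        }
    }
    F[7] = A[7];            // S: sign flag mirrors the accumulator's MSB
    F[6] = zero;            // Z: set when A == 0
    F[2] = (ones % 2) == 0; // P: set on even parity of the accumulator
    // CY (bit 0) and AC (bit 4) depend on the instruction that produced the
    // result, so they cannot be derived from A alone and are left unchanged.
}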
void EventManager::addDelegate(EventDelegate *target, uint16_t flags, fzInt priority, fzEventHandlerMode mode)
{
    FZ_ASSERT(target != NULL, "Target argument must be non-NULL.");

    fzEventHandler *handler = getHandlerForTarget(target);
    if(handler) {
        handler->mode = mode;
        if(priority != handler->priority) {
            updateHandlerFlags(handler, 0);
            handler = NULL;
        } else if(flags != handler->flags)
            updateHandlerFlags(handler, flags);
    }

    if(handler == NULL && flags != 0) {
        fzEventHandler newHandle;
        newHandle.flags = flags;
        newHandle.mode = mode;
        newHandle.priority = priority;
        newHandle.delegate = target;
        m_handlers.insert(indexForPriority(priority), newHandle);
    }

#if defined (FORZE_DEBUG) && FORZE_DEBUG > 0
    uint16_t compatibility = checkCompatibility(flags);
    if((flags & kFZEventType_Tap) != kFZEventType_Tap) {
        if(compatibility & kFZEventType_Touch)
            FZLOGERROR("EventManager: Touch events are not available in this device.");
        if(compatibility & kFZEventType_Mouse)
            FZLOGERROR("EventManager: Mouse events are not available in this device.");
    }
    if(compatibility & kFZEventType_MouseRight)
        FZLOGERROR("EventManager: Mouse right events are not available in this device.");
    if(compatibility & kFZEventType_Keyboard)
        FZLOGERROR("EventManager: Keyboard events are not available in this device.");
    if(compatibility & kFZEventType_Trackpad)
        FZLOGERROR("EventManager: Trackpad events are not available in this device.");
    if(compatibility & kFZEventType_Accelerometer)
        FZLOGERROR("EventManager: Accelerometer events are not available in this device.");
    if(compatibility & kFZEventType_Gyro)
        FZLOGERROR("EventManager: Gyroscope events are not available in this device.");
#endif

    updateFlags();
}
void MainWin::set_options(MainWinOptions::Settings settings)
{
    ui.toolBar->setVisible(!settings.hide_toolbar);
    closeToTray = settings.close_to_tray;
    hideFrame = settings.hide_frame;
    toolWindow = settings.tool_window;
    setWindowOpacity(1 - (settings.trans_percent / 100.0));

    roundCorners = (settings.round_corners && hideFrame);
    if(roundCorners) {
        QRegion r = roundRectRegion(0, 0, width(), height(), 6);
        setMask(r);
    } else
        clearMask();

    onTop = settings.on_top;
    updateFlags();
}
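// A minimal sketch of the companion updateFlags() that set_options() ends
// with, assuming it rebuilds the Qt window flags from the members assigned
// above (the exact flag set kept by the real method is an assumption):
void MainWin::updateFlags()
{
    Qt::WindowFlags flags = toolWindow ? Qt::Tool : Qt::Window;
    if (hideFrame)
        flags |= Qt::FramelessWindowHint;
    if (onTop)
        flags |= Qt::WindowStaysOnTopHint;
    setWindowFlags(flags); // setWindowFlags() hides the window as a side effect,
    show();                // so it must be re-shown afterwards
}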
void FeedModel::setAllAsUnread()
{
    Settings *s = Settings::instance();

    if (s->getSigninType() >= 10) {
        // setAllAsUnread not supported in API
        qWarning() << "Mark tab as unread is not supported!";
        return;
    }

    DatabaseManager::Action action;
    int mode = s->getViewMode();
    switch (mode) {
    case 0: // View mode: Tabs->Feeds->Entries
        _db->updateEntriesReadFlagByTab(_tabId, 0);
        action.type = DatabaseManager::UnSetTabReadAll;
        action.id1 = _tabId;
        action.date1 = _db->readLastUpdateByTab(_tabId);
        break;
    case 1: // View mode: Tabs->Entries
        qWarning() << "Error: This should never happen";
        return;
    case 2: // View mode: Feeds->Entries
        _db->updateEntriesReadFlagByDashboard(s->getDashboardInUse(), 0);
        action.type = DatabaseManager::UnSetAllRead;
        action.id1 = s->getDashboardInUse();
        action.date1 = _db->readLastUpdateByTab(_tabId);
        break;
    case 3: // View mode: Entries
    case 4: // View mode: Saved
    case 5: // View mode: Slow
        qWarning() << "Error: This should never happen";
        return;
    }

    updateFlags();
    _db->writeAction(action);
}
boolean TempSensor::callConvert()
{
    boolean result = false;
    if (DS18B20->reset()) {
        isPresent = true;
        result = true;
        DS18B20->select(romCode);
        DS18B20->write(0x44, 1); // Start temperature conversion; the second argument
                                 // holds the bus high (strong pull-up) so parasite-powered
                                 // sensors stay supplied during the conversion
        isConvertCalled = true;
        convertCallTimeMillis = millis();
    } else {
        isPresent = false;
        result = false;
        isConvertCalled = false;
    }
    updateFlags();
    return result;
}
boolean TempSensor::readRomCode()
{
    boolean result = false;
    DS18B20->reset_search();
    if (DS18B20->search(romCode) &&                  // Any 1-wire device on the bus?
        (OneWire::crc8(romCode, 7) == romCode[7]) && // CRC check (8th ROM byte is the CRC)
        (romCode[0] == 0x28))                        // Family code 0x28 identifies a DS18B20
    {
        isPresent = true;
        result = true;
    } else {
        isPresent = false;
        result = false;
    }
    isConvertCalled = false;
    isConvertFinished = false;
    updateFlags();
    return result;
}
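// Putting the four TempSensor methods together, a typical blocking read cycle
// looks roughly like this. A minimal sketch: the function name, the Resolution
// argument, and direct access to temperatureValue are assumptions, and 750 ms
// is the DS18B20's worst-case conversion time at 12-bit resolution:
void readTemperatureOnce(TempSensor& sensor, Resolution& res12bit)
{
    if (!sensor.readRomCode())           return; // no DS18B20 found on the bus
    if (!sensor.setResolution(res12bit)) return; // sensor vanished mid-setup
    if (!sensor.callConvert())           return; // conversion did not start
    delay(750);                                  // wait out the 12-bit conversion
    if (sensor.updateTemperatureValue()) {
        // sensor.temperatureValue now holds the reading in degrees Celsius
    }
}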
void syncFixUp(OperationContext* txn,
               FixUpInfo& fixUpInfo,
               OplogReader* oplogreader,
               ReplicationCoordinator* replCoord) {
    DBClientConnection* them = oplogreader->conn();

    // fetch all first so we needn't handle interruption in a fancy way
    unsigned long long totalSize = 0;

    list<pair<DocID, BSONObj> > goodVersions;

    BSONObj newMinValid;

    // fetch all the goodVersions of each document from current primary
    DocID doc;
    unsigned long long numFetched = 0;
    try {
        for (set<DocID>::iterator it = fixUpInfo.toRefetch.begin();
             it != fixUpInfo.toRefetch.end();
             it++) {
            doc = *it;
            verify(!doc._id.eoo());

            {
                // TODO: slow. lots of round trips.
                numFetched++;
                BSONObj good = them->findOne(doc.ns, doc._id.wrap(), NULL,
                                             QueryOption_SlaveOk).getOwned();
                totalSize += good.objsize();
                uassert(13410, "replSet too much data to roll back",
                        totalSize < 300 * 1024 * 1024);

                // note good might be eoo, indicating we should delete it
                goodVersions.push_back(pair<DocID, BSONObj>(doc, good));
            }
        }
        newMinValid = oplogreader->getLastOp(rsOplogName);
        if (newMinValid.isEmpty()) {
            error() << "rollback error newMinValid empty?";
            return;
        }
    } catch (DBException& e) {
        LOG(1) << "rollback re-get objects: " << e.toString();
        error() << "rollback couldn't re-get ns:" << doc.ns << " _id:" << doc._id << ' '
                << numFetched << '/' << fixUpInfo.toRefetch.size();
        throw e;
    }

    log() << "rollback 3.5";
    if (fixUpInfo.rbid != getRBID(oplogreader->conn())) {
        // our source rolled back itself. so the data we received isn't necessarily
        // consistent.
        warning() << "rollback rbid on source changed during rollback, cancelling this attempt";
        return;
    }

    // update them
    log() << "rollback 4 n:" << goodVersions.size();

    bool warn = false;

    invariant(!fixUpInfo.commonPointOurDiskloc.isNull());
    invariant(txn->lockState()->isW());

    // we have items we are writing that aren't from a point-in-time. thus best not to come
    // online until we get to that point in freshness.
    Timestamp minValid = newMinValid["ts"].timestamp();
    log() << "minvalid=" << minValid.toStringLong();
    setMinValid(txn, minValid);

    // any full collection resyncs required?
    if (!fixUpInfo.collectionsToResyncData.empty() ||
        !fixUpInfo.collectionsToResyncMetadata.empty()) {
        for (const string& ns : fixUpInfo.collectionsToResyncData) {
            log() << "rollback 4.1.1 coll resync " << ns;

            fixUpInfo.collectionsToResyncMetadata.erase(ns);

            const NamespaceString nss(ns);

            Database* db = dbHolder().openDb(txn, nss.db().toString());
            invariant(db);

            {
                WriteUnitOfWork wunit(txn);
                db->dropCollection(txn, ns);
                wunit.commit();
            }

            {
                string errmsg;

                // This comes as a GlobalWrite lock, so there is no DB to be acquired after
                // resume, so we can skip the DB stability checks. Also
                // copyCollectionFromRemote will acquire its own database pointer, under the
                // appropriate locks, so just releasing and acquiring the lock is safe.
                invariant(txn->lockState()->isW());
                Lock::TempRelease release(txn->lockState());

                bool ok = copyCollectionFromRemote(txn, them->getServerAddress(), ns, errmsg);
                uassert(15909,
                        str::stream() << "replSet rollback error resyncing collection " << ns
                                      << ' ' << errmsg,
                        ok);
            }
        }

        for (const string& ns : fixUpInfo.collectionsToResyncMetadata) {
            log() << "rollback 4.1.2 coll metadata resync " << ns;

            const NamespaceString nss(ns);
            auto db = dbHolder().openDb(txn, nss.db().toString());
            invariant(db);
            auto collection = db->getCollection(ns);
            invariant(collection);
            auto cce = collection->getCatalogEntry();

            const std::list<BSONObj> info =
                them->getCollectionInfos(nss.db().toString(), BSON("name" << nss.coll()));

            if (info.empty()) {
                // Collection dropped by "them" so we should drop it too.
                log() << ns << " not found on remote host, dropping";
                fixUpInfo.toDrop.insert(ns);
                continue;
            }

            invariant(info.size() == 1);

            CollectionOptions options;
            auto status = options.parse(info.front());
            if (!status.isOK()) {
                throw RSFatalException(str::stream() << "Failed to parse options "
                                                     << info.front() << ": "
                                                     << status.toString());
            }

            WriteUnitOfWork wuow(txn);
            if (options.flagsSet || cce->getCollectionOptions(txn).flagsSet) {
                cce->updateFlags(txn, options.flags);
            }

            status = collection->setValidator(txn, options.validator);
            if (!status.isOK()) {
                throw RSFatalException(str::stream() << "Failed to set validator: "
                                                     << status.toString());
            }
            wuow.commit();
        }

        // we did more reading from primary, so check it again for a rollback (which would mess
        // us up), and make minValid newer.
        log() << "rollback 4.2";

        string err;
        try {
            newMinValid = oplogreader->getLastOp(rsOplogName);
            if (newMinValid.isEmpty()) {
                err = "can't get minvalid from sync source";
            } else {
                Timestamp minValid = newMinValid["ts"].timestamp();
                log() << "minvalid=" << minValid.toStringLong();
                setMinValid(txn, minValid);
            }
        } catch (DBException& e) {
            err = "can't get/set minvalid: ";
            err += e.what();
        }
        if (fixUpInfo.rbid != getRBID(oplogreader->conn())) {
            // our source rolled back itself. so the data we received isn't necessarily
            // consistent. however, we've now done writes. thus we have a problem.
            err += "rbid at primary changed during resync/rollback";
        }
        if (!err.empty()) {
            severe() << "rolling back : " << err << ". A full resync will be necessary.";
            // TODO: reset minvalid so that we are permanently in fatal state
            // TODO: don't be fatal, but rather, get all the data first.
            throw RSFatalException();
        }
        log() << "rollback 4.3";
    }

    map<string, shared_ptr<Helpers::RemoveSaver> > removeSavers;

    log() << "rollback 4.6";
    // drop collections to drop before doing individual fixups - that might make things faster
    // below actually if there were subsequent inserts to rollback
    for (set<string>::iterator it = fixUpInfo.toDrop.begin(); it != fixUpInfo.toDrop.end();
         it++) {
        log() << "rollback drop: " << *it;

        Database* db = dbHolder().get(txn, nsToDatabaseSubstring(*it));
        if (db) {
            WriteUnitOfWork wunit(txn);

            shared_ptr<Helpers::RemoveSaver>& removeSaver = removeSavers[*it];
            if (!removeSaver)
                removeSaver.reset(new Helpers::RemoveSaver("rollback", "", *it));

            // perform a collection scan and write all documents in the collection to disk
            boost::scoped_ptr<PlanExecutor> exec(
                InternalPlanner::collectionScan(txn, *it, db->getCollection(*it)));
            BSONObj curObj;
            PlanExecutor::ExecState execState;
            while (PlanExecutor::ADVANCED == (execState = exec->getNext(&curObj, NULL))) {
                removeSaver->goingToDelete(curObj);
            }
            if (execState != PlanExecutor::IS_EOF) {
                if (execState == PlanExecutor::FAILURE &&
                    WorkingSetCommon::isValidStatusMemberObject(curObj)) {
                    Status errorStatus = WorkingSetCommon::getMemberObjectStatus(curObj);
                    severe() << "rolling back createCollection on " << *it << " failed with "
                             << errorStatus << ". A full resync is necessary.";
                } else {
                    severe() << "rolling back createCollection on " << *it
                             << " failed. A full resync is necessary.";
                }
                throw RSFatalException();
            }

            db->dropCollection(txn, *it);
            wunit.commit();
        }
    }

    log() << "rollback 4.7";
    OldClientContext ctx(txn, rsOplogName);
    Collection* oplogCollection = ctx.db()->getCollection(rsOplogName);
    uassert(13423,
            str::stream() << "replSet error in rollback can't find " << rsOplogName,
            oplogCollection);

    unsigned deletes = 0, updates = 0;
    time_t lastProgressUpdate = time(0);
    time_t progressUpdateGap = 10;
    for (list<pair<DocID, BSONObj> >::iterator it = goodVersions.begin();
         it != goodVersions.end();
         it++) {
        time_t now = time(0);
        if (now - lastProgressUpdate > progressUpdateGap) {
            log() << deletes << " delete and " << updates
                  << " update operations processed out of " << goodVersions.size()
                  << " total operations";
            lastProgressUpdate = now;
        }
        const DocID& doc = it->first;
        BSONObj pattern = doc._id.wrap(); // { _id : ... }
        try {
            verify(doc.ns && *doc.ns);
            if (fixUpInfo.collectionsToResyncData.count(doc.ns)) {
                // we just synced this entire collection
                continue;
            }

            // keep an archive of items rolled back
            shared_ptr<Helpers::RemoveSaver>& removeSaver = removeSavers[doc.ns];
            if (!removeSaver)
                removeSaver.reset(new Helpers::RemoveSaver("rollback", "", doc.ns));

            // todo: lots of overhead in context, this can be faster
            OldClientContext ctx(txn, doc.ns);

            // Add the doc to our rollback file
            BSONObj obj;
            Collection* collection = ctx.db()->getCollection(doc.ns);

            // Do not log an error when undoing an insert on a no longer existent collection.
            // It is likely that the collection was dropped as part of rolling back a
            // createCollection command and regardless, the document no longer exists.
            if (collection) {
                bool found = Helpers::findOne(txn, collection, pattern, obj, false);
                if (found) {
                    removeSaver->goingToDelete(obj);
                } else {
                    error() << "rollback cannot find object: " << pattern << " in namespace "
                            << doc.ns;
                }
            }

            if (it->second.isEmpty()) {
                // wasn't on the primary; delete.
                // TODO 1.6 : can't delete from a capped collection. need to handle that here.
                deletes++;
                if (collection) {
                    if (collection->isCapped()) {
                        // can't delete from a capped collection - so we truncate instead.
                        // if this item must go, so must all successors!!!
                        try {
                            // TODO: IIRC cappedTruncateAfter does not handle completely
                            // empty. this will be crazy slow if no _id index.
                            long long start = Listener::getElapsedTimeMillis();
                            RecordId loc = Helpers::findOne(txn, collection, pattern, false);
                            if (Listener::getElapsedTimeMillis() - start > 200)
                                warning() << "roll back slow no _id index for " << doc.ns
                                          << " perhaps?";
                            // would be faster but requires index:
                            // RecordId loc = Helpers::findById(nsd, pattern);
                            if (!loc.isNull()) {
                                try {
                                    collection->temp_cappedTruncateAfter(txn, loc, true);
                                } catch (DBException& e) {
                                    if (e.getCode() == 13415) {
                                        // hack: need to just make cappedTruncate do this...
                                        MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
                                            WriteUnitOfWork wunit(txn);
                                            uassertStatusOK(collection->truncate(txn));
                                            wunit.commit();
                                        }
                                        MONGO_WRITE_CONFLICT_RETRY_LOOP_END(
                                            txn, "truncate", collection->ns().ns());
                                    } else {
                                        throw e;
                                    }
                                }
                            }
                        } catch (DBException& e) {
                            error() << "rolling back capped collection rec " << doc.ns << ' '
                                    << e.toString();
                        }
                    } else {
//==============================================================================
Error ParticleEmitterResource::load(const CString& filename)
{
    U32 tmp;

    XmlDocument doc;
    ANKI_CHECK(doc.loadFile(filename, getTempAllocator()));

    XmlElement rel; // Root element
    ANKI_CHECK(doc.getChildElement("particleEmitter", rel));

    // XML load
    //
    ANKI_CHECK(xmlF32(rel, "life", m_particle.m_life));
    ANKI_CHECK(xmlF32(rel, "lifeDeviation", m_particle.m_lifeDeviation));

    ANKI_CHECK(xmlF32(rel, "mass", m_particle.m_mass));
    ANKI_CHECK(xmlF32(rel, "massDeviation", m_particle.m_massDeviation));

    ANKI_CHECK(xmlF32(rel, "size", m_particle.m_size));
    ANKI_CHECK(xmlF32(rel, "sizeDeviation", m_particle.m_sizeDeviation));
    ANKI_CHECK(xmlF32(rel, "sizeAnimation", m_particle.m_sizeAnimation));

    ANKI_CHECK(xmlF32(rel, "alpha", m_particle.m_alpha));
    ANKI_CHECK(xmlF32(rel, "alphaDeviation", m_particle.m_alphaDeviation));

    tmp = m_particle.m_alphaAnimation;
    ANKI_CHECK(xmlU32(rel, "alphaAnimationEnabled", tmp));
    m_particle.m_alphaAnimation = tmp;

    ANKI_CHECK(xmlVec3(rel, "forceDirection", m_particle.m_forceDirection));
    ANKI_CHECK(xmlVec3(rel, "forceDirectionDeviation", m_particle.m_forceDirectionDeviation));
    ANKI_CHECK(xmlF32(rel, "forceMagnitude", m_particle.m_forceMagnitude));
    ANKI_CHECK(xmlF32(rel, "forceMagnitudeDeviation", m_particle.m_forceMagnitudeDeviation));

    ANKI_CHECK(xmlVec3(rel, "gravity", m_particle.m_gravity));
    ANKI_CHECK(xmlVec3(rel, "gravityDeviation", m_particle.m_gravityDeviation));

    ANKI_CHECK(xmlVec3(rel, "startingPosition", m_particle.m_startingPos));
    ANKI_CHECK(xmlVec3(rel, "startingPositionDeviation", m_particle.m_startingPosDeviation));

    ANKI_CHECK(xmlU32(rel, "maxNumberOfParticles", m_maxNumOfParticles));

    ANKI_CHECK(xmlF32(rel, "emissionPeriod", m_emissionPeriod));
    ANKI_CHECK(xmlU32(rel, "particlesPerEmittion", m_particlesPerEmittion));

    tmp = m_usePhysicsEngine;
    ANKI_CHECK(xmlU32(rel, "usePhysicsEngine", tmp));
    m_usePhysicsEngine = tmp;

    XmlElement el;
    CString cstr;
    ANKI_CHECK(rel.getChildElement("material", el));
    ANKI_CHECK(el.getText(cstr));
    ANKI_CHECK(m_material.load(cstr, &getManager()));

    // Sanity checks
    //
    static const char* ERROR = "Particle emitter: "
        "Incorrect or missing value %s";

    if(m_particle.m_life <= 0.0) {
        ANKI_LOGE(ERROR, "life");
        return ErrorCode::USER_DATA;
    }

    if(m_particle.m_life - m_particle.m_lifeDeviation <= 0.0) {
        ANKI_LOGE(ERROR, "lifeDeviation");
        return ErrorCode::USER_DATA;
    }

    if(m_particle.m_size <= 0.0) {
        ANKI_LOGE(ERROR, "size");
        return ErrorCode::USER_DATA;
    }

    if(m_maxNumOfParticles < 1) {
        ANKI_LOGE(ERROR, "maxNumOfParticles");
        return ErrorCode::USER_DATA;
    }

    if(m_emissionPeriod <= 0.0) {
        ANKI_LOGE(ERROR, "emissionPeriod");
        return ErrorCode::USER_DATA;
    }

    if(m_particlesPerEmittion < 1) {
        ANKI_LOGE(ERROR, "particlesPerEmittion");
        return ErrorCode::USER_DATA;
    }

    // Calc some stuff
    //
    updateFlags();

    return ErrorCode::NONE;
}
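// The loader above implies an input document of roughly this shape. A minimal
// sketch: the element names come from the ANKI_CHECK calls, but all values,
// the material path, and the space-separated vec3 format are illustrative
// assumptions only:
//
// <particleEmitter>
//     <life>2.0</life>
//     <lifeDeviation>0.5</lifeDeviation>
//     <mass>1.0</mass>
//     <massDeviation>0.0</massDeviation>
//     <size>0.5</size>
//     <sizeDeviation>0.1</sizeDeviation>
//     <sizeAnimation>1.0</sizeAnimation>
//     <alpha>1.0</alpha>
//     <alphaDeviation>0.0</alphaDeviation>
//     <alphaAnimationEnabled>1</alphaAnimationEnabled>
//     <forceDirection>0.0 1.0 0.0</forceDirection>
//     <forceDirectionDeviation>0.0 0.0 0.0</forceDirectionDeviation>
//     <forceMagnitude>5.0</forceMagnitude>
//     <forceMagnitudeDeviation>1.0</forceMagnitudeDeviation>
//     <gravity>0.0 -9.8 0.0</gravity>
//     <gravityDeviation>0.0 0.0 0.0</gravityDeviation>
//     <startingPosition>0.0 0.0 0.0</startingPosition>
//     <startingPositionDeviation>0.1 0.1 0.1</startingPositionDeviation>
//     <maxNumberOfParticles>64</maxNumberOfParticles>
//     <emissionPeriod>0.1</emissionPeriod>
//     <particlesPerEmittion>4</particlesPerEmittion>
//     <usePhysicsEngine>1</usePhysicsEngine>
//     <material>materials/smoke.ankimtl</material>
// </particleEmitter>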
bool textSource::segment(int level,
                         int sstatus,
                         bool PrevIsField, // True if previous sibling block contains a \field
                         charprops CharProps)
{
    wint_t ch;
    // After parsing an html-tag, seeking to curr_pos brings you back to the
    // position where the parsed sequence started.
    curr_pos = Ftell(sourceFile);
    if(Option.keepEOLsequence) {
        copyEOLsequence(); // sourceFile is rewound
    }
    do {
        ch = Getc(sourceFile);
        end_offset = Ftell(sourceFile);
        if(curr_pos >= tagendpos) {
            // We are not inside an HTML-tag.
            if(flgs.inhtmltag) {
                flgs.firstafterhtmltag = true;
                flgs.inhtmltag = false;
            }
            // Check whether a well-formed HTML tag is ahead. Returns sourceFile
            // in same file position.
            flgs.htmltagcoming = isHTMLtagComing(ch);
            // assert(new_pos == Ftell(sourceFile));
            assert(end_offset == Ftell(sourceFile));
        } else if(flgs.htmltagcoming) {
            // We are leaving an HTML-tag and entering a new one.
            flgs.inhtmltag = true;
            flgs.htmltagcoming = false;
        }

        /* Scan ahead, checking whether the line to come is a heading and
           therefore must be preceded by a newline (WritePar will then be set
           to true). */
        if(ch == '\n' || ch == '\r' || ch == WEOF || ch == 26) {
            flgs.in_fileName = false;
            heading = isHeading(firsttext, ch, WriteParAfterHeadingOrField);
            if(!skipSegmentation(firsttext, ch)) {
                // Bart 20040120. true because: suppose that end of line is end of segment
                doTheSegmentation(CharProps, true, false);
                if(!WriteParAfterHeadingOrField && heading) {
                    // A normal line after a heading has WritePar==false and heading==true
                    WriteParAfterHeadingOrField = true;
                    heading = false;
                }
            }
            if(firsttext.EOL)
                firsttext.b.LS = 1;
        } else {
            updateFlags(ch, flgs);
            int EOL = firsttext.EOL;
            bool sentenceEnd = checkSentenceStartDueToBullet(ch);
            if(sentenceEnd
               || flgs.htmltagcoming
               || flgs.inhtmltag
               || (end_offset - begin_offset > MAXSEGMENTLENGTH && isSpace(ch)) // guard against buffer overflow
               ) {
                doTheSegmentation(CharProps, false, false);
                firsttext.b.SD = 1;
                firsttext.b.LS = 0;
            }
            if(isSpace(ch)) {
                if(EOL)
                    firsttext.b.LS = 1;
            } else {
                firsttext.b.LS = 0;
                firsttext.EOL = 0; // resets SD, CR and LF
            }
        }
        curr_pos = end_offset;
    } while(ch != WEOF && ch != 26);
    outputtext->PutHandlingLine('\n', flgs); // 20100106 Flush last line
    return false;
}