void
TopicManagerImpl::getContent(LogUpdate& llu, TopicContentSeq& content)
{
    {
        Lock sync(*this);
        reap();
    }

    DatabaseConnectionPtr connection = _connectionPool->newConnection();
    for(;;)
    {
        try
        {
            content.clear();
            for(map<string, TopicImplPtr>::const_iterator p = _topics.begin(); p != _topics.end(); ++p)
            {
                TopicContent rec = p->second->getContent();
                content.push_back(rec);
            }

            LLUWrapperPtr lluWrapper = _connectionPool->getLLU(connection);
            llu = lluWrapper->get();
            break;
        }
        catch(const DeadlockException&)
        {
            continue; // the database deadlocked; retry the whole read
        }
        catch(const DatabaseException& ex)
        {
            halt(_instance->communicator(), ex); // unrecoverable database error
        }
    }
}
void
TopicManagerImpl::getContent(LogUpdate& llu, TopicContentSeq& content)
{
    {
        Lock sync(*this);
        reap();
    }

    try
    {
        content.clear();
        for(map<string, TopicImplPtr>::const_iterator p = _topics.begin(); p != _topics.end(); ++p)
        {
            TopicContent rec = p->second->getContent();
            content.push_back(rec);
        }

        IceDB::ReadOnlyTxn txn(_instance->dbEnv());
        _lluMap.get(txn, lluDbKey, llu);
    }
    catch(const IceDB::LMDBException& ex)
    {
        logError(_instance->communicator(), ex);
        throw; // will become UnknownException in caller
    }
}
void
TopicManagerImpl::initMaster(const set<GroupNodeInfo>& slaves, const LogUpdate& llu)
{
    Lock sync(*this);

    reap();

    TopicContentSeq content;

    //
    // Update the database llu. This prevents the following case:
    //
    // Three replicas 1, 2, 3. 3 is the master. It accepts a change
    // (say A=10, old value 9), writes it to disk and then crashes. Now
    // 2 becomes the master. The client can ask this master for A and
    // it returns 9. Now 3 comes back online; it has the latest
    // database state, so it syncs this state to 1 and 2. The client
    // will now magically get A==10. The solution: when a new master is
    // elected and has the latest database state, it immediately
    // updates the llu stamp.
    //
    for(;;)
    {
        try
        {
            content.clear();

            DatabaseConnectionPtr connection = _connectionPool->newConnection();
            TransactionHolder txn(connection);
            for(map<string, TopicImplPtr>::const_iterator p = _topics.begin(); p != _topics.end(); ++p)
            {
                TopicContent rec = p->second->getContent();
                content.push_back(rec);
            }

            LLUWrapperPtr lluWrapper = _connectionPool->getLLU(connection);
            lluWrapper->put(llu);
            txn.commit();
            break;
        }
        catch(const DeadlockException&)
        {
            continue; // retry the snapshot and llu update
        }
        catch(const DatabaseException& ex)
        {
            halt(_instance->communicator(), ex);
        }
    }

    // Now initialize the observers.
    _instance->observers()->init(slaves, llu, content);
}
void
TopicManagerImpl::initMaster(const set<GroupNodeInfo>& slaves, const LogUpdate& llu)
{
    Lock sync(*this);

    reap();

    TopicContentSeq content;

    //
    // Update the database llu. This prevents the following case:
    //
    // Three replicas 1, 2, 3. 3 is the master. It accepts a change
    // (say A=10, old value 9), writes it to disk and then crashes. Now
    // 2 becomes the master. The client can ask this master for A and
    // it returns 9. Now 3 comes back online; it has the latest
    // database state, so it syncs this state to 1 and 2. The client
    // will now magically get A==10. The solution: when a new master is
    // elected and has the latest database state, it immediately
    // updates the llu stamp.
    //
    try
    {
        content.clear();

        IceDB::ReadWriteTxn txn(_instance->dbEnv());
        for(map<string, TopicImplPtr>::const_iterator p = _topics.begin(); p != _topics.end(); ++p)
        {
            TopicContent rec = p->second->getContent();
            content.push_back(rec);
        }

        _lluMap.put(txn, lluDbKey, llu);
        txn.commit();
    }
    catch(const IceDB::LMDBException& ex)
    {
        logError(_instance->communicator(), ex);
        throw; // will become UnknownException in caller
    }

    // Now initialize the observers.
    _instance->observers()->init(slaves, llu, content);
}
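//
// The LMDB variants above lean on IceDB's RAII transaction semantics: a
// write transaction that goes out of scope without commit() is rolled
// back, so a thrown LMDBException can never leave a half-applied llu
// update on disk. Below is a minimal sketch of that idiom. It assumes
// the LLUMap typedef behind _lluMap and the file-scope lluDbKey constant;
// writeLLU itself is a hypothetical helper, not part of this file.
//
namespace
{

void
writeLLU(const IceDB::Env& env, LLUMap& lluMap, const LogUpdate& llu)
{
    IceDB::ReadWriteTxn txn(env);   // open an LMDB write transaction
    lluMap.put(txn, lluDbKey, llu); // staged inside the transaction only
    txn.commit();                   // durable from here; had put() thrown,
                                    // ~ReadWriteTxn would roll the txn back
}

}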