void
TopicManagerImpl::getContent(LogUpdate& llu, TopicContentSeq& content)
{
    {
        Lock sync(*this);
        reap();
    }

    DatabaseConnectionPtr connection = _connectionPool->newConnection();
    for(;;)
    {
        try
        {
            content.clear();
            for(map<string, TopicImplPtr>::const_iterator p = _topics.begin(); p != _topics.end(); ++p)
            {
                TopicContent rec = p->second->getContent();
                content.push_back(rec);
            }

            LLUWrapperPtr lluWrapper = _connectionPool->getLLU(connection);
            llu = lluWrapper->get();
            break;
        }
        catch(const DeadlockException&)
        {
            continue;
        }
        catch(const DatabaseException& ex)
        {
            halt(_instance->communicator(), ex);
        }
    }
}
void
TopicManagerImpl::getContent(LogUpdate& llu, TopicContentSeq& content)
{
    {
        Lock sync(*this);
        reap();
    }

    try
    {
        content.clear();
        for(map<string, TopicImplPtr>::const_iterator p = _topics.begin(); p != _topics.end(); ++p)
        {
            TopicContent rec = p->second->getContent();
            content.push_back(rec);
        }

        IceDB::ReadOnlyTxn txn(_instance->dbEnv());
        _lluMap.get(txn, lluDbKey, llu);
    }
    catch(const IceDB::LMDBException& ex)
    {
        logError(_instance->communicator(), ex);
        throw; // will become UnknownException in caller
    }
}
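// The pair above shows the shape change when moving off the retry-based
// backend: an LMDB read transaction sees a consistent snapshot and cannot
// deadlock against the single writer, so the for(;;)/DeadlockException
// loop disappears and a single try/catch suffices. Below is a minimal
// sketch of the same read pattern against the raw LMDB C API; using the
// C API directly is an assumption for illustration, since IceStorm itself
// goes through the IceDB wrapper shown above.

#include <lmdb.h>

#include <stdexcept>
#include <string>

std::string
readValue(MDB_env* env, MDB_dbi dbi, const std::string& key)
{
    MDB_txn* txn;
    if(mdb_txn_begin(env, 0, MDB_RDONLY, &txn) != 0)
    {
        throw std::runtime_error("mdb_txn_begin failed");
    }

    MDB_val k, v;
    k.mv_size = key.size();
    k.mv_data = const_cast<char*>(key.data());

    int rc = mdb_get(txn, dbi, &k, &v); // reads from a stable snapshot
    std::string result;
    if(rc == 0)
    {
        result.assign(static_cast<const char*>(v.mv_data), v.mv_size);
    }
    mdb_txn_abort(txn); // read-only transactions are discarded, never committed

    if(rc != 0 && rc != MDB_NOTFOUND)
    {
        throw std::runtime_error(mdb_strerror(rc));
    }
    return result;
}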
void
TopicManagerImpl::initMaster(const set<GroupNodeInfo>& slaves, const LogUpdate& llu)
{
    Lock sync(*this);

    reap();

    TopicContentSeq content;

    // Update the database llu. This prevents the following case:
    //
    // Three replicas 1, 2, 3. 3 is the master. It accepts a change
    // (say A=10, old value 9), writes to disk and then crashes. Now 2
    // becomes the master. The client can ask this master for A and it
    // returns 9. Now 3 comes back online, it has the last database
    // state, so it syncs this state with 1, 2. The client will now
    // magically get A==10. The solution here is when a new master is
    // elected and gets the latest database state it immediately
    // updates the llu stamp.
    //
    for(;;)
    {
        try
        {
            content.clear();

            DatabaseConnectionPtr connection = _connectionPool->newConnection();
            TransactionHolder txn(connection);
            for(map<string, TopicImplPtr>::const_iterator p = _topics.begin(); p != _topics.end(); ++p)
            {
                TopicContent rec = p->second->getContent();
                content.push_back(rec);
            }

            LLUWrapperPtr lluWrapper = _connectionPool->getLLU(connection);
            lluWrapper->put(llu);
            txn.commit();
            break;
        }
        catch(const DeadlockException&)
        {
            continue;
        }
        catch(const DatabaseException& ex)
        {
            halt(_instance->communicator(), ex);
        }
    }

    // Now initialize the observers.
    _instance->observers()->init(slaves, llu, content);
}
void
TopicManagerImpl::initMaster(const set<GroupNodeInfo>& slaves, const LogUpdate& llu)
{
    Lock sync(*this);

    reap();

    TopicContentSeq content;

    // Update the database llu. This prevents the following case:
    //
    // Three replicas 1, 2, 3. 3 is the master. It accepts a change
    // (say A=10, old value 9), writes to disk and then crashes. Now 2
    // becomes the master. The client can ask this master for A and it
    // returns 9. Now 3 comes back online, it has the last database
    // state, so it syncs this state with 1, 2. The client will now
    // magically get A==10. The solution here is when a new master is
    // elected and gets the latest database state it immediately
    // updates the llu stamp.
    //
    try
    {
        content.clear();

        IceDB::ReadWriteTxn txn(_instance->dbEnv());
        for(map<string, TopicImplPtr>::const_iterator p = _topics.begin(); p != _topics.end(); ++p)
        {
            TopicContent rec = p->second->getContent();
            content.push_back(rec);
        }

        _lluMap.put(txn, lluDbKey, llu);
        txn.commit();
    }
    catch(const IceDB::LMDBException& ex)
    {
        logError(_instance->communicator(), ex);
        throw; // will become UnknownException in caller
    }

    // Now initialize the observers.
    _instance->observers()->init(slaves, llu, content);
}
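// The llu stamp only prevents the stale-master scenario described in the
// comment above if stamps are totally ordered. A sketch of that ordering,
// assuming LogUpdate is the (generation, iteration) pair from the
// IceStormElection Slice definition; the struct and field types here are
// an illustration, not the authoritative definition:

struct LogUpdateSketch
{
    long long generation; // bumped when a new master is elected
    long long iteration;  // bumped for each logged update within a generation
};

// Lexicographic comparison: because a newly elected master bumps the
// generation when it stamps the database, its state compares strictly
// newer than the crashed master's, regardless of how many updates the
// crashed master had logged in its own generation.
bool
isNewer(const LogUpdateSketch& a, const LogUpdateSketch& b)
{
    return a.generation > b.generation ||
           (a.generation == b.generation && a.iteration > b.iteration);
}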
void
TopicManagerImpl::observerInit(const LogUpdate& llu, const TopicContentSeq& content)
{
    Lock sync(*this);

    TraceLevelsPtr traceLevels = _instance->traceLevels();
    if(traceLevels->topicMgr > 0)
    {
        Ice::Trace out(traceLevels->logger, traceLevels->topicMgrCat);
        out << "init";
        for(TopicContentSeq::const_iterator p = content.begin(); p != content.end(); ++p)
        {
            out << " topic: " << _instance->communicator()->identityToString(p->id) << " subscribers: ";
            for(SubscriberRecordSeq::const_iterator q = p->records.begin(); q != p->records.end(); ++q)
            {
                if(q != p->records.begin())
                {
                    out << ",";
                }
                out << _instance->communicator()->identityToString(q->id);
                if(traceLevels->topicMgr > 1)
                {
                    out << " endpoints: " << IceStormInternal::describeEndpoints(q->obj);
                }
            }
        }
    }

    // First we update the database state, and then we update our
    // internal state.
    for(;;)
    {
        try
        {
            DatabaseConnectionPtr connection = _connectionPool->newConnection();
            TransactionHolder txn(connection);

            LLUWrapperPtr lluWrapper = _connectionPool->getLLU(connection);
            lluWrapper->put(llu);

            SubscribersWrapperPtr subscribersWrapper = _connectionPool->getSubscribers(connection);
            subscribersWrapper->clear();

            for(TopicContentSeq::const_iterator p = content.begin(); p != content.end(); ++p)
            {
                SubscriberRecordKey key;
                key.topic = p->id;

                SubscriberRecord rec;
                rec.link = false;
                rec.cost = 0;

                subscribersWrapper->put(key, rec);

                for(SubscriberRecordSeq::const_iterator q = p->records.begin(); q != p->records.end(); ++q)
                {
                    SubscriberRecordKey key;
                    key.topic = p->id;
                    key.id = q->id;

                    subscribersWrapper->put(key, *q);
                }
            }
            txn.commit();
            break;
        }
        catch(const DeadlockException&)
        {
            continue;
        }
        catch(const DatabaseException& ex)
        {
            halt(_instance->communicator(), ex);
        }
    }

    // We do this with two scans. The first runs through the topics
    // that we have and removes those not in the init list. The second
    // runs through the init list and either adds the ones that don't
    // exist, or updates those that do.
    map<string, TopicImplPtr>::iterator p = _topics.begin();
    while(p != _topics.end())
    {
        TopicContentSeq::const_iterator q;
        for(q = content.begin(); q != content.end(); ++q)
        {
            if(q->id == p->second->id())
            {
                break;
            }
        }

        if(q == content.end())
        {
            // Note that this destroy should not remove anything from
            // the database since we've already synced up the db
            // state.
            //
            // TODO: We could short circuit the database operations in
            // the topic by calling a third form of destroy.
            p->second->observerDestroyTopic(llu);
            _topics.erase(p++);
        }
        else
        {
            ++p;
        }
    }

    // Now run through the contents updating the topics that do exist,
    // and creating those that do not.
    for(TopicContentSeq::const_iterator q = content.begin(); q != content.end(); ++q)
    {
        string name = identityToTopicName(q->id);
        map<string, TopicImplPtr>::const_iterator p = _topics.find(name);
        if(p == _topics.end())
        {
            installTopic(name, q->id, true, q->records);
        }
        else
        {
            p->second->update(q->records);
        }
    }

    // Clear the set of observers.
    _instance->observers()->clear();
}
void
TopicManagerImpl::observerInit(const LogUpdate& llu, const TopicContentSeq& content)
{
    Lock sync(*this);

    TraceLevelsPtr traceLevels = _instance->traceLevels();
    if(traceLevels->topicMgr > 0)
    {
        Ice::Trace out(traceLevels->logger, traceLevels->topicMgrCat);
        out << "init";
        for(TopicContentSeq::const_iterator p = content.begin(); p != content.end(); ++p)
        {
            out << " topic: " << _instance->communicator()->identityToString(p->id) << " subscribers: ";
            for(SubscriberRecordSeq::const_iterator q = p->records.begin(); q != p->records.end(); ++q)
            {
                if(q != p->records.begin())
                {
                    out << ",";
                }
                out << _instance->communicator()->identityToString(q->id);
                if(traceLevels->topicMgr > 1)
                {
                    out << " endpoints: " << IceStormInternal::describeEndpoints(q->obj);
                }
            }
        }
    }

    // First we update the database state, and then we update our
    // internal state.
    try
    {
        IceDB::ReadWriteTxn txn(_instance->dbEnv());

        _lluMap.put(txn, lluDbKey, llu);

        _subscriberMap.clear(txn);

        for(TopicContentSeq::const_iterator p = content.begin(); p != content.end(); ++p)
        {
            SubscriberRecordKey key;
            key.topic = p->id;

            SubscriberRecord rec;
            rec.link = false;
            rec.cost = 0;

            _subscriberMap.put(txn, key, rec);

            for(SubscriberRecordSeq::const_iterator q = p->records.begin(); q != p->records.end(); ++q)
            {
                SubscriberRecordKey key;
                key.topic = p->id;
                key.id = q->id;

                _subscriberMap.put(txn, key, *q);
            }
        }
        txn.commit();
    }
    catch(const IceDB::LMDBException& ex)
    {
        logError(_instance->communicator(), ex);
        throw; // will become UnknownException in caller
    }

    // We do this with two scans. The first runs through the topics
    // that we have and removes those not in the init list. The second
    // runs through the init list and either adds the ones that don't
    // exist, or updates those that do.
    map<string, TopicImplPtr>::iterator p = _topics.begin();
    while(p != _topics.end())
    {
        TopicContentSeq::const_iterator q;
        for(q = content.begin(); q != content.end(); ++q)
        {
            if(q->id == p->second->id())
            {
                break;
            }
        }

        if(q == content.end())
        {
            // Note that this destroy should not remove anything from
            // the database since we've already synced up the db
            // state.
            //
            // TODO: We could short circuit the database operations in
            // the topic by calling a third form of destroy.
            p->second->observerDestroyTopic(llu);
            _topics.erase(p++);
        }
        else
        {
            ++p;
        }
    }

    // Now run through the contents updating the topics that do exist,
    // and creating those that do not.
    for(TopicContentSeq::const_iterator q = content.begin(); q != content.end(); ++q)
    {
        string name = identityToTopicName(q->id);
        map<string, TopicImplPtr>::const_iterator p = _topics.find(name);
        if(p == _topics.end())
        {
            installTopic(name, q->id, true, q->records);
        }
        else
        {
            p->second->update(q->records);
        }
    }

    // Clear the set of observers.
    _instance->observers()->clear();
}
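// The two-scan reconciliation at the end of observerInit is independent of
// the database backend, which is why it is identical in both versions
// above. Below is a self-contained sketch of the pattern, with a plain
// std::map and strings standing in for _topics and the TopicContentSeq:

#include <map>
#include <set>
#include <string>
#include <vector>

void
reconcile(std::map<std::string, int>& current, const std::vector<std::string>& desired)
{
    std::set<std::string> wanted(desired.begin(), desired.end());

    // Scan 1: drop entries that are not in the desired state.
    std::map<std::string, int>::iterator p = current.begin();
    while(p != current.end())
    {
        if(wanted.find(p->first) == wanted.end())
        {
            current.erase(p++); // post-increment keeps the iterator valid
        }
        else
        {
            ++p;
        }
    }

    // Scan 2: install entries that are missing; existing ones would be
    // updated in place here, as observerInit does with update().
    for(std::vector<std::string>::const_iterator q = desired.begin(); q != desired.end(); ++q)
    {
        if(current.find(*q) == current.end())
        {
            current[*q] = 0;
        }
    }
}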