bool Subscriber::queue(bool forwarded, const EventDataSeq& events) { IceUtil::Monitor<IceUtil::RecMutex>::Lock sync(_lock); // If this is a link subscriber if the set of events were // forwarded from another IceStorm instance then do not queue the // events. if(forwarded && _rec.link) { return true; } switch(_state) { case SubscriberStateOffline: { if(IceUtil::Time::now(IceUtil::Time::Monotonic) < _next) { break; } // // State transition to online. // setState(SubscriberStateOnline); // fall through } case SubscriberStateOnline: copy(events.begin(), events.end(), back_inserter(_events)); if(_observer) { _observer->queued(static_cast<Ice::Int>(events.size())); } flush(); break; case SubscriberStateError: return false; case SubscriberStateReaped: break; } return true; }
void
SubscriberBatch::doFlush()
{
    IceUtil::Monitor<IceUtil::RecMutex>::Lock sync(_lock);

    //
    // If the subscriber isn't online we're done.
    //
    if(_state != SubscriberStateOnline)
    {
        return;
    }

    // Take ownership of the entire pending queue in one swap. The
    // assert documents that a flush is only ever scheduled when at
    // least one event has been queued.
    EventDataSeq v;
    v.swap(_events);
    assert(!v.empty());

    // Report the number of events now in flight to the observer; the
    // count is remembered so the same figure can be reported as
    // delivered below (and presumably by the sent callback).
    if(_observer)
    {
        _outstandingCount = static_cast<Ice::Int>(v.size());
        _observer->outstanding(_outstandingCount);
    }

    try
    {
        // _obj is a batch proxy: each ice_invoke below only adds the
        // request to the proxy's local batch; nothing goes on the wire
        // until the flush request.
        vector<Ice::Byte> dummy;
        for(EventDataSeq::const_iterator p = v.begin(); p != v.end(); ++p)
        {
            _obj->ice_invoke((*p)->op, (*p)->mode, (*p)->data, dummy, (*p)->context);
        }

        // Flush the batch asynchronously; exception/sent callbacks
        // complete the bookkeeping when the send happens later.
        Ice::AsyncResultPtr result = _obj->begin_ice_flushBatchRequests(
            Ice::newCallback_Object_ice_flushBatchRequests(this, &SubscriberBatch::exception, &SubscriberBatch::sent));
        if(result->sentSynchronously())
        {
            // Fast path: the flush went out on the calling thread, so
            // account for the delivery here instead of in sent().
            // NOTE(review): _outstanding is never incremented in this
            // method — the decrement-to-zero assumes the caller set it
            // to 1 before scheduling this flush; verify against flush().
            --_outstanding;
            assert(_outstanding == 0);
            if(_observer)
            {
                _observer->delivered(_outstandingCount);
            }
        }
    }
    catch(const Ice::Exception& ex)
    {
        // Transition to the error state; events swapped into v are lost.
        error(false, ex);
        return;
    }

    // Wake up a shutdown waiter once nothing is queued or in flight.
    if(_events.empty() && _outstanding == 0 && _shutdown)
    {
        _lock.notify();
    }

    // This is significantly faster than the async version, but it can
    // block the calling thread. Bad news!
    //_obj->ice_flushBatchRequests();
}