Example #1
void
TopicImpl::removeSubscribers(const Ice::IdentitySeq& ids)
{
    Ice::IdentitySeq removed;

    // First remove the subscriber from the subscribers list. It's
    // possible that some of these subscribers have already been
    // removed (consider, for example, a concurrent reap call from two
    // replicas on the same subscriber). To avoid sending unnecessary
    // observer updates, keep track of the subscribers that are
    // actually removed.
    for(Ice::IdentitySeq::const_iterator id = ids.begin(); id != ids.end(); ++id)
    {
        vector<SubscriberPtr>::iterator p = find(_subscribers.begin(), _subscribers.end(), *id);
        if(p != _subscribers.end())
        {
            (*p)->destroy();
            _subscribers.erase(p);
            removed.push_back(*id);
        }
    }

    // If there is no further work to do, we are done.
    if(removed.empty())
    {
        return;
    }

    // Next update the database and send the notification to any
    // slaves.
    LogUpdate llu;
    try
    {
        IceDB::ReadWriteTxn txn(_instance->dbEnv());

        for(Ice::IdentitySeq::const_iterator id = removed.begin(); id != removed.end(); ++id)
        {
            SubscriberRecordKey key;
            key.topic = _id;
            key.id = *id;

            _subscriberMap.del(txn, key);
        }

        llu = getIncrementedLLU(txn, _lluMap);

        txn.commit();
    }
    catch(const IceDB::LMDBException& ex)
    {
        halt(_instance->communicator(), ex);
    }

    _instance->observers()->removeSubscriber(llu, _name, removed);
}
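
For comparison with Example #2 below, which performs the same bookkeeping inline through lluWrapper, here is a minimal sketch of what the getIncrementedLLU helper presumably does inside the open transaction: read the current log update, bump its iteration counter, and write it back. The exact signature and the lluDbKey constant are assumptions for illustration; the real helper lives elsewhere in the IceStorm sources.

// Hypothetical sketch of getIncrementedLLU; the signature and the key
// name are assumptions. The read, increment, and write all happen under
// the same read-write transaction, so the LLU advances atomically with
// the subscriber deletions above.
LogUpdate
getIncrementedLLU(const IceDB::ReadWriteTxn& txn, LLUMap& lluMap)
{
    LogUpdate llu;
    lluMap.get(txn, lluDbKey, llu); // read the current log update
    llu.iteration++;                // one more update in this generation
    lluMap.put(txn, lluDbKey, llu); // persist it in the same transaction
    return llu;
}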
Example #2
void
TopicImpl::removeSubscribers(const Ice::IdentitySeq& ids)
{
    Ice::IdentitySeq removed;

    // First remove the subscriber from the subscribers list. It's
    // possible that some of these subscribers have already been
    // removed (consider, for example, a concurrent reap call from two
    // replicas on the same subscriber). To avoid sending unnecessary
    // observer updates, keep track of the subscribers that are
    // actually removed.
    for(Ice::IdentitySeq::const_iterator id = ids.begin(); id != ids.end(); ++id)
    {
        vector<SubscriberPtr>::iterator p = find(_subscribers.begin(), _subscribers.end(), *id);
        if(p != _subscribers.end())
        {
            (*p)->destroy();
            _subscribers.erase(p);
            removed.push_back(*id);
        }
    }

    // If there is no further work to do, we are done.
    if(removed.empty())
    {
        return;
    }

    // Next update the database and send the notification to any
    // slaves.
    LogUpdate llu;
    for(;;)
    {
        try
        {
            DatabaseConnectionPtr connection = _connectionPool->newConnection();
            TransactionHolder txn(connection);

            SubscribersWrapperPtr subscribersWrapper = _connectionPool->getSubscribers(connection);
            for(Ice::IdentitySeq::const_iterator id = removed.begin(); id != removed.end(); ++id)
            {
                SubscriberRecordKey key;
                key.topic = _id;
                key.id = *id;

                subscribersWrapper->erase(key);
            }

            LLUWrapperPtr lluWrapper = _connectionPool->getLLU(connection);
            llu = lluWrapper->get();
            llu.iteration++;
            lluWrapper->put(llu);

            txn.commit();
            break;
        }
        catch(const DeadlockException&)
        {
            continue;
        }
        catch(const DatabaseException& ex)
        {
            halt(_instance->communicator(), ex);
        }
    }

    _instance->observers()->removeSubscriber(llu, _name, removed);
}
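
The for(;;) loop in Example #2 is a generic optimistic-retry idiom: the whole transaction is re-run from scratch whenever the database layer reports a deadlock, and only unrecoverable errors reach halt. A standalone sketch of the pattern follows, using a hypothetical withTransactionRetry helper and the same connection and transaction types as above:

// Sketch of the retry-on-deadlock idiom, factored into a helper. The
// helper name and the Work callable are assumptions; the exception
// types match those used in Example #2.
template<typename Work>
void
withTransactionRetry(const DatabaseConnectionPtr& connection, Work work)
{
    for(;;)
    {
        try
        {
            TransactionHolder txn(connection); // rolls back unless committed
            work(connection);                  // one unit of reads and writes
            txn.commit();
            break;                             // success: stop retrying
        }
        catch(const DeadlockException&)
        {
            continue;                          // lost a lock race: redo the unit
        }
        // Any other DatabaseException propagates to the caller, which can
        // then decide, as Example #2 does, that the error is fatal.
    }
}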
Example #3
void
TopicImpl::publish(bool forwarded, const EventDataSeq& events)
{
    TopicInternalPrx masterInternal;
    Ice::Long generation = -1;
    Ice::IdentitySeq reap;
    {
        // Use cached reads.
        CachedReadHelper unlock(_instance->node(), __FILE__, __LINE__);

        //
        // Copy of the subscriber list so that event publishing can occur
        // in parallel.
        //
        vector<SubscriberPtr> copy;
        {
            IceUtil::Mutex::Lock sync(_subscribersMutex);
            if(_observer)
            {
                if(forwarded)
                {
                    _observer->forwarded();
                }
                else
                {
                    _observer->published();
                }
            }
            copy = _subscribers;
        }

        //
        // Queue each event, gathering a list of those subscribers that
        // must be reaped.
        //
        for(vector<SubscriberPtr>::const_iterator p = copy.begin(); p != copy.end(); ++p)
        {
            if(!(*p)->queue(forwarded, events) && (*p)->reap())
            {
                reap.push_back((*p)->id());
            }
        }

        // If there are no subscribers in error, then we're done.
        if(reap.empty())
        {
            return;
        }
        if(!unlock.getMaster())
        {
            IceUtil::Mutex::Lock sync(_subscribersMutex);
            removeSubscribers(reap);
            return;
        }
        masterInternal = TopicInternalPrx::uncheckedCast(unlock.getMaster()->ice_identity(_id));
        generation = unlock.generation();
    }

    //
    // Tell the master to reap this set of subscribers. This is an
    // AMI invocation so it shouldn't block the caller (in the
    // typical case); we do it outside of the mutex lock for
    // performance reasons.
    //
    // We must release the cached lock before calling this as the AMI
    // call may raise an exception in the caller (that is, directly
    // call ice_exception), which calls recover() on the node, which
    // would result in a deadlock since the node is locked.
    //
    masterInternal->begin_reap(reap, newCallback_TopicInternal_reap(new TopicInternalReapCB(_instance, generation),
                                                                    &TopicInternalReapCB::exception));
}
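
Failures from the asynchronous reap call are handed to a TopicInternalReapCB object. A plausible minimal shape for that callback, based on the comment above that a failed call ends up invoking recover() on the node, is sketched here; the member and type names are guesses, and the real class is defined elsewhere in the IceStorm sources.

// Hypothetical sketch of the AMI exception callback used above. On
// failure it asks the node to recover, passing the replication
// generation that was current when the reap was issued.
class TopicInternalReapCB : public IceUtil::Shared
{
public:

    TopicInternalReapCB(const InstancePtr& instance, Ice::Long generation) :
        _instance(instance), _generation(generation)
    {
    }

    void exception(const Ice::Exception&)
    {
        // The master could not be reached; trigger node recovery for
        // the generation captured under the cached-read lock.
        _instance->node()->recover(_generation);
    }

private:

    const InstancePtr _instance;
    const Ice::Long _generation;
};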