//
// Called when connection establishment fails: records the failure,
// releases the proxy/delegate references and hands any queued requests
// off to the client thread pool so their ice_exception callbacks run
// outside this (locked) context. Wakes up any threads blocked waiting
// for initialization to complete.
//
// NOTE(review): caller contract (asserted below) — must not be invoked
// after successful initialization or after a previous exception was set.
//
void ConnectRequestHandler::setException(const Ice::LocalException& ex)
{
    Lock sync(*this);

    // Only reachable while still connecting and with no prior failure recorded.
    assert(!_initialized && !_exception.get());
    // Requests may only have been queued if this handler updates the proxy's
    // request handler; otherwise the queue must be empty.
    assert(_updateRequestHandler || _requests.empty());

    // Keep a clone of the exception; subsequent calls on this handler will
    // re-raise it. (The auto-ptr-style _exception owns the clone.)
    _exception.reset(ex.ice_clone());
    _proxy = 0; // Break cyclic reference count.
    _delegate = 0; // Break cyclic reference count.

    //
    // If some requests were queued, we notify them of the failure. This is done from a thread
    // from the client thread pool since this will result in ice_exception callbacks to be
    // called.
    //
    if(!_requests.empty())
    {
        const InstancePtr instance = _reference->getInstance();
        instance->clientThreadPool()->execute(new FlushRequestsWithException(instance, this, ex));
    }

    // Wake up threads blocked in the monitor waiting for connect completion;
    // they will observe _exception and fail their pending invocation.
    notifyAll();
}
//
// Called once the connection is established: drains the queue of requests
// that accumulated while the connection was being set up, sending each one
// over the now-ready connection. On success, swaps the proxy over to the
// lighter-weight ConnectionRequestHandler and marks this handler initialized;
// on failure, records the exception and delegates failure notification of
// the remaining queued requests to the client thread pool.
//
void ConnectRequestHandler::flushRequests()
{
    {
        Lock sync(*this);
        assert(_connection && !_initialized);

        // A batch request may still be being marshalled into the queue;
        // wait for it to finish before freezing the queue.
        while(_batchRequestInProgress)
        {
            wait();
        }

        //
        // We set the _flushing flag to true to prevent any additional queuing. Callers
        // might block for a little while as the queued requests are being sent but this
        // shouldn't be an issue as the request sends are non-blocking.
        //
        _flushing = true;
    }

    // Sent-callbacks collected while draining; invoked later from the thread
    // pool so user callbacks don't run under this handler's monitor.
    vector<OutgoingAsyncMessageCallbackPtr> sentCallbacks;
    try
    {
        // NOTE: _requests is read here without the lock — safe because
        // _flushing = true prevents any further queuing (see comment below).
        while(!_requests.empty()) // _requests is immutable when _flushing = true
        {
            Request& req = _requests.front();
            if(req.out)
            {
                // Queued asynchronous (AMI) request.
                if(_connection->sendAsyncRequest(req.out, _compress, _response) & AsyncStatusInvokeSentCallback)
                {
                    sentCallbacks.push_back(req.out);
                }
            }
            else if(req.batchOut)
            {
                // Queued asynchronous batch flush.
                if(_connection->flushAsyncBatchRequests(req.batchOut) & AsyncStatusInvokeSentCallback)
                {
                    sentCallbacks.push_back(req.batchOut);
                }
            }
            else
            {
                // Queued synchronous batch request: replay the raw marshalled
                // bytes into a fresh batch on the connection.
                BasicStream os(req.os->instance(), Ice::currentProtocolEncoding);
                _connection->prepareBatchRequest(&os);
                try
                {
                    const Ice::Byte* bytes;
                    req.os->i = req.os->b.begin();
                    req.os->readBlob(bytes, req.os->b.size());
                    os.writeBlob(bytes, req.os->b.size());
                }
                catch(const Ice::LocalException&)
                {
                    // Keep the connection's batch state consistent before
                    // propagating the failure.
                    _connection->abortBatchRequest();
                    throw;
                }
                _connection->finishBatchRequest(&os, _compress);
                delete req.os; // Queue owned the marshalled stream.
            }
            _requests.pop_front();
        }
    }
    catch(const LocalExceptionWrapper& ex)
    {
        // A send failed with a retriable wrapper; remaining queued requests
        // are failed asynchronously from the client thread pool (their
        // ice_exception callbacks must not run under this lock).
        Lock sync(*this);
        assert(!_exception.get() && !_requests.empty());
        _exception.reset(ex.get()->ice_clone());
        const InstancePtr instance = _reference->getInstance();
        instance->clientThreadPool()->execute(new FlushRequestsWithExceptionWrapper(instance, this, ex));
    }
    catch(const Ice::LocalException& ex)
    {
        // Same as above for plain local exceptions.
        Lock sync(*this);
        assert(!_exception.get() && !_requests.empty());
        _exception.reset(ex.ice_clone());
        const InstancePtr instance = _reference->getInstance();
        instance->clientThreadPool()->execute(new FlushRequestsWithException(instance, this, ex));
    }

    // Invoke collected sent callbacks from the thread pool, outside any lock.
    if(!sentCallbacks.empty())
    {
        const InstancePtr instance = _reference->getInstance();
        instance->clientThreadPool()->execute(new FlushSentRequests(instance, sentCallbacks));
    }

    //
    // We've finished sending the queued requests and the request handler now send
    // the requests over the connection directly. It's time to substitute the
    // request handler of the proxy with the more efficient connection request
    // handler which does not have any synchronization. This also breaks the cyclic
    // reference count with the proxy.
    //
    // NOTE(review): _exception is read here without the lock; presumably safe
    // because only this thread sets it once flushing has started — confirm.
    //
    if(_updateRequestHandler && !_exception.get())
    {
        _proxy->__setRequestHandler(_delegate, new ConnectionRequestHandler(_reference, _connection, _compress));
    }

    {
        Lock sync(*this);
        assert(!_initialized);
        if(!_exception.get())
        {
            // Success: new invocations go straight to the connection.
            // (_flushing stays true on the failure path; _exception then
            // governs the outcome of any waiting/queued callers.)
            _initialized = true;
            _flushing = false;
        }
        _proxy = 0; // Break cyclic reference count.
        _delegate = 0; // Break cyclic reference count.

        // Wake threads blocked waiting for initialization to complete.
        notifyAll();
    }
}