// Scans all subscriber transports and drops any whose subscriber appears to
// be gone: either the underlying RcfSession has been destroyed (connection
// closed), or the subscriber has stopped sending pings within the expected
// interval. Dropped slots are nulled out in place and compacted afterwards.
void MulticastClientTransport::dropIdleTransports()
{
    // Absorb any transports queued for addition before scanning.
    bringInNewTransports();

    Lock lock(mClientTransportsMutex);

    bool needToRemove = false;

    ClientTransportList::iterator iter;
    for (iter = mClientTransports.begin(); iter != mClientTransports.end(); ++iter)
    {
        // Each list element is a shared ptr to an auto ptr to the transport,
        // hence the triple dereference.
        RCF::ClientTransport & transport = ***iter;
        RcfSessionWeakPtr rcfSessionWeakPtr = transport.getRcfSession();
        if ( rcfSessionWeakPtr == RcfSessionWeakPtr() )
        {
            // HTTP/HTTPS connections do not hold on to the RcfSession and can't receive pings.
            continue;
        }
        RcfSessionPtr rcfSessionPtr = rcfSessionWeakPtr.lock();
        if (!rcfSessionPtr)
        {
            // Session is gone - the subscriber disconnected.
            RCF_LOG_2() << "Dropping subscription. Subscriber has closed connection.";
            iter->reset();
            needToRemove = true;
        }
        else
        {
            // A ping interval of 0 means pings are disabled for this session,
            // in which case the subscription is never considered idle here.
            boost::uint32_t pingIntervalMs = rcfSessionPtr->getPingIntervalMs();
            if (pingIntervalMs)
            {
                RCF::Timer pingTimer( rcfSessionPtr->getPingTimestamp() );

                // Grace period: twice the ping interval plus a fixed 5s slack,
                // to tolerate network jitter and scheduling delays.
                if (pingTimer.elapsed(5000 + 2*pingIntervalMs))
                {
                    std::string subscriberUrl = rcfSessionPtr->getClientAddress().string();
                    RCF_LOG_2()(subscriberUrl)(pingIntervalMs) << "Dropping subscription. Subscriber has not sent pings within the expected ping interval.";
                    iter->reset();
                    needToRemove = true;
                }
            }
        }
    }

    // Compact the list, removing all the slots nulled out above.
    if (needToRemove)
    {
        eraseRemove(mClientTransports, ClientTransportAutoPtrPtr());
    }
}
// Kill off subscriptions that haven't received any recent pings, or whose
// publisher-side connection has been closed. Collects doomed subscriptions
// into a local set while holding the subscriptions mutex, erases them from
// the live set, then releases the final references outside the lock.
void SubscriptionService::harvestExpiredSubscriptions()
{
    // Kill off subscriptions that haven't received any recent pings.
    Subscriptions subsToDrop;
    {
        Lock lock(mSubscriptionsMutex);
        Subscriptions::iterator iter;
        for (iter = mSubscriptions.begin(); iter != mSubscriptions.end(); ++iter)
        {
            // mSubscriptions holds weak pointers; skip entries that have
            // already expired.
            SubscriptionPtr subPtr = iter->lock();
            if (subPtr)
            {
                Subscription & sub = * subPtr;

                // NB: this inner lock intentionally guards the individual
                // subscription; it shadows the outer 'lock' variable name.
                RecursiveLock lock(sub.mMutex);

                RcfSessionPtr sessionPtr = sub.mRcfSessionWeakPtr.lock();
                if (!sessionPtr)
                {
                    // The publisher-side session is gone.
                    RCF_LOG_2()(sub.mPublisherUrl)(sub.mTopic) << "Dropping subscription. Publisher has closed connection.";
                    subsToDrop.insert(*iter);
                }
                else if (sub.mPingsEnabled)
                {
                    // A ping interval of 0 disables idle detection for this
                    // subscription.
                    boost::uint32_t pingIntervalMs = sub.mPingIntervalMs;
                    if (pingIntervalMs)
                    {
                        RCF::Timer pingTimer(sessionPtr->getPingTimestamp());

                        // Grace period: twice the ping interval plus 5s slack.
                        if (pingTimer.elapsed(5000 + 2*pingIntervalMs))
                        {
                            RCF_LOG_2()(sub.mPublisherUrl)(sub.mTopic)(sub.mPingIntervalMs) << "Dropping subscription. Publisher has not sent pings.";
                            subsToDrop.insert(*iter);
                        }
                    }
                }
            }
        }

        // Erase the doomed subscriptions from the live set, still under the
        // subscriptions mutex.
        for (iter = subsToDrop.begin(); iter != subsToDrop.end(); ++iter)
        {
            mSubscriptions.erase(*iter);
        }
    }

    // Release the collected references only after the mutex has been
    // released, so subscription destructors do not run under the lock.
    subsToDrop.clear();
}
void ClientStub::disconnect() { std::string endpoint; if (mEndpoint.get()) { endpoint = mEndpoint->asString(); } RcfClientPtr subRcfClientPtr = getSubRcfClientPtr(); setSubRcfClientPtr( RcfClientPtr() ); if (subRcfClientPtr) { subRcfClientPtr->getClientStub().disconnect(); subRcfClientPtr.reset(); } if (mTransport.get()) { RCF_LOG_2()(this)(endpoint) << "RcfClient - disconnecting from server."; mTransport->disconnect(mConnectTimeoutMs); mConnected = false; } if (mBatchBufferPtr) { mBatchBufferPtr->resize(0); } mAsyncCallback = boost::function0<void>(); }
// Creates a matched server/client transport pair communicating over a
// freshly generated unix local socket (named pipe) path.
TransportPair UnixLocalTransportFactory::createTransports()
{
    std::string pipeName = generateNewPipeName();
    RCF_LOG_2()(pipeName) << "Creating unix local socket transport pair";

    // Build each side explicitly, then pair them up.
    ServerTransportPtr serverTransportPtr(
        new UnixLocalServerTransport(pipeName) );

    ClientTransportAutoPtrPtr clientTransportPtrPtr(
        new ClientTransportAutoPtr(
            new UnixLocalClientTransport(pipeName) ) );

    return TransportPair(serverTransportPtr, clientTransportPtrPtr);
}
// Server startup hook. Runs the base-class startup first, caches the io
// service of the first task-entry thread pool, and begins accepting pipe
// connections if a pipe name has been configured.
void Win32NamedPipeServerTransport::onServerStart(RcfServer & server)
{
    AsioServerTransport::onServerStart(server);

    mpIoService = mTaskEntries[0].getThreadPool().getIoService();

    // No acceptor should exist yet at this point.
    RCF_ASSERT(mAcceptorPtr.get() == NULL);

    // An empty pipe name means this transport is not listening.
    if ( !mPipeName.empty() )
    {
        startAccepting();
    }

    RCF_LOG_2()(mPipeName) << "Win32NamedPipeServerTransport - listening on named pipe.";
}
void UdpServerTransport::open() { RCF_LOG_4()(mIpAddress.string()) << "UdpServerTransport - creating server socket."; int mPort = mIpAddress.getPort(); // create and bind a socket for receiving UDP messages if (mFd == -1 && mPort >= 0) { int ret = 0; int err = 0; mIpAddress.resolve(); mFd = mIpAddress.createSocket(SOCK_DGRAM, IPPROTO_UDP); // enable reception of broadcast messages int enable = 1; ret = setsockopt(mFd, SOL_SOCKET, SO_BROADCAST, (char *) &enable, sizeof(enable)); err = Platform::OS::BsdSockets::GetLastError(); if (ret) { RCF_LOG_1()(ret)(err) << "setsockopt() - failed to set SO_BROADCAST on listening udp socket."; } // Share the address binding, if appropriate. if (mEnableSharedAddressBinding) { enable = 1; // Set SO_REUSEADDR socket option. ret = setsockopt(mFd, SOL_SOCKET, SO_REUSEADDR, (char *) &enable, sizeof(enable)); err = Platform::OS::BsdSockets::GetLastError(); if (ret) { RCF_LOG_1()(ret)(err) << "setsockopt() - failed to set SO_REUSEADDR on listening udp multicast socket."; } // On OS X and BSD variants, need to set SO_REUSEPORT as well. 
#if (defined(__MACH__) && defined(__APPLE__)) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) ret = setsockopt(mFd, SOL_SOCKET, SO_REUSEPORT, (char *) &enable, sizeof(enable)); err = Platform::OS::BsdSockets::GetLastError(); if (ret) { RCF_LOG_1()(ret)(err) << "setsockopt() - failed to set SO_REUSEPORT on listening udp multicast socket."; } #endif } sockaddr * pServerAddr = NULL; Platform::OS::BsdSockets::socklen_t serverAddrSize = 0; mIpAddress.getSockAddr(pServerAddr, serverAddrSize); // bind the socket ret = ::bind(mFd, pServerAddr, serverAddrSize); if (ret < 0) { err = Platform::OS::BsdSockets::GetLastError(); Exception e(_RcfError_Socket("bind()"), err, RcfSubsystem_Os); RCF_THROW(e)(mFd)(mIpAddress.string())(ret); } RCF_ASSERT_NEQ( mFd , -1 ); if (!mMulticastIpAddress.empty()) { // set socket option for receiving multicast messages mMulticastIpAddress.resolve(); // TODO: implement for IPv6. RCF_ASSERT( mIpAddress.getType() == IpAddress::V4 && mMulticastIpAddress.getType() == IpAddress::V4); std::string ip = mMulticastIpAddress.getIp(); ip_mreq imr; imr.imr_multiaddr.s_addr = inet_addr(ip.c_str()); imr.imr_interface.s_addr = INADDR_ANY; int ret = setsockopt(mFd,IPPROTO_IP, IP_ADD_MEMBERSHIP, (const char*) &imr, sizeof(imr)); int err = Platform::OS::BsdSockets::GetLastError(); RCF_VERIFY( ret == 0, Exception( _RcfError_Socket("setsockopt() with IPPROTO_IP/IP_ADD_MEMBERSHIP"), err, RcfSubsystem_Os)) (mMulticastIpAddress.string())(mIpAddress.string()); // TODO: enable source-filtered multicast messages //ip_mreq_source imr; //imr.imr_multiaddr.s_addr = inet_addr("232.5.6.7"); //imr.imr_sourceaddr.s_addr = INADDR_ANY;//inet_addr("10.1.1.2"); //imr.imr_interface.s_addr = INADDR_ANY; //int ret = setsockopt(mFd,IPPROTO_IP, IP_ADD_SOURCE_MEMBERSHIP, (const char*) &imr, sizeof(imr)); //int err = Platform::OS::BsdSockets::GetLastError(); } // set the socket to nonblocking mode Platform::OS::BsdSockets::setblocking(mFd, false); // retrieve the port 
number, if it's generated by the system if (mPort == 0) { IpAddress ip(mFd, mIpAddress.getType()); mPort = ip.getPort(); mIpAddress.setPort(mPort); } RCF_LOG_2() << "UdpServerTransport - listening on port " << mPort << "."; } }
// One polling iteration of the UDP server: waits up to timeoutMs for a
// datagram on the server socket and, if one arrives, reads and dispatches
// it on the calling thread's per-thread session state. A negative
// timeoutMs blocks indefinitely. Throws RCF::Exception if select() fails.
void UdpServerTransport::cycle(int timeoutMs)
{
    RCF::ThreadInfoPtr tiPtr = getTlsThreadInfoPtr();
    RCF::ThreadPool & threadPool = tiPtr->getThreadPool();

    // Bail out promptly if the thread pool is shutting down.
    if (threadPool.shouldStop())
    {
        return;
    }

    // poll the UDP socket for messages, and read a message if one is available
    fd_set fdSet;
    FD_ZERO(&fdSet);
    FD_SET( static_cast<SOCKET>(mFd), &fdSet);
    timeval timeout;
    timeout.tv_sec = timeoutMs/1000;
    timeout.tv_usec = 1000*(timeoutMs%1000);

    int ret = Platform::OS::BsdSockets::select(
        mFd+1,
        &fdSet,
        NULL,
        NULL,
        timeoutMs < 0 ? NULL : &timeout);

    int err = Platform::OS::BsdSockets::GetLastError();
    if (ret == 1)
    {
        // Lazily create this thread's session state on first use, and cache
        // it in thread-local storage.
        SessionStatePtr sessionStatePtr = getTlsUdpSessionStatePtr();
        if (sessionStatePtr.get() == NULL)
        {
            sessionStatePtr = SessionStatePtr(new SessionState(*this));
            sessionStatePtr->mSessionPtr = getSessionManager().createSession();
            sessionStatePtr->mSessionPtr->setSessionState(*sessionStatePtr);
            setTlsUdpSessionStatePtr(sessionStatePtr);
        }

        {
            // read a message

            // Reuse the previous read buffer if we hold the only reference
            // to it; otherwise allocate a fresh one.
            ReallocBufferPtr &readVecPtr = sessionStatePtr->mReadVecPtr;
            if (readVecPtr.get() == NULL || !readVecPtr.unique())
            {
                readVecPtr.reset( new ReallocBuffer());
            }
            ReallocBuffer &buffer = *readVecPtr;

            SockAddrStorage from;
            int fromlen = sizeof(from);
            memset(&from, 0, sizeof(from));
            buffer.resize(4);

            // Peek at just the 4-byte length prefix, leaving the datagram
            // in the socket buffer (MSG_PEEK).
            int len = Platform::OS::BsdSockets::recvfrom(
                mFd,
                &buffer[0],
                4,
                MSG_PEEK,
                (sockaddr *) &from,
                &fromlen);

            err = Platform::OS::BsdSockets::GetLastError();

            sessionStatePtr->mRemoteAddress.init(
                (sockaddr&) from,
                fromlen,
                mIpAddress.getType());

            if (!isIpAllowed(sessionStatePtr->mRemoteAddress))
            {
                // Sender fails the server's IP access rules - consume and
                // drop the datagram.
                RCF_LOG_2()(sessionStatePtr->mRemoteAddress.getIp()) << "Client IP does not match server's IP access rules. Closing connection.";
                discardPacket(mFd);
            }
            else if (
                len == 4 ||
                (len == -1 && err == Platform::OS::BsdSockets::ERR_EMSGSIZE))
            {
                // We got the length prefix. EMSGSIZE is expected here: the
                // datagram is larger than the 4 bytes we peeked at.
                unsigned int dataLength = 0;
                memcpy(&dataLength, &buffer[0], 4);
                networkToMachineOrder(&dataLength, 4, 1);

                if (getMaxMessageLength() && dataLength > getMaxMessageLength())
                {
                    // Message too large - send back an error response with
                    // its own length prefix, then discard the oversized
                    // datagram.
                    ByteBuffer byteBuffer;

                    encodeServerError(getSessionManager(), byteBuffer, RcfError_ServerMessageLength);

                    byteBuffer.expandIntoLeftMargin(4);

                    * (boost::uint32_t *) ( byteBuffer.getPtr() ) =
                        static_cast<boost::uint32_t>(byteBuffer.getLength()-4);

                    RCF::machineToNetworkOrder(byteBuffer.getPtr(), 4, 1);

                    char *buffer = byteBuffer.getPtr();
                    std::size_t bufferLen = byteBuffer.getLength();

                    sockaddr * pRemoteAddr = NULL;
                    Platform::OS::BsdSockets::socklen_t remoteAddrSize = 0;
                    sessionStatePtr->mRemoteAddress.getSockAddr(pRemoteAddr, remoteAddrSize);

                    // Best-effort error reply; the result is ignored.
                    int len = sendto(
                        mFd,
                        buffer,
                        static_cast<int>(bufferLen),
                        0,
                        pRemoteAddr,
                        remoteAddrSize);

                    RCF_UNUSED_VARIABLE(len);
                    discardPacket(mFd);
                }
                else
                {
                    // Now read the full datagram (prefix + payload) for real.
                    buffer.resize(4+dataLength);
                    memset(&from, 0, sizeof(from));
                    fromlen = sizeof(from);

                    len = Platform::OS::BsdSockets::recvfrom(
                        mFd,
                        &buffer[0],
                        4+dataLength,
                        0,
                        (sockaddr *) &from,
                        &fromlen);

                    // Only dispatch if the complete message was received;
                    // a short or failed read silently drops the datagram.
                    if (static_cast<unsigned int>(len) == 4+dataLength)
                    {
                        getSessionManager().onReadCompleted(sessionStatePtr->mSessionPtr);
                    }
                }
            }
            else
            {
                // Peek failed in an unexpected way - drop the datagram.
                discardPacket(mFd);
            }
        }
    }
    else if (ret == 0)
    {
        // Timeout - nothing to do.
        //RCF_LOG_4()(mFd)(mPort)(timeoutMs) << "UdpServerTransport - no messages received within polling interval.";
    }
    else if (ret == -1)
    {
        Exception e(
            _RcfError_Socket("select()"),
            err,
            RcfSubsystem_Os);

        RCF_THROW(e)(mFd)(mIpAddress.string())(err);
    }
}
// Publishes the same message buffers to every subscriber transport.
// Initiates an asynchronous send on all transports, waits up to timeoutMs
// for completions, cancels stragglers and waits again, then removes any
// transports whose send failed. Always reports completion to clientStub
// and returns 1.
int MulticastClientTransport::send(
    ClientTransportCallback &       clientStub,
    const std::vector<ByteBuffer> & data,
    unsigned int                    timeoutMs)
{
    // NB: As the same buffer is sent on all transports, the transports and
    // filters should never modify the buffer. Any transport that transforms
    // data needs to do so in a separate per-transport buffer.

    // NOTE(review): this macro is stale - timeoutMs IS used below (logging,
    // and both info.wait() calls); the marker could be removed.
    RCF_UNUSED_VARIABLE(timeoutMs);

    RCF_LOG_2()(lengthByteBuffers(data))(timeoutMs) << "MulticastClientTransport::send() - entry.";

    mLastRequestSize = lengthByteBuffers(data);
    mRunningTotalBytesSent += mLastRequestSize;

    // Absorb any transports queued for addition before publishing.
    bringInNewTransports();

    Lock lock(mClientTransportsMutex);

    std::size_t transportsInitial = mClientTransports.size();

    // Shared completion counter, ticked by each handler as its send finishes.
    PublishCompletionInfo info( mClientTransports.size() );

    // Setup completion handlers.
    std::vector<PublishCompletionHandler> handlers( mClientTransports.size() );
    for (std::size_t i=0; i<mClientTransports.size(); ++i)
    {
        ClientTransport * pTransport = (*mClientTransports[i]).get();
        handlers[i] = PublishCompletionHandler(pTransport, &info);
    }

    // Async send on all transports.
    for (std::size_t i=0; i<handlers.size(); ++i)
    {
        try
        {
            handlers[i].mpClientTransport->setAsync(true);
            handlers[i].mpClientTransport->send(handlers[i], data, 0);
        }
        catch(const Exception &e)
        {
            // A synchronous failure counts as a completed-with-error send,
            // so the completion count still adds up.
            Exception err( _RcfError_SyncPublishError(e.what()) );
            handlers[i].onError(err);
        }
    }

    // Wait for async completions.
    boost::uint32_t completionDurationMs = 0;
    {
        Timer timer;
        info.wait(timeoutMs);
        completionDurationMs = timer.getDurationMs();
    }

    // Cancel any outstanding sends.
    for (std::size_t i=0; i<handlers.size(); ++i)
    {
        if (!handlers[i].mCompleted)
        {
            (*mClientTransports[i])->cancel();
            RCF_LOG_2()(i) << "MulticastClientTransport::send() - cancel send.";
        }
    }

    // Wait for canceled ops to complete.
    boost::uint32_t cancelDurationMs = 0;
    {
        Timer timer;
        info.wait(timeoutMs);
        cancelDurationMs = timer.getDurationMs();
    }

    // By now every handler must have completed, one way or another.
    RCF_ASSERT(info.getCompletionCount() == handlers.size());

    // Close and remove any subscriber transports with errors.
    std::size_t transportsRemoved = 0;
    for (std::size_t i=0; i<handlers.size(); ++i)
    {
        RCF_ASSERT(handlers[i].mCompleted);
        if (!handlers[i].mOk)
        {
            // Null out the slot; compacted below by eraseRemove().
            mClientTransports[i] = ClientTransportAutoPtrPtr();
            ++transportsRemoved;

            RCF_LOG_2()(i)(handlers[i].mCompleted)(handlers[i].mOk)(handlers[i].mError)
                << "MulticastClientTransport::send() - remove subscriber transport.";
        }
    }
    eraseRemove(mClientTransports, ClientTransportAutoPtrPtr());

    clientStub.onSendCompleted();

    std::size_t transportsFinal = transportsInitial - transportsRemoved;

    RCF_LOG_2()
        (lengthByteBuffers(data))(completionDurationMs)(cancelDurationMs)(transportsInitial)(transportsFinal)
        << "MulticastClientTransport::send() - exit.";

    return 1;
}