// Passive-side datalink acceptance: locate (or lazily create) the passive
// MulticastDataLink for the local participant, then start a handshake
// session with the remote participant.  If the connection has not been
// seen yet, the requesting client is queued so that the passive-connection
// callback can complete the association later.
TransportImpl::AcceptConnectResult
MulticastTransport::accept_datalink(const RemoteTransport& remote,
                                    const ConnectionAttribs& attribs,
                                    TransportClient* client)
{
  // Check that the remote reliability matches.
  if (get_remote_reliability(remote) != this->config_i_->is_reliable()) {
    return AcceptConnectResult();
  }

  // One passive (server) link is shared per local participant.
  const MulticastPeer local_peer =
    RepoIdConverter(attribs.local_id_).participantId();

  GuardThreadType guard_links(this->links_lock_);
  Links::const_iterator link_iter = this->server_links_.find(local_peer);
  MulticastDataLink_rch link;

  if (link_iter == this->server_links_.end()) {
    // No cached passive link for this participant yet: create and cache it.
    link = this->make_datalink(attribs.local_id_,
                               attribs.priority_,
                               false /*passive*/);
    this->server_links_[local_peer] = link;
  } else {
    link = link_iter->second;
  }
  // Release before start_session below; see the deadlock note there.
  guard_links.release();

  MulticastPeer remote_peer =
    RepoIdConverter(remote.repo_id_).participantId();

  GuardThreadType guard(this->connections_lock_);
  if (connections_.count(std::make_pair(remote_peer, local_peer))) {
    //can't call start session with connections_lock_ due to reactor
    //call in session->start which could deadlock with passive_connection
    guard.release();
    VDBG((LM_DEBUG, "(%P|%t) MulticastTransport::accept_datalink found\n"));
    MulticastSession_rch session =
      this->start_session(link, remote_peer, false /*!active*/);

    if (session.is_nil()) {
      // NOTE(review): on session failure the link is dropped and an
      // AcceptConnectResult carrying a nil link is returned -- confirm the
      // caller interprets a nil-link result as failure rather than defer.
      link = 0;
    }
    return AcceptConnectResult(link._retn());

  } else {
    // Connection not established yet: queue this client so (presumably) the
    // passive-connection path can invoke it once the handshake completes.
    this->pending_connections_[std::make_pair(remote_peer, local_peer)].
      push_back(std::pair<TransportClient*, RepoId>(client, remote.repo_id_));
    //can't call start session with connections_lock_ due to reactor
    //call in session->start which could deadlock with passive_connection
    guard.release();
    MulticastSession_rch session =
      this->start_session(link, remote_peer, false /*!active*/);
    // Success with no link: association completes asynchronously.
    return AcceptConnectResult(AcceptConnectResult::ACR_SUCCESS);
  }
}
// Active-side datalink establishment: find (or lazily create) the active
// (client) MulticastDataLink for the local participant and start a handshake
// session toward the remote participant.  Returns an empty result on
// reliability mismatch or session-start failure.
TransportImpl::AcceptConnectResult
MulticastTransport::connect_datalink(const RemoteTransport& remote,
                                     const ConnectionAttribs& attribs,
                                     TransportClient*)
{
  // Check that the remote reliability matches.
  if (get_remote_reliability(remote) != this->config_i_->is_reliable()) {
    return AcceptConnectResult();
  }

  GuardThreadType guard_links(this->links_lock_);

  // One active (client) link is shared per local participant.
  const MulticastPeer local_peer =
    RepoIdConverter(attribs.local_id_).participantId();
  Links::const_iterator link_iter = this->client_links_.find(local_peer);
  MulticastDataLink_rch link;

  if (link_iter == this->client_links_.end()) {
    // No cached active link for this participant yet: create and cache it.
    link = this->make_datalink(attribs.local_id_,
                               attribs.priority_,
                               true /*active*/);
    this->client_links_[local_peer] = link;
  } else {
    link = link_iter->second;
  }

  MulticastPeer remote_peer =
    RepoIdConverter(remote.repo_id_).participantId();

  // NOTE(review): unlike accept_datalink(), links_lock_ is still held across
  // start_session() here; the passive path releases its locks first citing a
  // possible reactor deadlock -- confirm the active path cannot deadlock.
  MulticastSession_rch session =
    this->start_session(link, remote_peer, true /*active*/);

  if (session.is_nil()) {
    // Failure already logged inside start_session().
    return AcceptConnectResult();
  }

  return AcceptConnectResult(link._retn());
}
// Obtain (creating if needed) the session that pairs this link with the
// given remote peer and start its handshake in the requested role.
// Returns an owned reference on success; logs and returns 0 on any failure.
MulticastSession*
MulticastTransport::start_session(const MulticastDataLink_rch& link,
                                  MulticastPeer remote_peer,
                                  bool active)
{
  // A session cannot exist without a link to carry it.
  if (link.is_nil()) {
    ACE_ERROR((LM_ERROR,
               ACE_TEXT("(%P|%t) ERROR: ")
               ACE_TEXT("MulticastTransport[%C]::start_session: ")
               ACE_TEXT("link is nil\n"),
               this->config_i_->name().c_str()));
    return 0;
  }

  // Reuse an existing session for this peer, or build a fresh one.
  MulticastSession_rch sess = link->find_or_create_session(remote_peer);
  if (sess.is_nil()) {
    ACE_ERROR((LM_ERROR,
               ACE_TEXT("(%P|%t) ERROR: ")
               ACE_TEXT("MulticastTransport[%C]::start_session: ")
               ACE_TEXT("failed to create session for remote peer: 0x%x!\n"),
               this->config_i_->name().c_str(), remote_peer));
    return 0;
  }

  // Kick off the handshake in the requested role (active or passive).
  const bool started = sess->start(active);
  if (!started) {
    ACE_ERROR((LM_ERROR,
               ACE_TEXT("(%P|%t) ERROR: ")
               ACE_TEXT("MulticastTransport[%C]::start_session: ")
               ACE_TEXT("failed to start session for remote peer: 0x%x!\n"),
               this->config_i_->name().c_str(), remote_peer));
    return 0;
  }

  // Transfer ownership of the reference to the caller.
  return sess._retn();
}
// Look up the session for remote_peer, creating and caching a new one if
// none exists.  Both return paths transfer an owned reference to the
// caller.  Returns 0 on creation or insertion failure (logged).
MulticastSession*
MulticastDataLink::find_or_create_session(MulticastPeer remote_peer)
{
  // Serialize all access to sessions_; the mutex is recursive, so re-entry
  // from the same thread is safe.  Return 0 (not `false`) on lock failure
  // since this function yields a pointer.
  ACE_GUARD_RETURN(ACE_SYNCH_RECURSIVE_MUTEX,
                   guard,
                   this->session_lock_,
                   0);

  MulticastSessionMap::iterator it(this->sessions_.find(remote_peer));
  if (it != this->sessions_.end()) {
    // Already exists.  Hand the caller an *owned* reference, mirroring the
    // _retn() on the newly-created path below; returning it->second.in()
    // directly would give the caller the map's own reference without
    // bumping the count, leading to a premature release.
    MulticastSession_rch existing = it->second;
    return existing._retn();
  }

  MulticastSession_rch session =
    this->session_factory_->create(this, remote_peer);
  if (session.is_nil()) {
    ACE_ERROR_RETURN((LM_ERROR,
                      ACE_TEXT("(%P|%t) ERROR: ")
                      ACE_TEXT("MulticastDataLink::find_or_create_session: ")
                      ACE_TEXT("failed to create session for remote peer: 0x%x!\n"),
                      remote_peer), 0);
  }

  // Cache the new session.  insert() reports success via pair.second (the
  // previous check against end() could never fire -- insert never returns
  // the end iterator); a collision is impossible here since we hold the
  // lock and just verified the key is absent, but check defensively.
  std::pair<MulticastSessionMap::iterator, bool> pair = this->sessions_.insert(
    MulticastSessionMap::value_type(remote_peer, session));
  if (!pair.second) {
    ACE_ERROR_RETURN((LM_ERROR,
                      ACE_TEXT("(%P|%t) ERROR: ")
                      ACE_TEXT("MulticastDataLink::find_or_create_session: ")
                      ACE_TEXT("failed to insert session for remote peer: 0x%x!\n"),
                      remote_peer), 0);
  }

  // Transfer one reference to the caller; the map retains its own.
  return session._retn();
}
// Locate an existing datalink for the requested role (active => client
// link, passive => server link).  Returns 0 when no link or session exists
// yet, which signals the framework to proceed to the connect/accept stage.
DataLink*
MulticastTransport::find_datalink_i(const RepoId& /*local_id*/,
                                    const RepoId& remote_id,
                                    const TransportBLOB& /*remote_data*/,
                                    CORBA::Long /*priority*/,
                                    bool active)
{
  // To accommodate the one-to-many nature of multicast reservations,
  // a session layer is used to maintain state between unique pairs
  // of DomainParticipants over a single DataLink instance. Given
  // that TransportImpl instances may only be attached to either
  // Subscribers or Publishers within the same DomainParticipant,
  // it may be assumed that the local_id always references the same
  // participant.
  MulticastDataLink_rch link;

  // Select the cached link matching the requested role.
  if (active && !this->client_link_.is_nil()) {
    link = this->client_link_;
  }
  if (!active && !this->server_link_.is_nil()) {
    link = this->server_link_;
  }

  if (!link.is_nil()) {
    MulticastPeer remote_peer = RepoIdConverter(remote_id).participantId();

    MulticastSession_rch session = link->find_session(remote_peer);

    if (session.is_nil()) {
      // From the framework's point-of-view, no DataLink was found.
      // This way we will progress to the connect/accept stage for handshaking.
      return 0;
    }

    // (Re)start the existing session; a start failure is logged and
    // reported to the framework as "no link found".
    if (!session->start(active)) {
      ACE_ERROR_RETURN((LM_ERROR,
                        ACE_TEXT("(%P|%t) ERROR: ")
                        ACE_TEXT("MulticastTransport[%C]::find_datalink_i: ")
                        ACE_TEXT("failed to start session for remote peer: 0x%x!\n"),
                        this->config_i_->name().c_str(), remote_peer), 0);
    }

    VDBG_LVL((LM_DEBUG, "(%P|%t) MulticastTransport[%C]::find_datalink_i "
              "started session for remote peer: 0x%x\n",
              this->config_i_->name().c_str(), remote_peer), 2);
  }

  // Transfer ownership of the (possibly nil) link reference to the caller.
  return link._retn();
}
// Active-side link establishment (single shared client link): ensure the
// client link exists, start a session with the remote peer, then block in
// wait_for_ack() until the handshake completes -- except for loopback to
// our own participant, where no ack will ever arrive.
DataLink*
MulticastTransport::connect_datalink_i(const RepoId& local_id,
                                       const RepoId& remote_id,
                                       const TransportBLOB& /*remote_data*/,
                                       CORBA::Long priority)
{
  MulticastDataLink_rch link = this->client_link_;
  if (link.is_nil()) {
    // Lazily create the single active link shared by this transport.
    link = this->make_datalink(local_id, remote_id, priority, true /*active*/);
    this->client_link_ = link;
  }

  MulticastPeer remote_peer = RepoIdConverter(remote_id).participantId();

  MulticastSession_rch session =
    this->start_session(link, remote_peer, true /*active*/);
  if (session.is_nil()) {
    return 0; // already logged in start_session()
  }

  if (remote_peer == RepoIdConverter(local_id).participantId()) {
    // Loopback: the remote participant is ourselves, so skip the ack wait.
    VDBG_LVL((LM_DEBUG, "(%P|%t) MulticastTransport[%C]::connect_datalink_i "
              "loopback on peer: 0x%x, skipping wait_for_ack\n",
              this->config_i_->name().c_str(), remote_peer), 2);
    return link._retn();
  }

  VDBG_LVL((LM_DEBUG, "(%P|%t) MulticastTransport[%C]::connect_datalink_i "
            "waiting for ack from: 0x%x\n",
            this->config_i_->name().c_str(), remote_peer), 2);

  if (session->wait_for_ack()) {
    VDBG_LVL((LM_DEBUG, "(%P|%t) MulticastTransport::connect_datalink_i "
              "done waiting for ack\n"), 2);
    return link._retn();
  }

  // NOTE(review): on ack failure 0 is returned but the cached client_link_
  // is kept -- confirm later connect attempts are expected to reuse it.
  VDBG_LVL((LM_DEBUG, "(%P|%t) MulticastTransport::connect_datalink_i "
            "wait for ack failed\n"), 2);

  return 0;
}