Example #1
/// This method is called by a TcpConnection object that has been
/// created and opened by our acceptor_ as a result of passively
/// accepting a connection on our local address.  Ultimately, the connection
/// object needs to be paired with a DataLink object that is (or will be)
/// expecting this passive connection to be established.
void
TcpTransport::passive_connection(const ACE_INET_Addr& remote_address,
                                 const TcpConnection_rch& connection)
{
  DBG_ENTRY_LVL("TcpTransport", "passive_connection", 6);

  const PriorityKey key(connection->transport_priority(),
                        remote_address,
                        remote_address == tcp_config_->local_address_,
                        connection->is_connector());

  VDBG_LVL((LM_DEBUG, ACE_TEXT("(%P|%t) TcpTransport::passive_connection() - ")
            ACE_TEXT("established with %C:%d.\n"),
            remote_address.get_host_name(),
            remote_address.get_port_number()), 2);

  GuardType connection_guard(connections_lock_);
  TcpDataLink_rch link;
  {
    GuardType guard(links_lock_);
    links_.find(key, link);
  }

  if (!link.is_nil()) {
    connection_guard.release();

    if (connect_tcp_datalink(link, connection) == -1) {
      VDBG_LVL((LM_ERROR,
                ACE_TEXT("(%P|%t) ERROR: connect_tcp_datalink failed\n")), 5);
      GuardType guard(links_lock_);
      links_.unbind(key);

    } else {
      con_checker_->add(connection);
    }

    return;
  }

  // If we reach this point, this link was not in links_, so the
  // accept_datalink() call hasn't happened yet.  Store in connections_ for the
  // accept_datalink() method to find.
  VDBG_LVL((LM_DEBUG, "(%P|%t) # of bef connections: %d\n", connections_.size()), 5);
  const ConnectionMap::iterator where = connections_.find(key);

  if (where != connections_.end()) {
    ACE_ERROR((LM_ERROR,
               ACE_TEXT("(%P|%t) ERROR: TcpTransport::passive_connection() - ")
               ACE_TEXT("connection with %C:%d at priority %d already exists, ")
               ACE_TEXT("overwriting previously established connection.\n"),
               remote_address.get_host_name(),
               remote_address.get_port_number(),
               connection->transport_priority()));
  }

  connections_[key] = connection;
  VDBG_LVL((LM_DEBUG, "(%P|%t) # of after connections: %d\n", connections_.size()), 5);

  con_checker_->add(connection);
}
Example #2
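/// Called on the passive side when the handshake with a remote peer
/// completes.  If an accept_datalink() call is pending for this peer, its
/// ConnectionEvent is completed with the shared server_link_; in all cases
/// the peer is recorded in connections_ so later accepts can complete
/// immediately.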
void
MulticastTransport::passive_connection(MulticastPeer peer)
{
  ACE_GUARD(ACE_SYNCH_MUTEX, guard, this->connections_lock_);
  VDBG_LVL((LM_DEBUG, "(%P|%t) MulticastTransport[%C]::passive_connection "
                      "from peer 0x%x\n",
                      this->config_i_->name().c_str(), peer), 2);

  typedef std::multimap<ConnectionEvent*, MulticastPeer>::iterator iter_t;
  for (iter_t iter = this->pending_connections_.begin();
       iter != this->pending_connections_.end(); ++iter) {
    if (iter->second == peer) {
      DataLink_rch link = static_rchandle_cast<DataLink>(this->server_link_);
      VDBG_LVL((LM_DEBUG, "(%P|%t) MulticastTransport::passive_connection "
                          "completing accept\n"), 2);
      iter->first->complete(link);
      std::pair<iter_t, iter_t> range =
        this->pending_connections_.equal_range(iter->first);
      this->pending_connections_.erase(range.first, range.second);
      break;
    }
  }

  this->connections_.insert(peer);
}
Example #3
/// Associate the new connection object with this datalink object.
/// The states of the "old" connection object are copied to the new
/// connection object and the "old" connection object is replaced by
/// the new connection object.
int
OpenDDS::DCPS::SimpleTcpDataLink::reconnect(SimpleTcpConnection* connection)
{
  DBG_ENTRY_LVL("SimpleTcpDataLink","reconnect",6);

  // Sanity check - the connection should exist already since we are reconnecting.
  if (this->connection_.is_nil()) {
    VDBG_LVL((LM_ERROR,
              "(%P|%t) ERROR: SimpleTcpDataLink::reconnect old connection is nil.\n")
             , 1);
    return -1;
  }

  this->connection_->transfer(connection);

  bool released = false;
  TransportReceiveStrategy_rch brs;
  TransportSendStrategy_rch bss;

  {
    GuardType guard2(this->strategy_lock_);

    if (this->receive_strategy_.is_nil() && this->send_strategy_.is_nil()) {
      released = true;
      this->connection_ = 0;

    } else {
      brs = this->receive_strategy_;
      bss = this->send_strategy_;
    }
  }

  if (released) {
    return this->transport_->connect_datalink(this, connection);
  }

  // Keep a "copy" of the reference to the connection object for ourselves.
  connection->_add_ref();
  this->connection_ = connection;

  SimpleTcpReceiveStrategy* rs =
    dynamic_cast<SimpleTcpReceiveStrategy*>(brs.in());

  SimpleTcpSendStrategy* ss =
    dynamic_cast<SimpleTcpSendStrategy*>(bss.in());

  // Associate the new connection object with the receiving strategy and
  // disassociate the old connection object from the receiving strategy.
  int rs_result = rs->reset(this->connection_.in());

  // Associate the new connection object with the sending strategy and
  // disassociate the old connection object from the sending strategy.
  int ss_result = ss->reset(this->connection_.in());

  if (rs_result == 0 && ss_result == 0) {
    return 0;
  }

  return -1;
}
Example #4
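/// Passive-side datalink lookup.  If the remote's connection data advertises
/// the "multicast" transport type, reuse (or create) the shared server_link_.
/// When the remote peer has already completed the handshake the link is
/// returned right away; otherwise the ConnectionEvent is recorded in
/// pending_connections_ and a passive session is started, so the link will
/// be delivered later through passive_connection().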
DataLink*
MulticastTransport::accept_datalink(ConnectionEvent& ce)
{
  const std::string ttype = "multicast";
  const CORBA::ULong num_blobs = ce.remote_association_.remote_data_.length();
  const RepoId& remote_id = ce.remote_association_.remote_id_;
  MulticastPeer remote_peer = RepoIdConverter(remote_id).participantId();

  ACE_GUARD_RETURN(ACE_SYNCH_MUTEX, guard, this->connections_lock_, 0);

  for (CORBA::ULong idx = 0; idx < num_blobs; ++idx) {
    if (ce.remote_association_.remote_data_[idx].transport_type.in() == ttype) {

      MulticastDataLink_rch link = this->server_link_;
      if (link.is_nil()) {
        link = this->make_datalink(ce.local_id_, remote_id,
                                   ce.priority_, false /*!active*/);
        this->server_link_ = link;
      }

      if (this->connections_.count(remote_peer)) {
        // remote_peer has already completed the handshake
        VDBG_LVL((LM_DEBUG, "(%P|%t) MulticastTransport[%C]::accept_datalink "
                  "peer 0x%x already completed handshake\n",
                  this->config_i_->name().c_str(), remote_peer), 2);
        return link._retn();
      }

      this->pending_connections_.insert(
        std::pair<ConnectionEvent* const, MulticastPeer>(&ce, remote_peer));

      guard.release(); // start_session() called without connections_lock_,
      // at this point we know we will return and not need the lock again.

      VDBG_LVL((LM_DEBUG, "(%P|%t) MulticastTransport[%C]::accept_datalink "
                "starting session for peer 0x%x\n",
                this->config_i_->name().c_str(), remote_peer), 2);

      MulticastSession_rch session = this->start_session(link, remote_peer,
                                                         false /*!active*/);
      // Can't return link to framework until handshaking is done, which will
      // result in a call to MulticastTransport::passive_connection().
      return 0;
    }
  }
  return 0;
}
Example #5
/// Associate the new connection object with this datalink object.
/// The states of the "old" connection object are copied to the new
/// connection object and the "old" connection object is replaced by
/// the new connection object.
int
OpenDDS::DCPS::TcpDataLink::reconnect(TcpConnection* connection)
{
  DBG_ENTRY_LVL("TcpDataLink","reconnect",6);

  // Sanity check - the connection should exist already since we are reconnecting.
  if (this->connection_.is_nil()) {
    VDBG_LVL((LM_ERROR,
              "(%P|%t) ERROR: TcpDataLink::reconnect old connection is nil.\n")
             , 1);
    return -1;
  }

  this->connection_->transfer(connection);

  bool released = false;
  TransportStrategy_rch brs;
  TransportSendStrategy_rch bss;

  {
    GuardType guard2(this->strategy_lock_);

    if (this->receive_strategy_.is_nil() && this->send_strategy_.is_nil()) {
      released = true;
      this->connection_ = 0;

    } else {
      brs = this->receive_strategy_;
      bss = this->send_strategy_;
    }
  }

  TcpConnection_rch conn_rch(connection, false);

  if (released) {
    TcpDataLink_rch this_rch(this, false);
    return this->transport_->connect_tcp_datalink(this_rch, conn_rch);
  }

  this->connection_ = conn_rch._retn();

  TcpReceiveStrategy* rs = static_cast<TcpReceiveStrategy*>(brs.in());

  TcpSendStrategy* ss = static_cast<TcpSendStrategy*>(bss.in());

  // Associate the new connection object with the receiving strategy and
  // disassociate the old connection object from the receiving strategy.
  int rs_result = rs->reset(this->connection_.in());

  // Associate the new connection object with the sending strategy and
  // disassociate the old connection object from the sending strategy.
  int ss_result = ss->reset(this->connection_.in());

  if (rs_result == 0 && ss_result == 0) {
    return 0;
  }

  return -1;
}
Example #6
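/// Create and configure a new MulticastDataLink: choose the session factory
/// (reliable or best-effort), attach the send and receive strategies, and
/// join the configured multicast group.  Returns 0 on failure.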
MulticastDataLink*
MulticastTransport::make_datalink(const RepoId& local_id,
                                  const RepoId& remote_id,
                                  CORBA::Long priority,
                                  bool active)
{
  RcHandle<MulticastSessionFactory> session_factory;
  if (this->config_i_->reliable_) {
    ACE_NEW_RETURN(session_factory, ReliableSessionFactory, 0);
  } else {
    ACE_NEW_RETURN(session_factory, BestEffortSessionFactory, 0);
  }

  MulticastPeer local_peer = RepoIdConverter(local_id).participantId();
  MulticastPeer remote_peer = RepoIdConverter(remote_id).participantId();

  VDBG_LVL((LM_DEBUG, "(%P|%t) MulticastTransport[%C]::make_datalink "
            "peers: local 0x%x remote 0x%x, priority %d active %d\n",
            this->config_i_->name().c_str(), local_peer, remote_peer,
            priority, active), 2);

  MulticastDataLink_rch link;
  ACE_NEW_RETURN(link,
                 MulticastDataLink(this,
                                   session_factory.in(),
                                   local_peer,
                                   active),
                 0);

  // Configure link with transport configuration and reactor task:
  link->configure(this->config_i_.in(), reactor_task());

  // Assign send strategy:
  MulticastSendStrategy* send_strategy;
  ACE_NEW_RETURN(send_strategy, MulticastSendStrategy(link.in()), 0);
  link->send_strategy(send_strategy);

  // Assign receive strategy:
  MulticastReceiveStrategy* recv_strategy;
  ACE_NEW_RETURN(recv_strategy, MulticastReceiveStrategy(link.in()), 0);
  link->receive_strategy(recv_strategy);

  // Join multicast group:
  if (!link->join(this->config_i_->group_address_)) {
    ACE_TCHAR str[64];
    this->config_i_->group_address_.addr_to_string(str,
                                                   sizeof(str)/sizeof(str[0]));
    ACE_ERROR_RETURN((LM_ERROR,
                      ACE_TEXT("(%P|%t) ERROR: ")
                      ACE_TEXT("MulticastTransport::make_datalink: ")
                      ACE_TEXT("failed to join multicast group: %s!\n"),
                      str),
                     0);
  }
  return link._retn();
}
Example #7
/// This gets invoked when a TransportClient::remove_associations()
/// call has been made.  Because this DataLink can be shared amongst
/// different TransportClient objects, and different threads could
/// be "managing" the different TransportClient objects, we need
/// to make sure that this release_reservations() works in conjunction
/// with a simultaneous call (in another thread) to one of this
/// DataLink's make_reservation() methods.
void
DataLink::release_reservations(RepoId remote_id, RepoId local_id,
                               DataLinkSetMap& released_locals)
{
  DBG_ENTRY_LVL("DataLink", "release_reservations", 6);

  if (DCPS_debug_level > 9) {
    GuidConverter local(local_id);
    GuidConverter remote(remote_id);
    ACE_DEBUG((LM_DEBUG,
               ACE_TEXT("(%P|%t) DataLink::release_reservations() - ")
               ACE_TEXT("releasing association local: %C ")
               ACE_TEXT("<--> with remote %C.\n"),
               OPENDDS_STRING(local).c_str(),
               OPENDDS_STRING(remote).c_str()));
  }

  // Let the specific class release its reservations first.  This is done
  // here to avoid a deadlock: holding pub_sub_maps_lock_ and then taking a
  // specific class lock in release_reservations_i() would reverse the lock
  // ordering used by the active send logic, which acquires the specific
  // class lock before the overarching DataLink pub_sub_maps_lock_.
  this->release_reservations_i(remote_id, local_id);

  GuardType guard(this->pub_sub_maps_lock_);

  ReceiveListenerSet_rch& rls = assoc_by_remote_[remote_id];
  if (rls->size() == 1) {
    assoc_by_remote_.erase(remote_id);
    release_remote_i(remote_id);
  } else {
    rls->remove(local_id);
  }
  RepoIdSet& ris = assoc_by_local_[local_id];
  if (ris.size() == 1) {
    DataLinkSet_rch& links = released_locals[local_id];
    if (links.is_nil())
      links = new DataLinkSet;
    links->insert_link(this);
    {
      GuardType guard(this->released_assoc_by_local_lock_);
      released_assoc_by_local_[local_id].insert(remote_id);
    }
    assoc_by_local_.erase(local_id);
  } else {
    ris.erase(remote_id);
  }

  if (assoc_by_local_.empty()) {
    VDBG_LVL((LM_DEBUG,
              ACE_TEXT("(%P|%t) DataLink::release_reservations: ")
              ACE_TEXT("release_datalink due to no remaining pubs or subs.\n")), 5);
    this->impl_->release_datalink(this);
  }
}
Example #8
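/// Look up a TcpDataLink for the given key.  A link found in links_ is
/// returned directly if it has already started; otherwise the caller is
/// registered for an on-start callback and the link is delivered later.
/// A link found in pending_release_links_ has its release cancelled and is
/// moved back into links_.  Returns true when the caller needs no further
/// connect/accept processing.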
//Called with links_lock_ held
bool
TcpTransport::find_datalink_i(const PriorityKey& key, TcpDataLink_rch& link,
                              TransportClient* client, const RepoId& remote_id)
{
  DBG_ENTRY_LVL("TcpTransport", "find_datalink_i", 6);

  if (links_.find(key, link) == 0 /*OK*/) {
    if (!link->add_on_start_callback(client, remote_id)) {
      VDBG_LVL((LM_DEBUG, ACE_TEXT("(%P|%t) TcpTransport::find_datalink_i ")
                ACE_TEXT("link[%@] found, already started.\n"), link.in()), 2);
      // Since the link was already started, we won't get an "on start"
      // callback, and the link is immediately usable.
      return true;
    }

    VDBG_LVL((LM_DEBUG, ACE_TEXT("(%P|%t) TcpTransport::find_datalink_i ")
              ACE_TEXT("link[%@] found, add to pending connections.\n"), link.in()), 2);
    add_pending_connection(client, link.in());
    link = 0; // don't return link to TransportClient
    return true;

  } else if (pending_release_links_.find(key, link) == 0 /*OK*/) {
    if (link->cancel_release()) {
      link->set_release_pending(false);

      if (pending_release_links_.unbind(key, link) == 0 /*OK*/
          && links_.bind(key, link) == 0 /*OK*/) {
        VDBG_LVL((LM_DEBUG, ACE_TEXT("(%P|%t) TcpTransport::find_datalink_i ")
                  ACE_TEXT("found link[%@] in pending release list, cancelled release and moved back to links_.\n"), link.in()), 2);
        return true;
      }
      VDBG_LVL((LM_DEBUG, ACE_TEXT("(%P|%t) TcpTransport::find_datalink_i ")
                ACE_TEXT("found link[%@] in pending release list but was unable to shift back to links_.\n"), link.in()), 2);
    } else {
      VDBG_LVL((LM_DEBUG, ACE_TEXT("(%P|%t) TcpTransport::find_datalink_i ")
                ACE_TEXT("found link[%@] in pending release list but was unable to cancel release.\n"), link.in()), 2);
    }
    link = 0; // don't return link to TransportClient
    return false;
  }

  return false;
}
Example #9
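/// Active-side datalink setup.  Reuse (or create) the shared client_link_,
/// start an active session with the remote peer, and wait for the handshake
/// to be acknowledged.  The wait is skipped when the remote peer is this
/// participant (loopback).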
DataLink*
MulticastTransport::connect_datalink_i(const RepoId& local_id,
                                       const RepoId& remote_id,
                                       const TransportBLOB& /*remote_data*/,
                                       CORBA::Long priority)
{
  MulticastDataLink_rch link = this->client_link_;
  if (link.is_nil()) {
    link = this->make_datalink(local_id, remote_id, priority, true /*active*/);
    this->client_link_ = link;
  }

  MulticastPeer remote_peer = RepoIdConverter(remote_id).participantId();

  MulticastSession_rch session =
    this->start_session(link, remote_peer, true /*active*/);
  if (session.is_nil()) {
    return 0; // already logged in start_session()
  }

  if (remote_peer == RepoIdConverter(local_id).participantId()) {
    VDBG_LVL((LM_DEBUG, "(%P|%t) MulticastTransport[%C]::connect_datalink_i "
              "loopback on peer: 0x%x, skipping wait_for_ack\n",
              this->config_i_->name().c_str(), remote_peer), 2);
    return link._retn();
  }

  VDBG_LVL((LM_DEBUG, "(%P|%t) MulticastTransport[%C]::connect_datalink_i "
            "waiting for ack from: 0x%x\n",
            this->config_i_->name().c_str(), remote_peer), 2);

  if (session->wait_for_ack()) {
    VDBG_LVL((LM_DEBUG, "(%P|%t) MulticastTransport::connect_datalink_i "
              "done waiting for ack\n"), 2);
    return link._retn();
  }

  VDBG_LVL((LM_DEBUG, "(%P|%t) MulticastTransport::connect_datalink_i "
            "wait for ack failed\n"), 2);
  return 0;
}
Example #10
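/// Active-side lookup for shared memory.  The remote's blob must name this
/// host; an existing ShmemDataLink for the remote pool is returned if one is
/// cached in links_, otherwise a new one is created via add_datalink().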
TransportImpl::AcceptConnectResult
ShmemTransport::connect_datalink(const RemoteTransport& remote,
                                 const ConnectionAttribs&,
                                 TransportClient*)
{
  const std::pair<std::string, std::string> key = blob_to_key(remote.blob_);
  if (key.first != hostname_) {
    return AcceptConnectResult();
  }
  GuardType guard(links_lock_);
  ShmemDataLinkMap::iterator iter = links_.find(key.second);
  if (iter != links_.end()) {
    ShmemDataLink_rch link = iter->second;
    VDBG_LVL((LM_DEBUG, ACE_TEXT("(%P|%t) ShmemTransport::connect_datalink ")
              ACE_TEXT("link found.\n")), 2);
    return AcceptConnectResult(link._retn());
  }
  VDBG_LVL((LM_DEBUG, ACE_TEXT("(%P|%t) ShmemTransport::connect_datalink ")
            ACE_TEXT("new link.\n")), 2);
  return AcceptConnectResult(add_datalink(key.second));
}
Example #11
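/// Look for an existing DataLink: client_link_ on the active side,
/// server_link_ on the passive side.  If the link already has a session for
/// the remote peer, that session is (re)started and the link is returned;
/// otherwise 0 is returned so the framework proceeds to connect/accept
/// handshaking.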
DataLink*
MulticastTransport::find_datalink_i(const RepoId& /*local_id*/,
                                    const RepoId& remote_id,
                                    const TransportBLOB& /*remote_data*/,
                                    CORBA::Long /*priority*/,
                                    bool active)
{
  // To accommodate the one-to-many nature of multicast reservations,
  // a session layer is used to maintain state between unique pairs
  // of DomainParticipants over a single DataLink instance. Given
  // that TransportImpl instances may only be attached to either
  // Subscribers or Publishers within the same DomainParticipant,
  // it may be assumed that the local_id always references the same
  // participant.
  MulticastDataLink_rch link;
  if (active && !this->client_link_.is_nil()) {
    link = this->client_link_;
  }

  if (!active && !this->server_link_.is_nil()) {
    link = this->server_link_;
  }

  if (!link.is_nil()) {

    MulticastPeer remote_peer = RepoIdConverter(remote_id).participantId();

    MulticastSession_rch session = link->find_session(remote_peer);

    if (session.is_nil()) {
      // From the framework's point-of-view, no DataLink was found.
      // This way we will progress to the connect/accept stage for handshaking.
      return 0;
    }

    if (!session->start(active)) {
      ACE_ERROR_RETURN((LM_ERROR,
                        ACE_TEXT("(%P|%t) ERROR: ")
                        ACE_TEXT("MulticastTransport[%C]::find_datalink_i: ")
                        ACE_TEXT("failed to start session for remote peer: 0x%x!\n"),
                        this->config_i_->name().c_str(), remote_peer),
                       0);
    }

    VDBG_LVL((LM_DEBUG, "(%P|%t) MulticastTransport[%C]::find_datalink_i "
              "started session for remote peer: 0x%x\n",
              this->config_i_->name().c_str(), remote_peer), 2);
  }

  return link._retn();
}
Example #12
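/// Reactor timeout used for delayed DataLink release: if a stop has been
/// scheduled, unbind the link from the TransportImpl and stop it once no
/// associations remain, then drop the reference held for the timer callback.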
int
DataLink::handle_timeout(const ACE_Time_Value& /*tv*/, const void* /*arg*/)
{
  if (this->scheduled_to_stop_at_ != ACE_Time_Value::zero) {
    VDBG_LVL((LM_DEBUG, "(%P|%t) DataLink::handle_timeout called\n"), 4);
    this->impl_->unbind_link(this);

    if (assoc_by_remote_.empty() && assoc_by_local_.empty()) {
      this->stop();
    }
  }
  this->_remove_ref();
  return 0;
}
Example #13
// Allows the passive side to detect that the active side is connecting again
// before discovery identifies the released datalink from the active side.
// The passive side still believes it has a connection to the remote; however,
// the connect has created a new link/connection, so the passive side can try
// to reuse the existing structures, resetting them to associate the datalink
// with this new connection.
int
OpenDDS::DCPS::TcpDataLink::reuse_existing_connection(const TcpConnection_rch& connection)
{
  DBG_ENTRY_LVL("TcpDataLink","reuse_existing_connection",6);

  if (this->is_active_) {
    return -1;
  }
  // Check whether the connection is nil.  If it is not nil, the connection
  // has already gone through the connection phase, so this is a reuse;
  // determine whether the existing mechanisms can be reset and reused or we
  // need to start from scratch.
  if (!this->connection_.is_nil()) {
    VDBG_LVL((LM_DEBUG, "(%P|%t) TcpDataLink::reuse_existing_connection - "
                           "trying to reuse existing connection\n"), 0);
    this->connection_->transfer(connection.in());

    //Connection already exists.
    TransportStrategy_rch brs;
    TransportSendStrategy_rch bss;

    if (this->receive_strategy_.is_nil() && this->send_strategy_.is_nil()) {
      this->connection_ = 0;
      return -1;
    } else {
      brs = this->receive_strategy_;
      bss = this->send_strategy_;

      this->connection_ = connection;

      TcpReceiveStrategy* rs = static_cast<TcpReceiveStrategy*>(brs.in());

      TcpSendStrategy* ss = static_cast<TcpSendStrategy*>(bss.in());

      // Associate the new connection object with the receiving strategy and
      // disassociate the old connection object from the receiving strategy.
      int rs_result = rs->reset(this->connection_.in());

      // Associate the new connection object with the sending strategy and
      // disassociate the old connection object from the sending strategy.
      int ss_result = ss->reset(this->connection_.in(), true);

      if (rs_result == 0 && ss_result == 0) {
        return 0;
      }
    }
  }
  return -1;
}
Example #14
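/// Remove the given TcpDataLink from pending_release_links_.  An error is
/// logged if the link was expected to be there (a datalink release delay is
/// configured) but could not be found.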
void
TcpTransport::unbind_link(DataLink* link)
{
  TcpDataLink* tcp_link = static_cast<TcpDataLink*>(link);

  if (tcp_link == 0) {
    // Really an assertion failure
    ACE_ERROR((LM_ERROR,
               "(%P|%t) TcpTransport::unbind_link INTERNAL ERROR - "
               "Failed to downcast DataLink to TcpDataLink.\n"));
    return;
  }

  // Attempt to remove the TcpDataLink from our links_ map.
  PriorityKey key(
    tcp_link->transport_priority(),
    tcp_link->remote_address(),
    tcp_link->is_loopback(),
    tcp_link->is_active());

  VDBG_LVL((LM_DEBUG,
            "(%P|%t) TcpTransport::unbind_link link %@ PriorityKey "
            "prio=%d, addr=%C:%hu, is_loopback=%d, is_active=%d\n",
            link,
            tcp_link->transport_priority(),
            tcp_link->remote_address().get_host_addr(),
            tcp_link->remote_address().get_port_number(),
            (int)tcp_link->is_loopback(),
            (int)tcp_link->is_active()), 2);

  GuardType guard(this->links_lock_);

  if (this->pending_release_links_.unbind(key) != 0 &&
      link->datalink_release_delay() > ACE_Time_Value::zero) {
    ACE_ERROR((LM_ERROR,
               "(%P|%t) TcpTransport::unbind_link INTERNAL ERROR - "
               "Failed to find link %@ tcp_link %@ PriorityKey "
               "prio=%d, addr=%C:%hu, is_loopback=%d, is_active=%d\n",
               link,
               tcp_link,
               tcp_link->transport_priority(),
               tcp_link->remote_address().get_host_addr(),
               tcp_link->remote_address().get_port_number(),
               (int)tcp_link->is_loopback(),
               (int)tcp_link->is_active()));
  }
}
Example #15
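/// Remove publisher_id from the set associated with subscriber_id.  Returns
/// 1 when subscriber_id is no longer associated with any publisher (the set
/// became empty or was never found), and 0 otherwise.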
int
OpenDDS::DCPS::RepoIdSetMap::release_publisher(RepoId subscriber_id,
                                               RepoId publisher_id)
{
  DBG_ENTRY_LVL("RepoIdSetMap","release_publisher",6);
  RepoIdSet_rch id_set;

  if (OpenDDS::DCPS::find(map_, subscriber_id, id_set) != 0) {
    RepoIdConverter converter(subscriber_id);
    ACE_ERROR((LM_ERROR,
               ACE_TEXT("(%P|%t) ERROR: RepoIdSetMap::release_publisher: ")
               ACE_TEXT("subscriber_id %C not found in map.\n"),
               std::string(converter).c_str()));
    // Return 1 to indicate that the subscriber_id is no longer associated
    // with any publishers at all.
    return 1;
  }

  int result = id_set->remove_id(publisher_id);

  // Ignore the result
  ACE_UNUSED_ARG(result);

  VDBG_LVL((LM_DEBUG, "(%P|%t) RepoId size: %d.\n", id_set->size()), 5);
  // Return 1 if set is empty, 0 if not empty.
  //return (id_set->size() == 0) ? 1 : 0;

  if (id_set->size() == 0) {
    if (unbind(map_, subscriber_id) != 0) {
      RepoIdConverter converter(publisher_id);
      ACE_ERROR((LM_ERROR,
                 ACE_TEXT("(%P|%t) ERROR: RepoIdSetMap::release_publisher: ")
                 ACE_TEXT("failed to remove an empty ")
                 ACE_TEXT("ReceiveListenerSet for publisher_id %C.\n"),
                 std::string(converter).c_str()));
    }

    // We always return 1 if we know the publisher_id is no longer
    // associated with any ReceiveListeners.
    return 1;
  }

  // There are still ReceiveListeners associated with the publisher_id.
  // We return a 0 in this case.
  return 0;

}
Example #16
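/// Service Configurator initialization hook.  Parses the "-type SimpleTcp"
/// option and registers a SimpleTcpGenerator with TheTransportFactory.
/// Initialization is only performed once; later calls return immediately.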
int
DCPS_SimpleTcpLoader::init(int argc, ACE_TCHAR* argv[])
{
  DBG_ENTRY_LVL("DCPS_SimpleTcpLoader","init",6);

  static int initialized = 0;

  // Only allow initialization once.
  if (initialized)
    return 0;

  initialized = 1;

  // Parse any service configurator parameters.
  for (int curarg = 0; curarg < argc; curarg++)
    if (ACE_OS::strcasecmp(argv[curarg],
                           ACE_TEXT("-type")) == 0) {
      curarg++;

      if (curarg < argc) {
        ACE_TCHAR* type = argv[curarg];

        if (ACE_OS::strcasecmp(type, ACE_TEXT("SimpleTcp")) != 0) {
          ACE_ERROR_RETURN((LM_ERROR,
                            ACE_TEXT("ERROR: DCPS_SimpleTcpLoader: Unknown type ")
                            ACE_TEXT("<%s>.\n"), type),
                           -1);
        }

        OpenDDS::DCPS::SimpleTcpGenerator* generator;
        ACE_NEW_RETURN(generator,
                       OpenDDS::DCPS::SimpleTcpGenerator(),
                       -1);
        TheTransportFactory->register_generator(type,
                                                generator);
      }

    } else {
      VDBG_LVL((LM_ERROR,
                ACE_TEXT("DCPS_SimpleTcpLoader: Unknown option ")
                ACE_TEXT("<%s>.\n"),
                argv[curarg]), 1);
    }

  return 0;
}
Example #17
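/// Track the expected sequence number for the remote address; when a gap is
/// detected, mark the missing range as unavailable for reassembly.  The
/// header is always accepted.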
bool
UdpReceiveStrategy::check_header(const TransportHeader& header)
{
  ReassemblyInfo& info = reassembly_[remote_address_];

  if (header.sequence_ != info.second &&
      expected_ != SequenceNumber::SEQUENCENUMBER_UNKNOWN()) {
    VDBG_LVL((LM_WARNING,
               ACE_TEXT("(%P|%t) WARNING: UdpReceiveStrategy::check_header ")
               ACE_TEXT("expected %q received %q\n"),
               info.second.getValue(), header.sequence_.getValue()), 2);
    SequenceRange range(info.second, header.sequence_.previous());
    info.first.data_unavailable(range);
  }

  info.second = header.sequence_;
  ++info.second;
  return true;
}
Example #18
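/// Cancel this client's pending connections to the given remote: remove the
/// client's on-start callback from each pending link and erase the pending
/// entries.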
void
TcpTransport::stop_accepting_or_connecting(TransportClient* client,
                                           const RepoId& remote_id)
{
  GuidConverter remote_converted(remote_id);
  VDBG_LVL((LM_DEBUG, "(%P|%t) TcpTransport::stop_accepting_or_connecting "
            "stop connecting to remote: %C\n",
            std::string(remote_converted).c_str()), 5);

  GuardType guard(connections_lock_);
  typedef std::multimap<TransportClient*, DataLink_rch>::iterator iter_t;
  const std::pair<iter_t, iter_t> range =
    pending_connections_.equal_range(client);

  for (iter_t iter = range.first; iter != range.second; ++iter) {
    iter->second->remove_on_start_callback(client, remote_id);
  }

  pending_connections_.erase(range.first, range.second);
}
Example #19
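/// Publish this transport's connection information: serialize the configured
/// public address into a TransportBLOB tagged with the "tcp" transport type.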
bool
TcpTransport::connection_info_i(TransportLocator& local_info) const
{
  DBG_ENTRY_LVL("TcpTransport", "connection_info_i", 6);

  VDBG_LVL((LM_DEBUG, "(%P|%t) TcpTransport public address str %C\n",
            this->tcp_config_->get_public_address().c_str()), 2);

  // Get the public address string from the inst (usually the local address)
  NetworkAddress network_order_address(this->tcp_config_->get_public_address());

  ACE_OutputCDR cdr;
  cdr << network_order_address;
  const CORBA::ULong len = static_cast<CORBA::ULong>(cdr.total_length());
  char* buffer = const_cast<char*>(cdr.buffer()); // safe

  local_info.transport_type = "tcp";
  local_info.data = TransportBLOB(len, len,
                                  reinterpret_cast<CORBA::Octet*>(buffer));
  return true;
}
Example #20
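/// Called on the passive side when the handshake with remote_peer completes.
/// Any connections pending for this (remote, local) peer pair are finalized
/// by handing the server link to the waiting TransportClients, and the pair
/// is recorded in connections_ so later accept_datalink() calls complete
/// immediately.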
void
MulticastTransport::passive_connection(MulticastPeer local_peer, MulticastPeer remote_peer)
{
  GuardThreadType guard(this->connections_lock_);

  VDBG_LVL((LM_DEBUG, "(%P|%t) MulticastTransport[%C]::passive_connection "
            "from remote peer 0x%x to local peer 0x%x\n",
            this->config_i_->name().c_str(), remote_peer, local_peer), 2);

  const Peers peers(remote_peer, local_peer);
  const PendConnMap::iterator pend = this->pending_connections_.find(peers);

  if (pend != pending_connections_.end()) {

    Links::const_iterator server_link = this->server_links_.find(local_peer);
    DataLink_rch link;

    if (server_link != this->server_links_.end()) {
      link = static_rchandle_cast<DataLink>(server_link->second);
    }

    VDBG((LM_DEBUG, "(%P|%t) MulticastTransport::passive_connection completing\n"));
    PendConnMap::iterator updated_pend = pend;

    do {
      TransportClient* pend_client = updated_pend->second.front().first;
      RepoId remote_repo = updated_pend->second.front().second;

      guard.release();
      pend_client->use_datalink(remote_repo, link);

      guard.acquire();

    } while ((updated_pend = pending_connections_.find(peers)) != pending_connections_.end());
  }

  // If the connection was pending, the calls to use_datalink() above
  // finalized it; if it was not previously pending, accept_datalink() will
  // finalize the connection.
  this->connections_.insert(peers);
}
Example #21
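/// Track the expected sequence number for this session; if datagrams were
/// skipped, mark the missing range as unavailable for reassembly.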
bool
BestEffortSession::check_header(const TransportHeader& header)
{
  if (header.sequence_ != this->expected_ &&
      expected_ != SequenceNumber::SEQUENCENUMBER_UNKNOWN()) {
    VDBG_LVL((LM_WARNING,
               ACE_TEXT("(%P|%t) WARNING: BestEffortSession::check_header ")
               ACE_TEXT("expected %q received %q\n"),
               this->expected_.getValue(), header.sequence_.getValue()), 2);
    if (header.sequence_ > this->expected_) {
      SequenceRange range(this->expected_, header.sequence_.previous());
      this->reassembly_.data_unavailable(range);
    }
  }

  this->expected_ = header.sequence_;
  ++this->expected_;

  // Assume header is valid; this does not prevent duplicate
  // delivery of datagrams:
  return true;
}
Example #22
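/// Handle a MULTICAST_SYN that arrived before a session exists for the
/// source peer.  If it is addressed to this local peer, reply with a
/// MULTICAST_SYNACK control sample and notify the transport of the passive
/// connection.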
void
MulticastDataLink::syn_received_no_session(MulticastPeer source,
    ACE_Message_Block* data,
    bool swap_bytes)
{
  Serializer serializer_read(data, swap_bytes);

  MulticastPeer local_peer;
  serializer_read >> local_peer;

  if (local_peer != local_peer_) {
    return;
  }

  VDBG_LVL((LM_DEBUG, "(%P|%t) MulticastDataLink[%C]::syn_received_no_session "
      "send_synack local 0x%x remote 0x%x\n",
      config_->name().c_str(), local_peer, source), 2);

  ACE_Message_Block* synack_data = new ACE_Message_Block(sizeof(MulticastPeer));

  Serializer serializer_write(synack_data);
  serializer_write << source;

  DataSampleHeader header;
  ACE_Message_Block* control =
      create_control(MULTICAST_SYNACK, header, synack_data);

  const int error = send_control(header, control);
  if (error != SEND_CONTROL_OK) {
    ACE_ERROR((LM_ERROR, "(%P|%t) MulticastDataLink::syn_received_no_session: "
        "ERROR: send_control failed: %d!\n", error));
    return;
  }

  transport_->passive_connection(local_peer, source);
}
Example #23
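/// Active-side datalink setup.  An existing (or pending-release) link is
/// reused when possible; otherwise a new TcpDataLink is created, bound into
/// links_, and an asynchronous connect is started.  The link is returned
/// immediately if the connect completes synchronously or the link has
/// already started; otherwise it is delivered later through the client's
/// on-start callback.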
TransportImpl::AcceptConnectResult
TcpTransport::connect_datalink(const RemoteTransport& remote,
                               const ConnectionAttribs& attribs,
                               TransportClient* client)
{
  DBG_ENTRY_LVL("TcpTransport", "connect_datalink", 6);

  const PriorityKey key =
    blob_to_key(remote.blob_, attribs.priority_, true /*active*/);

  VDBG_LVL((LM_DEBUG, "(%P|%t) TcpTransport::connect_datalink PriorityKey "
            "prio=%d, addr=%C:%hu, is_loopback=%d, is_active=%d\n",
            key.priority(), key.address().get_host_addr(),
            key.address().get_port_number(), key.is_loopback(),
            key.is_active()), 2);

  TcpDataLink_rch link;
  {
    GuardType guard(links_lock_);

    if (find_datalink_i(key, link, client, remote.repo_id_)) {
      VDBG_LVL((LM_DEBUG, "(%P|%t) TcpTransport::connect_datalink found datalink link[%@]\n", link.in()), 2);
      return link.is_nil()
        ? AcceptConnectResult(AcceptConnectResult::ACR_SUCCESS)
        : AcceptConnectResult(link._retn());
    }

    link = new TcpDataLink(key.address(), this, attribs.priority_,
                           key.is_loopback(), true /*active*/);
    VDBG_LVL((LM_DEBUG, "(%P|%t) TcpTransport::connect_datalink create new link[%@]\n", link.in()), 2);
    if (links_.bind(key, link) != 0 /*OK*/) {
      ACE_ERROR((LM_ERROR, "(%P|%t) ERROR: TcpTransport::connect_datalink "
                 "Unable to bind new TcpDataLink[%@] to "
                 "TcpTransport in links_ map.\n", link.in()));
      return AcceptConnectResult();
    }
  }

  TcpConnection_rch connection =
    new TcpConnection(key.address(), link->transport_priority(), tcp_config_);
  connection->set_datalink(link.in());

  TcpConnection* pConn = connection.in();
  TcpConnection_rch reactor_refcount(connection); // increment for reactor callback

  ACE_TCHAR str[64];
  key.address().addr_to_string(str,sizeof(str)/sizeof(str[0]));

  // Can't make this call while holding onto TransportClient::lock_
  const int ret =
    connector_.connect(pConn, key.address(), ACE_Synch_Options::asynch);

  if (ret == -1 && errno != EWOULDBLOCK) {

    VDBG_LVL((LM_ERROR, "(%P|%t) TcpTransport::connect_datalink error %m.\n"), 2);
    return AcceptConnectResult();
  }

  // Don't decrement count when reactor_refcount goes out of scope, see
  // TcpConnection::open()
  (void) reactor_refcount._retn();

  if (ret == 0) {
    // connect() completed synchronously and called TcpConnection::active_open().
    VDBG_LVL((LM_DEBUG, "(%P|%t) TcpTransport::connect_datalink "
              "completed synchronously.\n"), 2);
    return AcceptConnectResult(link._retn());
  }

  if (!link->add_on_start_callback(client, remote.repo_id_)) {
    // link was started by the reactor thread before we could add a callback

    VDBG_LVL((LM_DEBUG, "(%P|%t) TcpTransport::connect_datalink got link.\n"), 2);
    return AcceptConnectResult(link._retn());
  }

  GuardType connections_guard(connections_lock_);

  add_pending_connection(client, link.in());
  VDBG_LVL((LM_DEBUG, "(%P|%t) TcpTransport::connect_datalink pending.\n"), 2);
  return AcceptConnectResult(AcceptConnectResult::ACR_SUCCESS);
}
Example #24
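/// Invoked by ACE when the connection is established.  On the active
/// (connector) side, finish the active open and attach this connection to
/// its datalink.  On the passive side, adopt the acceptor's configuration
/// and register with the reactor to read the setup message sent by the
/// active peer.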
int
OpenDDS::DCPS::TcpConnection::open(void* arg)
{
  DBG_ENTRY_LVL("TcpConnection","open",6);

  if (is_connector_) {

    VDBG_LVL((LM_DEBUG, "(%P|%t) DBG:   TcpConnection::open active.\n"), 2);
    // Take over the refcount from TcpTransport::connect_datalink().
    const TcpConnection_rch self(this);
    const TcpTransport_rch transport = link_->get_transport_impl();

    const bool is_loop(local_address_ == remote_address_);
    const PriorityKey key(transport_priority_, remote_address_,
                          is_loop, false /* !active */);

    int active_open_ = active_open();

    int connect_tcp_datalink_ = transport->connect_tcp_datalink(link_, self);

    if (active_open_ == -1 || connect_tcp_datalink_ == -1) {
      // if (active_open() == -1 ||
      //       transport->connect_tcp_datalink(link_, self) == -1) {

      transport->async_connect_failed(key);

      return -1;
    }

    return 0;
  }

  // The passed-in arg is really the acceptor object that created this
  // TcpConnection object, and is also the caller of this open()
  // method.  We need to cast the arg to the TcpAcceptor* type.
  TcpAcceptor* acceptor = static_cast<TcpAcceptor*>(arg);

  if (acceptor == 0) {
    // The cast failed.
    ACE_ERROR_RETURN((LM_ERROR,
                      ACE_TEXT("(%P|%t) ERROR: TcpConnection::open() - ")
                      ACE_TEXT("failed to cast void* arg to ")
                      ACE_TEXT("TcpAcceptor* type.\n")),
                     -1);
  }

  // Now we need to ask the TcpAcceptor object to provide us with
  // a pointer to the TcpTransport object that "owns" the acceptor.
  TcpTransport_rch transport = acceptor->transport();

  if (transport.is_nil()) {
    // The acceptor gave us a nil transport (smart) pointer.
    ACE_ERROR_RETURN((LM_ERROR,
                      ACE_TEXT("(%P|%t) ERROR: TcpConnection::open() - ")
                      ACE_TEXT("acceptor's transport is nil.\n")),
                     -1);
  }

  TcpInst* tcp_config = acceptor->get_configuration();

  // Keep a "copy" of the reference to TcpInst object
  // for ourselves.
  tcp_config->_add_ref();
  tcp_config_ = tcp_config;
  local_address_ = tcp_config_->local_address_;

  set_sock_options(tcp_config_.in());

  // We expect that the active side of the connection (the remote side
  // in this case) will supply its listening ACE_INET_Addr as the first
  // message it sends to the socket.  This is a one-way connection
  // establishment protocol message.
  passive_setup_ = true;
  transport_during_setup_ = transport;
  passive_setup_buffer_.size(sizeof(ACE_UINT32));

  if (reactor()->register_handler(this, READ_MASK) == -1) {
    ACE_ERROR_RETURN((LM_ERROR,
                      ACE_TEXT("(%P|%t) ERROR: TcpConnection::open() - ")
                      ACE_TEXT("unable to register with the reactor.%p\n"),
                      ACE_TEXT("register_handler")),
                     -1);
  }

  VDBG_LVL((LM_DEBUG, "(%P|%t) DBG:   TcpConnection::open passive handle=%d.\n",
            int(get_handle())), 2);

  return 0;
}
Example #25
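/// Passive-side setup handler: read the <len><addr><prio> message sent by
/// the active side, record the remote address and transport priority,
/// unregister from the reactor, and hand this connection to the transport
/// via passive_connection().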
int
OpenDDS::DCPS::TcpConnection::handle_setup_input(ACE_HANDLE /*h*/)
{
  const ssize_t ret = peer().recv(passive_setup_buffer_.wr_ptr(),
                                  passive_setup_buffer_.space(),
                                  &ACE_Time_Value::zero);

  if (ret < 0 && errno == ETIME) {
    return 0;
  }

  VDBG_LVL((LM_DEBUG, "(%P|%t) DBG:   TcpConnection::handle_setup_input %@ "
            "recv returned %b %m.\n", this, ret), 4);

  if (ret <= 0) {
    return -1;
  }

  passive_setup_buffer_.wr_ptr(ret);
  // Parse the setup message: <len><addr><prio>
  // len and prio are network order 32-bit ints
  // addr is a string of length len, including null
  ACE_UINT32 nlen = 0;

  if (passive_setup_buffer_.length() >= sizeof(nlen)) {

    ACE_OS::memcpy(&nlen, passive_setup_buffer_.rd_ptr(), sizeof(nlen));
    passive_setup_buffer_.rd_ptr(sizeof(nlen));
    ACE_UINT32 hlen = ntohl(nlen);
    passive_setup_buffer_.size(hlen + 2 * sizeof(nlen));

    ACE_UINT32 nprio = 0;

    if (passive_setup_buffer_.length() >= hlen + sizeof(nprio)) {

      const std::string bufstr(passive_setup_buffer_.rd_ptr());
      const NetworkAddress network_order_address(bufstr);
      network_order_address.to_addr(remote_address_);

      ACE_OS::memcpy(&nprio, passive_setup_buffer_.rd_ptr() + hlen, sizeof(nprio));
      transport_priority_ = ntohl(nprio);

      passive_setup_buffer_.reset();
      passive_setup_ = false;

      VDBG((LM_DEBUG, "(%P|%t) DBG:   TcpConnection::handle_setup_input "
            "%@ %C:%d->%C:%d, priority==%d, reconnect_state = %d\n", this,
            remote_address_.get_host_addr(), remote_address_.get_port_number(),
            local_address_.get_host_addr(), local_address_.get_port_number(),
            transport_priority_, reconnect_state_));

      // remove from reactor, normal recv strategy setup will add us back
      if (reactor()->remove_handler(this, READ_MASK | DONT_CALL) == -1) {
        VDBG((LM_DEBUG, "(%P|%t) DBG:   TcpConnection::handle_setup_input "
              "remove_handler failed %m.\n"));
      }

      const TcpConnection_rch self(this, false);

      transport_during_setup_->passive_connection(remote_address_, self);
      transport_during_setup_ = 0;
      connected_ = true;

      return 0;
    }
  }

  passive_setup_buffer_.rd_ptr(passive_setup_buffer_.base());

  return 0;
}
Example #26
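/// Remove a TcpDataLink from the links_ map.  Depending on the configured
/// datalink_release_delay, the link is either scheduled to stop immediately
/// or moved to pending_release_links_ and scheduled for delayed release.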
void
TcpTransport::release_datalink(DataLink* link)
{
  DBG_ENTRY_LVL("TcpTransport", "release_datalink", 6);

  TcpDataLink* tcp_link = static_cast<TcpDataLink*>(link);

  if (tcp_link == 0) {
    // Really an assertion failure
    ACE_ERROR((LM_ERROR,
               "(%P|%t) INTERNAL ERROR - Failed to downcast DataLink to "
               "TcpDataLink.\n"));
    return;
  }

  TcpDataLink_rch released_link;

  // Possible actions that will be taken to release the link.
  enum LinkAction { None, StopLink, ScheduleLinkRelease };
  LinkAction linkAction = None;

  // Scope for locking to protect the links (and pending_release) containers.
  GuardType guard(this->links_lock_);

  // Attempt to remove the TcpDataLink from our links_ map.
  PriorityKey key(
    tcp_link->transport_priority(),
    tcp_link->remote_address(),
    tcp_link->is_loopback(),
    tcp_link->is_active());

  VDBG_LVL((LM_DEBUG,
            "(%P|%t) TcpTransport::release_datalink link[%@] PriorityKey "
            "prio=%d, addr=%C:%hu, is_loopback=%d, is_active=%d\n",
            link,
            tcp_link->transport_priority(),
            tcp_link->remote_address().get_host_addr(),
            tcp_link->remote_address().get_port_number(),
            (int)tcp_link->is_loopback(),
            (int)tcp_link->is_active()), 2);

  if (this->links_.unbind(key, released_link) != 0) {
    //No op
  } else if (link->datalink_release_delay() > ACE_Time_Value::zero) {
    link->set_scheduling_release(true);

    VDBG_LVL((LM_DEBUG,
              "(%P|%t) TcpTransport::release_datalink datalink_release_delay "
              "is %: sec %d usec\n",
              link->datalink_release_delay().sec(),
              link->datalink_release_delay().usec()), 4);

    // Atomic value update, safe to perform here.
    released_link->set_release_pending(true);

    switch (this->pending_release_links_.bind(key, released_link)) {
    case -1:
      ACE_ERROR((LM_ERROR,
                 "(%P|%t) ERROR: Unable to bind released TcpDataLink[%@] to "
                 "pending_release_links_ map: %p\n", released_link.in(), ACE_TEXT("bind")));
      linkAction = StopLink;
      break;

    case 1:
      ACE_ERROR((LM_ERROR,
                 "(%P|%t) ERROR: Unable to bind released TcpDataLink[%@] to "
                 "pending_release_links_ map: already bound\n", released_link.in()));
      linkAction = StopLink;
      break;

    case 0:
      linkAction = ScheduleLinkRelease;
      break;

    default:
      break;
    }

  } else { // datalink_release_delay_ is 0
    link->set_scheduling_release(true);

    linkAction = StopLink;
  }

  // Actions are executed outside of the lock scope.
  ACE_Time_Value cancel_now = ACE_OS::gettimeofday();
  switch (linkAction) {
  case StopLink:
    link->schedule_stop(cancel_now);
    break;

  case ScheduleLinkRelease:

    link->schedule_delayed_release();
    break;

  case None:
    break;
  }

  if (DCPS_debug_level > 9) {
    std::stringstream buffer;
    buffer << *link;
    ACE_DEBUG((LM_DEBUG,
               ACE_TEXT("(%P|%t) TcpTransport::release_datalink() - ")
               ACE_TEXT("link[%@] with priority %d released.\n%C"),
               link,
               link->transport_priority(),
               buffer.str().c_str()));
  }
}
Example #27
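/// Configure the shared-memory transport: validate the ShmemInst, create the
/// per-process memory pool, set up the shared semaphore, and create the
/// ReadTask.  Fails on platforms without shared-memory support.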
bool
ShmemTransport::configure_i(TransportInst* config)
{
#if (!defined ACE_WIN32 && defined ACE_LACKS_SYSV_SHMEM) || defined ACE_HAS_WINCE
  ACE_UNUSED_ARG(config);
  ACE_ERROR_RETURN((LM_ERROR,
                    ACE_TEXT("(%P|%t) ERROR: ")
                    ACE_TEXT("ShmemTransport::configure_i: ")
                    ACE_TEXT("no platform support for shared memory!\n")),
                   false);
#else
  config_i_ = dynamic_cast<ShmemInst*>(config);
  if (config_i_ == 0) {
    ACE_ERROR_RETURN((LM_ERROR,
                      ACE_TEXT("(%P|%t) ERROR: ")
                      ACE_TEXT("ShmemTransport::configure_i: ")
                      ACE_TEXT("invalid configuration!\n")),
                     false);
  }
  config_i_->_add_ref();

  std::ostringstream pool;
  pool << "OpenDDS-" << ACE_OS::getpid() << '-' << config->name();
  poolname_ = pool.str();

  ShmemAllocator::MEMORY_POOL_OPTIONS alloc_opts;
#ifdef ACE_WIN32
  alloc_opts.max_size_ = config_i_->pool_size_;
#elif !defined ACE_LACKS_SYSV_SHMEM
  alloc_opts.base_addr_ = 0;
  alloc_opts.segment_size_ = config_i_->pool_size_;
  alloc_opts.minimum_bytes_ = alloc_opts.segment_size_;
  alloc_opts.max_segments_ = 1;
#endif

  alloc_ =
    new ShmemAllocator(ACE_TEXT_CHAR_TO_TCHAR(poolname_.c_str()),
                       0 /*lock_name is optional*/, &alloc_opts);

  void* mem = alloc_->malloc(sizeof(ShmemSharedSemaphore));
  if (mem == 0) {
    ACE_ERROR_RETURN((LM_ERROR, ACE_TEXT("(%P|%t) ERROR: ")
                      ACE_TEXT("ShmemTrasport::configure_i: failed to allocate")
                      ACE_TEXT(" space for semaphore in shared memory!\n")),
                     false);
  }

  ShmemSharedSemaphore* pSem = reinterpret_cast<ShmemSharedSemaphore*>(mem);
  alloc_->bind("Semaphore", pSem);

  bool ok;
# ifdef ACE_WIN32
  *pSem = ::CreateSemaphoreW(0 /*default security*/,
                             0 /*initial count*/,
                             0x7fffffff /*max count (ACE's default)*/,
                             0 /*no name*/);
  ACE_sema_t ace_sema = *pSem;
  ok = (*pSem != 0);
# else
  ok = (0 == ::sem_init(pSem, 1 /*process shared*/, 0 /*initial count*/));
  ACE_sema_t ace_sema = {pSem, 0 /*no name*/
#  if !defined (ACE_HAS_POSIX_SEM_TIMEOUT) && !defined (ACE_DISABLE_POSIX_SEM_TIMEOUT_EMULATION)
                         , PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER
#  endif
  };
# endif
  if (!ok) {
    ACE_ERROR_RETURN((LM_ERROR,
                      ACE_TEXT("(%P|%t) ERROR: ")
                      ACE_TEXT("ShmemTransport::configure_i: ")
                      ACE_TEXT("could not create semaphore\n")),
                     false);
  }

  read_task_ = new ReadTask(this, ace_sema);

  VDBG_LVL((LM_INFO, "(%P|%t) ShmemTransport %@ configured with address %C\n",
            this, poolname_.c_str()), 1);

  return true;
#endif
}
Example #28
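/// Configure the TCP transport: create the reactor task, open the connector,
/// connection checker, and acceptor, and update the configured local address
/// with the actual listening port when port 0 or INADDR_ANY was given.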
bool
TcpTransport::configure_i(TransportInst* config)
{
  DBG_ENTRY_LVL("TcpTransport", "configure_i", 6);

  // Downcast the config argument to a TcpInst*
  TcpInst* tcp_config =
    static_cast<TcpInst*>(config);

  if (tcp_config == 0) {
    // The downcast failed.
    ACE_ERROR_RETURN((LM_ERROR,
                      "(%P|%t) ERROR: Failed downcast from TransportInst "
                      "to TcpInst.\n"),
                     false);
  }

  this->create_reactor_task();

  // Ask our base class for a "copy" of the reference to the reactor task.
  this->reactor_task_ = reactor_task();

  connector_.open(reactor_task_->get_reactor());

  // Make a "copy" of the reference for ourselves.
  tcp_config->_add_ref();
  this->tcp_config_ = tcp_config;

  // Open the connection checker task
  if (this->con_checker_->open()) {
    ACE_ERROR_RETURN((LM_ERROR,
                      ACE_TEXT("(%P|%t) ERROR: connection checker failed to open : %p\n"),
                      ACE_TEXT("open")),
                     false);
  }

  // Open our acceptor object so that we can accept passive connections
  // on our this->tcp_config_->local_address_.

  if (this->acceptor_->open(this->tcp_config_->local_address_,
                            this->reactor_task_->get_reactor()) != 0) {
    // Remember to drop our reference to the tcp_config_ object since
    // we are about to return false here, which means we are not supposed
    // to keep a copy after all.
    TcpInst_rch cfg = this->tcp_config_._retn();

    ACE_ERROR_RETURN((LM_ERROR,
                      ACE_TEXT("(%P|%t) ERROR: Acceptor failed to open %C:%d: %p\n"),
                      cfg->local_address_.get_host_addr(),
                      cfg->local_address_.get_port_number(),
                      ACE_TEXT("open")),
                     false);
  }

  // Update the port number (in case port zero was given).
  ACE_INET_Addr address;

  if (this->acceptor_->acceptor().get_local_addr(address) != 0) {
    ACE_ERROR((LM_ERROR,
               ACE_TEXT("(%P|%t) ERROR: TcpTransport::configure_i ")
               ACE_TEXT("- %p"),
               ACE_TEXT("cannot get local addr\n")));
  }

  VDBG_LVL((LM_DEBUG,
            ACE_TEXT("(%P|%t) TcpTransport::configure_i listening on %C:%hu\n"),
            address.get_host_name(), address.get_port_number()), 2);

  unsigned short port = address.get_port_number();
  std::stringstream out;
  out << port;

  // By default, the acceptor listens on INADDR_ANY but advertises the fully
  // qualified hostname and the actual listening port number.
  if (tcp_config_->local_address_.is_any()) {
    std::string hostname = get_fully_qualified_hostname();

    this->tcp_config_->local_address_.set(port, hostname.c_str());
    this->tcp_config_->local_address_str_ = hostname;
    this->tcp_config_->local_address_str_ += ':' + out.str();
  }

  // Now that we have the actual listening port, update the port number in
  // the configuration if it was originally 0.
  else if (tcp_config_->local_address_.get_port_number() == 0) {
    this->tcp_config_->local_address_.set_port_number(port);

    if (this->tcp_config_->local_address_str_.length() > 0) {
      size_t pos = this->tcp_config_->local_address_str_.find_last_of(
                     ":]", std::string::npos, 2);
      std::string str = this->tcp_config_->local_address_str_.substr(0, pos + 1);

      if (this->tcp_config_->local_address_str_[pos] == ']') {
        str += ":";
      }

      str += out.str();
      this->tcp_config_->local_address_str_ = str;
    }
  }

  // Ahhh...  The sweet smell of success!
  return true;
}
Example #29
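/// Passive-side datalink setup.  Find or create the TcpDataLink for the
/// remote's priority key.  If a TcpConnection has already arrived via
/// passive_connection() it is attached now; otherwise the client is
/// registered for an on-start callback (unless the link has already
/// started) and the link is delivered once the connection completes.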
TransportImpl::AcceptConnectResult
TcpTransport::accept_datalink(const RemoteTransport& remote,
                              const ConnectionAttribs& attribs,
                              TransportClient* client)
{
  GuidConverter remote_conv(remote.repo_id_);
  GuidConverter local_conv(attribs.local_id_);

  VDBG_LVL((LM_DEBUG, "(%P|%t) TcpTransport::accept_datalink local %C "
            "accepting connection from remote %C\n",
            std::string(local_conv).c_str(),
            std::string(remote_conv).c_str()), 5);

  GuardType guard(connections_lock_);
  const PriorityKey key =
    blob_to_key(remote.blob_, attribs.priority_, false /* !active */);

  VDBG_LVL((LM_DEBUG, "(%P|%t) TcpTransport::accept_datalink PriorityKey "
            "prio=%d, addr=%C:%hu, is_loopback=%d, is_active=%d\n", attribs.priority_,
            key.address().get_host_addr(), key.address().get_port_number(),
            key.is_loopback(), key.is_active()), 2);

  TcpDataLink_rch link;
  {
    GuardType guard(links_lock_);

    if (find_datalink_i(key, link, client, remote.repo_id_)) {
      return link.is_nil()
        ? AcceptConnectResult(AcceptConnectResult::ACR_SUCCESS)
        : AcceptConnectResult(link._retn());

    } else {
      link = new TcpDataLink(key.address(), this, key.priority(),
                             key.is_loopback(), key.is_active());

      if (links_.bind(key, link) != 0 /*OK*/) {
        ACE_ERROR((LM_ERROR,
                   "(%P|%t) ERROR: TcpTransport::accept_datalink "
                   "Unable to bind new TcpDataLink to "
                   "TcpTransport in links_ map.\n"));
        return AcceptConnectResult();
      }
    }
  }

  TcpConnection_rch connection;
  const ConnectionMap::iterator iter = connections_.find(key);

  if (iter != connections_.end()) {
    connection = iter->second;
    connections_.erase(iter);
  }

  if (connection.is_nil()) {
    if (!link->add_on_start_callback(client, remote.repo_id_)) {
      VDBG_LVL((LM_DEBUG, "(%P|%t) TcpTransport::accept_datalink "
                "got started link %@.\n", link.in()), 2);
      return AcceptConnectResult(link._retn());
    }

    VDBG_LVL((LM_DEBUG, "(%P|%t) TcpTransport::accept_datalink "
              "no existing TcpConnection.\n"), 2);

    add_pending_connection(client, link.in());

    // no link ready, passive_connection will complete later
    return AcceptConnectResult(AcceptConnectResult::ACR_SUCCESS);
  }

  guard.release(); // connect_tcp_datalink() isn't called with connections_lock_

  if (connect_tcp_datalink(link, connection) == -1) {
    GuardType guard(links_lock_);
    links_.unbind(key);
    link = 0;
  }

  VDBG_LVL((LM_DEBUG, "(%P|%t) TcpTransport::accept_datalink "
            "connected link %@.\n", link.in()), 2);
  return AcceptConnectResult(link._retn());
}