int32_t TransportTCP::read(uint8_t* buffer, uint32_t size) { { boost::recursive_mutex::scoped_lock lock(close_mutex_); if (closed_) { ROSCPP_LOG_DEBUG("Tried to read on a closed socket [%d]", sock_); return -1; } } ROS_ASSERT((int32_t)size > 0); int num_bytes = ::recv(sock_, reinterpret_cast<char*>(buffer), size, 0); if (num_bytes < 0) { if ( !last_socket_error_is_would_block() ) // !WSAWOULDBLOCK / !EAGAIN && !EWOULDBLOCK { ROSCPP_LOG_DEBUG("recv() on socket [%d] failed with error [%s]", sock_, last_socket_error_string()); close(); } else { num_bytes = 0; } } else if (num_bytes == 0) { ROSCPP_LOG_DEBUG("Socket [%d] received 0/%d bytes, closing", sock_, size); close(); return -1; } return num_bytes; }
/// Dispatches a freshly received connection header: a "topic" key spawns a
/// TransportSubscriberLink, a "service" key spawns a ServiceClientLink, and
/// anything else is rejected.
/// @return true when the appropriate link accepted the header.
bool ConnectionManager::onConnectionHeaderReceived(const ConnectionPtr& conn, const ros::Header& header)
{
  std::string val;

  if (header.getValue("topic", val))
  {
    // Remote side is a subscriber connecting to one of our topics.
    ROSCPP_LOG_DEBUG("Connection: Creating TransportSubscriberLink for topic [%s] connected to [%s]", val.c_str(), conn->getRemoteString().c_str());

    TransportSubscriberLinkPtr sub_link(new TransportSubscriberLink());
    sub_link->initialize(conn);
    return sub_link->handleHeader(header);
  }

  if (header.getValue("service", val))
  {
    // Remote side is a client calling one of our services.
    ROSCPP_LOG_DEBUG("Connection: Creating ServiceClientLink for service [%s] connected to [%s]", val.c_str(), conn->getRemoteString().c_str());

    ServiceClientLinkPtr link(new ServiceClientLink());
    link->initialize(conn);
    return link->handleHeader(header);
  }

  ROSCPP_LOG_DEBUG("Got a connection for a type other than 'topic' or 'service' from [%s]. Fail.", conn->getRemoteString().c_str());
  return false;
}
int32_t TransportTCP::write(uint8_t* buffer, uint32_t size) { { boost::recursive_mutex::scoped_lock lock(close_mutex_); if (closed_) { ROSCPP_LOG_DEBUG("Tried to write on a closed socket [%d]", sock_); return -1; } } ROS_ASSERT(size > 0); // never write more than INT_MAX since this is the maximum we can report back with the current return type uint32_t writesize = std::min(size, static_cast<uint32_t>(INT_MAX)); int num_bytes = ::send(sock_, reinterpret_cast<const char*>(buffer), writesize, 0); if (num_bytes < 0) { if ( !last_socket_error_is_would_block() ) { ROSCPP_LOG_DEBUG("send() on socket [%d] failed with error [%s]", sock_, last_socket_error_string()); close(); } else { num_bytes = 0; } } return num_bytes; }
int32_t TransportTCP::write(uint8_t* buffer, uint32_t size) { { boost::recursive_mutex::scoped_lock lock(close_mutex_); if (closed_) { ROSCPP_LOG_DEBUG("Tried to write on a closed socket [%d]", sock_); return -1; } } ROS_ASSERT((int32_t)size > 0); int num_bytes = ::send(sock_, reinterpret_cast<const char*>(buffer), size, 0); if (num_bytes < 0) { if ( !last_socket_error_is_would_block() ) { ROSCPP_LOG_DEBUG("send() on socket [%d] failed with error [%s]", sock_, last_socket_error_string()); close(); } else { num_bytes = 0; } } return num_bytes; }
/// Validates a remote subscriber's connection header against the local
/// publication for the requested topic and, on success, registers this link
/// with that publication and queues the publisher's reply header.
/// @param header  parsed connection header from the remote subscriber
/// @return true if the header was accepted; false after sending a header error
bool TransportSubscriberLink::handleHeader(const Header& header)
{
  std::string topic;
  if (!header.getValue("topic", topic))
  {
    // A subscriber header without a topic is malformed; tell the peer why.
    std::string msg("Header from subscriber did not have the required element: topic");
    ROS_ERROR("%s", msg.c_str());
    connection_->sendHeaderError(msg);
    return false;
  }

  // This will get validated by validateHeader below
  std::string client_callerid;
  header.getValue("callerid", client_callerid);

  PublicationPtr pt = TopicManager::instance()->lookupPublication(topic);
  if (!pt)
  {
    // No local publication advertises this topic.
    std::string msg = std::string("received a connection for a nonexistent topic [") + topic + std::string("] from [" + connection_->getTransport()->getTransportInfo() + "] [" + client_callerid +"].");
    ROSCPP_LOG_DEBUG("%s", msg.c_str());
    connection_->sendHeaderError(msg);
    return false;
  }

  // Delegate md5sum/type/callerid compatibility checks to the publication.
  std::string error_msg;
  if (!pt->validateHeader(header, error_msg))
  {
    ROSCPP_LOG_DEBUG("%s", error_msg.c_str());
    connection_->sendHeaderError(error_msg);
    return false;
  }

  destination_caller_id_ = client_callerid;
  connection_id_ = ConnectionManager::instance()->getNewConnectionID();
  topic_ = pt->getName();
  parent_ = PublicationWPtr(pt);

  // Send back a success, with info
  M_string m;
  m["type"] = pt->getDataType();
  m["md5sum"] = pt->getMD5Sum();
  m["message_definition"] = pt->getMessageDefinition();
  m["callerid"] = this_node::getName();
  m["latching"] = pt->isLatching() ? "1" : "0";
  m["topic"] = topic_;

  // Header write completes asynchronously; onHeaderWritten is invoked when done.
  connection_->writeHeader(m, boost::bind(&TransportSubscriberLink::onHeaderWritten, this, _1));

  // Registering the link makes this subscriber start receiving publishes.
  pt->addSubscriberLink(shared_from_this());

  return true;
}
void shutdown() { boost::recursive_mutex::scoped_lock lock(g_shutting_down_mutex); // gracefully return if we've not fired ros up yet, or mid-shutdown if (g_shutting_down || !g_initialized) { return; } ROSCPP_LOG_DEBUG("Shutting down roscpp"); g_shutting_down = true; g_global_queue->disable(); g_global_queue->clear(); if (g_internal_queue_thread.get_id() != boost::this_thread::get_id()) { g_internal_queue_thread.join(); } const log4cxx::LoggerPtr& logger = log4cxx::Logger::getLogger(ROSCONSOLE_ROOT_LOGGER_NAME); logger->removeAppender(g_rosout_appender); g_rosout_appender = 0; // reset this so that the logger doesn't get crashily destroyed // again during global destruction. // // See https://code.ros.org/trac/ros/ticket/3271 // log4cxx::Logger::getRootLogger()->getLoggerRepository()->shutdown(); log4cxx::LoggerPtr& fo_logger = ros::file_log::getFileOnlyLogger(); fo_logger = log4cxx::LoggerPtr(); if (g_started) { TopicManager::instance()->shutdown(); ServiceManager::instance()->shutdown(); PollManager::instance()->shutdown(); ConnectionManager::instance()->shutdown(); XMLRPCManager::instance()->shutdown(); } WallTime end = WallTime::now(); ROSCPP_LOG_DEBUG("Shutdown finished"); g_started = false; g_ok = false; Time::shutdown(); }
/// Drops the link to the local (intra-process) subscriber exactly once;
/// subsequent calls are no-ops.
void IntraProcessSubscriberLink::drop()
{
  {
    boost::recursive_mutex::scoped_lock guard(drop_mutex_);
    if (dropped_)
    {
      return; // already dropped by another caller
    }
    dropped_ = true;
  }

  // Tear down our side of the subscriber connection first.
  if (subscriber_)
  {
    subscriber_->drop();
    subscriber_.reset();
  }

  // Then detach from the owning publication, if it is still alive.
  PublicationPtr parent = parent_.lock();
  if (parent)
  {
    ROSCPP_LOG_DEBUG("Connection to local subscriber on topic [%s] dropped", topic_.c_str());
    parent->removeSubscriberLink(shared_from_this());
  }
}
bool PollSet::delSocket(int fd) { if(fd < 0) { return false; } boost::mutex::scoped_lock lock(socket_info_mutex_); M_SocketInfo::iterator it = socket_info_.find(fd); if (it != socket_info_.end()) { socket_info_.erase(it); { boost::mutex::scoped_lock lock(just_deleted_mutex_); just_deleted_.push_back(fd); } sockets_changed_ = true; signal(); return true; } ROSCPP_LOG_DEBUG("PollSet: Tried to delete fd [%d] which is not being tracked", fd); return false; }
bool TransportUDP::createIncoming(int port, bool is_server) { is_server_ = is_server; sock_ = socket(AF_INET, SOCK_DGRAM, 0); if (sock_ <= 0) { ROS_ERROR("socket() failed with error [%s]", last_socket_error_string()); return false; } server_address_.sin_family = AF_INET; server_address_.sin_port = htons(port); server_address_.sin_addr.s_addr = INADDR_ANY; if (bind(sock_, (sockaddr *)&server_address_, sizeof(server_address_)) < 0) { ROS_ERROR("bind() failed with error [%s]", last_socket_error_string()); return false; } socklen_t len = sizeof(server_address_); getsockname(sock_, (sockaddr *)&server_address_, &len); server_port_ = ntohs(server_address_.sin_port); ROSCPP_LOG_DEBUG("UDPROS server listening on port [%d]", server_port_); if (!initializeSocket()) { return false; } enableRead(); return true; }
/// Accepts a pending connection on this server socket.
/// @return a new TransportTCP wrapping the accepted socket, or a null pointer
///         if accept() failed.
TransportTCPPtr TransportTCP::accept()
{
  ROS_ASSERT(is_server_);

  sockaddr remote_addr;
  socklen_t addr_len = sizeof(remote_addr);

  int new_sock = ::accept(sock_, (sockaddr *)&remote_addr, &addr_len);
  if (new_sock < 0)
  {
    ROS_ERROR("accept() on socket [%d] failed with error [%s]", sock_, last_socket_error_string());
    return TransportTCPPtr();
  }

  ROSCPP_LOG_DEBUG("Accepted connection on socket [%d], new socket [%d]", sock_, new_sock);

  TransportTCPPtr transport(boost::make_shared<TransportTCP>(poll_set_, flags_));
  if (!transport->setSocket(new_sock))
  {
    // The transport is still returned; setSocket logs/handles the failure state.
    ROS_ERROR("Failed to set socket on transport for socket %d", new_sock);
  }

  return transport;
}
bool PollSet::addSocket(int fd, const SocketUpdateFunc& update_func, const TransportPtr& transport) { SocketInfo info; info.fd_ = fd; info.events_ = 0; info.transport_ = transport; info.func_ = update_func; { boost::mutex::scoped_lock lock(socket_info_mutex_); bool b = socket_info_.insert(std::make_pair(fd, info)).second; if (!b) { ROSCPP_LOG_DEBUG("PollSet: Tried to add duplicate fd [%d]", fd); return false; } sockets_changed_ = true; } signal(); return true; }
/// Drops the link to the local (intra-process) publisher exactly once;
/// subsequent calls are no-ops.
void IntraProcessPublisherLink::drop()
{
  {
    boost::recursive_mutex::scoped_lock guard(drop_mutex_);
    if (dropped_)
    {
      return; // already dropped by another caller
    }
    dropped_ = true;
  }

  // Tear down our side of the publisher connection first.
  if (publisher_)
  {
    publisher_->drop();
    publisher_.reset();
  }

  // Then detach from the owning subscription, if it is still alive.
  SubscriptionPtr parent = parent_.lock();
  if (parent)
  {
    ROSCPP_LOG_DEBUG("Connection to local publisher on topic [%s] dropped", parent->getName().c_str());
    parent->removePublisherLink(shared_from_this());
  }
}
/// Periodic timer callback that retries a dropped publisher connection.
/// Uses exponential backoff (capped at 20s). Only TCPROS connections are
/// retried; for other transports the link is removed from the subscription.
void TransportPublisherLink::onRetryTimer(const ros::WallTimerEvent&)
{
  if (dropping_)
  {
    return;
  }

  if (needs_retry_ && WallTime::now() > next_retry_)
  {
    // Exponential backoff, capped at 20 seconds.
    retry_period_ = std::min(retry_period_ * 2, WallDuration(20));
    needs_retry_ = false;
    SubscriptionPtr parent = parent_.lock();

    // TODO: support retry on more than just TCP
    // For now, since UDP does not have a heartbeat, we do not attempt to retry
    // UDP connections since an error there likely means some invalid operation has
    // happened.
    if (connection_->getTransport()->getType() == std::string("TCPROS"))
    {
      // Parent may already be gone; still retry, but log "unknown" as the topic.
      std::string topic = parent ? parent->getName() : "unknown";

      // Reconnect to the same host:port the old transport was connected to.
      TransportTCPPtr old_transport = boost::dynamic_pointer_cast<TransportTCP>(connection_->getTransport());
      ROS_ASSERT(old_transport);
      const std::string& host = old_transport->getConnectedHost();
      int port = old_transport->getConnectedPort();

      ROSCPP_LOG_DEBUG("Retrying connection to [%s:%d] for topic [%s]", host.c_str(), port, topic.c_str());

      TransportTCPPtr transport(new TransportTCP(&PollManager::instance()->getPollSet()));
      if (transport->connect(host, port))
      {
        // Re-initialize this link with the fresh connection and register it.
        ConnectionPtr connection(new Connection);
        connection->initialize(transport, false, HeaderReceivedFunc());
        initialize(connection);

        ConnectionManager::instance()->addConnection(connection);
      }
      else
      {
        ROSCPP_LOG_DEBUG("connect() failed when retrying connection to [%s:%d] for topic [%s]", host.c_str(), port, topic.c_str());
      }
    }
    else if (parent)
    {
      // Non-TCP transport: give up and drop this publisher link entirely.
      parent->removePublisherLink(shared_from_this());
    }
  }
}
bool XMLRPCManager::validateXmlrpcResponse(const std::string& method, XmlRpcValue &response, XmlRpcValue &payload) { if (response.getType() != XmlRpcValue::TypeArray) { ROSCPP_LOG_DEBUG("XML-RPC call [%s] didn't return an array", method.c_str()); return false; } if (response.size() != 2 && response.size() != 3) { ROSCPP_LOG_DEBUG("XML-RPC call [%s] didn't return a 2 or 3-element array", method.c_str()); return false; } if (response[0].getType() != XmlRpcValue::TypeInt) { ROSCPP_LOG_DEBUG("XML-RPC call [%s] didn't return a int as the 1st element", method.c_str()); return false; } int status_code = response[0]; if (response[1].getType() != XmlRpcValue::TypeString) { ROSCPP_LOG_DEBUG("XML-RPC call [%s] didn't return a string as the 2nd element", method.c_str()); return false; } std::string status_string = response[1]; if (status_code != 1) { ROSCPP_LOG_DEBUG("XML-RPC call [%s] returned an error (%d): [%s]", method.c_str(), status_code, status_string.c_str()); return false; } if (response.size() > 2) { payload = response[2]; } else { std::string empty_array = "<value><array><data></data></array></value>"; int offset = 0; payload = XmlRpcValue(empty_array, &offset); } return true; }
/// Installed via atexit(): performs a last-chance shutdown when the process
/// exits while roscpp is still up (i.e. NodeHandles were not all destroyed).
void atexitCallback()
{
  if (!ok() || isShuttingDown())
  {
    return; // already shut down (or shutting down) — nothing to do
  }

  ROSCPP_LOG_DEBUG("shutting down due to exit() or end of main() without cleanup of all NodeHandles");
  shutdown();
}
void TransportTCP::parseHeader(const Header& header) { std::string nodelay; if (header.getValue("tcp_nodelay", nodelay) && nodelay == "1") { ROSCPP_LOG_DEBUG("Setting nodelay on socket [%d]", sock_); setNoDelay(true); } }
void ConnectionManager::tcprosAcceptConnection(const TransportTCPPtr& transport) { std::string client_uri = transport->getClientURI(); ROSCPP_LOG_DEBUG("TCPROS received a connection from [%s]", client_uri.c_str()); ConnectionPtr conn(new Connection()); addConnection(conn); conn->initialize(transport, true, boost::bind(&ConnectionManager::onConnectionHeaderReceived, this, _1, _2)); }
/// Invoked when the connection to the service server goes away: marks the
/// link dropped, fails every queued call, and unregisters from the manager.
void ServiceServerLink::onConnectionDropped(const ConnectionPtr& conn)
{
  ROS_ASSERT(conn == connection_);

  ROSCPP_LOG_DEBUG("Service client from [%s] for [%s] dropped",
                   conn->getRemoteString().c_str(), service_name_.c_str());

  // Mark dropped before failing queued calls so no new work is accepted.
  dropped_ = true;
  clearCalls();

  ServiceManager::instance()->removeServiceServerLink(shared_from_this());
}
void ConnectionManager::udprosIncomingConnection(const TransportUDPPtr& transport, ros::Header& header) { std::string client_uri = ""; // TODO: transport->getClientURI(); ROSCPP_LOG_DEBUG("UDPROS received a connection from [%s]", client_uri.c_str()); ConnectionPtr conn(new Connection()); addConnection(conn); conn->initialize(transport, true, NULL); onConnectionHeaderReceived(conn, header); }
/// Performs a blocking service call over this link.
/// Queues a CallInfo, possibly kicks off processing immediately, then blocks
/// on a condition variable until the call finishes (success, error, or drop).
/// @param req   serialized request to send
/// @param resp  receives the serialized response
/// @return true if the server reported success
bool ServiceServerLink::call(const ros::SerializedMessage& req, ros::SerializedMessage& resp)
{
  CallInfoPtr info(new CallInfo);
  info->req_ = req;
  info->resp_ = &resp;
  info->success_ = false;
  info->finished_ = false;
  info->call_finished_ = false;
  info->caller_thread_id_ = boost::this_thread::get_id();

  //ros::WallDuration(0.1).sleep();

  bool immediate = false;
  {
    boost::mutex::scoped_lock lock(call_queue_mutex_);

    if (connection_->isDropped())
    {
      ROSCPP_LOG_DEBUG("ServiceServerLink::call called on dropped connection for service [%s]", service_name_.c_str());
      info->call_finished_ = true;
      return false;
    }

    // If nothing is in flight and the header handshake is complete, this call
    // can be dispatched right away (outside the lock, below).
    if (call_queue_.empty() && header_written_ && header_read_)
    {
      immediate = true;
    }

    call_queue_.push(info);
  }

  if (immediate)
  {
    processNextCall();
  }

  {
    // Block until the response (or an error/drop) marks the call finished.
    // The loop guards against spurious condition-variable wakeups.
    boost::mutex::scoped_lock lock(info->finished_mutex_);

    while (!info->finished_)
    {
      info->finished_condition_.wait(lock);
    }
  }

  info->call_finished_ = true;

  if (info->exception_string_.length() > 0)
  {
    ROS_ERROR("Service call failed: service [%s] responded with an error: %s", service_name_.c_str(), info->exception_string_.c_str());
  }

  return info->success_;
}
/// Shuts down the XML-RPC server and all cached clients/connections.
/// Idempotent: returns immediately if a shutdown is already in progress.
void XMLRPCManager::shutdown()
{
  if (shutting_down_)
  {
    return;
  }

  shutting_down_ = true;
  server_thread_.join();

  server_.close();

  // kill the last few clients that were started in the shutdown process
  for (V_CachedXmlRpcClient::iterator i = clients_.begin(); i != clients_.end(); ++i)
  {
    // Give an in-use client up to ~0.1s (10 x 10ms) to finish before closing it.
    for (int wait_count = 0; i->in_use_ && wait_count < 10; wait_count++)
    {
      ROSCPP_LOG_DEBUG("waiting for xmlrpc connection to finish...");
      ros::WallDuration(0.01).sleep();
    }

    i->client_->close();
    delete i->client_;
  }

  clients_.clear();

  boost::mutex::scoped_lock lock(functions_mutex_);
  functions_.clear();

  {
    // Detach every async connection from the server's dispatcher before clearing.
    S_ASyncXMLRPCConnection::iterator it = connections_.begin();
    S_ASyncXMLRPCConnection::iterator end = connections_.end();
    for (; it != end; ++it)
    {
      (*it)->removeFromDispatch(server_.get_dispatch());
    }
  }

  connections_.clear();

  {
    boost::mutex::scoped_lock lock(added_connections_mutex_);
    added_connections_.clear();
  }

  {
    boost::mutex::scoped_lock lock(removed_connections_mutex_);
    removed_connections_.clear();
  }
}
void TransportSubscriberLink::onConnectionDropped(const ConnectionPtr& conn) { ROS_ASSERT(conn == connection_); PublicationPtr parent = parent_.lock(); if (parent) { ROSCPP_LOG_DEBUG("Connection to subscriber [%s] to topic [%s] dropped", connection_->getRemoteString().c_str(), topic_.c_str()); parent->removeSubscriberLink(shared_from_this()); } }
bool XMLRPCManager::validateXmlrpcResponse(const std::string& method, XmlRpcValue &response, XmlRpcValue &payload) { if (response.getType() != XmlRpcValue::TypeArray) { ROSCPP_LOG_DEBUG("XML-RPC call [%s] didn't return an array", method.c_str()); return false; } if (response.size() != 3) { ROSCPP_LOG_DEBUG("XML-RPC call [%s] didn't return a 3-element array", method.c_str()); return false; } if (response[0].getType() != XmlRpcValue::TypeInt) { ROSCPP_LOG_DEBUG("XML-RPC call [%s] didn't return a int as the 1st element", method.c_str()); return false; } int status_code = response[0]; if (response[1].getType() != XmlRpcValue::TypeString) { ROSCPP_LOG_DEBUG("XML-RPC call [%s] didn't return a string as the 2nd element", method.c_str()); return false; } std::string status_string = response[1]; if (status_code != 1) { ROSCPP_LOG_DEBUG("XML-RPC call [%s] returned an error (%d): [%s]", method.c_str(), status_code, status_string.c_str()); return false; } payload = response[2]; return true; }
int32_t TransportTCP::read(uint8_t* buffer, uint32_t size) { { boost::recursive_mutex::scoped_lock lock(close_mutex_); if (closed_) { ROSCPP_LOG_DEBUG("Tried to read on a closed socket [%d]", sock_); return -1; } } ROS_ASSERT(size > 0); // never read more than INT_MAX since this is the maximum we can report back with the current return type uint32_t read_size = std::min(size, static_cast<uint32_t>(INT_MAX)); int num_bytes = ::recv(sock_, reinterpret_cast<char*>(buffer), read_size, 0); if (num_bytes < 0) { if ( !last_socket_error_is_would_block() ) // !WSAWOULDBLOCK / !EAGAIN && !EWOULDBLOCK { ROSCPP_LOG_DEBUG("recv() on socket [%d] failed with error [%s]", sock_, last_socket_error_string()); close(); } else { num_bytes = 0; } } else if (num_bytes == 0) { ROSCPP_LOG_DEBUG("Socket [%d] received 0/%u bytes, closing", sock_, size); close(); return -1; } return num_bytes; }
/// Closes the TCP socket exactly once: deregisters from the poll set, shuts
/// down and closes the fd, clears all callbacks, then invokes the disconnect
/// callback OUTSIDE the lock (so the callback may safely re-enter this object).
void TransportTCP::close()
{
  Callback disconnect_cb;

  // Double-checked pattern: the unlocked test is a fast path; the locked test
  // makes the close idempotent under concurrent callers.
  if (!closed_)
  {
    {
      boost::recursive_mutex::scoped_lock lock(close_mutex_);

      if (!closed_)
      {
        closed_ = true;

        ROS_ASSERT(sock_ != ROS_INVALID_SOCKET);

        if (poll_set_)
        {
          poll_set_->delSocket(sock_);
        }

        ::shutdown(sock_, ROS_SOCKETS_SHUT_RDWR);
        if ( close_socket(sock_) != 0 )
        {
          ROS_ERROR("Error closing socket [%d]: [%s]", sock_, last_socket_error_string());
        }
        else
        {
          ROSCPP_LOG_DEBUG("TCP socket [%d] closed", sock_);
        }
        sock_ = ROS_INVALID_SOCKET;

        // Stash the disconnect callback so it can run after the lock is
        // released, and drop every other callback reference.
        disconnect_cb = disconnect_cb_;

        disconnect_cb_ = Callback();
        read_cb_ = Callback();
        write_cb_ = Callback();
        accept_cb_ = AcceptCallback();
      }
    }
  }

  // Fire the disconnect notification outside the mutex.
  if (disconnect_cb)
  {
    disconnect_cb(shared_from_this());
  }
}
bool PollSet::addEvents(int sock, int events) { boost::mutex::scoped_lock lock(socket_info_mutex_); M_SocketInfo::iterator it = socket_info_.find(sock); if (it == socket_info_.end()) { ROSCPP_LOG_DEBUG("PollSet: Tried to add events [%d] to fd [%d] which does not exist in this pollset", events, sock); return false; } it->second.events_ |= events; signal(); return true; }
/// Invoked when the connection to the publisher drops. A transport-level
/// disconnect schedules a retry (creating the retry timer on first use);
/// any other drop reason permanently drops this link.
void TransportPublisherLink::onConnectionDropped(const ConnectionPtr& conn, Connection::DropReason reason)
{
  if (dropping_)
  {
    return;
  }

  ROS_ASSERT(conn == connection_);

  SubscriptionPtr parent = parent_.lock();

  if (reason == Connection::TransportDisconnect)
  {
    std::string topic = parent ? parent->getName() : "unknown";

    ROSCPP_LOG_DEBUG("Connection to publisher [%s] to topic [%s] dropped", connection_->getTransport()->getTransportInfo().c_str(), topic.c_str());

    ROS_ASSERT(!needs_retry_);
    needs_retry_ = true;
    next_retry_ = WallTime::now() + retry_period_;

    // -1 means the retry timer has never been created for this link.
    if (retry_timer_handle_ == -1)
    {
      // First disconnect: start with a short (0.1s) retry period; the timer
      // callback itself backs the period off exponentially on each attempt.
      retry_period_ = WallDuration(0.1);
      next_retry_ = WallTime::now() + retry_period_;
      // NOTE(review): the timer runs on the internal callback queue, not the
      // user's queue, so retries proceed even if the user queue is stalled.
      retry_timer_handle_ = getInternalTimerManager()->add(WallDuration(retry_period_), boost::bind(&TransportPublisherLink::onRetryTimer, this, _1), getInternalCallbackQueue().get(), VoidConstPtr(), false);
    }
    else
    {
      getInternalTimerManager()->setPeriod(retry_timer_handle_, retry_period_);
    }
  }
  else
  {
    // Deliberate drop (e.g. shutdown or unsubscribe): no retry.
    drop();
  }
}
void TransportUDP::socketUpdate(int events) { { boost::mutex::scoped_lock lock(close_mutex_); if (closed_) { return; } } if((events & POLLERR) || (events & POLLHUP) || (events & POLLNVAL)) { ROSCPP_LOG_DEBUG("Socket %d closed with (ERR|HUP|NVAL) events %d", sock_, events); close(); } else { if ((events & POLLIN) && expecting_read_) { if (read_cb_) { read_cb_(shared_from_this()); } } if ((events & POLLOUT) && expecting_write_) { if (write_cb_) { write_cb_(shared_from_this()); } } } }
/// Determines the host name/IP this node should advertise, in priority order:
/// 1) ROS_HOSTNAME env var, 2) ROS_IP env var, 3) gethostname() (unless it is
/// "localhost"), 4) interface scan via getifaddrs() preferring public IPv4
/// addresses. Falls back to "127.0.0.1" when nothing better is found.
std::string determineHost()
{
  std::string ip_env;

  // First, did the user set ROS_HOSTNAME?
  if ( get_environment_variable(ip_env, "ROS_HOSTNAME"))
  {
    ROSCPP_LOG_DEBUG( "determineIP: using value of ROS_HOSTNAME:%s:", ip_env.c_str());
    return ip_env;
  }

  // Second, did the user set ROS_IP?
  if ( get_environment_variable(ip_env, "ROS_IP"))
  {
    ROSCPP_LOG_DEBUG( "determineIP: using value of ROS_IP:%s:", ip_env.c_str());
    return ip_env;
  }

  // Third, try the hostname
  char host[1024];
  memset(host,0,sizeof(host));
  if(gethostname(host,sizeof(host)-1) != 0)
  {
    ROS_ERROR("determineIP: gethostname failed");
  }
  // We don't want localhost to be our ip
  else if(strlen(host) && strcmp("localhost", host))
  {
    return std::string(host);
  }

  // Fourth, fall back on interface search, which will yield an IP address
#ifdef HAVE_IFADDRS_H
  struct ifaddrs *ifa = NULL, *ifp = NULL;
  int rc;
  if ((rc = getifaddrs(&ifp)) < 0)
  {
    ROS_FATAL("error in getifaddrs: [%s]", strerror(rc));
    ROS_BREAK();
  }

  char preferred_ip[200] = {0};
  for (ifa = ifp; ifa; ifa = ifa->ifa_next)
  {
    char ip_[200];
    socklen_t salen;

    if (!ifa->ifa_addr)
      continue; // evidently this interface has no ip address

    // Only IPv4 and IPv6 interfaces are considered.
    if (ifa->ifa_addr->sa_family == AF_INET)
      salen = sizeof(struct sockaddr_in);
    else if (ifa->ifa_addr->sa_family == AF_INET6)
      salen = sizeof(struct sockaddr_in6);
    else
      continue;

    if (getnameinfo(ifa->ifa_addr, salen, ip_, sizeof(ip_), NULL, 0, NI_NUMERICHOST) < 0)
    {
      ROSCPP_LOG_DEBUG( "getnameinfo couldn't get the ip of interface [%s]", ifa->ifa_name);
      continue;
    }

    //ROS_INFO( "ip of interface [%s] is [%s]", ifa->ifa_name, ip);

    // prefer non-private IPs over private IPs
    if (!strcmp("127.0.0.1", ip_) || strchr(ip_,':'))
      continue; // ignore loopback unless we have no other choice

    // Preference order (weakest to strongest candidate):
    // IPv6 < private IPv4 < public IPv4.
    if (ifa->ifa_addr->sa_family == AF_INET6 && !preferred_ip[0])
      strcpy(preferred_ip, ip_);
    else if (isPrivateIP(ip_) && !preferred_ip[0])
      strcpy(preferred_ip, ip_);
    else if (!isPrivateIP(ip_) && (isPrivateIP(preferred_ip) || !preferred_ip[0]))
      strcpy(preferred_ip, ip_);
  }

  freeifaddrs(ifp);

  if (!preferred_ip[0])
  {
    ROS_ERROR( "Couldn't find a preferred IP via the getifaddrs() call; I'm assuming that your IP "
               "address is 127.0.0.1.  This should work for local processes, "
               "but will almost certainly not work if you have remote processes."
               "Report to the ROS development team to seek a fix.");
    return std::string("127.0.0.1");
  }

  ROSCPP_LOG_DEBUG( "preferred IP is guessed to be %s", preferred_ip);
  return std::string(preferred_ip);
#else
  // @todo Fix IP determination in the case where getifaddrs() isn't
  // available.
  ROS_ERROR( "You don't have the getifaddrs() call; I'm assuming that your IP "
             "address is 127.0.0.1.  This should work for local processes, "
             "but will almost certainly not work if you have remote processes."
             "Report to the ROS development team to seek a fix.");
  return std::string("127.0.0.1");
#endif
}
/// Poll-set callback for the TCP socket. Read/accept events are serviced
/// before error events on purpose: the wire may still hold data when the
/// peer has already hung up.
void TransportTCP::socketUpdate(int events)
{
  {
    boost::recursive_mutex::scoped_lock lock(close_mutex_);
    if (closed_)
    {
      return;
    }

    // Handle read events before err/hup/nval, since there may be data left on the wire
    if ((events & POLLIN) && expecting_read_)
    {
      if (is_server_)
      {
        // Should not block here, because poll() said that it's ready
        // for reading
        TransportTCPPtr transport = accept();
        if (transport)
        {
          ROS_ASSERT(accept_cb_);
          accept_cb_(transport);
        }
      }
      else
      {
        if (read_cb_)
        {
          read_cb_(shared_from_this());
        }
      }
    }

    if ((events & POLLOUT) && expecting_write_)
    {
      if (write_cb_)
      {
        write_cb_(shared_from_this());
      }
    }
  }

  if((events & POLLERR) ||
     (events & POLLHUP) ||
     (events & POLLNVAL))
  {
    // Fetch the pending socket error (if any) for the log message.
    uint32_t error = -1;
    socklen_t len = sizeof(error);
    if (getsockopt(sock_, SOL_SOCKET, SO_ERROR, reinterpret_cast<char*>(&error), &len) < 0)
    {
      ROSCPP_LOG_DEBUG("getsockopt failed on socket [%d]", sock_);
    }
#ifdef _MSC_VER
    char err[60];
    strerror_s(err,60,error);
    ROSCPP_LOG_DEBUG("Socket %d closed with (ERR|HUP|NVAL) events %d: %s", sock_, events, err);
#else
    ROSCPP_LOG_DEBUG("Socket %d closed with (ERR|HUP|NVAL) events %d: %s", sock_, events, strerror(error));
#endif

    close();
  }
}