ManagementClient::ManagementClient(ManagementInformationBase& mib, const udp::endpoint& clientEndpoint,
        u_int8_t wirelessStateUpdateInterval, u_int8_t locationUpdateInterval, Logger& logger)
    : mib(mib), clientEndpoint(clientEndpoint), logger(logger)
{
    /**
     * Check that source port is not an ephemeral port which would
     * change every time a client sendto()s to MGMT
     */
    if (clientEndpoint.port() >= 32768 && clientEndpoint.port() <= 61000)
        logger.error("Client uses an ephemeral port that will change every time it sends data and this will confuse my state management!");

    /**
     * Initialise state strings map
     */
    clientStateStringMap.insert(std::make_pair(ManagementClient::OFFLINE, "OFFLINE"));
    clientStateStringMap.insert(std::make_pair(ManagementClient::ONLINE, "ONLINE"));

    /**
     * Initialise type strings map
     */
    clientTypeStringMap.insert(std::make_pair(ManagementClient::UNKNOWN, "Unknown"));
    clientTypeStringMap.insert(std::make_pair(ManagementClient::GN, "GeoNetworking"));
    clientTypeStringMap.insert(std::make_pair(ManagementClient::FAC, "Facilities"));
    clientTypeStringMap.insert(std::make_pair(ManagementClient::LTE, "Long Term Evolution"));

    /**
     * Initialise this client's state and type
     */
    state = ManagementClient::OFFLINE;
    type = ManagementClient::UNKNOWN;

    /**
     * We are not waiting for a reply from this client right now
     */
    repliedToTheLastPacket = true;
}
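// Standalone sketch of the ephemeral-port check above (is_ephemeral_port is our own
// helper name). The 32768-61000 range is the classic Linux default local-port range;
// other systems or a tuned kernel may use a different range, so treat the bounds as
// an assumption rather than a constant.
#include <boost/asio.hpp>

inline bool is_ephemeral_port(const boost::asio::ip::udp::endpoint& ep)
{
    // port() returns the 16-bit port number in host byte order
    return ep.port() >= 32768 && ep.port() <= 61000;
}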
void udp_socket::bind(udp::endpoint const& ep, error_code& ec)
{
    if (m_ipv4_sock.is_open()) m_ipv4_sock.close(ec);
    if (m_ipv6_sock.is_open()) m_ipv6_sock.close(ec);

    if (ep.address().is_v4())
    {
        m_ipv4_sock.open(udp::v4(), ec);
        if (ec) return;
        m_ipv4_sock.bind(ep, ec);
        if (ec) return;
        m_ipv4_sock.async_receive_from(asio::buffer(m_v4_buf, sizeof(m_v4_buf))
            , m_v4_ep, boost::bind(&udp_socket::on_read, this, &m_ipv4_sock, _1, _2));
    }
    else
    {
        // the v6 socket has to be opened before it can be configured and bound
        m_ipv6_sock.open(udp::v6(), ec);
        if (ec) return;
        m_ipv6_sock.set_option(v6only(true), ec);
        if (ec) return;
        m_ipv6_sock.bind(ep, ec);
        if (ec) return;
        m_ipv6_sock.async_receive_from(asio::buffer(m_v6_buf, sizeof(m_v6_buf))
            , m_v6_ep, boost::bind(&udp_socket::on_read, this, &m_ipv6_sock, _1, _2));
    }
    m_bind_port = ep.port();
}
void udp_socket::wrap(udp::endpoint const& ep, char const* p, int len, error_code& ec)
{
    CHECK_MAGIC;
    using namespace libtorrent::detail;

    char header[20];
    char* h = header;

    write_uint16(0, h); // reserved
    write_uint8(0, h); // fragment
    write_uint8(ep.address().is_v4() ? 1 : 4, h); // atyp
    write_address(ep.address(), h);
    write_uint16(ep.port(), h);

    boost::array<asio::const_buffer, 2> iovec;
    iovec[0] = asio::const_buffer(header, h - header);
    iovec[1] = asio::const_buffer(p, len);

#if TORRENT_USE_IPV6
    if (m_proxy_addr.address().is_v4() && m_ipv4_sock.is_open())
#endif
        m_ipv4_sock.send_to(iovec, m_proxy_addr, 0, ec);
#if TORRENT_USE_IPV6
    else
        m_ipv6_sock.send_to(iovec, m_proxy_addr, 0, ec);
#endif
}
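// Sketch in plain Boost.Asio (no libtorrent write_* helpers; make_socks5_udp_header is
// our own name): the same SOCKS5 UDP request header wrap() assembles above --
// RSV(2) | FRAG(1) | ATYP(1) | DST.ADDR | DST.PORT, as defined in RFC 1928.
#include <boost/asio.hpp>
#include <cstdint>
#include <vector>

std::vector<std::uint8_t> make_socks5_udp_header(const boost::asio::ip::udp::endpoint& ep)
{
    std::vector<std::uint8_t> h;
    h.push_back(0); h.push_back(0);                // RSV, reserved
    h.push_back(0);                                // FRAG, no fragmentation
    if (ep.address().is_v4())
    {
        h.push_back(1);                            // ATYP = IPv4
        auto b = ep.address().to_v4().to_bytes();
        h.insert(h.end(), b.begin(), b.end());
    }
    else
    {
        h.push_back(4);                            // ATYP = IPv6
        auto b = ep.address().to_v6().to_bytes();
        h.insert(h.end(), b.begin(), b.end());
    }
    h.push_back(static_cast<std::uint8_t>(ep.port() >> 8));   // DST.PORT, big-endian
    h.push_back(static_cast<std::uint8_t>(ep.port() & 0xff));
    return h;
}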
string EndpointToString(const udp::endpoint &endpoint)
{
    string strIp = endpoint.address().to_string();
    unsigned int dwPort = endpoint.port();

    // 64 bytes leaves room for an IPv6 address plus ":port"; snprintf guards
    // against overflow either way
    char szIpPort[64] = {0};
    snprintf(szIpPort, sizeof(szIpPort), "%s:%u", strIp.c_str(), dwPort);
    return szIpPort;
}
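// Alternative sketch (plain Boost.Asio; endpoint_to_string is our own name, not part of
// the code above): stream formatting sidesteps buffer sizing entirely. Streaming the
// endpoint object itself also works and brackets IPv6 addresses, e.g. "[::1]:80".
#include <boost/asio.hpp>
#include <sstream>
#include <string>

std::string endpoint_to_string(const boost::asio::ip::udp::endpoint& ep)
{
    std::ostringstream oss;
    oss << ep.address().to_string() << ":" << ep.port();
    return oss.str();
}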
node_entry(node_id const& id_, udp::endpoint ep, int roundtriptime = 0xffff
    , bool pinged = false)
    : addr(ep.address())
    , port(ep.port())
    , timeout_count(pinged ? 0 : 0xffff)
    , rtt(roundtriptime)
    , id(id_)
{
#ifdef TORRENT_DHT_VERBOSE_LOGGING
    first_seen = time_now();
#endif
}
void rpc_manager::invoke(int message_id, udp::endpoint target_addr
    , observer_ptr o)
{
    INVARIANT_CHECK;

    if (m_destructing)
    {
        o->abort();
        return;
    }

    msg m;
    m.message_id = message_id;
    m.reply = false;
    m.id = m_our_id;
    m.addr = target_addr;
    TORRENT_ASSERT(!m_transactions[m_next_transaction_id]);
#ifdef TORRENT_DEBUG
    int potential_new_id = m_next_transaction_id;
#endif
#ifndef BOOST_NO_EXCEPTIONS
    try {
#endif
        m.transaction_id.clear();
        std::back_insert_iterator<std::string> out(m.transaction_id);
        io::write_uint16(m_next_transaction_id, out);

        o->send(m);
        o->sent = time_now();
#if TORRENT_USE_IPV6
        o->target_addr = target_addr.address();
#else
        o->target_addr = target_addr.address().to_v4();
#endif
        o->port = target_addr.port();
#ifdef TORRENT_DHT_VERBOSE_LOGGING
        TORRENT_LOG(rpc) << "Invoking " << messages::ids[message_id]
            << " -> " << target_addr;
#endif
        m_send(m);
        new_transaction_id(o);
#ifndef BOOST_NO_EXCEPTIONS
    }
    catch (std::exception& e)
    {
        // m_send may fail with "no route to host"
        TORRENT_ASSERT(potential_new_id == m_next_transaction_id);
        o->abort();
    }
#endif
}
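// Sketch of the transaction-id encoding above without libtorrent's io helpers
// (write_uint16_be is our own name). The id is serialised as exactly two bytes through
// a back_insert_iterator; libtorrent's helper writes them in network (big-endian)
// order, which this mirrors.
#include <cstdint>
#include <iterator>
#include <string>

template <typename OutIt>
void write_uint16_be(std::uint16_t v, OutIt& out)
{
    *out++ = static_cast<char>((v >> 8) & 0xff);  // high byte first
    *out++ = static_cast<char>(v & 0xff);         // then low byte
}

int main()
{
    std::string transaction_id;
    std::back_insert_iterator<std::string> out(transaction_id);
    write_uint16_be(42, out);
    return transaction_id.size() == 2 ? 0 : 1;
}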
node_entry(udp::endpoint ep)
    : addr(ep.address())
    , port(ep.port())
    , timeout_count(0xffff)
    , rtt(0xffff)
    , id(0)
{
#ifdef TORRENT_DHT_VERBOSE_LOGGING
    first_seen = time_now();
#endif
}
bool CUDPSocket::SendData(udp::endpoint& ep, char* msg, int size, string& errInfo,
    unsigned long iProxyIp /*= 0*/, unsigned short iProxyPort /*= 0*/)
{
    if (ep.address().to_v4().to_string().empty())
        return false;
    if (ep.address().to_v4().to_string() == "255.255.255.255")
        return false;
    if (ep.port() == 0)  // port() is an unsigned short, so only 0 is invalid here
        return false;
    if (msg == NULL)
        return true;

    try
    {
        int ret = this->m_mySocket.send_to(boost::asio::buffer((const void*)msg, size), ep);
        if (ret < size)
        {
            return false;
        }
    }
    catch (const boost::system::system_error& err)
    {
        // send_to reports failures by throwing system_error, not error_code
        ostringstream oss;
        oss << "Fatal error: CUDPSocket error_code2 " << err.code().value();
        make_log_func_(oss.str());
        return false;
    }
    catch (...)
    {
        ostringstream oss;
        oss << "Fatal error ...";
        make_log_func_(oss.str());
        return false;
    }
    return true;
}
node_entry::node_entry(udp::endpoint ep)
    : last_queried(min_time())
    , id(0)
    , a(ep.address().to_v4().to_bytes())
    , p(ep.port())
    , rtt(0xffff)
    , timeout_count(0xff)
{
#ifdef TORRENT_DHT_VERBOSE_LOGGING
    first_seen = aux::time_now();
#endif
}
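// Sketch (plain Boost.Asio, no libtorrent types; the struct and function names are
// ours): the same decomposition the node_entry constructors perform -- keep the IPv4
// address as raw bytes plus the port, and rebuild the endpoint on demand.
#include <boost/asio.hpp>
#include <cstdint>

struct stored_endpoint
{
    boost::asio::ip::address_v4::bytes_type addr;  // four network-order bytes
    std::uint16_t port;
};

inline stored_endpoint store(const boost::asio::ip::udp::endpoint& ep)
{
    stored_endpoint s;
    s.addr = ep.address().to_v4().to_bytes();
    s.port = ep.port();
    return s;
}

inline boost::asio::ip::udp::endpoint restore(const stored_endpoint& s)
{
    return boost::asio::ip::udp::endpoint(boost::asio::ip::address_v4(s.addr), s.port);
}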
void DatagramTransceiver::sendMessage(const QString &msg, const udp::endpoint& remote_endpoint)
{
    bool write_in_progress = m_write_msgs.empty() == false;

    MessageToSend* struct_msg = new MessageToSend;
    struct_msg->destination_endpoint.address(remote_endpoint.address());
    struct_msg->destination_endpoint.port(remote_endpoint.port());
    struct_msg->msg = msg;
    m_write_msgs.push_back(struct_msg);

    if (write_in_progress == false)
        doSend();
}
node_entry::node_entry(node_id const& id_, udp::endpoint ep
    , int roundtriptime
    , bool pinged)
    : last_queried(pinged ? aux::time_now() : min_time())
    , id(id_)
    , a(ep.address().to_v4().to_bytes())
    , p(ep.port())
    , rtt(roundtriptime & 0xffff)
    , timeout_count(pinged ? 0 : 0xff)
{
#ifdef TORRENT_DHT_VERBOSE_LOGGING
    first_seen = aux::time_now();
#endif
}
void udp_socket::bind(udp::endpoint const& ep, error_code& ec)
{
    CHECK_MAGIC;
    TORRENT_ASSERT(is_single_thread());
    TORRENT_ASSERT(m_abort == false);
    if (m_abort)
    {
        ec = boost::asio::error::operation_aborted;
        return;
    }

    if (m_ipv4_sock.is_open()) m_ipv4_sock.close(ec);
#if TORRENT_USE_IPV6
    if (m_ipv6_sock.is_open()) m_ipv6_sock.close(ec);
#endif

    if (ep.address().is_v4())
    {
        m_ipv4_sock.open(udp::v4(), ec);
        if (ec) return;
        m_ipv4_sock.bind(ep, ec);
        if (ec) return;
        udp::socket::non_blocking_io ioc(true);
        m_ipv4_sock.io_control(ioc, ec);
        if (ec) return;
        setup_read(&m_ipv4_sock);
    }
#if TORRENT_USE_IPV6
    else
    {
        // the v6 socket has to be opened before it can be configured and bound
        m_ipv6_sock.open(udp::v6(), ec);
        if (ec) return;
#ifdef IPV6_V6ONLY
        m_ipv6_sock.set_option(v6only(true), ec);
        if (ec) return;
#endif
        m_ipv6_sock.bind(ep, ec);
        if (ec) return;
        udp::socket::non_blocking_io ioc(true);
        m_ipv6_sock.io_control(ioc, ec);
        if (ec) return;
        setup_read(&m_ipv6_sock);
    }
#endif

#ifdef TORRENT_DEBUG
    m_started = true;
#endif
    m_bind_port = ep.port();
}
void udp_socket::bind(udp::endpoint const& ep, error_code& ec)
{
    CHECK_MAGIC;

    mutex_t::scoped_lock l(m_mutex);

    TORRENT_ASSERT(m_abort == false);
    if (m_abort) return;

    if (m_ipv4_sock.is_open()) m_ipv4_sock.close(ec);
#if TORRENT_USE_IPV6
    if (m_ipv6_sock.is_open()) m_ipv6_sock.close(ec);
#endif

    if (ep.address().is_v4())
    {
        m_ipv4_sock.open(udp::v4(), ec);
        if (ec) return;
        m_ipv4_sock.bind(ep, ec);
        if (ec) return;
        if (m_v4_outstanding == 0)
        {
            ++m_v4_outstanding;
            m_ipv4_sock.async_receive_from(asio::buffer(m_v4_buf, sizeof(m_v4_buf))
                , m_v4_ep, boost::bind(&udp_socket::on_read, this, &m_ipv4_sock, _1, _2));
        }
    }
#if TORRENT_USE_IPV6
    else
    {
        // the v6 socket has to be opened before it can be configured and bound
        m_ipv6_sock.open(udp::v6(), ec);
        if (ec) return;
        m_ipv6_sock.set_option(v6only(true), ec);
        if (ec) return;
        m_ipv6_sock.bind(ep, ec);
        if (ec) return;
        if (m_v6_outstanding == 0)
        {
            ++m_v6_outstanding;
            m_ipv6_sock.async_receive_from(asio::buffer(m_v6_buf, sizeof(m_v6_buf))
                , m_v6_ep, boost::bind(&udp_socket::on_read, this, &m_ipv6_sock, _1, _2));
        }
    }
#endif

#ifdef TORRENT_DEBUG
    m_started = true;
#endif
    m_bind_port = ep.port();
}
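// Minimal open/bind/receive sketch in plain Boost.Asio (port 6881 and all names are
// placeholders, not taken from the code above): the same sequence the bind() overloads
// above perform for the v4 socket, without the libtorrent bookkeeping.
#include <boost/asio.hpp>
#include <iostream>

int main()
{
    boost::asio::io_service io;
    boost::asio::ip::udp::socket sock(io);
    boost::system::error_code ec;

    boost::asio::ip::udp::endpoint ep(boost::asio::ip::udp::v4(), 6881);  // placeholder port
    sock.open(ep.protocol(), ec);
    if (!ec) sock.bind(ep, ec);
    if (ec)
    {
        std::cerr << "bind failed: " << ec.message() << "\n";
        return 1;
    }

    char buf[1500];
    boost::asio::ip::udp::endpoint sender;
    sock.async_receive_from(boost::asio::buffer(buf), sender,
        [&](const boost::system::error_code& e, std::size_t n)
        {
            if (!e)
                std::cout << n << " bytes from "
                          << sender.address().to_string() << ":" << sender.port() << "\n";
        });

    io.run();  // blocks until the single outstanding receive completes
    return 0;
}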
void observer::set_target(udp::endpoint const& ep)
{
    m_sent = clock_type::now();

    m_port = ep.port();
#if TORRENT_USE_IPV6
    if (ep.address().is_v6())
    {
        flags |= flag_ipv6_address;
        m_addr.v6 = ep.address().to_v6().to_bytes();
    }
    else
#endif
    {
        flags &= ~flag_ipv6_address;
        m_addr.v4 = ep.address().to_v4().to_bytes();
    }
}
static bool resolve_address_udp(io_service &io, int chan, std::string host,
    unsigned short port, udp::endpoint &ep)
{
    bool result = false;
    udp::resolver resolver(io);
    error_code ec;

    udp::resolver::query query(host, "");

    std::for_each(resolver.resolve(query, ec), udp::resolver::iterator(),
        [&](const udp::endpoint &q_ep) {
            ep = q_ep;
            ep.port(port);
            result = true;
            logDebug(PFXd "host %s resolved as %s", chan, host.c_str(), to_string_ss(ep).c_str());
        });

    if (ec) {
        logWarn(PFXd "resolve error: %s", chan, ec.message().c_str());
        result = false;
    }

    return result;
}
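// Minimal usage sketch (the hostname and port number are placeholders, not taken from
// the code above): resolve a name, take the first result, then override its port with
// port(), which is what resolve_address_udp() does for every resolved entry.
#include <boost/asio.hpp>
#include <iostream>

int main()
{
    boost::asio::io_service io;
    boost::asio::ip::udp::resolver resolver(io);
    boost::system::error_code ec;

    boost::asio::ip::udp::resolver::query query("localhost", "");
    boost::asio::ip::udp::resolver::iterator it = resolver.resolve(query, ec);
    if (ec || it == boost::asio::ip::udp::resolver::iterator())
    {
        std::cerr << "resolve failed: " << ec.message() << "\n";
        return 1;
    }

    boost::asio::ip::udp::endpoint ep = *it;  // first resolved entry
    ep.port(14550);                           // keep the address, impose our own port
    std::cout << ep.address().to_string() << ":" << ep.port() << "\n";
    return 0;
}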
void observer::set_target(udp::endpoint const& ep)
{
#ifdef TORRENT_DHT_VERBOSE_LOGGING
    // use high resolution timers for logging
    m_sent = time_now_hires();
#else
    m_sent = time_now();
#endif

    m_port = ep.port();
#if TORRENT_USE_IPV6
    if (ep.address().is_v6())
    {
        flags |= flag_ipv6_address;
        m_addr.v6 = ep.address().to_v6().to_bytes();
    }
    else
#endif
    {
        flags &= ~flag_ipv6_address;
        m_addr.v4 = ep.address().to_v4().to_bytes();
    }
}
tuple endpoint_to_tuple(udp::endpoint const& ep)
{
    return boost::python::make_tuple(ep.address().to_string(), ep.port());
}
uint64_t endpoint_to_i(const udp::endpoint& ep)
{
    uint64_t addr_i = ep.address().to_v4().to_ulong();
    uint32_t port = ep.port();
    return (addr_i << 32) + port;
}
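// Inverse sketch (assumes the exact packing used by endpoint_to_i above, IPv4 only;
// i_to_endpoint is our own name): rebuild the endpoint from the 64-bit key.
#include <boost/asio.hpp>
#include <cstdint>

boost::asio::ip::udp::endpoint i_to_endpoint(std::uint64_t key)
{
    boost::asio::ip::address_v4 addr(static_cast<unsigned int>(key >> 32));
    std::uint16_t port = static_cast<std::uint16_t>(key & 0xffff);  // only 16 bits are meaningful
    return boost::asio::ip::udp::endpoint(addr, port);
}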
//==============================================================================
// Handle Connection (uglysolution.com)
// - Checks if the connection already exists. If it exists, restart the timeout
//   timer. Else, add a new connection.
//==============================================================================
boost::shared_ptr<Connection> ConnectionHandler::handleConnection( udp::endpoint& _endpoint )
{
    unsigned int connection_size = m_connections.size();

    // Check if the connection already exists
    for( unsigned int i = 0; i < connection_size; i++ )
    {
        if( m_connections[i]->endpoint.address().to_string().compare(_endpoint.address().to_string()) == 0 &&
            m_connections[i]->endpoint.port() == _endpoint.port() )
        {
            boost::shared_ptr<Connection> connection = m_connections[i];
            connection->timout_timer.expires_from_now( boost::posix_time::seconds(TIMEOUT) );
            connection->timout_timer.async_wait( boost::bind(&ConnectionHandler::timeout, this,
                connection, boost::asio::placeholders::error) );
            return m_connections[i];
        }
    }

    // Check if the connection count has reached the allowed maximum
    if( connection_size >= MAX_CONNECTIONS )
        return boost::shared_ptr<Connection>();

    // Find a free connection ID; restart the scan whenever the candidate collides
    // with an ID that is already in use
    m_id_counter++;
    bool id_taken = true;
    while( id_taken )
    {
        id_taken = false;
        for( unsigned int i = 0; i < m_connections.size(); i++ )
        {
            if( m_connections[i]->connection_id == m_id_counter )
            {
                m_id_counter++;
                id_taken = true;
                break;
            }
        }
    }

    // Add the connection and start the timers
    boost::shared_ptr<Connection> new_connection( new Connection(m_id_counter, _endpoint, (*m_io_service)) );
    new_connection->ack_timer.async_wait( boost::bind(&ConnectionHandler::requestAck, this,
        new_connection, boost::asio::placeholders::error) );
    new_connection->timout_timer.async_wait( boost::bind(&ConnectionHandler::timeout, this,
        new_connection, boost::asio::placeholders::error) );
    m_connections.push_back( new_connection );

    // temp solution
    if( m_network_handler->isServer() )
        new_connection->sync_timer.async_wait( boost::bind(&ConnectionHandler::syncClock, this,
            new_connection, boost::asio::placeholders::error) );

    return m_connections.back();
}
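// Note / sketch: basic_endpoint already defines operator==, comparing address and port,
// so the matching above can be done without building temporary strings. endpoints_equal
// is our own helper name, shown only for illustration.
#include <boost/asio.hpp>

inline bool endpoints_equal(const boost::asio::ip::udp::endpoint& a,
                            const boost::asio::ip::udp::endpoint& b)
{
    return a == b;  // equivalent to comparing address() and port() separately
}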
inline std::string print_endpoint(udp::endpoint const& ep)
{
    return print_endpoint(tcp::endpoint(ep.address(), ep.port()));
}
long UdpTransport::send_buffer(
    const udp::endpoint& target, const char* buf, size_t size)
{
    uint64_t bytes_sent = 0;

    int send_attempts = -1;
    ssize_t actual_sent = -1;

    while (actual_sent < 0 &&
        (settings_.resend_attempts < 0 || send_attempts < settings_.resend_attempts))
    {
        if (settings_.max_send_hertz > 0)
        {
            enforcer_.sleep_until_next();
        }

        // send the fragment
        try
        {
            actual_sent = socket_.send_to(asio::buffer(buf, size), target);
        }
        catch (const boost::system::system_error& e)
        {
            madara_logger_log(context_.get_logger(), logger::LOG_MAJOR,
                "UdpTransport::send_buffer:"
                " Error sending packet to %s:%d: %s\n",
                target.address().to_string().c_str(), (int)target.port(), e.what());

            // ensure erroneous data is not being used
            actual_sent = -1;
        }

        ++send_attempts;

        if (settings_.debug_to_kb_prefix != "")
        {
            ++sent_packets;
        }

        if (actual_sent > 0)
        {
            madara_logger_log(context_.get_logger(), logger::LOG_MAJOR,
                "UdpTransport::send_buffer: Sent %d byte packet to %s:%d\n",
                (int)actual_sent, target.address().to_string().c_str(), (int)target.port());

            bytes_sent += actual_sent;

            if (settings_.debug_to_kb_prefix != "")
            {
                sent_data += actual_sent;

                if (sent_data_max < actual_sent)
                {
                    sent_data_max = actual_sent;
                }
                if (sent_data_min > actual_sent || sent_data_min == 0)
                {
                    sent_data_min = actual_sent;
                }
            }
        }
        else
        {
            if (settings_.debug_to_kb_prefix != "")
            {
                ++failed_sends;
            }
        }
    }

    return (long)bytes_sent;
}
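// Minimal synchronous send sketch (plain Boost.Asio; the socket name, loopback target
// and port 9000 are placeholders): the core send_to() call plus system_error handling
// that UdpTransport::send_buffer wraps in its retry and accounting logic above.
#include <boost/asio.hpp>
#include <iostream>

int main()
{
    boost::asio::io_service io;
    boost::asio::ip::udp::socket sock(io, boost::asio::ip::udp::v4());
    boost::asio::ip::udp::endpoint target(boost::asio::ip::address_v4::loopback(), 9000);

    const char payload[] = "ping";
    try
    {
        std::size_t sent = sock.send_to(boost::asio::buffer(payload, sizeof(payload) - 1), target);
        std::cout << "sent " << sent << " bytes to "
                  << target.address().to_string() << ":" << target.port() << "\n";
    }
    catch (const boost::system::system_error& e)
    {
        std::cerr << "send_to failed: " << e.what() << "\n";
        return 1;
    }
    return 0;
}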
void EndpointToIpPort(const udp::endpoint &endpoint, string &strIp, unsigned int &dwPort)
{
    strIp = endpoint.address().to_string();
    dwPort = endpoint.port();
}
void DatagramTransceiver::setDefaultRemoteEndPoint(const udp::endpoint &remote_endpoint)
{
    m_remote_endpoint.address(remote_endpoint.address());
    m_remote_endpoint.port(remote_endpoint.port());
}
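// Note / sketch: basic_endpoint is copy-assignable, so the two setter calls above can
// collapse into a single assignment with identical behaviour:
//
//     m_remote_endpoint = remote_endpoint;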