//  Steerable proxy: shuttles messages between 'frontend_' and 'backend_'
//  (which may be the same socket), mirroring every forwarded message to
//  'capture_' (if non-NULL) and obeying single-part PAUSE/RESUME/TERMINATE
//  (and, in draft builds, STATISTICS) commands received on 'control_'.
//  Returns 0 after a clean TERMINATE, -1 on any error ('msg' is closed via
//  close_and_return in both cases).
int zmq::proxy (
    class socket_base_t *frontend_,
    class socket_base_t *backend_,
    class socket_base_t *capture_,
    class socket_base_t *control_)
{
    msg_t msg;
    int rc = msg.init ();
    if (rc != 0)
        return -1;

    //  The algorithm below assumes ratio of requests and replies processed
    //  under full load to be 1:1.

    int more;
    size_t moresz;

    //  Poll sets: 'items' watches for readable sockets; the third entry is
    //  only polled when a 'control_' socket was supplied (see 'qt_poll_items').
    zmq_pollitem_t items [] = {
        { frontend_, 0, ZMQ_POLLIN, 0 },
        { backend_, 0, ZMQ_POLLIN, 0 },
        { control_, 0, ZMQ_POLLIN, 0 }
    };
    int qt_poll_items = (control_ ? 3 : 2);

    //  Separate non-blocking poll set for writability of the two endpoints.
    zmq_pollitem_t itemsout [] = {
        { frontend_, 0, ZMQ_POLLOUT, 0 },
        { backend_, 0, ZMQ_POLLOUT, 0 }
    };

    //  Counters reported via the draft STATISTICS command; zeroed up front.
    zmq_socket_stats_t frontend_stats;
    memset(&frontend_stats, 0, sizeof(frontend_stats));
    zmq_socket_stats_t backend_stats;
    memset(&backend_stats, 0, sizeof(backend_stats));

    //  Proxy can be in these three states
    enum { active, paused, terminated } state = active;

    while (state != terminated) {
        //  Wait while there are either requests or replies to process.
        rc = zmq_poll (&items [0], qt_poll_items, -1);
        if (unlikely (rc < 0))
            return close_and_return (&msg, -1);

        //  Get the pollout separately because when combining this with pollin it maxes the CPU
        //  because pollout shall most of the time return directly.
        //  POLLOUT is only checked when frontend and backend sockets are not the same.
        if (frontend_ != backend_) {
            rc = zmq_poll (&itemsout [0], 2, 0);
            if (unlikely (rc < 0)) {
                return close_and_return (&msg, -1);
            }
        }

        //  Process a control command if any
        if (control_ && items [2].revents & ZMQ_POLLIN) {
            rc = control_->recv (&msg, 0);
            if (unlikely (rc < 0))
                return close_and_return (&msg, -1);

            //  Control commands must be single-part; a multi-part command is
            //  treated as a fatal error, same as a failed recv.
            moresz = sizeof more;
            rc = control_->getsockopt (ZMQ_RCVMORE, &more, &moresz);
            if (unlikely (rc < 0) || more)
                return close_and_return (&msg, -1);

            //  Copy message to capture socket if any
            rc = capture (capture_, msg);
            if (unlikely (rc < 0))
                return close_and_return (&msg, -1);

            if (msg.size () == 5 && memcmp (msg.data (), "PAUSE", 5) == 0)
                state = paused;
            else
            if (msg.size () == 6 && memcmp (msg.data (), "RESUME", 6) == 0)
                state = active;
            else
            if (msg.size () == 9 && memcmp (msg.data (), "TERMINATE", 9) == 0)
                state = terminated;
            else {
#ifdef ZMQ_BUILD_DRAFT_API
                if (msg.size () == 10 && memcmp (msg.data (), "STATISTICS", 10) == 0) {
                    rc = reply_stats(control_, &frontend_stats, &backend_stats);
                    if (unlikely (rc < 0))
                        return close_and_return (&msg, -1);
                }
                else {
#endif
                    //  This is an API error, we assert
                    puts ("E: invalid command sent to proxy");
                    zmq_assert (false);
#ifdef ZMQ_BUILD_DRAFT_API
                }
#endif
            }
        }

        //  NOTE(review): while paused, readable frontend/backend sockets still
        //  wake the blocking zmq_poll above even though nothing is forwarded,
        //  so a paused proxy with pending traffic spins through this loop —
        //  confirm whether that is acceptable here.

        //  Process a request: forward frontend -> backend only when the
        //  destination is writable (or both ends are the same socket, in
        //  which case POLLOUT was not polled at all).
        if (state == active
            && items [0].revents & ZMQ_POLLIN
            && (frontend_ == backend_ || itemsout [1].revents & ZMQ_POLLOUT)) {
            rc = forward (frontend_, &frontend_stats, backend_, &backend_stats, capture_, msg);
            if (unlikely (rc < 0))
                return close_and_return (&msg, -1);
        }
        //  Process a reply: backend -> frontend, only meaningful when the two
        //  endpoints are distinct sockets.
        if (state == active
            && frontend_ != backend_
            && items [1].revents & ZMQ_POLLIN
            && itemsout [0].revents & ZMQ_POLLOUT) {
            rc = forward (backend_, &backend_stats, frontend_, &frontend_stats, capture_, msg);
            if (unlikely (rc < 0))
                return close_and_return (&msg, -1);
        }
    }
    return close_and_return (&msg, 0);
}
// Routes an incoming DHT reply message to the observer that issued the
// matching request.  The transaction id in the message's "t" field is
// matched (together with the sender's address) against the outstanding
// transactions; on success the observer receives the reply and the sender
// is reported to the routing table.  Returns the routing table's verdict,
// or false when the message is ignored or malformed.
bool rpc_manager::incoming(msg const& m, node_id* id)
{
	INVARIANT_CHECK;

	// the manager is being torn down; drop everything
	if (m_destructing) return false;

	// we only deal with replies, not queries
	TORRENT_ASSERT(m.message.dict_find_string_value("y") == "r");

	// if we don't have the transaction id in our
	// request list, ignore the packet

	std::string transaction_id = m.message.dict_find_string_value("t");

	// transaction ids we generate are 2 bytes; anything else decodes to the
	// invalid id -1 (read_uint16 advances the iterator 'i')
	std::string::const_iterator i = transaction_id.begin();
	int tid = transaction_id.size() != 2 ? -1 : io::read_uint16(i);

	observer_ptr o;

	// linear scan over all outstanding transactions; note the loop variable
	// 'i' deliberately shadows the string iterator above
	for (transactions_t::iterator i = m_transactions.begin()
		, end(m_transactions.end()); i != end; ++i)
	{
		TORRENT_ASSERT(*i);
		if ((*i)->transaction_id() != tid) continue;
		// the reply must come from the address the request was sent to
		if (m.addr.address() != (*i)->target_addr()) continue;
		o = *i;
		m_transactions.erase(i);
		break;
	}

	if (!o)
	{
#ifdef TORRENT_DHT_VERBOSE_LOGGING
		TORRENT_LOG(rpc) << "Reply with invalid transaction id size: "
			<< transaction_id.size() << " from " << m.addr;
#endif
		// NOTE(review): answering a sender we could not match to any request
		// of ours lets a spoofed source use this node as a traffic
		// amplifier (magnification attack) — consider silently dropping
		// instead; verify against current upstream behaviour.
		entry e;
		incoming_error(e, "invalid transaction id");
		m_send(m_userdata, e, m.addr, 0);
		return false;
	}

#ifdef TORRENT_DHT_VERBOSE_LOGGING
	// append the measured round-trip time for this reply to a log file
	std::ofstream reply_stats("round_trip_ms.log", std::ios::app);
	reply_stats << m.addr << "\t" << total_milliseconds(time_now_hires() - o->sent())
		<< std::endl;
#endif

	// a reply must carry an "r" dictionary ...
	lazy_entry const* ret_ent = m.message.dict_find_dict("r");
	if (ret_ent == 0)
	{
		entry e;
		incoming_error(e, "missing 'r' key");
		m_send(m_userdata, e, m.addr, 0);
		return false;
	}

	// ... containing the responder's 20-byte node id
	lazy_entry const* node_id_ent = ret_ent->dict_find_string("id");
	if (node_id_ent == 0 || node_id_ent->string_length() != 20)
	{
		entry e;
		incoming_error(e, "missing 'id' key");
		m_send(m_userdata, e, m.addr, 0);
		return false;
	}

	// optional "ip" field: the responder tells us which external address it
	// saw our request come from (4 bytes IPv4, 16 bytes IPv6)
	lazy_entry const* ext_ip = ret_ent->dict_find_string("ip");
	if (ext_ip && ext_ip->string_length() == 4)
	{
		// this node claims we use the wrong node-ID!
		address_v4::bytes_type b;
		memcpy(&b[0], ext_ip->string_ptr(), 4);
		m_ext_ip(address_v4(b), aux::session_impl::source_dht, m.addr.address());
	}
#if TORRENT_USE_IPV6
	else if (ext_ip && ext_ip->string_length() == 16)
	{
		// this node claims we use the wrong node-ID!
		address_v6::bytes_type b;
		memcpy(&b[0], ext_ip->string_ptr(), 16);
		m_ext_ip(address_v6(b), aux::session_impl::source_dht, m.addr.address());
	}
#endif

#ifdef TORRENT_DHT_VERBOSE_LOGGING
	TORRENT_LOG(rpc) << "[" << o->m_algorithm.get() << "] Reply with transaction id: "
		<< tid << " from " << m.addr;
#endif
	// hand the reply to the waiting observer and report the sender's id
	o->reply(m);
	*id = node_id(node_id_ent->string_ptr());

	// we found an observer for this reply, hence the node is not spoofing
	// add it to the routing table
	return m_table.node_seen(*id, m.addr);
}
int zmq::proxy ( class socket_base_t *frontend_, class socket_base_t *backend_, class socket_base_t *capture_, class socket_base_t *control_) { msg_t msg; int rc = msg.init (); if (rc != 0) return -1; // The algorithm below assumes ratio of requests and replies processed // under full load to be 1:1. int more; size_t moresz = sizeof (more); // Proxy can be in these three states enum { active, paused, terminated } state = active; bool frontend_equal_to_backend; bool frontend_in = false; bool frontend_out = false; bool backend_in = false; bool backend_out = false; bool control_in = false; zmq::socket_poller_t::event_t events [3]; zmq_socket_stats_t frontend_stats; zmq_socket_stats_t backend_stats; memset(&frontend_stats, 0, sizeof(frontend_stats)); memset(&backend_stats, 0, sizeof(backend_stats)); // Don't allocate these pollers from stack because they will take more than 900 kB of stack! // On Windows this blows up default stack of 1 MB and aborts the program. // I wanted to use std::shared_ptr here as the best solution but that requires C++11... zmq::socket_poller_t *poller_all = new (std::nothrow) zmq::socket_poller_t; // Poll for everything. zmq::socket_poller_t *poller_in = new (std::nothrow) zmq::socket_poller_t; // Poll only 'ZMQ_POLLIN' on all sockets. Initial blocking poll in loop. zmq::socket_poller_t *poller_control = new (std::nothrow) zmq::socket_poller_t; // Poll only for 'ZMQ_POLLIN' on 'control_', when proxy is paused. zmq::socket_poller_t *poller_receive_blocked = new (std::nothrow) zmq::socket_poller_t; // All except 'ZMQ_POLLIN' on 'frontend_'. // If frontend_==backend_ 'poller_send_blocked' and 'poller_receive_blocked' are the same, 'ZMQ_POLLIN' is ignored. // In that case 'poller_send_blocked' is not used. We need only 'poller_receive_blocked'. // We also don't need 'poller_both_blocked', 'poller_backend_only' nor 'poller_frontend_only' no need to initialize it. // We save some RAM and time for initialization. 
zmq::socket_poller_t *poller_send_blocked = NULL; // All except 'ZMQ_POLLIN' on 'backend_'. zmq::socket_poller_t *poller_both_blocked = NULL; // All except 'ZMQ_POLLIN' on both 'frontend_' and 'backend_'. zmq::socket_poller_t *poller_frontend_only = NULL; // Only 'ZMQ_POLLIN' and 'ZMQ_POLLOUT' on 'frontend_'. zmq::socket_poller_t *poller_backend_only = NULL; // Only 'ZMQ_POLLIN' and 'ZMQ_POLLOUT' on 'backend_'. if (frontend_ != backend_) { poller_send_blocked = new (std::nothrow) zmq::socket_poller_t; // All except 'ZMQ_POLLIN' on 'backend_'. poller_both_blocked = new (std::nothrow) zmq::socket_poller_t; // All except 'ZMQ_POLLIN' on both 'frontend_' and 'backend_'. poller_frontend_only = new (std::nothrow) zmq::socket_poller_t; // Only 'ZMQ_POLLIN' and 'ZMQ_POLLOUT' on 'frontend_'. poller_backend_only = new (std::nothrow) zmq::socket_poller_t; // Only 'ZMQ_POLLIN' and 'ZMQ_POLLOUT' on 'backend_'. frontend_equal_to_backend = false; } else frontend_equal_to_backend = true; if (poller_all == NULL || poller_in == NULL || poller_control == NULL || poller_receive_blocked == NULL || ((poller_send_blocked == NULL || poller_both_blocked == NULL) && !frontend_equal_to_backend)) { PROXY_CLEANUP (); return close_and_return (&msg, -1); } zmq::socket_poller_t *poller_wait = poller_in; // Poller for blocking wait, initially all 'ZMQ_POLLIN'. // Register 'frontend_' and 'backend_' with pollers. rc = poller_all->add (frontend_, NULL, ZMQ_POLLIN | ZMQ_POLLOUT); // Everything. CHECK_RC_EXIT_ON_FAILURE (); rc = poller_in->add (frontend_, NULL, ZMQ_POLLIN); // All 'ZMQ_POLLIN's. CHECK_RC_EXIT_ON_FAILURE (); if (frontend_equal_to_backend) { // If frontend_==backend_ 'poller_send_blocked' and 'poller_receive_blocked' are the same, // so we don't need 'poller_send_blocked'. We need only 'poller_receive_blocked'. // We also don't need 'poller_both_blocked', no need to initialize it. 
rc = poller_receive_blocked->add (frontend_, NULL, ZMQ_POLLOUT); CHECK_RC_EXIT_ON_FAILURE (); } else { rc = poller_all->add (backend_, NULL, ZMQ_POLLIN | ZMQ_POLLOUT); // Everything. CHECK_RC_EXIT_ON_FAILURE (); rc = poller_in->add (backend_, NULL, ZMQ_POLLIN); // All 'ZMQ_POLLIN's. CHECK_RC_EXIT_ON_FAILURE (); rc = poller_both_blocked->add (frontend_, NULL, ZMQ_POLLOUT); // Waiting only for 'ZMQ_POLLOUT'. CHECK_RC_EXIT_ON_FAILURE (); rc = poller_both_blocked->add (backend_, NULL, ZMQ_POLLOUT); // Waiting only for 'ZMQ_POLLOUT'. CHECK_RC_EXIT_ON_FAILURE (); rc = poller_send_blocked->add (backend_, NULL, ZMQ_POLLOUT); // All except 'ZMQ_POLLIN' on 'backend_'. CHECK_RC_EXIT_ON_FAILURE (); rc = poller_send_blocked->add (frontend_, NULL, ZMQ_POLLIN | ZMQ_POLLOUT); // All except 'ZMQ_POLLIN' on 'backend_'. CHECK_RC_EXIT_ON_FAILURE (); rc = poller_receive_blocked->add (frontend_, NULL, ZMQ_POLLOUT); // All except 'ZMQ_POLLIN' on 'frontend_'. CHECK_RC_EXIT_ON_FAILURE (); rc = poller_receive_blocked->add (backend_, NULL, ZMQ_POLLIN | ZMQ_POLLOUT); // All except 'ZMQ_POLLIN' on 'frontend_'. CHECK_RC_EXIT_ON_FAILURE (); rc = poller_frontend_only->add (frontend_, NULL, ZMQ_POLLIN | ZMQ_POLLOUT); CHECK_RC_EXIT_ON_FAILURE (); rc = poller_backend_only->add (backend_, NULL, ZMQ_POLLIN | ZMQ_POLLOUT); CHECK_RC_EXIT_ON_FAILURE (); } // Register 'control_' with pollers. if (control_ != NULL) { rc = poller_all->add (control_, NULL, ZMQ_POLLIN); CHECK_RC_EXIT_ON_FAILURE (); rc = poller_in->add (control_, NULL, ZMQ_POLLIN); CHECK_RC_EXIT_ON_FAILURE (); rc = poller_control->add (control_, NULL, ZMQ_POLLIN); // When proxy is paused we wait only for ZMQ_POLLIN on 'control_' socket. 
CHECK_RC_EXIT_ON_FAILURE (); rc = poller_receive_blocked->add (control_, NULL, ZMQ_POLLIN); CHECK_RC_EXIT_ON_FAILURE (); if (!frontend_equal_to_backend) { rc = poller_send_blocked->add (control_, NULL, ZMQ_POLLIN); CHECK_RC_EXIT_ON_FAILURE (); rc = poller_both_blocked->add (control_, NULL, ZMQ_POLLIN); CHECK_RC_EXIT_ON_FAILURE (); rc = poller_frontend_only->add (control_, NULL, ZMQ_POLLIN); CHECK_RC_EXIT_ON_FAILURE (); rc = poller_backend_only->add (control_, NULL, ZMQ_POLLIN); CHECK_RC_EXIT_ON_FAILURE (); } } int i; bool request_processed, reply_processed; while (state != terminated) { // Blocking wait initially only for 'ZMQ_POLLIN' - 'poller_wait' points to 'poller_in'. // If one of receiving end's queue is full ('ZMQ_POLLOUT' not available), // 'poller_wait' is pointed to 'poller_receive_blocked', 'poller_send_blocked' or 'poller_both_blocked'. rc = poller_wait->wait (events, 3, -1); if (rc < 0 && errno == EAGAIN) rc = 0; CHECK_RC_EXIT_ON_FAILURE (); // Some of events waited for by 'poller_wait' have arrived, now poll for everything without blocking. rc = poller_all->wait (events, 3, 0); if (rc < 0 && errno == EAGAIN) rc = 0; CHECK_RC_EXIT_ON_FAILURE (); // Process events. for (i = 0; i < rc; i++) { if (events [i].socket == frontend_) { frontend_in = (events [i].events & ZMQ_POLLIN) != 0; frontend_out = (events [i].events & ZMQ_POLLOUT) != 0; } else // This 'if' needs to be after check for 'frontend_' in order never // to be reached in case frontend_==backend_, so we ensure backend_in=false in that case. if (events [i].socket == backend_) { backend_in = (events [i].events & ZMQ_POLLIN) != 0; backend_out = (events [i].events & ZMQ_POLLOUT) != 0; } else if (events [i].socket == control_) control_in = (events [i].events & ZMQ_POLLIN) != 0; } // Process a control command if any. 
if (control_in) { rc = control_->recv (&msg, 0); CHECK_RC_EXIT_ON_FAILURE (); rc = control_->getsockopt (ZMQ_RCVMORE, &more, &moresz); if (unlikely (rc < 0) || more) { PROXY_CLEANUP (); return close_and_return (&msg, -1); } // Copy message to capture socket if any. rc = capture (capture_, msg); CHECK_RC_EXIT_ON_FAILURE (); if (msg.size () == 5 && memcmp (msg.data (), "PAUSE", 5) == 0) { state = paused; poller_wait = poller_control; } else if (msg.size () == 6 && memcmp (msg.data (), "RESUME", 6) == 0) { state = active; poller_wait = poller_in; } else { if (msg.size () == 9 && memcmp (msg.data (), "TERMINATE", 9) == 0) state = terminated; else { #ifdef ZMQ_BUILD_DRAFT_API if (msg.size () == 10 && memcmp (msg.data (), "STATISTICS", 10) == 0) { rc = reply_stats(control_, &frontend_stats, &backend_stats); CHECK_RC_EXIT_ON_FAILURE (); } else { #endif // This is an API error, we assert puts ("E: invalid command sent to proxy"); zmq_assert (false); #ifdef ZMQ_BUILD_DRAFT_API } #endif } } control_in = false; } if (state == active) { // Process a request, 'ZMQ_POLLIN' on 'frontend_' and 'ZMQ_POLLOUT' on 'backend_'. // In case of frontend_==backend_ there's no 'ZMQ_POLLOUT' event. if (frontend_in && (backend_out || frontend_equal_to_backend)) { rc = forward (frontend_, &frontend_stats, backend_, &backend_stats, capture_, msg); CHECK_RC_EXIT_ON_FAILURE (); request_processed = true; frontend_in = backend_out = false; } else request_processed = false; // Process a reply, 'ZMQ_POLLIN' on 'backend_' and 'ZMQ_POLLOUT' on 'frontend_'. // If 'frontend_' and 'backend_' are the same this is not needed because previous processing // covers all of the cases. 'backend_in' is always false if frontend_==backend_ due to // design in 'for' event processing loop. 
if (backend_in && frontend_out) { rc = forward (backend_, &backend_stats, frontend_, &frontend_stats, capture_, msg); CHECK_RC_EXIT_ON_FAILURE (); reply_processed = true; backend_in = frontend_out = false; } else reply_processed = false; if (request_processed || reply_processed) { // If request/reply is processed that means we had at least one 'ZMQ_POLLOUT' event. // Enable corresponding 'ZMQ_POLLIN' for blocking wait if any was disabled. if (poller_wait != poller_in) { if (request_processed) { // 'frontend_' -> 'backend_' if (poller_wait == poller_both_blocked) poller_wait = poller_send_blocked; else if (poller_wait == poller_receive_blocked || poller_wait == poller_frontend_only) poller_wait = poller_in; } if (reply_processed) { // 'backend_' -> 'frontend_' if (poller_wait == poller_both_blocked) poller_wait = poller_receive_blocked; else if (poller_wait == poller_send_blocked || poller_wait == poller_backend_only) poller_wait = poller_in; } } } else { // No requests have been processed, there were no 'ZMQ_POLLIN' with corresponding 'ZMQ_POLLOUT' events. // That means that out queue(s) is/are full or one out queue is full and second one has no messages to process. // Disable receiving 'ZMQ_POLLIN' for sockets for which there's no 'ZMQ_POLLOUT', // or wait only on both 'backend_''s or 'frontend_''s 'ZMQ_POLLIN' and 'ZMQ_POLLOUT'. if (frontend_in) { if (frontend_out) // If frontend_in and frontend_out are true, obviously backend_in and backend_out are both false. // In that case we need to wait for both 'ZMQ_POLLIN' and 'ZMQ_POLLOUT' only on 'backend_'. // We'll never get here in case of frontend_==backend_ because then frontend_out will always be false. 
poller_wait = poller_backend_only; else { if (poller_wait == poller_send_blocked) poller_wait = poller_both_blocked; else if (poller_wait == poller_in) poller_wait = poller_receive_blocked; } } if (backend_in) { // Will never be reached if frontend_==backend_, 'backend_in' will // always be false due to design in 'for' event processing loop. if (backend_out) // If backend_in and backend_out are true, obviously frontend_in and frontend_out are both false. // In that case we need to wait for both 'ZMQ_POLLIN' and 'ZMQ_POLLOUT' only on 'frontend_'. poller_wait = poller_frontend_only; else { if (poller_wait == poller_receive_blocked) poller_wait = poller_both_blocked; else if (poller_wait == poller_in) poller_wait = poller_send_blocked; } } } } } PROXY_CLEANUP (); return close_and_return (&msg, 0); }
// Routes an incoming DHT reply (or error) message to the observer that
// issued the matching request.
//
// The pending transaction is looked up by the 16-bit transaction id from
// the message's "t" field; the sender's address must match the address
// the request was sent to. The "r"/"e" payload and the 20-byte node id
// are then validated (optionally enforcing the node-id/IP binding when
// settings.enforce_node_id is set) before the observer gets the reply and
// the node is reported, together with its round-trip time, to the
// routing table.
//
// Returns true only when a valid reply reached its observer and the node
// was accepted by the routing table; false otherwise.
bool rpc_manager::incoming(msg const& m, node_id* id
	, libtorrent::dht_settings const& settings)
{
	INVARIANT_CHECK;

	if (m_destructing) return false;

	// we only deal with replies and errors, not queries
	TORRENT_ASSERT(m.message.dict_find_string_value("y") == "r"
		|| m.message.dict_find_string_value("y") == "e");

	// if we don't have the transaction id in our
	// request list, ignore the packet

	std::string transaction_id = m.message.dict_find_string_value("t");
	if (transaction_id.empty()) return false;

	// transaction ids we generate are 2 bytes; anything else decodes to
	// the invalid id -1 (read_uint16 advances the iterator)
	std::string::const_iterator i = transaction_id.begin();
	int tid = transaction_id.size() != 2 ? -1 : io::read_uint16(i);

	observer_ptr o;
	std::pair<transactions_t::iterator, transactions_t::iterator> range
		= m_transactions.equal_range(tid);
	for (transactions_t::iterator i = range.first; i != range.second; ++i)
	{
		// the reply must come from the address the request was sent to
		if (m.addr.address() != i->second->target_addr()) continue;
		o = i->second;
		// FIX: dropped the dead 'i =' assignment; we break out immediately
		m_transactions.erase(i);
		break;
	}

	if (!o)
	{
#ifndef TORRENT_DISABLE_LOGGING
		m_log->log(dht_logger::rpc_manager, "reply with unknown transaction id size: %d from %s"
			, int(transaction_id.size()), print_endpoint(m.addr).c_str());
#endif
		// this isn't necessarily because the other end is doing
		// something wrong. This can also happen when we restart
		// the node, and we prematurely abort all outstanding
		// requests. Also, this opens up a potential magnification
		// attack.
//		entry e;
//		incoming_error(e, "invalid transaction id");
//		m_sock->send_packet(e, m.addr, 0);
		return false;
	}

	time_point now = clock_type::now();

#ifndef TORRENT_DISABLE_LOGGING
	// append the measured round-trip time for this reply to a log file
	std::ofstream reply_stats("round_trip_ms.log", std::ios::app);
	reply_stats << m.addr << "\t" << total_milliseconds(now - o->sent())
		<< std::endl;
#endif

	bdecode_node ret_ent = m.message.dict_find_dict("r");
	if (!ret_ent)
	{
		// it may be an error
		ret_ent = m.message.dict_find("e");
		o->timeout();
		if (!ret_ent)
		{
			entry e;
			incoming_error(e, "missing 'r' key");
			m_sock->send_packet(e, m.addr, 0);
		}
		return false;
	}

	bdecode_node node_id_ent = ret_ent.dict_find_string("id");
	if (!node_id_ent || node_id_ent.string_length() != 20)
	{
		o->timeout();
		entry e;
		incoming_error(e, "missing 'id' key");
		m_sock->send_packet(e, m.addr, 0);
		return false;
	}

	node_id nid = node_id(node_id_ent.string_ptr());
	if (settings.enforce_node_id && !verify_id(nid, m.addr.address()))
	{
		o->timeout();
		entry e;
		incoming_error(e, "invalid node ID");
		m_sock->send_packet(e, m.addr, 0);
		return false;
	}

#ifndef TORRENT_DISABLE_LOGGING
	// FIX: log the parsed transaction id rather than its size (the original
	// passed transaction_id.size(), an apparent copy-paste from the
	// "unknown transaction id" message above), and pass a void* for the %p
	// conversion as the printf-style format contract requires.
	m_log->log(dht_logger::rpc_manager, "[%p] reply with transaction id: %d from %s"
		, static_cast<void*>(o->algorithm()), tid
		, print_endpoint(m.addr).c_str());
#endif

	o->reply(m);
	*id = nid;

	int rtt = int(total_milliseconds(now - o->sent()));

	// we found an observer for this reply, hence the node is not spoofing
	// add it to the routing table
	return m_table.node_seen(*id, m.addr, rtt);
}