/* Completion callback for the async proxy-resolver lookup started in
 * g_proxy_address_enumerator_next_async(). Stores the resolved proxy
 * list on the private struct, then either starts enumerating addresses
 * for the first proxy or completes the task (on error, or when there is
 * no enumerator to advance to). */
static void
proxy_lookup_cb (GObject      *object,
                 GAsyncResult *result,
                 gpointer      user_data)
{
  GTask *lookup_task = user_data;
  GProxyAddressEnumeratorPrivate *priv = g_task_get_task_data (lookup_task);

  /* drop any error left over from a previous step before finishing */
  g_clear_error (&priv->last_error);
  priv->proxies = g_proxy_resolver_lookup_finish (G_PROXY_RESOLVER (object),
                                                  result,
                                                  &priv->last_error);
  priv->next_proxy = priv->proxies;

  if (priv->last_error == NULL)
    {
      next_enumerator (priv);
      if (priv->addr_enum != NULL)
        {
          /* chain into address enumeration; the task completes there */
          g_socket_address_enumerator_next_async (priv->addr_enum,
                                                  g_task_get_cancellable (lookup_task),
                                                  address_enumerate_cb,
                                                  lookup_task);
          return;
        }
    }

  /* error, or nothing to enumerate: finish the task now */
  complete_async (lookup_task);
}
/* Advance to the next proxy in priv->proxies. Tears down the address
 * enumerator and cached destination IPs belonging to the previous proxy,
 * then either kicks off enumeration for the next proxy or completes the
 * task when the list is exhausted. */
static void
next_proxy (GTask *task)
{
  GProxyAddressEnumeratorPrivate *priv = g_task_get_task_data (task);

  /* no more proxies left: we are done */
  if (*priv->next_proxy == NULL)
    {
      complete_async (task);
      return;
    }

  /* release state tied to the proxy we just finished with */
  g_object_unref (priv->addr_enum);
  priv->addr_enum = NULL;

  if (priv->dest_ips != NULL)
    {
      g_resolver_free_addresses (priv->dest_ips);
      priv->dest_ips = NULL;
    }

  next_enumerator (priv);

  if (priv->addr_enum != NULL)
    {
      /* continue asynchronously; the task completes in the callback */
      g_socket_address_enumerator_next_async (priv->addr_enum,
                                              g_task_get_cancellable (task),
                                              address_enumerate_cb,
                                              task);
      return;
    }

  complete_async (task);
}
// Timer callback for UPnP device discovery retries.
// NOTE(review): this definition is truncated in the visible source — the
// for-loop and function bodies are not closed here; the remainder lives
// outside this view.
void upnp::resend_request(error_code const& ec)
{
#if defined TORRENT_ASIO_DEBUGGING
	complete_async("upnp::resend_request");
#endif
	if (ec) return;

	// keep the upnp object alive for the duration of this handler
	boost::shared_ptr<upnp> me(self());

	mutex::scoped_lock l(m_mutex);

	if (m_closing) return;

	// keep broadcasting discovery messages: up to 12 times while no
	// device has answered, otherwise up to 4 times
	if (m_retry_count < 12
		&& (m_devices.empty() || m_retry_count < 4))
	{
		discover_device_impl(l);
		return;
	}

	// retries exhausted and nothing answered: give up
	if (m_devices.empty())
	{
		disable(errors::no_router, l);
		return;
	}

	for (std::set<rootdevice>::iterator i = m_devices.begin()
		, end(m_devices.end()); i != end; ++i)
	{
		if (i->control_url.empty() && !i->upnp_connection && !i->disabled)
		{
			// we don't have a WANIP or WANPPP url for this device,
			// ask for it
			// const_cast is needed because std::set iterators are const;
			// the mutated fields do not affect the set ordering
			rootdevice& d = const_cast<rootdevice&>(*i);
			TORRENT_ASSERT(d.magic == 1337);
			TORRENT_TRY
			{
				char msg[500];
				snprintf(msg, sizeof(msg), "connecting to: %s", d.url.c_str());
				log(msg, l);
				if (d.upnp_connection) d.upnp_connection->close();
				// fetch the device description XML; on_upnp_xml parses it
				d.upnp_connection.reset(new http_connection(m_io_service
					, m_resolver
					, boost::bind(&upnp::on_upnp_xml, self(), _1, _2
					, boost::ref(d), _5)));
				d.upnp_connection->get(d.url, seconds(30), 1);
			}
			TORRENT_CATCH (std::exception& exc)
			{
				TORRENT_DECLARE_DUMMY(std::exception, exc);
				char msg[500];
				snprintf(msg, sizeof(msg), "connection failed to: %s %s", d.url.c_str(), exc.what());
				log(msg, l);
				// don't retry a device we can't even connect to
				d.disabled = true;
			}
		}
void i2p_stream::start_read_line(error_code const& e, boost::shared_ptr<handler_type> h) { TORRENT_ASSERT(m_magic == 0x1337); #if defined TORRENT_ASIO_DEBUGGING complete_async("i2p_stream::start_read_line"); #endif if (handle_error(e, h)) return; #if defined TORRENT_ASIO_DEBUGGING add_outstanding_async("i2p_stream::read_line"); #endif m_buffer.resize(1); async_read(m_sock, boost::asio::buffer(m_buffer) , boost::bind(&i2p_stream::read_line, this, _1, h)); }
// Called once the SAM session handshake has finished. On success,
// looks up our own destination ("ME") so the local endpoint can be
// recorded; on failure the user handler is invoked with the error.
void i2p_connection::on_sam_connect(error_code const& ec
	, i2p_stream::handler_type const& h
	, boost::shared_ptr<i2p_stream>)
{
#if defined TORRENT_ASIO_DEBUGGING
	complete_async("i2p_stream::on_sam_connect");
#endif
	m_state = sam_idle;

	if (ec)
	{
		h(ec);
		return;
	}

	// "ME" resolves to our own I2P destination; the user handler is
	// forwarded so it fires once the local endpoint is set
	do_name_lookup("ME", boost::bind(&i2p_connection::set_local_endpoint
		, this, _1, _2, h));
}
// The TCP connection to the SAM bridge is established: send the
// protocol version handshake and arrange to read the HELLO REPLY line.
void i2p_stream::connected(error_code const& e, boost::shared_ptr<handler_type> h)
{
	TORRENT_ASSERT(m_magic == 0x1337);
#if defined TORRENT_ASIO_DEBUGGING
	complete_async("i2p_stream::connected");
#endif
	if (handle_error(e, h)) return;

	// greet the SAM bridge; sizeof - 1 excludes the terminating NUL
	static const char handshake[] = "HELLO VERSION MIN=3.0 MAX=3.0\n";
	m_state = read_hello_response;
#if defined TORRENT_ASIO_DEBUGGING
	add_outstanding_async("i2p_stream::start_read_line");
#endif
	async_write(m_sock, boost::asio::buffer(handshake, sizeof(handshake) - 1)
		, boost::bind(&i2p_stream::start_read_line, this, _1, h));
	// fprintf(stderr, ">>> %s", handshake);
}
/* Async "next" implementation for the proxy address enumerator.
 * On the first call the proxy list has not been resolved yet, so a
 * proxy-resolver lookup is started. On later calls we either return the
 * already-known result, advance the inner address enumerator, or finish
 * the task when there is nothing left to enumerate. */
static void
g_proxy_address_enumerator_next_async (GSocketAddressEnumerator *enumerator,
                                       GCancellable             *cancellable,
                                       GAsyncReadyCallback       callback,
                                       gpointer                  user_data)
{
  GProxyAddressEnumeratorPrivate *priv = GET_PRIVATE (enumerator);
  GTask *next_task = g_task_new (enumerator, cancellable, callback, user_data);

  g_task_set_task_data (next_task, priv, NULL);

  if (priv->proxies == NULL)
    {
      /* first call: resolve which proxies apply to dest_uri */
      GProxyResolver *resolver = g_proxy_resolver_get_default ();
      g_proxy_resolver_lookup_async (resolver,
                                     priv->dest_uri,
                                     cancellable,
                                     proxy_lookup_cb,
                                     next_task);
    }
  else if (priv->addr_enum != NULL && priv->proxy_address != NULL)
    {
      /* a result is already available from a previous step */
      return_result (next_task);
    }
  else if (priv->addr_enum != NULL)
    {
      /* keep walking the current proxy's addresses */
      g_socket_address_enumerator_next_async (priv->addr_enum,
                                              cancellable,
                                              address_enumerate_cb,
                                              next_task);
    }
  else
    {
      /* nothing left to enumerate */
      complete_async (next_task);
    }
}
// Completion handler for an asynchronous hostname lookup.
// On error the user callback is invoked with an empty address list.
// On success the results are stored in the DNS cache under `hostname`
// (refreshing its last_seen timestamp) and handed to the callback.
// The cache is bounded: once it exceeds m_max_size, the entry that has
// gone unused the longest is evicted.
//
// Fix: the eviction loop previously reused `i` as its iterator name,
// shadowing the tcp::resolver::iterator parameter above — renamed to
// `ci` to remove the shadow (bugprone; behavior unchanged).
void resolver::on_lookup(error_code const& ec, tcp::resolver::iterator i
	, resolver_interface::callback_t h, std::string hostname)
{
#if defined TORRENT_ASIO_DEBUGGING
	complete_async("resolver::on_lookup");
#endif
	if (ec)
	{
		std::vector<address> empty;
		h(ec, empty);
		return;
	}

	// insert (or refresh) the cache entry for this hostname
	dns_cache_entry& ce = m_cache[hostname];
	time_point now = aux::time_now();
	ce.last_seen = now;
	ce.addresses.clear();
	// a default-constructed iterator marks the end of the result range
	while (i != tcp::resolver::iterator())
	{
		ce.addresses.push_back(i->endpoint().address());
		++i;
	}

	h(ec, ce.addresses);

	// if m_cache grows too big, weed out the
	// oldest entries
	if (m_cache.size() > m_max_size)
	{
		cache_t::iterator oldest = m_cache.begin();
		for (cache_t::iterator ci = m_cache.begin(); ci != m_cache.end(); ++ci)
		{
			if (ci->second.last_seen < oldest->second.last_seen)
				oldest = ci;
		}

		// remove the oldest entry
		m_cache.erase(oldest);
	}
}
// Watchdog timer callback. Checks whether the read timeout or the
// overall completion timeout has expired (a value of 0 means that
// timeout is disabled); if either has — or the timer completed with an
// error — on_timeout() is invoked. Otherwise the timer is re-armed for
// the next earliest deadline.
void timeout_handler::timeout_callback(error_code const& error)
{
#if defined TORRENT_ASIO_DEBUGGING
	complete_async("timeout_handler::timeout_callback");
#endif
	if (m_abort) return;

	ptime now = time_now_hires();
	// elapsed time since the last read, and since the operation started
	time_duration receive_timeout = now - m_read_time;
	time_duration completion_timeout = now - m_start_time;

	if ((m_read_timeout
			&& m_read_timeout <= total_seconds(receive_timeout))
		|| (m_completion_timeout
			&& m_completion_timeout <= total_seconds(completion_timeout))
		|| error)
	{
		on_timeout(error);
		return;
	}

	// compute seconds until the next deadline, measured from m_read_time
	// (which is what expires_at() below is relative to)
	int timeout = 0;
	if (m_read_timeout > 0) timeout = m_read_timeout;
	if (m_completion_timeout > 0)
	{
		// the completion deadline is relative to m_start_time, so shift
		// it by how long we had already run at the last read; take the
		// sooner of the two deadlines when both are enabled
		timeout = timeout == 0
			? int(m_completion_timeout - total_seconds(m_read_time - m_start_time))
			: (std::min)(int(m_completion_timeout - total_seconds(m_read_time - m_start_time)), timeout);
	}

#if defined TORRENT_ASIO_DEBUGGING
	add_outstanding_async("timeout_handler::timeout_callback");
#endif
	error_code ec;
	m_timeout.expires_at(m_read_time + seconds(timeout), ec);
	m_timeout.async_wait(
		boost::bind(&timeout_handler::timeout_callback, self(), _1));
}
// Timer callback for the connection queue. Moves all connecting
// entries whose deadline has passed onto a local list, invokes their
// timeout callbacks WITHOUT holding the mutex, then re-arms the timer
// for the next pending deadline and tries to start new connections.
void connection_queue::on_timeout(error_code const& e)
{
#if defined TORRENT_ASIO_DEBUGGING
	complete_async("connection_queue::on_timeout");
#endif

	mutex_t::scoped_lock l(m_mutex);
	--m_num_timers;

	INVARIANT_CHECK;
#ifdef TORRENT_DEBUG
	// guards against re-entering this function recursively
	function_guard guard_(m_in_timeout_function);
#endif

	TORRENT_ASSERT(!e || e == error::operation_aborted);

	// if there was an error, it's most likely operation aborted,
	// we should just quit. However, in case there are still connections
	// in connecting state, and there are no other timer invocations
	// we need to stick around still.
	if (e && (m_num_connecting == 0 || m_num_timers > 0)) return;

	ptime next_expire = max_time();
	// add 100ms slack so entries expiring imminently are swept now
	// instead of scheduling another wake-up for them
	ptime now = time_now_hires() + milliseconds(100);
	std::list<entry> timed_out;
	for (std::list<entry>::iterator i = m_queue.begin();
		!m_queue.empty() && i != m_queue.end();)
	{
		if (i->connecting && i->expires < now)
		{
			// splice [j, i) (a single element) over to timed_out;
			// advance i first so it stays valid
			std::list<entry>::iterator j = i;
			++i;
			timed_out.splice(timed_out.end(), m_queue, j, i);
			--m_num_connecting;
			continue;
		}
		// track the earliest remaining deadline for the re-arm below
		if (i->connecting && i->expires < next_expire) next_expire = i->expires;
		++i;
	}

	// we don't want to call the timeout callback while we're locked
	// since that is a recipe for dead-locks
	l.unlock();

	for (std::list<entry>::iterator i = timed_out.begin()
		, end(timed_out.end()); i != end; ++i)
	{
		TORRENT_ASSERT(i->connecting);
		TORRENT_ASSERT(i->ticket != -1);
		// user callbacks may throw; never let that escape the handler
		TORRENT_TRY {
			i->on_timeout();
		} TORRENT_CATCH(std::exception&) {}
	}

	l.lock();

	if (next_expire < max_time())
	{
#if defined TORRENT_ASIO_DEBUGGING
		add_outstanding_async("connection_queue::on_timeout");
#endif
		error_code ec;
		m_timer.expires_at(next_expire, ec);
		m_timer.async_wait(boost::bind(&connection_queue::on_timeout, this, _1));
		++m_num_timers;
	}

	// freed-up slots may allow queued connections to start
	try_connect(l);
}
// Reads and parses one line of a SAM bridge response. Bytes are pulled
// from the socket one at a time until '\n'; the line is then validated
// against the two keywords expected for the current state (e.g.
// "HELLO REPLY", "STREAM STATUS"), its KEY=VALUE pairs are parsed, and
// the state machine advances: either issuing the next SAM command,
// invoking the user handler, or waiting for the incoming-connection
// destination line after an accept.
void i2p_stream::read_line(error_code const& e, boost::shared_ptr<handler_type> h)
{
	TORRENT_ASSERT(m_magic == 0x1337);
#if defined TORRENT_ASIO_DEBUGGING
	complete_async("i2p_stream::read_line");
#endif
	if (handle_error(e, h)) return;

	int read_pos = m_buffer.size();

	// look for \n which means end of the response
	if (m_buffer[read_pos - 1] != '\n')
	{
#if defined TORRENT_ASIO_DEBUGGING
		add_outstanding_async("i2p_stream::read_line");
#endif
		// read another byte from the socket
		m_buffer.resize(read_pos + 1);
		async_read(m_sock, boost::asio::buffer(&m_buffer[read_pos], 1)
			, boost::bind(&i2p_stream::read_line, this, _1, h));
		return;
	}
	// overwrite the '\n' so the buffer is a C string
	m_buffer[read_pos - 1] = 0;

	if (m_command == cmd_incoming)
	{
		// this is the line containing the destination
		// of the incoming connection in an accept call
		m_dest = &m_buffer[0];
		(*h)(e);
		// swap with a temporary to actually release the buffer's memory
		std::vector<char>().swap(m_buffer);
		return;
	}

	error_code invalid_response(i2p_error::parse_failed
		, get_i2p_category());

	// null-terminate the string and parse it
	m_buffer.push_back(0);
	char* ptr = &m_buffer[0];
	char* next = ptr;

	// the first two tokens of the response, dictated by which
	// command we most recently sent
	char const* expect1 = 0;
	char const* expect2 = 0;

	switch (m_state)
	{
		case read_hello_response:
			expect1 = "HELLO";
			expect2 = "REPLY";
			break;
		case read_connect_response:
		case read_accept_response:
			expect1 = "STREAM";
			expect2 = "STATUS";
			break;
		case read_session_create_response:
			expect1 = "SESSION";
			expect2 = "STATUS";
			break;
		case read_name_lookup_response:
			expect1 = "NAMING";
			expect2 = "REPLY";
			break;
	}

	// fprintf(stderr, "<<< %s\n", &m_buffer[0]);
	// verify the two expected keywords; anything else is a protocol error
	ptr = string_tokenize(next, ' ', &next);
	if (ptr == 0 || expect1 == 0 || strcmp(expect1, ptr))
	{
		handle_error(invalid_response, h);
		return;
	}

	ptr = string_tokenize(next, ' ', &next);
	if (ptr == 0 || expect2 == 0 || strcmp(expect2, ptr))
	{
		handle_error(invalid_response, h);
		return;
	}

	int result = 0;
	// char const* message = 0;
	// float version = 3.0f;

	// parse the remaining KEY=VALUE pairs
	for(;;)
	{
		char* name = string_tokenize(next, '=', &next);
		if (name == 0) break;
		// fprintf(stderr, "name=\"%s\"\n", name);
		char* ptr2 = string_tokenize(next, ' ', &next);
		if (ptr2 == 0)
		{
			handle_error(invalid_response, h);
			return;
		}
		// fprintf(stderr, "value=\"%s\"\n", ptr2);

		if (strcmp("RESULT", name) == 0)
		{
			// map the textual RESULT code to our error enum
			if (strcmp("OK", ptr2) == 0)
				result = i2p_error::no_error;
			else if (strcmp("CANT_REACH_PEER", ptr2) == 0)
				result = i2p_error::cant_reach_peer;
			else if (strcmp("I2P_ERROR", ptr2) == 0)
				result = i2p_error::i2p_error;
			else if (strcmp("INVALID_KEY", ptr2) == 0)
				result = i2p_error::invalid_key;
			else if (strcmp("INVALID_ID", ptr2) == 0)
				result = i2p_error::invalid_id;
			else if (strcmp("TIMEOUT", ptr2) == 0)
				result = i2p_error::timeout;
			else if (strcmp("KEY_NOT_FOUND", ptr2) == 0)
				result = i2p_error::key_not_found;
			else if (strcmp("DUPLICATED_ID", ptr2) == 0)
				result = i2p_error::duplicated_id;
			else
				result = i2p_error::num_errors; // unknown error
		}
		else if (strcmp("MESSAGE", name) == 0)
		{
			// message = ptr2;
		}
		else if (strcmp("VERSION", name) == 0)
		{
			// version = float(atof(ptr2));
		}
		else if (strcmp("VALUE", name) == 0)
		{
			m_name_lookup = ptr2;
		}
		else if (strcmp("DESTINATION", name) == 0)
		{
			m_dest = ptr2;
		}
	}

	error_code ec(result, get_i2p_category());
	switch (result)
	{
		// note: invalid_key is treated as non-fatal here and handled
		// by the state dispatch below
		case i2p_error::no_error:
		case i2p_error::invalid_key:
			break;
		default:
		{
			handle_error (ec, h);
			return;
		}
	}

	switch (m_state)
	{
	case read_hello_response:
		// handshake done; issue the command we were created for
		switch (m_command)
		{
			case cmd_create_session:
				send_session_create(h);
				break;
			case cmd_accept:
				send_accept(h);
				break;
			case cmd_connect:
				send_connect(h);
				break;
			default:
				(*h)(e);
				std::vector<char>().swap(m_buffer);
		}
		break;
	case read_connect_response:
	case read_session_create_response:
	case read_name_lookup_response:
		// terminal states: report the parsed result to the handler
		(*h)(ec);
		std::vector<char>().swap(m_buffer);
		break;
	case read_accept_response:
		// the SAM bridge is waiting for an incoming
		// connection.
		// wait for one more line containing
		// the destination of the remote peer
		m_command = cmd_incoming;
		m_buffer.resize(1);
#if defined TORRENT_ASIO_DEBUGGING
		add_outstanding_async("i2p_stream::read_line");
#endif
		async_read(m_sock, boost::asio::buffer(m_buffer)
			, boost::bind(&i2p_stream::read_line, this, _1, h));
		break;
	}
	return;
}