// Send a raw uTP packet of "len" bytes over the shared UDP socket to "ep".
// "flags" may contain utp_socket_manager::dont_fragment to request that the
// IP don't-fragment bit be set for this single send (presumably used for
// path-MTU probing -- TODO confirm against callers).
void utp_socket_manager::send_packet(udp::endpoint const& ep, char const* p
	, int len, error_code& ec, int flags)
{
#if !defined TORRENT_HAS_DONT_FRAGMENT && !defined TORRENT_DEBUG_MTU
	// flags is only consulted by the DF / debug-MTU paths below
	TORRENT_UNUSED(flags);
#endif

	// fail fast if the socket has been closed
	if (!m_sock.is_open())
	{
		ec = boost::asio::error::operation_aborted;
		return;
	}

#ifdef TORRENT_DEBUG_MTU
	// drop packets that exceed the debug MTU
	if ((flags & dont_fragment) && len > TORRENT_DEBUG_MTU)
		return;
#endif

#ifdef TORRENT_HAS_DONT_FRAGMENT
	error_code tmp;
	if (flags & utp_socket_manager::dont_fragment)
	{
		// turn on the DF bit for this send only; restored right after
		m_sock.set_option(libtorrent::dont_fragment(true), tmp);
		TORRENT_ASSERT_VAL(!tmp, tmp.message());
	}
#endif
	m_sock.send(ep, p, len, ec);
#ifdef TORRENT_HAS_DONT_FRAGMENT
	if (flags & utp_socket_manager::dont_fragment)
	{
		// restore the default (fragmentation allowed)
		m_sock.set_option(libtorrent::dont_fragment(false), tmp);
		TORRENT_ASSERT_VAL(!tmp, tmp.message());
	}
#endif
}
void packet_buffer_impl::reserve(std::size_t size) { INVARIANT_CHECK; TORRENT_ASSERT_VAL(size <= 0xffff, size); std::size_t new_size = m_capacity == 0 ? 16 : m_capacity; while (new_size < size) new_size <<= 1; void** new_storage = static_cast<void**>(malloc(sizeof(void*) * new_size)); #ifndef BOOST_NO_EXCEPTIONS if (new_storage == nullptr) throw std::bad_alloc(); #endif for (index_type i = 0; i < new_size; ++i) new_storage[i] = 0; for (index_type i = m_first; i < (m_first + m_capacity); ++i) new_storage[i & (new_size - 1)] = m_storage[i & (m_capacity - 1)]; free(m_storage); m_storage = new_storage; m_capacity = new_size; }
// Read the contents of "slot" (piece_size bytes) into a temporary disk
// buffer and verify it as a signed post. On return, *hash_ok says whether
// verification passed and *post_flags receives whatever flags
// acceptSignedPost reports. Returns the number of bytes read, or 0 if the
// storage is in an error state.
int piece_manager::hash_for_slot(int slot, bool *hash_ok, boost::uint32_t *post_flags, int piece_size)
{
	TORRENT_ASSERT_VAL(!error(), error());
	*hash_ok = false;
	int num_read = 0;
	int slot_size = piece_size;

	file::iovec_t buf;
	// RAII: the buffer is returned to the pool when "holder" goes out of scope
	disk_buffer_holder holder(*m_storage->disk_pool()
		, m_storage->disk_pool()->allocate_buffer("hash temp"));
	buf.iov_base = holder.get();
	buf.iov_len = slot_size;

	// deliberately pass in 0 as flags, to disable random_access
	int ret = m_storage->readv(&buf, slot, 0, 1, 0);
	//printf("piece_manager::hash_for_slot %d ret=%d\n", slot, ret);
	if (ret > 0) num_read += ret;
	// TODO: if the read fails, set error and exit immediately

	if (ret > 0)
	{
		// NOTE(review): errmsg is discarded here; presumably only the
		// boolean verdict matters -- confirm against acceptSignedPost
		std::string errmsg;
		*hash_ok = acceptSignedPost((char const*)buf.iov_base, ret
			, m_info->name(), slot, errmsg, post_flags);
	}

	// the read may have flagged an error on the storage; report 0 then
	if (error()) return 0;

	return num_read;
}
void save_struct(entry& e, void const* s, bencode_map_entry const* m, int num, void const* def) { if (e.type() != entry::dictionary_t) e = entry(entry::dictionary_t); for (int i = 0; i < num; ++i) { char const* key = m[i].name; void const* src = ((char*)s) + m[i].offset; if (def) { // if we have a default value for this field // and it is the default, don't save it void const* default_value = ((char*)def) + m[i].offset; switch (m[i].type) { case std_string: if (*((std::string*)src) == *((std::string*)default_value)) continue; break; case character: if (*((char*)src) == *((char*)default_value)) continue; break; case integer: if (*((int*)src) == *((int*)default_value)) continue; break; case size_integer: if (*((size_type*)src) == *((size_type*)default_value)) continue; break; case time_integer: if (*((time_t*)src) == *((time_t*)default_value)) continue; break; case floating_point: if (*((float*)src) == *((float*)default_value)) continue; break; case boolean: if (*((bool*)src) == *((bool*)default_value)) continue; break; default: TORRENT_ASSERT(false); } } entry& val = e[key]; TORRENT_ASSERT_VAL(val.type() == entry::undefined_t, val.type()); switch (m[i].type) { case std_string: val = *((std::string*)src); break; case character: val = *((char*)src); break; case integer: val = *((int*)src); break; case size_integer: val = *((size_type*)src); break; case time_integer: val = *((time_t*)src); break; case floating_point: val = size_type(*((float*)src) * 1000.f); break; case boolean: val = *((bool*)src); break; default: TORRENT_ASSERT(false); } } }
// Recursively bencode "e" into the output iterator "out" and return the
// total number of bytes emitted.
int bencode_recursive(OutIt& out, const entry& e)
{
	int written = 0;
	switch (e.type())
	{
	case entry::int_t:
		// integers: i<value>e
		write_char(out, 'i');
		written += write_integer(out, e.integer());
		write_char(out, 'e');
		written += 2;
		break;
	case entry::string_t:
		// strings: <length>:<bytes>
		written += write_integer(out, e.string().length());
		write_char(out, ':');
		written += write_string(e.string(), out);
		written += 1;
		break;
	case entry::list_t:
	{
		// lists: l<item>*e
		write_char(out, 'l');
		entry::list_type::const_iterator it = e.list().begin();
		entry::list_type::const_iterator const end = e.list().end();
		for (; it != end; ++it)
			written += bencode_recursive(out, *it);
		write_char(out, 'e');
		written += 2;
		break;
	}
	case entry::dictionary_t:
	{
		// dictionaries: d(<key><value>)*e, where keys are bencoded strings
		write_char(out, 'd');
		entry::dictionary_type::const_iterator it = e.dict().begin();
		entry::dictionary_type::const_iterator const end = e.dict().end();
		for (; it != end; ++it)
		{
			written += write_integer(out, it->first.length());
			write_char(out, ':');
			written += write_string(it->first, out);
			written += bencode_recursive(out, it->second);
			written += 1;
		}
		write_char(out, 'e');
		written += 2;
		break;
	}
	default:
		// trying to encode a structure with uninitialized values!
		TORRENT_ASSERT_VAL(false, e.type());
		break;
	}
	return written;
}
// Remove and return the entry stored at index "idx" (a 16 bit sequence
// number). Returns 0 if idx falls outside the occupied window. After
// removal, m_first / m_last are advanced / retracted past empty slots so
// they keep pointing at the first and one-past-the-last occupied entries.
void* packet_buffer_impl::remove(index_type idx)
{
	INVARIANT_CHECK;
	// TODO: use compare_less_wrap for this comparison as well
	if (idx >= m_first + m_capacity)
		return 0;

	// idx lies before the window in wrap-around order: nothing stored there
	if (compare_less_wrap(idx, m_first, 0xffff))
		return 0;

	// capacity is a power of two, so (capacity - 1) is a valid index mask
	const int mask = int(m_capacity - 1);
	void* old_value = m_storage[idx & mask];
	m_storage[idx & mask] = 0;

	if (old_value)
	{
		--m_size;
		// buffer is now empty; collapse the window to a zero-length span
		if (m_size == 0) m_last = m_first;
	}

	if (idx == m_first && m_size != 0)
	{
		// we removed the first entry; advance m_first to the next
		// occupied slot
		++m_first;
		for (std::uint32_t i = 0; i < m_capacity; ++i, ++m_first)
			if (m_storage[m_first & mask]) break;
		m_first &= 0xffff;
	}

	if (((idx + 1) & 0xffff) == m_last && m_size != 0)
	{
		// we removed the last entry; retract m_last to one past the
		// previous occupied slot
		--m_last;
		for (std::uint32_t i = 0; i < m_capacity; ++i, --m_last)
			if (m_storage[m_last & mask]) break;
		++m_last;
		m_last &= 0xffff;
	}

	TORRENT_ASSERT_VAL(m_first <= 0xffff, m_first);
	return old_value;
}
void packet_buffer::reserve(std::size_t size) { INVARIANT_CHECK; TORRENT_ASSERT_VAL(size <= 0xffff, size); std::size_t new_size = m_capacity == 0 ? 16 : m_capacity; while (new_size < size) new_size <<= 1; void** new_storage = (void**)malloc(sizeof(void*) * new_size); for (index_type i = 0; i < new_size; ++i) new_storage[i] = 0; for (index_type i = m_first; i < (m_first + m_capacity); ++i) new_storage[i & (new_size - 1)] = m_storage[i & (m_capacity - 1)]; free(m_storage); m_storage = new_storage; m_capacity = new_size; }
// Estimate the progress of the block currently being downloaded from this
// HTTP seed, based on the front outstanding request and how much of its
// body has been received. Returns an empty optional when no request is
// outstanding.
boost::optional<piece_block_progress> http_seed_connection::downloading_piece_progress() const
{
	if (m_requests.empty())
		return boost::optional<piece_block_progress>();

	boost::shared_ptr<torrent> t = associated_torrent().lock();
	TORRENT_ASSERT(t);

	piece_block_progress ret;

	peer_request const& pr = m_requests.front();
	ret.piece_index = pr.piece;
	if (!m_parser.header_finished())
	{
		// no body bytes have arrived yet
		ret.bytes_downloaded = 0;
	}
	else
	{
		// body bytes currently sitting in the receive buffer
		int receive_buffer_size = m_recv_buffer.get().left() - m_parser.body_start();
		// TODO: 1 in chunked encoding mode, this assert won't hold.
		// the chunk headers should be subtracted from the receive_buffer_size
		TORRENT_ASSERT_VAL(receive_buffer_size <= t->block_size(), receive_buffer_size);
		ret.bytes_downloaded = t->block_size() - receive_buffer_size;
	}
	// this is used to make sure that the block_index stays within
	// bounds. If the entire piece is downloaded, the block_index
	// would otherwise point to one past the end
	int correction = ret.bytes_downloaded ? -1 : 0;
	ret.block_index = (pr.start + ret.bytes_downloaded + correction) / t->block_size();
	ret.full_block_bytes = t->block_size();
	// the final block of the final piece may be shorter than block_size
	const int last_piece = t->torrent_file().num_pieces() - 1;
	if (ret.piece_index == last_piece && ret.block_index
		== t->torrent_file().piece_size(last_piece) / t->block_size())
		ret.full_block_bytes = t->torrent_file().piece_size(last_piece) % t->block_size();
	return ret;
}
// Instantiate the correct concrete stream type inside the polymorphic
// socket "s", based on whether uTP is requested (sm != 0), which proxy (if
// any) is configured in "ps", and whether an SSL context is supplied.
// "peer_connection" indicates the socket is for a peer, which may bypass
// the proxy when proxy_peer_connections is off. Returns false only for an
// unrecognized proxy type.
TORRENT_EXPORT bool instantiate_connection(io_service& ios
	, proxy_settings const& ps, socket_type& s
	, void* ssl_context
	, utp_socket_manager* sm
	, bool peer_connection)
{
	if (sm)
	{
		// uTP stream, optionally wrapped in SSL
		utp_stream* str;
#ifdef TORRENT_USE_OPENSSL
		if (ssl_context)
		{
			s.instantiate<ssl_stream<utp_stream> >(ios, ssl_context);
			str = &s.get<ssl_stream<utp_stream> >()->next_layer();
		}
		else
#endif
		{
			s.instantiate<utp_stream>(ios);
			str = s.get<utp_stream>();
		}
		// bind the stream to a new uTP socket implementation
		str->set_impl(sm->new_utp_socket(str));
	}
#if TORRENT_USE_I2P
	else if (ps.type == proxy_settings::i2p_proxy)
	{
		// it doesn't make any sense to try ssl over i2p
		TORRENT_ASSERT(ssl_context == 0);
		s.instantiate<i2p_stream>(ios);
		s.get<i2p_stream>()->set_proxy(ps.hostname, ps.port);
	}
#endif
	else if (ps.type == proxy_settings::none
		|| (peer_connection && !ps.proxy_peer_connections))
	{
		// plain TCP stream, optionally wrapped in SSL
		// stream_socket* str;
#ifdef TORRENT_USE_OPENSSL
		if (ssl_context)
		{
			s.instantiate<ssl_stream<stream_socket> >(ios, ssl_context);
			// str = &s.get<ssl_stream<stream_socket> >()->next_layer();
		}
		else
#endif
		{
			s.instantiate<stream_socket>(ios);
			// str = s.get<stream_socket>();
		}
	}
	else if (ps.type == proxy_settings::http
		|| ps.type == proxy_settings::http_pw)
	{
		// HTTP proxy, with optional username/password authentication
		http_stream* str;
#ifdef TORRENT_USE_OPENSSL
		if (ssl_context)
		{
			s.instantiate<ssl_stream<http_stream> >(ios, ssl_context);
			str = &s.get<ssl_stream<http_stream> >()->next_layer();
		}
		else
#endif
		{
			s.instantiate<http_stream>(ios);
			str = s.get<http_stream>();
		}
		str->set_proxy(ps.hostname, ps.port);
		if (ps.type == proxy_settings::http_pw)
			str->set_username(ps.username, ps.password);
	}
	else if (ps.type == proxy_settings::socks5
		|| ps.type == proxy_settings::socks5_pw
		|| ps.type == proxy_settings::socks4)
	{
		// SOCKS proxy (v4 or v5, with optional v5 authentication)
		socks5_stream* str;
#ifdef TORRENT_USE_OPENSSL
		if (ssl_context)
		{
			s.instantiate<ssl_stream<socks5_stream> >(ios, ssl_context);
			str = &s.get<ssl_stream<socks5_stream> >()->next_layer();
		}
		else
#endif
		{
			s.instantiate<socks5_stream>(ios);
			str = s.get<socks5_stream>();
		}
		str->set_proxy(ps.hostname, ps.port);
		if (ps.type == proxy_settings::socks5_pw)
			str->set_username(ps.username, ps.password);
		if (ps.type == proxy_settings::socks4)
			str->set_version(4);
	}
	else
	{
		// unknown proxy type
		TORRENT_ASSERT_VAL(false, ps.type);
		return false;
	}

	return true;
}
// Human readable message for this alert, naming the tracker event being
// announced ("none", "completed", "started", "stopped" or "paused").
std::string tracker_announce_alert::message() const
{
	const static char* event_str[] = {"none", "completed", "started", "stopped", "paused"};
	const int num_events = int(sizeof(event_str) / sizeof(event_str[0]));
	// the original assert only checked the upper bound; a negative
	// "event" would index out of bounds just the same
	TORRENT_ASSERT_VAL(event >= 0 && event < num_events, event);
	// in release builds (assert compiled out), fall back to a
	// placeholder instead of reading out of bounds
	char const* ev = (event >= 0 && event < num_events) ? event_str[event] : "#invalid";
	return tracker_alert::message() + " sending announce (" + ev + ")";
}
// Store "value" at index "idx" (a 16 bit sequence number), growing the
// circular storage as needed and widening the [m_first, m_last) window to
// cover idx. Returns the previous value at that slot (0 if it was empty).
// Inserting a null value is not allowed; in release builds it degrades to
// remove(idx).
void* packet_buffer_impl::insert(index_type idx, void* value)
{
	INVARIANT_CHECK;

	TORRENT_ASSERT_VAL(idx <= 0xffff, idx);
	// you're not allowed to insert NULLs!
	TORRENT_ASSERT(value);

	if (value == 0) return remove(idx);

	if (m_size != 0)
	{
		if (compare_less_wrap(idx, m_first, 0xffff))
		{
			// Index comes before m_first. If we have room, we can simply
			// adjust m_first backward.

			// count the empty slots immediately before m_first
			std::size_t free_space = 0;

			for (index_type i = (m_first - 1) & (m_capacity - 1);
					i != (m_first & (m_capacity - 1));
					i = (i - 1) & (m_capacity - 1))
			{
				if (m_storage[i & (m_capacity - 1)])
					break;
				++free_space;
			}

			// not enough room before m_first: grow the storage first
			if (((m_first - idx) & 0xffff) > free_space)
				reserve(((m_first - idx) & 0xffff) + m_capacity - free_space);

			m_first = idx;
		}
		else if (idx >= m_first + m_capacity)
		{
			// idx lies past the end of the allocated storage: grow
			reserve(idx - m_first + 1);
		}
		else if (idx < m_first)
		{
			// We have wrapped.
			if (idx >= ((m_first + m_capacity) & 0xffff) && m_capacity < 0xffff)
			{
				reserve(m_capacity + (idx + 1 - ((m_first + m_capacity) & 0xffff)));
			}
		}

		// extend the window's upper bound if idx lies at or past it
		if (compare_less_wrap(m_last, (idx + 1) & 0xffff, 0xffff))
			m_last = (idx + 1) & 0xffff;
	}
	else
	{
		// buffer was empty: the window is exactly this one entry
		m_first = idx;
		m_last = (idx + 1) & 0xffff;
	}

	if (m_capacity == 0) reserve(16);

	void* old_value = m_storage[idx & (m_capacity - 1)];
	m_storage[idx & (m_capacity - 1)] = value;

	if (m_size == 0) m_first = idx;

	// if we're just replacing an old value, the number
	// of elements in the buffer doesn't actually increase
	if (old_value == 0) ++m_size;

	TORRENT_ASSERT_VAL(m_first <= 0xffff, m_first);
	return old_value;
}