// Completion hook: report success by resetting the error value to
// zero without disturbing the error category already in use.
void
finish(error_code& ec)
{
    auto const& cat = ec.category();
    ec.assign(0, cat);
}
 void
 init(error_code& ec)
 {
     ec.assign(0, ec.category());
 }
示例#3
0
 // Body reader hook: signal success and indicate that there are no
 // more buffers to produce by returning an empty optional.
 boost::optional<std::pair<const_buffers_type, bool>>
 get(error_code& ec)
 {
     // clear the error value, keeping the caller's category
     ec.assign(0, ec.category());
     return {};
 }
 // Body writer hook: the advertised content length (if any) is not
 // needed here; just report success by zeroing the error value.
 void
 init(boost::optional<std::uint64_t> const&, error_code& ec)
 {
     auto const& cat = ec.category();
     ec.assign(0, cat);
 }
示例#5
0
	// Copies the file at |inf| to |newf|, overwriting any existing
	// destination. On success |ec| is cleared; on failure it holds the
	// OS error. Uses the best native facility available: CopyFile() on
	// Windows, copyfile() on macOS 10.5+, and a read/write loop
	// elsewhere.
	void copy_file(std::string const& inf, std::string const& newf, error_code& ec)
	{
		ec.clear();
#if TORRENT_USE_WSTRING && defined TORRENT_WINDOWS
#define CopyFile_ CopyFileW
		std::wstring f1 = convert_to_wstring(inf);
		std::wstring f2 = convert_to_wstring(newf);
#else
#define CopyFile_ CopyFileA
		std::string f1 = convert_to_native(inf);
		std::string f2 = convert_to_native(newf);
#endif

#ifdef TORRENT_WINDOWS
		// the last argument (bFailIfExists) is false: existing
		// destination files are overwritten
		if (CopyFile_(f1.c_str(), f2.c_str(), false) == 0)
			ec.assign(GetLastError(), boost::system::get_system_category());
#elif defined __APPLE__ && defined __MACH__ && MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
		// this only works on 10.5
		copyfile_state_t state = copyfile_state_alloc();
		if (copyfile(f1.c_str(), f2.c_str(), state, COPYFILE_ALL) < 0)
			ec.assign(errno, boost::system::get_generic_category());
		copyfile_state_free(state);
#else
		int infd = ::open(inf.c_str(), O_RDONLY);
		if (infd < 0)
		{
			ec.assign(errno, boost::system::get_generic_category());
			return;
		}

		// rely on default umask to filter x and w permissions
		// for group and others
		// TODO: copy the mode from the source file
		int permissions = S_IRUSR | S_IWUSR
			| S_IRGRP | S_IWGRP
			| S_IROTH | S_IWOTH;

		int outfd = ::open(newf.c_str(), O_WRONLY | O_CREAT, permissions);
		if (outfd < 0)
		{
			close(infd);
			ec.assign(errno, boost::system::get_generic_category());
			return;
		}
		char buffer[4096];
		for (;;)
		{
			int num_read = read(infd, buffer, sizeof(buffer));
			if (num_read == 0) break;
			if (num_read < 0)
			{
				ec.assign(errno, boost::system::get_generic_category());
				break;
			}
			int num_written = write(outfd, buffer, num_read);
			if (num_written < num_read)
			{
				// NOTE(review): errno is only guaranteed meaningful when
				// write() returned -1; on a short write this may report a
				// stale errno value
				ec.assign(errno, boost::system::get_generic_category());
				break;
			}
			// a short read means we hit end-of-file
			if (num_read < int(sizeof(buffer))) break;
		}
		close(infd);
		close(outfd);
#endif // TORRENT_WINDOWS
		// don't leak the helper macro into the rest of the translation unit
#undef CopyFile_
	}
示例#6
0
// Read a complete chunk-encoded HTTP message from `stream`, printing
// each chunk's extensions and body to `os` as they arrive, followed
// by any trailer fields promised in the "Trailer" header. On failure,
// `ec` holds the error. The message header is parsed with an empty
// body so payload bytes never accumulate in the message container;
// instead each chunk is captured through the parser's chunk callbacks.
// NOTE(review): SyncReadStream, DynamicBuffer, and isRequest appear to
// be template parameters declared outside this excerpt — confirm.
void
print_chunked_body(
    std::ostream& os,
    SyncReadStream& stream,
    DynamicBuffer& buffer,
    error_code& ec)
{
    // Declare the parser with an empty body since
    // we plan on capturing the chunks ourselves.
    parser<isRequest, empty_body> p;

    // First read the complete header
    read_header(stream, buffer, p, ec);
    if(ec)
        return;

    // This container will hold the extensions for each chunk
    chunk_extensions ce;

    // This string will hold the body of each chunk
    std::string chunk;

    // Declare our chunk header callback. This is invoked
    // after each chunk header and also after the last chunk.
    auto header_cb =
    [&](std::uint64_t size,         // Size of the chunk, or zero for the last chunk
        string_view extensions,     // The raw chunk-extensions string. Already validated.
        error_code& ev)             // We can set this to indicate an error
    {
        // Parse the chunk extensions so we can access them easily
        ce.parse(extensions, ev);
        if(ev)
            return;

        // See if the chunk is too big
        if(size > (std::numeric_limits<std::size_t>::max)())
        {
            ev = error::body_limit;
            return;
        }

        // Make sure we have enough storage, and
        // reset the container for the upcoming chunk
        chunk.reserve(static_cast<std::size_t>(size));
        chunk.clear();
    };

    // Set the callback. The function requires a non-const reference so we
    // use a local variable, since temporaries can only bind to const refs.
    p.on_chunk_header(header_cb);

    // Declare the chunk body callback. This is called one or
    // more times for each piece of a chunk body.
    auto body_cb =
    [&](std::uint64_t remain,   // The number of bytes left in this chunk
        string_view body,       // A buffer holding chunk body data
        error_code& ec)         // We can set this to indicate an error
    {
        // If this is the last piece of the chunk body,
        // set the error so that the call to `read` returns
        // and we can process the chunk.
        if(remain == body.size())
            ec = error::end_of_chunk;

        // Append this piece to our container
        chunk.append(body.data(), body.size());

        // The return value informs the parser of how much of the body we
        // consumed. We will indicate that we consumed everything passed in.
        return body.size();
    };
    p.on_chunk_body(body_cb);

    while(! p.is_done())
    {
        // Read as much as we can. When we reach the end of the chunk, the chunk
        // body callback will make the read return with the end_of_chunk error.
        read(stream, buffer, p, ec);
        if(! ec)
            continue;
        else if(ec != error::end_of_chunk)
            return;
        else
            // end_of_chunk is our own sentinel, not a real failure:
            // clear it before processing the completed chunk
            ec.assign(0, ec.category());

        // We got a whole chunk, print the extensions:
        for(auto const& extension : ce)
        {
            os << "Extension: " << extension.first;
            if(! extension.second.empty())
                os << " = " << extension.second << std::endl;
            else
                os << std::endl;
        }

        // Now print the chunk body
        os << "Chunk Body: " << chunk << std::endl;
    }

    // Get a reference to the parsed message, this is for convenience
    auto const& msg = p.get();

    // Check each field promised in the "Trailer" header and output it
    for(auto const& name : token_list{msg[field::trailer]})
    {
        // Find the trailer field
        auto it = msg.find(name);
        if(it == msg.end())
        {
            // Oops! They promised the field but failed to deliver it
            os << "Missing Trailer: " << name << std::endl;
            continue;
        }
        os << it->name() << ": " << it->value() << std::endl;
    }
}
示例#7
0
 // Visitor invoked by the serializer with the next buffer sequence:
 // print the buffers to stdout, then mark them as consumed.
 void operator()(error_code& ec, ConstBufferSequence const& buffer) const
 {
     // success: zero the error value, keeping the category
     ec.assign(0, ec.category());
     // echo the serialized octets
     std::cout << buffers(buffer);
     // tell the serializer how many bytes we used
     auto const n = boost::asio::buffer_size(buffer);
     sr.consume(n);
 }
示例#8
0
	// Opens the file at |path| with the given open mode, closing any
	// previously opened file first. Returns true on success; on failure
	// |ec| holds the OS error and false is returned. Post-open tweaks
	// (sparse flag on Windows, cache/read-ahead hints on POSIX) are
	// applied best-effort and never cause the open to fail.
	bool file::open(std::string const& path, open_mode_t mode, error_code& ec)
	{
		close();
		native_path_string file_path = convert_to_native_path_string(path);

#ifdef TORRENT_WINDOWS

		struct win_open_mode_t
		{
			DWORD rw_mode;
			DWORD create_mode;
		};

		// indexed by (mode & open_mode::rw_mask)
		static std::array<win_open_mode_t, 3> const mode_array{
		{
			// read_only
			{GENERIC_READ, OPEN_EXISTING},
			// write_only
			{GENERIC_WRITE, OPEN_ALWAYS},
			// read_write
			{GENERIC_WRITE | GENERIC_READ, OPEN_ALWAYS},
		}};

		static std::array<DWORD, 4> const attrib_array{
		{
			FILE_ATTRIBUTE_NORMAL, // no attrib
			FILE_ATTRIBUTE_HIDDEN, // hidden
			FILE_ATTRIBUTE_NORMAL, // executable
			FILE_ATTRIBUTE_HIDDEN, // hidden + executable
		}};

		TORRENT_ASSERT(static_cast<std::uint32_t>(mode & open_mode::rw_mask) < mode_array.size());
		win_open_mode_t const& m = mode_array[static_cast<std::uint32_t>(mode & open_mode::rw_mask)];
		// the attribute bits live at bit 12 and up in open_mode_t
		DWORD a = attrib_array[static_cast<std::uint32_t>(mode & open_mode::attribute_mask) >> 12];

		// one might think it's a good idea to pass in FILE_FLAG_RANDOM_ACCESS. It
		// turns out that it isn't. That flag will break your operating system:
		// http://support.microsoft.com/kb/2549369

		DWORD const flags = ((mode & open_mode::random_access) ? 0 : FILE_FLAG_SEQUENTIAL_SCAN)
			| (a ? a : FILE_ATTRIBUTE_NORMAL)
			| FILE_FLAG_OVERLAPPED
			| ((mode & open_mode::no_cache) ? FILE_FLAG_WRITE_THROUGH : 0);

		handle_type handle = CreateFileW(file_path.c_str(), m.rw_mode
			, FILE_SHARE_READ | FILE_SHARE_WRITE
			, 0, m.create_mode, flags, 0);

		if (handle == INVALID_HANDLE_VALUE)
		{
			ec.assign(GetLastError(), system_category());
			TORRENT_ASSERT(ec);
			return false;
		}

		m_file_handle = handle;

		// try to make the file sparse if supported
		// only set this flag if the file is opened for writing
		if ((mode & open_mode::sparse)
			&& (mode & open_mode::rw_mask) != open_mode::read_only)
		{
			DWORD temp;
			overlapped_t ol;
			BOOL ret = ::DeviceIoControl(native_handle(), FSCTL_SET_SPARSE, 0, 0
				, 0, 0, &temp, &ol.ol);
			// best-effort: failure to set the sparse flag is deliberately
			// ignored; it only affects disk usage, not correctness
			error_code error;
			if (ret == FALSE && GetLastError() == ERROR_IO_PENDING)
				ol.wait(native_handle(), error);
		}
#else // TORRENT_WINDOWS

		// rely on default umask to filter x and w permissions
		// for group and others
		int permissions = S_IRUSR | S_IWUSR
			| S_IRGRP | S_IWGRP
			| S_IROTH | S_IWOTH;

		if ((mode & open_mode::attribute_executable))
			permissions |= S_IXGRP | S_IXOTH | S_IXUSR;
#ifdef O_BINARY
		static const int mode_array[] = {O_RDONLY | O_BINARY, O_WRONLY | O_CREAT | O_BINARY, O_RDWR | O_CREAT | O_BINARY};
#else
		static const int mode_array[] = {O_RDONLY, O_WRONLY | O_CREAT, O_RDWR | O_CREAT};
#endif

		int open_mode = 0
#ifdef O_NOATIME
			| ((mode & open_mode::no_atime) ? O_NOATIME : 0)
#endif
#ifdef O_SYNC
			| ((mode & open_mode::no_cache) ? O_SYNC : 0)
#endif
			;

		handle_type handle = ::open(file_path.c_str()
			, mode_array[static_cast<std::uint32_t>(mode & open_mode::rw_mask)] | open_mode
			, permissions);

#ifdef O_NOATIME
		// O_NOATIME is not allowed for files we don't own
		// so, if we get EPERM when we try to open with it
		// try again without O_NOATIME
		if (handle == -1 && (mode & open_mode::no_atime) && errno == EPERM)
		{
			mode &= ~open_mode::no_atime;
			open_mode &= ~O_NOATIME;
			handle = ::open(file_path.c_str()
				, mode_array[static_cast<std::uint32_t>(mode & open_mode::rw_mask)] | open_mode
				, permissions);
		}
#endif
		if (handle == -1)
		{
			ec.assign(errno, system_category());
			TORRENT_ASSERT(ec);
			return false;
		}

		m_file_handle = handle;

#ifdef DIRECTIO_ON
		// for solaris
		if ((mode & open_mode::no_cache))
		{
			directio(native_handle(), DIRECTIO_ON);
		}
#endif

#ifdef F_NOCACHE
		// for BSD/Mac
		if ((mode & open_mode::no_cache))
		{
			int yes = 1;
			// NOTE(review): fcntl(F_NOCACHE) documents a plain int argument;
			// passing &yes (a non-null pointer, i.e. non-zero) happens to
			// enable it, but passing 1 directly would match the documented
			// interface — confirm before changing
			::fcntl(native_handle(), F_NOCACHE, &yes);

#ifdef F_NODIRECT
			// it's OK to temporarily cache written pages
			::fcntl(native_handle(), F_NODIRECT, &yes);
#endif
		}
#endif

#ifdef POSIX_FADV_RANDOM
		if ((mode & open_mode::random_access))
		{
			// disable read-ahead
			// NOTE: in android this function was introduced in API 21,
			// but the constant POSIX_FADV_RANDOM is there for lower
			// API levels, just don't add :: to allow a macro workaround
			posix_fadvise(native_handle(), 0, 0, POSIX_FADV_RANDOM);
		}
#endif

#endif
		m_open_mode = mode;

		TORRENT_ASSERT(is_open());
		return true;
	}
示例#9
0
	// Parse a bencoded tracker HTTP response in [data, data + size).
	// On a bdecode or semantic error, |ec| is set and a partially filled
	// (or empty) tracker_response is returned. When |scrape_request| is
	// true, only the scrape counters for |scrape_ih| are extracted.
	tracker_response parse_tracker_response(char const* data, int size, error_code& ec
		, bool scrape_request, sha1_hash scrape_ih)
	{
		tracker_response resp;

		lazy_entry e;
		int res = lazy_bdecode(data, data + size, e, ec);

		if (ec) return resp;

		if (res != 0 || e.type() != lazy_entry::dict_t)
		{
			ec.assign(errors::invalid_tracker_response, get_libtorrent_category());
			return resp;
		}

		int interval = int(e.dict_find_int_value("interval", 0));
		// if no interval is specified, default to 30 minutes
		if (interval == 0) interval = 1800;
		int min_interval = int(e.dict_find_int_value("min interval", 30));

		resp.interval = interval;
		resp.min_interval = min_interval;

		lazy_entry const* tracker_id = e.dict_find_string("tracker id");
		if (tracker_id)
			resp.trackerid = tracker_id->string_value();

		// parse the response
		lazy_entry const* failure = e.dict_find_string("failure reason");
		if (failure)
		{
			resp.failure_reason = failure->string_value();
			ec.assign(errors::tracker_failure, get_libtorrent_category());
			return resp;
		}

		lazy_entry const* warning = e.dict_find_string("warning message");
		if (warning)
			resp.warning_message = warning->string_value();

		if (scrape_request)
		{
			lazy_entry const* files = e.dict_find_dict("files");
			if (files == 0)
			{
				ec.assign(errors::invalid_files_entry, get_libtorrent_category());
				return resp;
			}

			lazy_entry const* scrape_data = files->dict_find_dict(
				scrape_ih.to_string());

			if (scrape_data == 0)
			{
				ec.assign(errors::invalid_hash_entry, get_libtorrent_category());
				return resp;
			}

			resp.complete = int(scrape_data->dict_find_int_value("complete", -1));
			resp.incomplete = int(scrape_data->dict_find_int_value("incomplete", -1));
			resp.downloaded = int(scrape_data->dict_find_int_value("downloaded", -1));
			resp.downloaders = int(scrape_data->dict_find_int_value("downloaders", -1));

			return resp;
		}

		// look for optional scrape info
		resp.complete = int(e.dict_find_int_value("complete", -1));
		resp.incomplete = int(e.dict_find_int_value("incomplete", -1));
		resp.downloaded = int(e.dict_find_int_value("downloaded", -1));

		lazy_entry const* peers_ent = e.dict_find("peers");
		if (peers_ent && peers_ent->type() == lazy_entry::string_t)
		{
			// compact format: 4 byte IPv4 address + 2 byte port per peer
			char const* peers = peers_ent->string_ptr();
			int len = peers_ent->string_length();
			resp.peers4.reserve(len / 6);
			for (int i = 0; i < len; i += 6)
			{
				if (len - i < 6) break;

				ipv4_peer_entry p;
				p.ip = detail::read_v4_address(peers).to_v4().to_bytes();
				p.port = detail::read_uint16(peers);
				resp.peers4.push_back(p);
			}
		}
		else if (peers_ent && peers_ent->type() == lazy_entry::list_t)
		{
			// non-compact format: a list of dictionaries, one per peer
			int len = peers_ent->list_size();
			resp.peers.reserve(len);
			error_code parse_error;
			for (int i = 0; i < len; ++i)
			{
				peer_entry p;
				if (!extract_peer_info(*peers_ent->list_at(i), p, parse_error))
					continue;
				resp.peers.push_back(p);
			}

			// only report an error if all peer entries are invalid
			if (resp.peers.empty() && parse_error)
			{
				ec = parse_error;
				return resp;
			}
		}
		else
		{
			peers_ent = 0;
		}

#if TORRENT_USE_IPV6
		lazy_entry const* ipv6_peers = e.dict_find_string("peers6");
		if (ipv6_peers)
		{
			// compact format: 16 byte IPv6 address + 2 byte port per peer
			char const* peers = ipv6_peers->string_ptr();
			int len = ipv6_peers->string_length();
			resp.peers6.reserve(len / 18);
			for (int i = 0; i < len; i += 18)
			{
				if (len - i < 18) break;

				ipv6_peer_entry p;
				p.ip = detail::read_v6_address(peers).to_v6().to_bytes();
				p.port = detail::read_uint16(peers);
				resp.peers6.push_back(p);
			}
		}
		else
		{
			ipv6_peers = 0;
		}
#else
		lazy_entry const* ipv6_peers = 0;
#endif
/*
		// if we didn't receive any peers. We don't care if we're stopping anyway
		if (peers_ent == 0 && ipv6_peers == 0
			&& tracker_req().event != tracker_request::stopped)
		{
			ec.assign(errors::invalid_peers_entry, get_libtorrent_category());
			return resp;
		}
*/
		lazy_entry const* ip_ent = e.dict_find_string("external ip");
		if (ip_ent)
		{
			char const* p = ip_ent->string_ptr();
			if (ip_ent->string_length() == int(address_v4::bytes_type().size()))
				resp.external_ip = detail::read_v4_address(p);
#if TORRENT_USE_IPV6
			else if (ip_ent->string_length() == int(address_v6::bytes_type().size()))
				resp.external_ip = detail::read_v6_address(p);
#endif
		}

		return resp;
	}
// Parse a bencoded tracker HTTP response in [data, data + size).
// On a bdecode or semantic error, `ec` is set and a partially filled
// (or empty) tracker_response is returned. `flags` selects scrape and
// i2p handling; `scrape_ih` is the info-hash whose scrape counters to
// extract when scraping.
tracker_response parse_tracker_response(char const* data, int size, error_code& ec
                                        , int flags, sha1_hash scrape_ih)
{
    tracker_response resp;

    bdecode_node e;
    int res = bdecode(data, data + size, e, ec);

    if (ec) return resp;

    // a valid tracker response is always a bencoded dictionary
    if (res != 0 || e.type() != bdecode_node::dict_t)
    {
        ec.assign(errors::invalid_tracker_response, get_libtorrent_category());
        return resp;
    }

    int interval = int(e.dict_find_int_value("interval", 0));
    // if no interval is specified, default to 30 minutes
    if (interval == 0) interval = 1800;
    int min_interval = int(e.dict_find_int_value("min interval", 30));

    resp.interval = interval;
    resp.min_interval = min_interval;

    bdecode_node tracker_id = e.dict_find_string("tracker id");
    if (tracker_id)
        resp.trackerid = tracker_id.string_value();

    // parse the response
    bdecode_node failure = e.dict_find_string("failure reason");
    if (failure)
    {
        resp.failure_reason = failure.string_value();
        ec.assign(errors::tracker_failure, get_libtorrent_category());
        return resp;
    }

    bdecode_node warning = e.dict_find_string("warning message");
    if (warning)
        resp.warning_message = warning.string_value();

    if (0 != (flags & tracker_request::scrape_request))
    {
        bdecode_node files = e.dict_find_dict("files");
        if (!files)
        {
            ec.assign(errors::invalid_files_entry, get_libtorrent_category());
            return resp;
        }

        // scrape responses are keyed on the raw 20-byte info-hash
        bdecode_node scrape_data = files.dict_find_dict(
                                       scrape_ih.to_string());

        if (!scrape_data)
        {
            ec.assign(errors::invalid_hash_entry, get_libtorrent_category());
            return resp;
        }

        resp.complete = int(scrape_data.dict_find_int_value("complete", -1));
        resp.incomplete = int(scrape_data.dict_find_int_value("incomplete", -1));
        resp.downloaded = int(scrape_data.dict_find_int_value("downloaded", -1));
        resp.downloaders = int(scrape_data.dict_find_int_value("downloaders", -1));

        return resp;
    }

    // look for optional scrape info
    resp.complete = int(e.dict_find_int_value("complete", -1));
    resp.incomplete = int(e.dict_find_int_value("incomplete", -1));
    resp.downloaded = int(e.dict_find_int_value("downloaded", -1));

    bdecode_node peers_ent = e.dict_find("peers");
    if (peers_ent && peers_ent.type() == bdecode_node::string_t)
    {
        char const* peers = peers_ent.string_ptr();
        int len = peers_ent.string_length();
#if TORRENT_USE_I2P
        if (0 != (flags & tracker_request::i2p))
        {
            // i2p compact format: 32 raw bytes per peer, encoded back
            // into a .b32.i2p hostname
            error_code parse_error;
            for (int i = 0; i < len; i += 32)
            {
                if (len - i < 32) break;
                peer_entry p;
                p.hostname = base32encode(std::string(peers + i, 32), string::i2p);
                p.hostname += ".b32.i2p";
                p.port = 6881;
                resp.peers.push_back(p);
            }
        }
        else
#endif
        {
            // compact format: 4 byte IPv4 address + 2 byte port per peer
            resp.peers4.reserve(len / 6);
            for (int i = 0; i < len; i += 6)
            {
                if (len - i < 6) break;

                ipv4_peer_entry p;
                p.ip = detail::read_v4_address(peers).to_v4().to_bytes();
                p.port = detail::read_uint16(peers);
                resp.peers4.push_back(p);
            }
        }
    }
    else if (peers_ent && peers_ent.type() == bdecode_node::list_t)
    {
        // non-compact format: a list of dictionaries, one per peer
        int len = peers_ent.list_size();
        resp.peers.reserve(len);
        error_code parse_error;
        for (int i = 0; i < len; ++i)
        {
            peer_entry p;
            if (!extract_peer_info(peers_ent.list_at(i), p, parse_error))
                continue;
            resp.peers.push_back(p);
        }

        // only report an error if all peer entries are invalid
        if (resp.peers.empty() && parse_error)
        {
            ec = parse_error;
            return resp;
        }
    }
    else
    {
        // neither string nor list: treat the entry as absent
        peers_ent.clear();
    }

#if TORRENT_USE_IPV6
    bdecode_node ipv6_peers = e.dict_find_string("peers6");
    if (ipv6_peers)
    {
        // compact format: 16 byte IPv6 address + 2 byte port per peer
        char const* peers = ipv6_peers.string_ptr();
        int len = ipv6_peers.string_length();
        resp.peers6.reserve(len / 18);
        for (int i = 0; i < len; i += 18)
        {
            if (len - i < 18) break;

            ipv6_peer_entry p;
            p.ip = detail::read_v6_address(peers).to_v6().to_bytes();
            p.port = detail::read_uint16(peers);
            resp.peers6.push_back(p);
        }
    }
    else
    {
        ipv6_peers.clear();
    }
#else
    bdecode_node ipv6_peers;
#endif
    /*
    		// if we didn't receive any peers. We don't care if we're stopping anyway
    		if (peers_ent == 0 && ipv6_peers == 0
    			&& tracker_req().event != tracker_request::stopped)
    		{
    			ec.assign(errors::invalid_peers_entry, get_libtorrent_category());
    			return resp;
    		}
    */
    bdecode_node ip_ent = e.dict_find_string("external ip");
    if (ip_ent)
    {
        // the string length distinguishes IPv4 (4 bytes) from IPv6 (16)
        char const* p = ip_ent.string_ptr();
        if (ip_ent.string_length() == int(address_v4::bytes_type().size()))
            resp.external_ip = detail::read_v4_address(p);
#if TORRENT_USE_IPV6
        else if (ip_ent.string_length() == int(address_v6::bytes_type().size()))
            resp.external_ip = detail::read_v6_address(p);
#endif
    }

    return resp;
}
示例#11
0
// Reads the entire contents of |filename| into |v|.
// Returns 0 on success, -1 on an I/O error (with |ec| set),
// -2 if the file exceeds |limit| bytes, and -3 on a short read.
int load_file(std::string const& filename, std::vector<char>& v, error_code& ec, int limit = 8000000)
{
	ec.clear();
	FILE* f = fopen(filename.c_str(), "rb");

	if (f == NULL)
	{
		ec.assign(errno, boost::system::get_generic_category());
		return -1;
	}

	// determine the file size by seeking to the end
	int r = fseek(f, 0, SEEK_END);
	if (r != 0)
	{
		ec.assign(errno, boost::system::get_generic_category());
		fclose(f);
		return -1;
	}

	long s = ftell(f);
	if (s < 0)
	{
		ec.assign(errno, boost::system::get_generic_category());
		fclose(f);
		return -1;
	}

	// refuse to load unreasonably large files
	if (s > limit)
	{
		fclose(f);
		return -2;
	}

	r = fseek(f, 0, SEEK_SET);
	if (r != 0)
	{
		ec.assign(errno, boost::system::get_generic_category());
		fclose(f);
		return -1;
	}

	v.resize(s);
	if (s == 0)
	{
		fclose(f);
		return 0;
	}

	// fread() returns the number of elements read as a size_t; it never
	// returns a negative value, so a read error must be detected with
	// ferror() rather than a sign check
	std::size_t const num_read = fread(&v[0], 1, v.size(), f);
	if (ferror(f) != 0)
	{
		ec.assign(errno, boost::system::get_generic_category());
		fclose(f);
		return -1;
	}

	fclose(f);

	if (num_read != v.size())
	{
		return -3;
	}

	return 0;
}
示例#12
0
	// Applies the session settings that affect the disk buffer pool:
	// chunk size, cache locking, pool allocator choice, the total cache
	// size (derived from physical RAM when negative), and the optional
	// mmap-backed cache file. On failure to create or map the cache
	// file, |ec| is set and the pool falls back to heap allocation.
	// Thread-safe: holds m_pool_mutex for the duration of the call.
	void disk_buffer_pool::set_settings(aux::session_settings const& sett
		, error_code& ec)
	{
		mutex::scoped_lock l(m_pool_mutex);

		// 0 cache_buffer_chunk_size means 'automatic' (i.e.
		// proportional to the total disk cache size)
		m_cache_buffer_chunk_size = sett.get_int(settings_pack::cache_buffer_chunk_size);
		m_lock_disk_cache = sett.get_bool(settings_pack::lock_disk_cache);
#ifndef TORRENT_DISABLE_POOL_ALLOCATOR
		m_want_pool_allocator = sett.get_bool(settings_pack::use_disk_cache_pool);
		// if there are no allocated blocks, it's OK to switch allocator
		if (m_in_use == 0)
			m_using_pool_allocator = m_want_pool_allocator;
#endif

#if TORRENT_HAVE_MMAP
		// if we've already allocated an mmap, we can't change
		// anything unless there are no allocations in use
		if (m_cache_pool && m_in_use > 0) return;
#endif

		// only allow changing size if we're not using mmapped
		// cache, or if we're just about to turn it off
		if (
#if TORRENT_HAVE_MMAP
			m_cache_pool == 0 ||
#endif
			sett.get_str(settings_pack::mmap_cache).empty())
		{
			int cache_size = sett.get_int(settings_pack::cache_size);
			if (cache_size < 0)
			{
				// negative cache size means 'automatic': use 1/8 of
				// physical RAM, measured in blocks
				boost::uint64_t phys_ram = physical_ram();
				if (phys_ram == 0) m_max_use = 1024;
				else m_max_use = phys_ram / 8 / m_block_size;
			}
			else
			{
				m_max_use = cache_size;
			}
			// leave head-room below the limit for queued disk bytes
			// (0x4000 is the block size in bytes)
			m_low_watermark = m_max_use - (std::max)(16, sett.get_int(settings_pack::max_queued_disk_bytes) / 0x4000);
			if (m_low_watermark < 0) m_low_watermark = 0;
			if (m_in_use >= m_max_use && !m_exceeded_max_size)
			{
				m_exceeded_max_size = true;
				m_trigger_cache_trim();
			}
		}

#if TORRENT_USE_ASSERTS
		m_settings_set = true;
#endif

#if TORRENT_HAVE_MMAP
		// #error support resizing the map
		if (m_cache_pool && sett.get_str(settings_pack::mmap_cache).empty())
		{
			// the mmap cache was just turned off: unmap and drop the file
			TORRENT_ASSERT(m_in_use == 0);
			munmap(m_cache_pool, boost::uint64_t(m_max_use) * 0x4000);
			m_cache_pool = 0;
			// attempt to make MacOS not flush this to disk, making close()
			// block for a long time
			ftruncate(m_cache_fd, 0);
			close(m_cache_fd);
			m_cache_fd = -1;
			// release the free-list's memory too, not just its elements
			std::vector<int>().swap(m_free_list);
		}
		else if (m_cache_pool == 0 && !sett.get_str(settings_pack::mmap_cache).empty())
		{
			// the mmap cache was just turned on: create and map the file.
			// O_TRUNC here is because we don't actually care about what's
			// in the file now, there's no need to ever read that into RAM
#ifndef O_EXLOCK
#define O_EXLOCK 0
#endif
			m_cache_fd = open(sett.get_str(settings_pack::mmap_cache).c_str(), O_RDWR | O_CREAT | O_EXLOCK | O_TRUNC, 0700);
			if (m_cache_fd < 0)
			{
				ec.assign(errno, boost::system::generic_category());
			}
			else
			{
#ifndef MAP_NOCACHE
#define MAP_NOCACHE 0
#endif
				ftruncate(m_cache_fd, boost::uint64_t(m_max_use) * 0x4000);
				m_cache_pool = static_cast<char*>(mmap(0, boost::uint64_t(m_max_use) * 0x4000, PROT_READ | PROT_WRITE
					, MAP_SHARED | MAP_NOCACHE, m_cache_fd, 0));
				// mmap() returns MAP_FAILED ((void*)-1) on failure
				if (intptr_t(m_cache_pool) == -1)
				{
					ec.assign(errno, boost::system::generic_category());

					m_cache_pool = 0;
					// attempt to make MacOS not flush this to disk, making close()
					// block for a long time
					ftruncate(m_cache_fd, 0);
					close(m_cache_fd);
					m_cache_fd = -1;
				}
				else
				{
					// the mapping must be page aligned
					TORRENT_ASSERT((size_t(m_cache_pool) & 0xfff) == 0);
					m_free_list.reserve(m_max_use);
					for (int i = 0; i < m_max_use; ++i)
						m_free_list.push_back(i);
				}
			}
		}
#endif
	}
示例#13
0
	// Writes the buffers in |bufs| to the file at |file_offset|.
	// Returns the number of bytes written, or -1 on error (with |ec|
	// set). |flags| may request coalescing of the buffers into one
	// contiguous write.
	// This has to be thread safe, i.e. atomic.
	// that means, on posix this has to be turned into a series of
	// pwrite() calls
	std::int64_t file::writev(std::int64_t file_offset, span<iovec_t const> bufs
		, error_code& ec, open_mode_t flags)
	{
		if (m_file_handle == INVALID_HANDLE_VALUE)
		{
#ifdef TORRENT_WINDOWS
			ec = error_code(ERROR_INVALID_HANDLE, system_category());
#else
			ec = error_code(boost::system::errc::bad_file_descriptor, generic_category());
#endif
			return -1;
		}
		TORRENT_ASSERT((m_open_mode & open_mode::rw_mask) == open_mode::write_only
			|| (m_open_mode & open_mode::rw_mask) == open_mode::read_write);
		TORRENT_ASSERT(!bufs.empty());
		TORRENT_ASSERT(is_open());

		ec.clear();

#if TORRENT_USE_PREADV
		TORRENT_UNUSED(flags);

		// pwritev() takes the whole buffer vector in one call
		std::int64_t ret = iov(&::pwritev, native_handle(), file_offset, bufs, ec);
#else

		// there's no point in coalescing single buffer writes
		if (bufs.size() == 1)
		{
			flags &= ~open_mode::coalesce_buffers;
		}

		iovec_t tmp;
		if (flags & open_mode::coalesce_buffers)
		{
			if (!coalesce_write_buffers(bufs, tmp))
				// ok, that failed, don't coalesce writes
				flags &= ~open_mode::coalesce_buffers;
		}

#if TORRENT_USE_PREAD
		std::int64_t ret = iov(&::pwrite, native_handle(), file_offset, bufs, ec);
#else
		std::int64_t ret = iov(&::write, native_handle(), file_offset, bufs, ec);
#endif

		// coalesce_write_buffers() allocated a temporary flat buffer;
		// release it now that the write completed
		if (flags & open_mode::coalesce_buffers)
			delete[] tmp.data();

#endif
#if TORRENT_USE_FDATASYNC \
	&& !defined F_NOCACHE && \
	!defined DIRECTIO_ON
		// without F_NOCACHE/DIRECTIO, honor no_cache by flushing the
		// data to disk after every write
		if (m_open_mode & open_mode::no_cache)
		{
			// EINVAL/ENOSYS mean fdatasync just isn't supported here;
			// that's not an error worth reporting
			if (::fdatasync(native_handle()) != 0
				&& errno != EINVAL
				&& errno != ENOSYS)
			{
				ec.assign(errno, system_category());
			}
		}
#endif
		return ret;
	}
示例#14
0
 // Body reader hook: hand out the single body buffer. The `false`
 // flag tells the caller this is the last (and only) piece.
 boost::optional<std::pair<const_buffers_type, bool>>
 get(error_code& ec)
 {
     // success: zero the error value, keeping the category
     ec.assign(0, ec.category());
     std::pair<const_buffers_type, bool> result{body_.data(), false};
     return result;
 }
示例#15
0
// Compresses as much of the buffer sequence `cb` as fits into `out`
// using the shared deflate stream `zo`. `total_in` receives the number
// of input bytes consumed (and `cb` is advanced by that amount), and
// `out` is shrunk to the bytes actually produced. When `fin` is true
// and all remaining input fits, the stream is flushed and the 4-byte
// deflate flush marker is stripped (per RFC 7692 style framing).
// Returns false when the caller should stop (output full, error, or
// final flush emitted); true when more input remains to be fed.
// Requires out.size() >= 6 so a full flush always fits.
bool
deflate(
    DeflateStream& zo,
    boost::asio::mutable_buffer& out,
    buffers_suffix<ConstBufferSequence>& cb,
    bool fin,
    std::size_t& total_in,
    error_code& ec)
{
    using boost::asio::buffer;
    BOOST_ASSERT(out.size() >= 6);
    zlib::z_params zs;
    zs.avail_in = 0;
    zs.next_in = nullptr;
    zs.avail_out = out.size();
    zs.next_out = out.data();
    // feed each non-empty input buffer into the compressor until the
    // output fills up or the input runs out
    for(auto in : beast::detail::buffers_range(cb))
    {
        zs.avail_in = in.size();
        if(zs.avail_in == 0)
            continue;
        zs.next_in = in.data();
        zo.write(zs, zlib::Flush::none, ec);
        if(ec)
        {
            if(ec != zlib::error::need_buffers)
                return false;
            // need_buffers just means the output buffer is full —
            // not a real error, clear it and stop feeding input
            BOOST_ASSERT(zs.avail_out == 0);
            BOOST_ASSERT(zs.total_out == out.size());
            ec.assign(0, ec.category());
            break;
        }
        if(zs.avail_out == 0)
        {
            BOOST_ASSERT(zs.total_out == out.size());
            break;
        }
        BOOST_ASSERT(zs.avail_in == 0);
    }
    total_in = zs.total_in;
    cb.consume(zs.total_in);
    if(zs.avail_out > 0 && fin)
    {
        auto const remain = boost::asio::buffer_size(cb);
        if(remain == 0)
        {
            // Inspired by Mark Adler
            // https://github.com/madler/zlib/issues/149
            //
            // VFALCO We could do this flush twice depending
            //        on how much space is in the output.
            zo.write(zs, zlib::Flush::block, ec);
            BOOST_ASSERT(! ec || ec == zlib::error::need_buffers);
            if(ec == zlib::error::need_buffers)
                ec.assign(0, ec.category());
            if(ec)
                return false;
            if(zs.avail_out >= 6)
            {
                zo.write(zs, zlib::Flush::full, ec);
                BOOST_ASSERT(! ec);
                // remove flush marker
                zs.total_out -= 4;
                out = buffer(out.data(), zs.total_out);
                return false;
            }
        }
    }
    // success: clear the error and report how much output was produced
    ec.assign(0, ec.category());
    out = buffer(out.data(), zs.total_out);
    return true;
}
// Resizes the open file to |s| bytes. Returns true on success; on
// failure |ec| is set and false is returned. Avoids touching the file
// (and its modification time) when it already has the right size, and
// when sparse mode is off, pre-allocates the storage.
bool file::set_size(size_type s, error_code& ec)
{
    TORRENT_ASSERT(is_open());
    TORRENT_ASSERT(s >= 0);

#ifdef TORRENT_WINDOWS
    LARGE_INTEGER offs;
    LARGE_INTEGER cur_size;
    if (GetFileSizeEx(m_file_handle, &cur_size) == FALSE)
    {
        ec = error_code(GetLastError(), get_system_category());
        return false;
    }
    offs.QuadPart = s;
    // only set the file size if it's not already at
    // the right size. We don't want to update the
    // modification time if we don't have to
    if (cur_size.QuadPart != s)
    {
        if (SetFilePointerEx(m_file_handle, offs, &offs, FILE_BEGIN) == FALSE)
        {
            ec.assign(GetLastError(), get_system_category());
            return false;
        }
        if (::SetEndOfFile(m_file_handle) == FALSE)
        {
            ec.assign(GetLastError(), get_system_category());
            return false;
        }
    }
#if _WIN32_WINNT >= 0x501
    if ((m_open_mode & sparse) == 0)
    {
        // only allocate the space if the file
        // is not fully allocated
        DWORD high_dword = 0;
        offs.LowPart = GetCompressedFileSize(m_path.c_str(), &high_dword);
        offs.HighPart = high_dword;
        // GetCompressedFileSize() signals failure by returning
        // INVALID_FILE_SIZE; only then is GetLastError() meaningful.
        // Checking it unconditionally would report stale errors from
        // earlier, unrelated API calls as failures here.
        if (offs.LowPart == INVALID_FILE_SIZE)
        {
            ec.assign(GetLastError(), get_system_category());
            // INVALID_FILE_SIZE can also be a legitimate low dword of a
            // huge file; the last-error is NO_ERROR in that case
            if (ec) return false;
        }
        if (offs.QuadPart != s)
        {
            // if the user has permissions, avoid filling
            // the file with zeroes, but just fill it with
            // garbage instead
            SetFileValidData(m_file_handle, offs.QuadPart);
        }
    }
#endif // _WIN32_WINNT >= 0x501
#else // NON-WINDOWS
    struct stat st;
    if (fstat(m_fd, &st) != 0)
    {
        ec.assign(errno, get_posix_category());
        return false;
    }

    // only truncate the file if it doesn't already
    // have the right size. We don't want to update
    if (st.st_size != s && ftruncate(m_fd, s) < 0)
    {
        ec.assign(errno, get_posix_category());
        return false;
    }

    // if we're not in sparse mode, allocate the storage
    // but only if the number of allocated blocks for the file
    // is less than the file size. Otherwise we would just
    // update the modification time of the file for no good
    // reason.
    if ((m_open_mode & sparse) == 0
            && st.st_blocks < (s + st.st_blksize - 1) / st.st_blksize)
    {
        // How do we know that the file is already allocated?
        // if we always try to allocate the space, we'll update
        // the modification time without actually changing the file
        // but if we don't do anything if the file size is
#ifdef F_PREALLOCATE
        fstore_t f = {F_ALLOCATECONTIG, F_PEOFPOSMODE, 0, s, 0};
        if (fcntl(m_fd, F_PREALLOCATE, &f) < 0)
        {
            ec = error_code(errno, get_posix_category());
            return false;
        }
#endif // F_PREALLOCATE

#if defined TORRENT_LINUX || TORRENT_HAS_FALLOCATE
        int ret;
#endif

#if defined TORRENT_LINUX
        ret = my_fallocate(m_fd, 0, 0, s);
        // if we return 0, everything went fine
        // the fallocate call succeeded
        if (ret == 0) return true;
        // otherwise, something went wrong. If the error
        // is ENOSYS, just keep going and do it the old-fashioned
        // way. If fallocate failed with some other error, it
        // probably means the user should know about it, error out
        // and report it.
        if (errno != ENOSYS && errno != EOPNOTSUPP)
        {
            ec.assign(errno, get_posix_category());
            return false;
        }
#endif // TORRENT_LINUX

#if TORRENT_HAS_FALLOCATE
        // if fallocate failed, we have to use posix_fallocate
        // which can be painfully slow
        // if you get a compile error here, you might want to
        // define TORRENT_HAS_FALLOCATE to 0.
        ret = posix_fallocate(m_fd, 0, s);
        // posix_allocate fails with EINVAL in case the underlying
        // filesystem does bot support this operation
        if (ret != 0 && ret != EINVAL)
        {
            ec = error_code(ret, get_posix_category());
            return false;
        }
#endif // TORRENT_HAS_FALLOCATE
    }
#endif // TORRENT_WINDOWS
    return true;
}
Example #17
0
	// Set the file's logical size to s bytes, truncating or extending as
	// needed. If the file was NOT opened in sparse mode, also try to
	// pre-allocate the underlying storage using the best facility the
	// platform offers (SetFileValidData, F_PREALLOCATE, F_ALLOCSP64 or
	// posix_fallocate).
	// Returns true on success; on failure sets ec to the system error and
	// returns false. Allocation steps that the filesystem merely does not
	// support (EINVAL) are skipped rather than treated as errors.
	bool file::set_size(std::int64_t s, error_code& ec)
	{
		TORRENT_ASSERT(is_open());
		TORRENT_ASSERT(s >= 0);

#ifdef TORRENT_WINDOWS

		LARGE_INTEGER offs;
		LARGE_INTEGER cur_size;
		if (GetFileSizeEx(native_handle(), &cur_size) == FALSE)
		{
			ec.assign(GetLastError(), system_category());
			return false;
		}
		offs.QuadPart = s;
		// only set the file size if it's not already at
		// the right size. We don't want to update the
		// modification time if we don't have to
		if (cur_size.QuadPart != s)
		{
			// move the file pointer to the new end position...
			if (SetFilePointerEx(native_handle(), offs, &offs, FILE_BEGIN) == FALSE)
			{
				ec.assign(GetLastError(), system_category());
				return false;
			}
			// ...and truncate/extend the file there
			if (::SetEndOfFile(native_handle()) == FALSE)
			{
				ec.assign(GetLastError(), system_category());
				return false;
			}
			if (!(m_open_mode & open_mode::sparse))
			{
				// if the user has permissions, avoid filling
				// the file with zeroes, but just fill it with
				// garbage instead. Failure here is deliberately
				// ignored (best-effort optimization).
				set_file_valid_data(m_file_handle, s);
			}
		}
#else // NON-WINDOWS
		struct stat st;
		if (::fstat(native_handle(), &st) != 0)
		{
			ec.assign(errno, system_category());
			return false;
		}

		// only truncate the file if it doesn't already
		// have the right size. We don't want to update
		// the modification time if we don't have to
		if (st.st_size != s && ::ftruncate(native_handle(), s) < 0)
		{
			ec.assign(errno, system_category());
			return false;
		}

		// if we're not in sparse mode, allocate the storage
		// but only if the number of allocated blocks for the file
		// is less than the file size. Otherwise we would just
		// update the modification time of the file for no good
		// reason.
		// NOTE(review): this compares st_blocks against a count of
		// st_blksize-sized blocks, but POSIX st_blocks is typically in
		// 512-byte units — the check is likely conservative (allocation
		// may be skipped when partially done); confirm this is intended.
		if (!(m_open_mode & open_mode::sparse)
			&& std::int64_t(st.st_blocks) < (s + st.st_blksize - 1) / st.st_blksize)
		{
			// The st_blocks guard above is what tells us whether the file
			// is already allocated; without it, every preallocation call
			// below would touch the file's modification time even when
			// nothing actually changes.
#ifdef F_PREALLOCATE
			// Darwin/macOS: ask the kernel to pre-allocate (ideally
			// contiguous) space out to offset s
			fstore_t f = {F_ALLOCATECONTIG, F_PEOFPOSMODE, 0, s, 0};
			if (fcntl(native_handle(), F_PREALLOCATE, &f) < 0)
			{
				// It appears Apple's new filesystem (APFS) does not
				// support this control message and fails with EINVAL
				// if so, just skip it
				if (errno != EINVAL)
				{
					if (errno != ENOSPC)
					{
						ec.assign(errno, system_category());
						return false;
					}
					// ok, let's try to allocate non contiguous space then
					f.fst_flags = F_ALLOCATEALL;
					if (fcntl(native_handle(), F_PREALLOCATE, &f) < 0)
					{
						ec.assign(errno, system_category());
						return false;
					}
				}
			}
#endif // F_PREALLOCATE

#ifdef F_ALLOCSP64
			// Solaris-style storage reservation for the first s bytes
			flock64 fl64;
			fl64.l_whence = SEEK_SET;
			fl64.l_start = 0;
			fl64.l_len = s;
			if (fcntl(native_handle(), F_ALLOCSP64, &fl64) < 0)
			{
				ec.assign(errno, system_category());
				return false;
			}

#endif // F_ALLOCSP64

#if TORRENT_HAS_FALLOCATE
			// if fallocate failed, we have to use posix_fallocate
			// which can be painfully slow
			// if you get a compile error here, you might want to
			// define TORRENT_HAS_FALLOCATE to 0.
			int const ret = posix_fallocate(native_handle(), 0, s);
			// posix_allocate fails with EINVAL in case the underlying
			// filesystem does not support this operation
			// (note: posix_fallocate returns the error code directly
			// rather than setting errno)
			if (ret != 0 && ret != EINVAL)
			{
				ec.assign(ret, system_category());
				return false;
			}
#endif // TORRENT_HAS_FALLOCATE
		}
#endif // TORRENT_WINDOWS
		return true;
	}