Example #1
0
	// Translate (file_index, file_offset) into torrent coordinates: which
	// piece the byte lives in, the offset within that piece, and how much of
	// the requested size actually fits inside the torrent.
	// Out-of-range requests yield a sentinel request pointing one past the
	// last piece, with zero start and length.
	peer_request file_storage::map_file(int file_index, size_type file_offset
		, int size) const
	{
		TORRENT_ASSERT(file_index < num_files());
		TORRENT_ASSERT(file_index >= 0);

		// start out with the "invalid / past-the-end" sentinel and only fill
		// in real values once the request is known to be in range
		peer_request ret;
		ret.piece = m_num_pieces;
		ret.start = 0;
		ret.length = 0;

		if (file_index < 0 || file_index >= num_files())
			return ret;

		// absolute offset of the requested byte within the whole torrent
		size_type const offset = file_offset + this->file_offset(file_index);
		if (offset >= total_size())
			return ret;

		ret.piece = int(offset / piece_length());
		ret.start = int(offset % piece_length());
		// clamp the length so the request never runs past the torrent's end
		ret.length = (offset + size > total_size())
			? int(total_size() - offset)
			: size;
		return ret;
	}
// Open the torrent's download location with the desktop's default handler.
// Single-file torrents open the file itself; multi-file torrents open the
// containing directory.
//
// NOTE: still uses system()+xdg-open by design (see the original author's
// comment about portability), but the path is now properly shell-quoted:
// the old code wrapped the path in single quotes without escaping embedded
// single quotes, so a file named e.g. "a'; rm -rf ~'" would break out of
// the quoting and run arbitrary shell commands.
void gt::Platform::openTorrent(shared_ptr<gt::Torrent> t)
{
    auto files = t->getInfo()->files();
    string path = t->getSavePath() + '/' + t->getInfo()->file_at(0).path;

    if (files.num_files() > 1) // if there's more than a file, we open the containing folder
        path = path.substr(0, path.find_last_of('/'));

    // Build a safely single-quoted argument: close the quote, emit an
    // escaped quote, reopen ( '  ->  '\'' ). This is the standard POSIX
    // shell idiom and makes the argument injection-proof.
    string quoted;
    quoted.reserve(path.size() + 2);
    quoted += '\'';
    for (string::size_type i = 0; i < path.size(); ++i)
    {
        if (path[i] == '\'') quoted += "'\\''";
        else quoted += path[i];
    }
    quoted += '\'';

    // HURR system() IS BAD BECAUSE IT'S NOT USED TO MAKE PORTABLE CODE, pls refer to the filename, if you're expecting anything portable here you've come to the wrong place.
    system((string("xdg-open ") + quoted).c_str()); // Either use system or fork and exec with the xdg binary it's literraly the same shit, or even worst, link with even more libs, pick your poison
}
Example #3
0
	// Map a byte range inside a single file to a peer_request (piece index,
	// offset within that piece, and length). Callers are responsible for
	// passing a valid file index and an in-range offset (asserted below).
	peer_request file_storage::map_file(int file_index, size_type file_offset
		, int size) const
	{
		TORRENT_ASSERT(file_index < num_files());
		TORRENT_ASSERT(file_index >= 0);

		// absolute position of the requested byte within the whole torrent
		size_type const global_offset = file_offset + at(file_index).offset;

		peer_request req;
		req.piece = int(global_offset / piece_length());
		req.start = int(global_offset % piece_length());
		req.length = size;
		return req;
	}
Example #4
0
	// Map a byte range of the torrent (identified by piece index, offset
	// within that piece, and size in bytes) onto the files it spans.
	// Returns one file_slice per file touched, in torrent order; each slice
	// carries the file index, the offset within that file (adjusted by
	// file_base) and the number of bytes of the range that fall in it.
	std::vector<file_slice> file_storage::map_block(int piece, size_type offset
		, int size) const
	{
		TORRENT_ASSERT(num_files() > 0);
		std::vector<file_slice> ret;

		if (m_files.empty()) return ret;

		// find the file iterator and file offset
		internal_file_entry target;
		target.offset = piece * (size_type)m_piece_length + offset;
		TORRENT_ASSERT(target.offset + size <= m_total_size);
		TORRENT_ASSERT(!compare_file_offset(target, m_files.front()));

		// upper_bound gives the first file starting strictly after the target
		// offset; stepping back one lands on the file containing the target
		std::vector<internal_file_entry>::const_iterator file_iter = std::upper_bound(
			begin(), end(), target, compare_file_offset);

		TORRENT_ASSERT(file_iter != begin());
		--file_iter;

		// offset of the target byte relative to the start of this file
		size_type file_offset = target.offset - file_iter->offset;
		// each pass consumes what this file contributes; the loop header then
		// rebases file_offset for the next file and advances the iterator
		for (; size > 0; file_offset -= file_iter->size, ++file_iter)
		{
			TORRENT_ASSERT(file_iter != end());
			// a zero-size file (file_offset == size == 0 fails this test only
			// when the file is empty) is skipped without emitting a slice
			if (file_offset < file_iter->size)
			{
				file_slice f;
				f.file_index = file_iter - begin();
				// file_base allows files to be remapped within their storage
				f.offset = file_offset + file_base(*file_iter);
				// take the rest of this file or the rest of the request,
				// whichever is smaller
				f.size = (std::min)(file_iter->size - file_offset, (size_type)size);
				TORRENT_ASSERT(f.size <= size);
				size -= int(f.size);
				// after this, file_offset == file_iter->size when the slice
				// reached the end of the file, so the loop header rebases it
				// to 0 for the next file
				file_offset += f.size;
				ret.push_back(f);
			}
			
			TORRENT_ASSERT(size >= 0);
		}
		return ret;
	}
Example #5
0
File: bloom.c Project: hagemt/bloom
/*
 * Duplicate-file finder: records the paths given on the command line,
 * expands directories, then uses a bloom filter over "shallow" hashes to
 * cheaply detect candidate duplicates, confirming with full hashes.
 * Finally prints every duplicate set and the bytes wasted by them.
 *
 * Returns EXIT_SUCCESS, or EXIT_FAILURE on allocation failure (and, in
 * debug builds, when any file could not be examined).
 */
int
main(int argc, char *argv[])
{
	size_t path_len, total_files;
	off_t bytes_wasted, total_wasted;
	char path_buffer[PATH_MAX_LEN], *hash_value;
	struct file_entry_t *file_entry, *trie_entry;

	SListIterator slist_iterator;
	SetIterator set_iterator;

	/* Step 0: Session data */
	struct file_info_t file_info;
	clear_info(&file_info);

	/* Step 1: Parse arguments (every argument is a file or directory) */
	while (--argc) {
		/* Being unable to record implies insufficient resources */
		if (!record(argv[argc], &file_info)){
			fprintf(stderr, "[FATAL] out of memory\n");
			destroy_info(&file_info);
			return (EXIT_FAILURE);
		}
	}

	/* Step 2: Fully explore any directories specified */
	#ifndef NDEBUG
	printf("[DEBUG] Creating file list...\n");
	#endif
	while (slist_length(file_info.file_stack) > 0) {
		/* Pick off the top of the file stack */
		file_entry = (struct file_entry_t *)(slist_data(file_info.file_stack));
		slist_remove_entry(&file_info.file_stack, file_info.file_stack);
		assert(file_entry->type == DIRECTORY);
		/* Copy the basename to a buffer */
		memset(path_buffer, '\0', PATH_MAX_LEN);
		path_len = strnlen(file_entry->path, PATH_MAX_LEN);
		memcpy(path_buffer, file_entry->path, path_len);
		/* Ignore cases that would cause overflow */
		if (path_len < PATH_MAX_LEN) {
			/* Append a trailing slash */
			path_buffer[path_len] = '/';
			/* Record all contents (may push onto file stack or one of the lists) */
			DIR *directory = opendir(file_entry->path);
			if (directory == NULL) {
				/* FIX: opendir can fail (permissions, races); the old code
				 * passed the NULL handle to traverse()/closedir() */
				fprintf(stderr, "[WARNING] '%s' (open failed)\n", file_entry->path);
			} else if (traverse(&file_info, directory, path_buffer, ++path_len)) {
				fprintf(stderr, "[FATAL] out of memory\n");
				closedir(directory);
				destroy_info(&file_info);
				return (EXIT_FAILURE);
			} else if (closedir(directory)) {
				fprintf(stderr, "[WARNING] '%s' (close failed)\n", file_entry->path);
			}
		}
		/* Discard this entry */
		destroy_entry(file_entry);
	}

	/* Step 3: Warn about any ignored files */
	if (slist_length(file_info.bad_files) > 0) {
		slist_iterate(&file_info.bad_files, &slist_iterator);
		while (slist_iter_has_more(&slist_iterator)) {
			file_entry = slist_iter_next(&slist_iterator);
			fprintf(stderr, "[WARNING] '%s' ", file_entry->path);
			switch (file_entry->type) {
			case INVALID:
				++file_info.invalid_files;
				fprintf(stderr, "(invalid file)\n");
				break;
			case INACCESSIBLE:
				++file_info.protected_files;
				fprintf(stderr, "(protected file)\n");
				break;
			default:
				++file_info.irregular_files;
				fprintf(stderr, "(irregular file)\n");
				break;
			}
		}
		fprintf(stderr, "[WARNING] %lu file(s) ignored\n",
			(long unsigned)(num_errors(&file_info)));
	}
	#ifndef NDEBUG
	if (num_errors(&file_info) > 0) {
		fprintf(stderr, "[FATAL] cannot parse entire file tree\n");
		destroy_info(&file_info);
		return (EXIT_FAILURE);
	}
	printf("[DEBUG] Found %lu / %lu valid files\n",
		(unsigned long)(num_files(&file_info)),
		(unsigned long)(file_info.total_files));
	#endif

	/* Step 4: Begin the filtering process */
	#ifndef NDEBUG
	printf("[DEBUG] Creating file table...\n");
	#endif
	if (slist_length(file_info.good_files) > 0) {
		file_info.hash_trie = trie_new();
		file_info.shash_trie = trie_new();
		optimize_filter(&file_info);
		/* Extract each file from the list (they should all be regular) */
		slist_iterate(&file_info.good_files, &slist_iterator);
		while (slist_iter_has_more(&slist_iterator)) {
			file_entry = slist_iter_next(&slist_iterator);
			assert(file_entry->type == REGULAR);
			/* Perform a "shallow" hash of the file */
			hash_value = hash_entry(file_entry, SHALLOW);
			#ifndef NDEBUG
			printf("[SHASH] %s\t*%s\n", file_entry->path, hash_value);
			#endif
			/* Check to see if we might have seen this file before */
			if (bloom_filter_query(file_info.shash_filter, hash_value)) {
				/* Get the full hash of the new file */
				hash_value = hash_entry(file_entry, FULL);
				#ifndef NDEBUG
				printf("[+HASH] %s\t*%s\n", file_entry->path, hash_value);
				#endif
				archive(&file_info, file_entry);
				/* Check to see if bloom failed us (false positive) */
				trie_entry = trie_lookup(file_info.shash_trie, file_entry->shash);
				if (trie_entry == TRIE_NULL) {
					#ifndef NDEBUG
					printf("[DEBUG] '%s' (false positive)\n", file_entry->path);
					#endif
					trie_insert(file_info.shash_trie, file_entry->shash, file_entry);
				} else {
					/* Get the full hash of the old file */
					hash_value = hash_entry(trie_entry, FULL);
					#ifndef NDEBUG
					if (hash_value) {
						printf("[-HASH] %s\t*%s\n", trie_entry->path, hash_value);
					}
					#endif
					archive(&file_info, trie_entry);
				}
			} else {
				/* Add a record of this shash to the filter */
				bloom_filter_insert(file_info.shash_filter, hash_value);
				trie_insert(file_info.shash_trie, hash_value, file_entry);
			}
		}
		persist("bloom_store", &file_info);
	}

	/* Step 5: Output results and cleanup before exit */
	printf("[EXTRA] Found %lu sets of duplicates...\n",
		(unsigned long)(slist_length(file_info.duplicates)));
	slist_iterate(&file_info.duplicates, &slist_iterator);
	for (total_files = total_wasted = bytes_wasted = 0;
		slist_iter_has_more(&slist_iterator);
		total_wasted += bytes_wasted)
	{
		Set *set = slist_iter_next(&slist_iterator);
		int size = set_num_entries(set);
		/* FIX: reset bytes_wasted before skipping, otherwise the loop's
		 * increment re-adds the previous set's total to total_wasted */
		if (size < 2) { bytes_wasted = 0; continue; }
		printf("[EXTRA] %lu files (w/ same hash):\n", (unsigned long)(size));
		set_iterate(set, &set_iterator);
		for (bytes_wasted = 0;
			set_iter_has_more(&set_iterator);
			bytes_wasted += file_entry->size,
			++total_files)
		{
			file_entry = set_iter_next(&set_iterator);
			printf("\t%s (%lu bytes)\n",
				file_entry->path,
				(unsigned long)(file_entry->size));
		}
	}
	printf("[EXTRA] %lu bytes in %lu files (wasted)\n",
		(unsigned long)(total_wasted),
		(unsigned long)(total_files));
	destroy_info(&file_info);
	return (EXIT_SUCCESS);
}
Example #6
0
int main(int argc, char** argv) {

	if (argc < 8) {

		std::cout << "usage: watershed <aff_x_dir> <aff_y_dir> <aff_z_dir> <t_l> <t_h> <t_s> <ms>" << std::endl;
		return 1;
	}

	std::string aff_x_dir = argv[1];
	std::string aff_y_dir = argv[2];
	std::string aff_z_dir = argv[3];
	float t_l = boost::lexical_cast<float>(argv[4]);
	float t_h = boost::lexical_cast<float>(argv[5]);
	float t_s = boost::lexical_cast<float>(argv[6]);
	int   ms  = boost::lexical_cast<float>(argv[7]);

	std::cout
			<< "Performing affinity graph watershed on volumes "
			<< aff_x_dir << ", " << aff_y_dir << ", " << aff_z_dir
			<< std::endl;

	boost::filesystem::path aff_x_path(aff_x_dir);
	boost::filesystem::path aff_y_path(aff_y_dir);
	boost::filesystem::path aff_z_path(aff_z_dir);

	int size_z = num_files(aff_x_path);
	if (size_z != num_files(aff_y_dir) || size_z != num_files(aff_z_dir)) {

		std::cerr << "directories contain different number of files" << std::endl;
		return 1;
	}

	if (size_z == 0) {

		std::cerr << "directories contain no files" << std::endl;
		return 1;
	}

	std::vector<boost::filesystem::path> aff_x_files = files(aff_x_path);
	std::vector<boost::filesystem::path> aff_y_files = files(aff_y_path);
	std::vector<boost::filesystem::path> aff_z_files = files(aff_z_path);
	aff_x_files.resize(1); // one section only
	aff_y_files.resize(1); // one section only
	aff_z_files.resize(1); // one section only
	size_z = 1;

	std::sort(aff_x_files.begin(), aff_x_files.end());
	std::sort(aff_y_files.begin(), aff_y_files.end());
	std::sort(aff_z_files.begin(), aff_z_files.end());

	vigra::ImageImportInfo info(aff_x_files[0].native().c_str());
	int size_x = info.width();
	int size_y = info.height();

	std::cout << "reading affinity graph of size " << size_x << "x" << size_y << "x" << size_z << std::endl;

	affinity_graph_ptr<float> aff(
			new affinity_graph<float>(
					boost::extents[size_x][size_y][size_z][3],
					boost::fortran_storage_order()));

	for (int z = 0; z < size_z; z++) {

		auto slice = (*aff)[ boost::indices[range()][range()][z][range()] ];
		read_slice(slice, aff_x_files[z], aff_y_files[z], aff_z_files[z]);
	}

	std::cout << "performing simple_watershed" << std::endl;

	std::vector<std::size_t> counts;
	auto result = simple_watershed<uint32_t>(aff, t_l, t_h, counts);

	auto segmentation = result.first;
	int num_segments  = result.second;

	std::cout << "found " << num_segments << " segments" << std::endl;

	auto rg = get_region_graph<uint32_t,float>(aff, segmentation, num_segments);

	std::cout << "performing region merging" << std::endl;

	// I guess the last parameter is to discard regions smaller than that
	//merge_segments_with_function(result.first, rg, counts, 
	//dynamic_size_threshold(t_s, ms), 0);

	for (int z = 0; z < size_z; z++) {

		auto slice = (*segmentation)[ boost::indices[range()][range()][z] ];
		std::stringstream filename;
		filename << "watershed_" << std::setw(5) << std::setfill('0') << z << "_" << t_l << "_" << t_h << "_" << t_s << "_" << ms << ".tif";
		write_slice(slice, filename.str());
	}
}