Code example #1
/**
 * Writes the browse host data of the context ``ctx'' to the buffer
 * ``dest''. This must be called multiple times to retrieve the complete
 * data, until zero is returned, i.e., until the end of file is reached.
 *
 * This routine deals with query hit data generation.
 *
 * @param ctx an initialized browse host context.
 * @param dest the destination buffer.
 * @param size the number of bytes ``dest'' can hold.
 *
 * @return -1 on failure, zero at the end-of-file condition or if size
 *         was zero. On success, the number of bytes copied to ``dest''
 *         is returned.
 */
static ssize_t
browse_host_read_qhits(struct special_upload *ctx,
	void *const dest, size_t size)
{
	struct browse_host_upload *bh = cast_to_browse_host_upload(ctx);
	size_t remain = size;
	char *p = dest;

	/*
	 * If we have no hit pending that we can send, build some more.
	 */

	if (NULL == bh->hits) {
		pslist_t *files = NULL, *sl;
		int i;

		for (i = 0; i < BH_SCAN_AHEAD; i++) {
			shared_file_t *sf;

			do {
				/* Skip holes in indices */
				bh->file_index++;
				sf = shared_file_sorted(bh->file_index);
			} while (NULL == sf && bh->file_index <= shared_files_scanned());

			if (SHARE_REBUILDING == sf || NULL == sf)
				break;

			files = pslist_prepend(files, sf);
		}

		if (NULL == files)		/* Did not find any more file to include */
			return 0;			/* We're done */

		/*
		 * Now build the query hits containing the files we selected.
		 */

		files = pslist_reverse(files);			/* Preserve order */

		if (bh->flags & BH_F_G2) {
			g2_build_qh2_results(files, i, BH_MAX_QH2_SIZE,
				browse_host_record_qh2, bh, &blank_guid, 0);
		} else {
			qhit_build_results(files, i, BH_MAX_QHIT_SIZE,
				browse_host_record_hit, bh, &blank_guid, 0, &zero_array);
		}

		g_assert(bh->hits != NULL);		/* At least 1 hit enqueued */

		bh->hits = pslist_reverse(bh->hits);	/* Preserve order */
		PSLIST_FOREACH(files, sl) {
			shared_file_t *sf = sl->data;
			shared_file_unref(&sf);
		}
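
Example #1 is cut off at this point by the listing. Judging from the GSList variant in example #2, the function presumably goes on to free the temporary list, drain the queued hit messages and return the byte count; the completion below is a sketch under that assumption, using pslist_free_null() and pslist_remove() as the presumed pslist counterparts of the GSList calls:

		pslist_free_null(&files);	/* Presumed counterpart of gm_slist_free_null() */
	}

	/*
	 * Read each query hit in turn.
	 */

	while (remain > 0 && NULL != bh->hits) {
		pmsg_t *mb = bh->hits->data;
		int r;

		r = pmsg_read(mb, p, remain);
		p += r;
		remain -= r;

		if (r == 0 || 0 == pmsg_size(mb)) {
			bh->hits = pslist_remove(bh->hits, mb);	/* Presumed pslist API */
			pmsg_free(mb);
		}
	}

	return size - remain;
}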
Code example #2
File: bh_upload.c  Project: MrJoe/gtk-gnutella
/**
 * Writes the browse host data of the context ``ctx'' to the buffer
 * ``dest''. This must be called multiple times to retrieve the complete
 * data, until zero is returned, i.e., until the end of file is reached.
 *
 * This routine deals with query hit data generation.
 *
 * @param ctx an initialized browse host context.
 * @param dest the destination buffer.
 * @param size the number of bytes ``dest'' can hold.
 *
 * @return -1 on failure, zero at the end-of-file condition or if size
 *         was zero. On success, the number of bytes copied to ``dest''
 *         is returned.
 */
static ssize_t
browse_host_read_qhits(struct special_upload *ctx,
	void *const dest, size_t size)
{
	struct browse_host_upload *bh = cast_to_browse_host_upload(ctx);
	size_t remain = size;
	char *p = dest;

	/*
	 * If we have no hit pending that we can send, build some more.
	 */

	if (NULL == bh->hits) {
		GSList *files = NULL;
		int i;

		for (i = 0; i < BH_SCAN_AHEAD; i++) {
			const shared_file_t *sf;

			do {
				/* Skip holes in indices */
				bh->file_index++;
				sf = shared_file_sorted(bh->file_index);
			} while (NULL == sf && bh->file_index <= shared_files_scanned());

			if (SHARE_REBUILDING == sf || NULL == sf)
				break;
			
			files = g_slist_prepend(files, deconstify_pointer(sf));
		}

		if (NULL == files)		/* Did not find any more file to include */
			return 0;			/* We're done */

		/*
		 * Now build the query hits containing the files we selected.
		 */

		files = g_slist_reverse(files);			/* Preserve order */

		qhit_build_results(files, i, BH_MAX_QHIT_SIZE,
			browse_host_record_hit, bh, &blank_guid, FALSE, &zero_array);

		g_assert(bh->hits != NULL);		/* At least 1 hit enqueued */

		bh->hits = g_slist_reverse(bh->hits);	/* Preserve order */
 		gm_slist_free_null(&files);
	}

	/*
	 * Read each query hit in turn.
	 */

	while (remain > 0 && NULL != bh->hits) {
		pmsg_t *mb = bh->hits->data;
		int r;

		r = pmsg_read(mb, p, remain);
		p += r;
		remain -= r;

		if (r == 0 || 0 == pmsg_size(mb)) {
			bh->hits = g_slist_remove(bh->hits, mb);
			pmsg_free(mb);
		}
	}

	return size - remain;
}
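
The doxygen comment spells out the calling convention: invoke the reader repeatedly until it returns zero (end of file) or -1 (failure). A minimal, hypothetical driver loop illustrating that contract could look like the sketch below; the function name, chunk size and sink callback are illustrative assumptions, not part of bh_upload.c:

/*
 * Hypothetical sketch: drain a special_upload reader into a sink callback.
 * Returns 0 once the reader signals end of file, -1 on failure.
 */
static int
drain_special_upload(struct special_upload *ctx,
	ssize_t (*read_cb)(struct special_upload *, void *, size_t),
	void (*sink)(const void *data, size_t len))
{
	char buf[4096];		/* Arbitrary chunk size, for illustration only */

	for (;;) {
		ssize_t r = (*read_cb)(ctx, buf, sizeof buf);

		if (-1 == r)
			return -1;	/* Reader reported a failure */
		if (0 == r)
			return 0;	/* End of file: all data retrieved */

		(*sink)(buf, (size_t) r);	/* Hand the chunk to the consumer */
	}
}

Passing browse_host_read_qhits (or browse_host_read_html below) as read_cb would pull the whole browse-host payload chunk by chunk.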
Code example #3
File: bh_upload.c  Project: MrJoe/gtk-gnutella
/**
 * Writes the browse host data of the context ``ctx'' to the buffer
 * ``dest''. This must be called multiple times to retrieve the complete
 * data, until zero is returned, i.e., until the end of file is reached.
 *
 * This routine deals with HTML data generation.
 *
 * @param ctx an initialized browse host context.
 * @param dest the destination buffer.
 * @param size the number of bytes ``dest'' can hold.
 *
 * @return -1 on failure, zero at the end-of-file condition or if size
 *         was zero. On success, the number of bytes copied to ``dest''
 *         is returned.
 */
static ssize_t
browse_host_read_html(struct special_upload *ctx,
	void *const dest, size_t size)
{
	static const char header[] =
		"<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.01//EN\">\r\n"
		"<html>\r\n"
		"<head>\r\n"
		"<title>Browse Host</title>\r\n"
		"</head>\r\n"
		"<body>\r\n";
	static const char trailer[] = "</ul>\r\n</body>\r\n</html>\r\n";
	struct browse_host_upload *bh = cast_to_browse_host_upload(ctx);
	char *p = dest;

	g_assert(NULL != bh);
	g_assert(NULL != dest);
	g_assert(size <= INT_MAX);

	g_assert(UNSIGNED(bh->state) < NUM_BH_STATES);
	g_assert(bh->b_size <= INT_MAX);
	g_assert(bh->b_offset <= bh->b_size);

	do {
		switch (bh->state) {
		case BH_STATE_HEADER:
			if (!bh->b_data) {
				bh->b_data = header;
				bh->b_size = CONST_STRLEN(header);
			}
			p += browse_host_read_data(bh, p, &size);
			if (bh->b_size == bh->b_offset)
				browse_host_next_state(bh, BH_STATE_LIBRARY_INFO);
			break;

		case BH_STATE_LIBRARY_INFO:
			if (!bh->b_data) {
				bh->w_buf_size = w_concat_strings(&bh->w_buf,
					"<h1>", product_get_name(), "</h1>\r\n"
					"<h3>", version_get_string(),
				   	" sharing ",
					uint64_to_string(shared_files_scanned()),
					" file",
					shared_files_scanned() == 1 ? "" : "s",
					" ",
					short_kb_size(shared_kbytes_scanned(),
						GNET_PROPERTY(display_metric_units)),
					" total</h3>\r\n"
					"<ul>\r\n", (void *) 0);
				bh->b_data = bh->w_buf;
				bh->b_size = bh->w_buf_size - 1; /* minus trailing NUL */
				bh->b_offset = 0;
			}
			p += browse_host_read_data(bh, p, &size);
			if (bh->b_size == bh->b_offset)
				browse_host_next_state(bh, BH_STATE_FILES);
			break;

		case BH_STATE_TRAILER:
			if (!bh->b_data) {
				bh->b_data = trailer;
				bh->b_size = CONST_STRLEN(trailer);
			}
			p += browse_host_read_data(bh, p, &size);
			if (bh->b_size == bh->b_offset)
				browse_host_next_state(bh, BH_STATE_EOF);
			break;

		case BH_STATE_FILES:
			if (bh->b_data && bh->b_size == bh->b_offset) {
				g_assert(bh->w_buf == bh->b_data);
				wfree(bh->w_buf, bh->w_buf_size);
				bh->w_buf = NULL;
				bh->w_buf_size = 0;
				bh->b_data = NULL;
			}

			if (!bh->b_data) {
				const shared_file_t *sf;

				bh->file_index++;
				sf = shared_file_sorted(bh->file_index);
				if (!sf) {
				   	if (bh->file_index > shared_files_scanned())
						browse_host_next_state(bh, BH_STATE_TRAILER);
					/* Skip holes in the file_index table */
				} else if (SHARE_REBUILDING == sf) {
					browse_host_next_state(bh, BH_STATE_REBUILDING);
				} else {
					const char * const name_nfc = shared_file_name_nfc(sf);
					const filesize_t file_size = shared_file_size(sf);
					size_t html_size;
					char *html_name;

					{
						const char *dir;
						char *name;
						
						dir = shared_file_relative_path(sf);
						if (dir) {
							name = h_strconcat(dir, "/", name_nfc, (void *) 0);
						} else {
							name = deconstify_char(name_nfc);
						}

						html_size = 1 + html_escape(name, NULL, 0);
						html_name = walloc(html_size);
						html_escape(name, html_name, html_size);
						if (name != name_nfc) {
							HFREE_NULL(name);
						}
					}

					if (sha1_hash_available(sf)) {
						const struct sha1 *sha1 = shared_file_sha1(sf);

						bh->w_buf_size = w_concat_strings(&bh->w_buf,
							"<li><a href=\"/uri-res/N2R?urn:sha1:",
							sha1_base32(sha1),
							"\">", html_name, "</a>&nbsp;[",
							short_html_size(file_size,
								GNET_PROPERTY(display_metric_units)),
							"]</li>\r\n",
							(void *) 0);
					} else {
						char *escaped;

						escaped = url_escape(name_nfc);
						bh->w_buf_size = w_concat_strings(&bh->w_buf,
							"<li><a href=\"/get/",
							uint32_to_string(shared_file_index(sf)),
							"/", escaped, "\">", html_name, "</a>"
							"&nbsp;[",
							short_html_size(file_size,
								GNET_PROPERTY(display_metric_units)),
							"]</li>\r\n", (void *) 0);

						if (escaped != name_nfc) {
							HFREE_NULL(escaped);
						}
					}

					wfree(html_name, html_size);
					bh->b_data = bh->w_buf;
					bh->b_size = bh->w_buf_size - 1; /* minus trailing NUL */
					bh->b_offset = 0;
				}
			}

			if (bh->b_data)
				p += browse_host_read_data(bh, p, &size);

			break;

		case BH_STATE_REBUILDING:
			if (!bh->b_data) {
				static const char msg[] =
					"<li>"
						"<b>"
							"The library is currently being rebuild. Please, "
							"try again in a moment."
						"</b>"
					"</li>";

				bh->b_data = msg;
				bh->b_size = CONST_STRLEN(msg);
			}
			p += browse_host_read_data(bh, p, &size);
			if (bh->b_size == bh->b_offset)
				browse_host_next_state(bh, BH_STATE_TRAILER);
			break;

		case BH_STATE_EOF:
			return p - cast_to_char_ptr(dest);

		case NUM_BH_STATES:
			g_assert_not_reached();
		}
	} while (size > 0);

	return p - cast_to_char_ptr(dest);
}
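
browse_host_read_data() is not shown in these excerpts. From the call sites above (p += browse_host_read_data(bh, p, &size); together with the b_offset/b_size bookkeeping), it apparently copies whatever part of the pending buffer fits into the destination, advances the offset and shrinks the remaining room. A speculative sketch of such a helper, not the actual gtk-gnutella implementation:

/*
 * Speculative sketch of the helper used above: copy what remains of the
 * pending buffer bh->b_data into dest, bounded by *size, then update the
 * bookkeeping. Requires <string.h> for memcpy().
 */
static size_t
browse_host_read_data_sketch(struct browse_host_upload *bh,
	char *dest, size_t *size)
{
	size_t avail = bh->b_size - bh->b_offset;	/* Bytes still pending */
	size_t n = avail < *size ? avail : *size;	/* Clamp to caller's room */

	memcpy(dest, (const char *) bh->b_data + bh->b_offset, n);
	bh->b_offset += n;		/* Consumed from the pending buffer */
	*size -= n;			/* Room left in the caller's buffer */

	return n;			/* Caller advances its write pointer by this */
}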