Example #1
/**
 * Discard `n_bytes' from the pmsg_t buffer slist and free all completely
 * discarded buffers.
 */
void
pmsg_slist_discard(slist_t *slist, size_t n_bytes)
{
	slist_iter_t *iter;

	g_assert(slist);

	iter = slist_iter_removable_on_head(slist);
	while (n_bytes > 0) {
		pmsg_t *mb;
		size_t size;

		g_assert(slist_iter_has_item(iter));
		mb = slist_iter_current(iter);
		pmsg_check_consistency(mb);

		size = pmsg_size(mb);
		if (size > n_bytes) {
			pmsg_discard(mb, n_bytes);
			break;
		} else {
			pmsg_free(mb);
			n_bytes -= size;
			slist_iter_remove(iter);
		}
	}
	slist_iter_free(&iter);
}
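A minimal caller-side sketch of how such a list might be built and trimmed from the front. It is not taken from the library: slist_new(), slist_free() and the data-copying behaviour of pmsg_new() with a non-NULL buffer are assumptions; only slist_append(), pmsg_slist_discard() and pmsg_slist_discard_all() appear in the examples of this listing.

/* Hypothetical usage sketch, not part of the library sources. */
static void
discard_example(void)
{
	slist_t *slist = slist_new();					/* Assumed constructor */
	pmsg_t *a = pmsg_new(PMSG_P_DATA, "hello", 5);	/* Assumed to copy the data */
	pmsg_t *b = pmsg_new(PMSG_P_DATA, "world", 5);

	slist_append(slist, a);
	slist_append(slist, b);

	/* Drop the first 7 bytes: frees `a' entirely, trims 2 bytes off `b' */
	pmsg_slist_discard(slist, 7);

	pmsg_slist_discard_all(slist);	/* Free what is left (see example #10) */
	slist_free(&slist);				/* Assumed destructor for the container */
}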
Example #2
/**
 * Convenience routine: format tree to memory buffer.
 *
 * @param root		tree to dump
 * @param buf		buffer where formatting is done
 * @param len		buffer length
 * @param options	formatting options
 *
 * @return length of generated string, (size_t) -1 on failure.
 */
size_t
xfmt_tree_to_buffer(const xnode_t *root, void *buf, size_t len, uint32 options)
{
    ostream_t *os;
    pdata_t *pd;
    pmsg_t *mb;
    bool ok;
    size_t written = (size_t) -1;

    g_assert(root != NULL);
    g_assert(buf != NULL);
    g_assert(size_is_non_negative(len));

    pd = pdata_allocb_ext(buf, len, pdata_free_nop, NULL);
    mb = pmsg_alloc(PMSG_P_DATA, pd, 0, 0);
    os = ostream_open_pmsg(mb);

    ok = xfmt_tree(root, os, options);
    ok = ostream_close(os) && ok;

    if (ok)
        written = pmsg_size(mb);

    pmsg_free(mb);

    g_assert((size_t) -1 == written || written <= len);

    return written;
}
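A short caller sketch for the routine above, assuming an XML tree `root' was built elsewhere (for instance with xnode_new_element() as in example #23); the only convention taken from the code is the (size_t) -1 failure value.

/* Hypothetical caller, formatting a tree into a stack buffer. */
static void
dump_tree_example(const xnode_t *root)
{
	char buf[4096];
	size_t len;

	len = xfmt_tree_to_buffer(root, buf, sizeof buf, XFMT_O_NO_INDENT);

	if ((size_t) -1 == len)
		g_warning("XML tree does not fit in %zu bytes", sizeof buf);
	else
		g_debug("formatted %zu byte%s of XML", len, plural(len));
}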
Example #3
/**
 * Attach meta information to supplied message block, returning a possibly
 * new message block to use.
 */
static pmsg_t *
mq_udp_attach_metadata(pmsg_t *mb, const gnet_host_t *to)
{
	pmsg_t *result;

	if (pmsg_is_extended(mb)) {
		struct mq_udp_info_extended *mi;

		WALLOC(mi);
		gnet_host_copy(&mi->to, to);
		result = mb;

		/*
		 * Replace original free routine with the new one, saving the original
		 * metadata and its free routine in the new metadata for later
		 * transparent dispatching at free time.
		 */

		mi->orig_free = pmsg_replace_ext(mb,
			mq_udp_pmsg_free_extended, mi, &mi->orig_arg);

	} else {
		struct mq_udp_info *mi;

		WALLOC(mi);
		gnet_host_copy(&mi->to, to);
		result = pmsg_clone_extend(mb, mq_udp_pmsg_free, mi);
		pmsg_free(mb);
	}

	g_assert(pmsg_is_extended(result));

	return result;
}
Example #4
/**
 * Read data from the pmsg list into supplied buffer.  Copied data is
 * removed from the list.
 *
 * @param slist		the pmsg list
 * @param buf		start of buffer where data must be copied
 * @param len		length of buffer
 *
 * @return amount of copied bytes.
 */
size_t
pmsg_slist_read(slist_t *slist, void *buf, size_t len)
{
	slist_iter_t *iter;
	size_t remain = len;
	void *p;

	g_assert(slist != NULL);

	iter = slist_iter_removable_on_head(slist);
	p = buf;

	while (remain != 0 && slist_iter_has_item(iter)) {
		pmsg_t *mb = slist_iter_current(iter);
		int n;

		n = pmsg_read(mb, p, remain);
		remain -= n;
		p = ptr_add_offset(p, n);
		if (0 == pmsg_size(mb)) {			/* Fully copied message */
			pmsg_free(mb);
			slist_iter_remove(iter);		/* Warning: moves to next */
		} else {
			break;		/* No need to continue on partial copy */
		}
	}
	slist_iter_free(&iter);

	return len - remain;
}
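A hedged sketch of the consuming side: reading fixed-size chunks from a pmsg list until it is drained. The list is assumed to have been filled elsewhere; only pmsg_slist_read() from the example above is used.

/* Hypothetical consumer draining a pmsg list chunk by chunk. */
static void
drain_list_example(slist_t *slist)
{
	char chunk[512];
	size_t got;

	while ((got = pmsg_slist_read(slist, chunk, sizeof chunk)) > 0) {
		/* Process the `got' bytes copied into chunk[] here */
	}
}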
Example #5
/**
 * Send a message to target node.
 *
 * @param n		the G2 node to which message should be sent
 * @param mb	the message to send (ownership taken, will be freed later)
 */
void
g2_node_send(const gnutella_node_t *n, pmsg_t *mb)
{
	node_check(n);
	g_assert(NODE_TALKS_G2(n));

	if (NODE_IS_UDP(n))
		mq_udp_node_putq(n->outq, mb, n);
	else if (NODE_IS_WRITABLE(n))
		mq_tcp_putq(n->outq, mb, NULL);
	else
		goto drop;

	if (GNET_PROPERTY(log_sending_g2)) {
		g_debug("%s(): sending %s to %s",
			G_STRFUNC, g2_msg_infostr_mb(mb), node_infostr(n));
	}

	return;

drop:
	if (GNET_PROPERTY(log_sending_g2)) {
		g_debug("%s(): aborting sending %s to %s",
			G_STRFUNC, g2_msg_infostr_mb(mb), node_infostr(n));
	}

	pmsg_free(mb);		/* Cannot send it, free it now */
}
Example #6
/**
 * Closes the browse host context and releases its memory.
 */
static void
browse_host_close(struct special_upload *ctx, bool fully_served)
{
	struct browse_host_upload *bh = cast_to_browse_host_upload(ctx);
	GSList *sl;

	g_assert(bh);

	for (sl = bh->hits; sl; sl = g_slist_next(sl)) {
		pmsg_t *mb = sl->data;
		pmsg_free(mb);
	}
	gm_slist_free_null(&bh->hits);

	if (bh->w_buf) {
		wfree(bh->w_buf, bh->w_buf_size);
		bh->w_buf = NULL;
	}
	tx_free(bh->tx);

	/*
	 * Update statistics if fully served.
	 */

	if (fully_served) {
		if (bh->flags & BH_F_HTML) {
			gnet_prop_incr_guint32(PROP_HTML_BROWSE_SERVED);
		} else if (bh->flags & BH_F_QHITS) {
			gnet_prop_incr_guint32(PROP_QHITS_BROWSE_SERVED);
		}
	}

	wfree(bh, sizeof *bh);
}
Example #7
/**
 * RX data indication callback used to give us some new Gnet traffic in a
 * low-level message structure (which can contain several Gnet messages).
 *
 * @return FALSE if an error occurred.
 */
static bool
thex_download_data_ind(rxdrv_t *rx, pmsg_t *mb)
{
	struct thex_download *ctx = rx_owner(rx);
	struct download *d;
	bool error;

	d = ctx->owner;
	download_check(d);

	/*
	 * When we receive THEX data with an advertised size, the remote
 * end will simply stop emitting data when we're done and could keep
 * the HTTP connection alive.  Therefore, since we don't intend to
 * issue any more requests on that connection, we must check for completion.
	 *
	 * When chunked data is received (unknown size), the last chunk will
	 * trigger completion via an RX-callback invoked from the dechunking
	 * layer, but in that case it is harmless to make the call anyway.
	 */

	error = thex_download_data_read(ctx, mb);
	if (!error) {
		download_maybe_finished(d);
		download_check(d);
	}

	pmsg_free(mb);
	return !error && DOWNLOAD_IS_RUNNING(d);
}
Example #8
/**
 * Got data from lower layer.
 */
static bool
rx_chunk_recv(rxdrv_t *rx, pmsg_t *mb)
{
	struct attr *attr = rx->opaque;
	bool error = FALSE;
	pmsg_t *imb;		/* Dechunked message */

	rx_check(rx);
	g_assert(mb);

	/*
	 * Dechunk the stream, forwarding dechunked data to the upper layer.
	 * At any time, a packet we forward can cause the reception to be
	 * disabled, in which case we must stop.
	 */

	while ((attr->flags & IF_ENABLED) && (imb = dechunk_data(rx, mb))) {
		error = !(*rx->data.ind)(rx, imb);
		if (error)
			break;
	}

	pmsg_free(mb);

	/*
	 * When we encountered the end of the stream, let them know.
	 */

	if ((attr->flags & IF_ENABLED) && attr->state == CHUNK_STATE_END)
		attr->cb->chunk_end(rx->owner);

	return !error;
}
Example #9
/**
 * Dispose of the search queue entry and of all its contained data.
 * Used only when the query described in `sb' is not dispatched.
 */
static void
smsg_discard(smsg_t *sb)
{
	pmsg_free(sb->mb);
	if (sb->qhv)
		qhvec_free(sb->qhv);

	smsg_free(sb);
}
Example #10
/**
 * Free all items from the pmsg list, keeping the list container.
 */
void
pmsg_slist_discard_all(slist_t *slist)
{
	pmsg_t *mb;

	while ((mb = slist_shift(slist)) != NULL) {
		pmsg_free(mb);
	}
}
Example #11
/**
 * Free message block referenced in the variable and nullify it.
 */
void
pmsg_free_null(pmsg_t **mb_ptr)
{
	pmsg_t *mb = *mb_ptr;

	if (mb) {
		pmsg_free(mb);
		*mb_ptr = NULL;
	}
}
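A trivial sketch of the free-and-nullify idiom, assuming the message was allocated with pmsg_new() as in example #23.

/* Hypothetical usage of pmsg_free_null(). */
static void
free_null_example(void)
{
	pmsg_t *mb = pmsg_new(PMSG_P_DATA, NULL, 512);

	/* ... fill and use mb ... */

	pmsg_free_null(&mb);	/* Frees the block and sets mb to NULL */
	pmsg_free_null(&mb);	/* Second call is a harmless no-op on NULL */
}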
Example #12
/**
 * Send a message to the DHT node through UDP.
 *
 * It is up to the caller to clone the message if needed, otherwise the
 * node's queue becomes the sole owner of the message and will pmsg_free() it.
 */
void
udp_dht_send_mb(const gnutella_node_t *n, pmsg_t *mb)
{
	if (NULL == n || NULL == n->outq) {
		pmsg_free(mb);
		/* emit warnings */
		g_return_if_fail(n);
		g_return_if_fail(n->outq);
		g_assert_not_reached();
	}
	g_assert(NODE_IS_DHT(n));
	mq_udp_node_putq(n->outq, mb, n);
}
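Because the queue becomes the sole owner, a caller that wants to keep the message must hand over a copy. A minimal sketch, assuming pmsg_clone() is the plain counterpart of the pmsg_clone_extend() call shown in other examples; that name does not appear in this listing.

/* Hypothetical caller keeping its own reference to the message. */
static void
send_keeping_copy(const gnutella_node_t *n, pmsg_t *mb)
{
	udp_dht_send_mb(n, pmsg_clone(mb));	/* Assumed: pmsg_clone() duplicates `mb' */

	/* `mb' is still owned by the caller and must be freed by it later */
}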
Example #13
/**
 * Destroy the DBM wrapper, optionally closing the underlying DB map.
 */
void
dbmw_destroy(dbmw_t *dw, bool close_map)
{
	dbmw_check(dw);

	if (common_stats) {
		s_debug("DBMW destroying \"%s\" with %s back-end "
			"(read cache hits = %.2f%% on %s request%s, "
			"write cache hits = %.2f%% on %s request%s)",
			dw->name, dbmw_map_type(dw) == DBMAP_SDBM ? "sdbm" : "map",
			dw->r_hits * 100.0 / MAX(1, dw->r_access),
			uint64_to_string(dw->r_access), plural(dw->r_access),
			dw->w_hits * 100.0 / MAX(1, dw->w_access),
			uint64_to_string2(dw->w_access), plural(dw->w_access));
	}

	if (dbg_ds_debugging(dw->dbg, 1, DBG_DSF_DESTROY)) {
		dbg_ds_log(dw->dbg, dw, "%s: with %s back-end "
			"(read cache hits = %.2f%% on %s request%s, "
			"write cache hits = %.2f%% on %s request%s)",
			G_STRFUNC, dbmw_map_type(dw) == DBMAP_SDBM ? "sdbm" : "map",
			dw->r_hits * 100.0 / MAX(1, dw->r_access),
			uint64_to_string(dw->r_access), plural(dw->r_access),
			dw->w_hits * 100.0 / MAX(1, dw->w_access),
			uint64_to_string2(dw->w_access), plural(dw->w_access));
	}

	/*
	 * If we close the map and we're volatile, there's no need to flush
	 * the cache as the data is going to be gone soon anyway.
	 */

	if (!close_map || !dw->is_volatile) {
		dbmw_sync(dw, DBMW_SYNC_CACHE);
	}

	dbmw_clear_cache(dw);
	hash_list_free(&dw->keys);
	map_destroy(dw->values);

	if (dw->mb)
		pmsg_free(dw->mb);
	bstr_free(&dw->bs);

	if (close_map)
		dbmap_destroy(dw->dm);

	WFREE_TYPE_NULL(dw->dbmap_dbg);
	dw->magic = 0;
	WFREE(dw);
}
Example #14
/**
 * Mutate the message so that we can be notified about its freeing by
 * the mq to which it will be sent.
 */
static void
smsg_mutate(smsg_t *sb, struct gnutella_node *n)
{
	struct smsg_info *smi;
	pmsg_t *omb;

	WALLOC(smi);
	smi->shandle = sb->shandle;
	smi->node_id = nid_ref(NODE_ID(n));

	omb = sb->mb;
	sb->mb = pmsg_clone_extend(omb, sq_pmsg_free, smi);
	pmsg_free(omb);
}
Example #15
/**
 * Send a message to specified UDP node.
 *
 * It is up to the caller to clone the message if needed, otherwise the
 * node's queue becomes the sole owner of the message and will pmsg_free() it.
 */
void
udp_send_mb(const gnutella_node_t *n, pmsg_t *mb)
{
	if (NULL == n || NULL == n->outq) {
		pmsg_free(mb);
		/* emit warnings */
		g_return_if_fail(n);
		g_return_if_fail(n->outq);
		g_assert_not_reached();
	}
	g_assert(NODE_IS_UDP(n));
	if (NODE_CAN_SR_UDP(n))
		pmsg_mark_reliable(mb);		/* Send reliably if node supports it */
	mq_udp_node_putq(n->outq, mb, n);
}
Example #16
/**
 * Destroy the DBM wrapper, optionally closing the underlying DB map.
 */
void
dbmw_destroy(dbmw_t *dw, gboolean close_map)
{
	dbmw_check(dw);

	if (common_stats)
		g_debug("DBMW destroying \"%s\" with %s back-end "
			"(read cache hits = %.2f%% on %s request%s, "
			"write cache hits = %.2f%% on %s request%s)",
			dw->name, dbmw_map_type(dw) == DBMAP_SDBM ? "sdbm" : "map",
			dw->r_hits * 100.0 / MAX(1, dw->r_access),
			uint64_to_string(dw->r_access), 1 == dw->r_access ? "" : "s",
			dw->w_hits * 100.0 / MAX(1, dw->w_access),
			uint64_to_string2(dw->w_access), 1 == dw->w_access ? "" : "s");

	/*
	 * If we close the map and we're volatile, there's no need to flush
	 * the cache as the data is going to be gone soon anyway.
	 */

	if (!close_map || !dw->is_volatile) {
		dbmw_sync(dw, DBMW_SYNC_CACHE);
	}

	dbmw_clear_cache(dw);
	hash_list_free(&dw->keys);
	map_destroy(dw->values);

	if (dw->mb)
		pmsg_free(dw->mb);
	bstr_free(&dw->bs);

	if (close_map)
		dbmap_destroy(dw->dm);

	dw->magic = 0;
	WFREE(dw);
}
Example #17
/**
 * Enqueue query message in specified queue.
 */
static void
sq_puthere(squeue_t *sq, gnet_search_t sh, pmsg_t *mb, query_hashvec_t *qhv)
{
	smsg_t *sb;

	g_assert(sq);
	g_assert(mb);

	if (sqh_exists(sq, sh)) {
		pmsg_free(mb);
		if (qhv)
			qhvec_free(qhv);
		return;						/* Search already in queue */
	}

	sb = smsg_alloc(sh, mb, qhv);

	sqh_put(sq, sh);
	sq->searches = g_list_prepend(sq->searches, sb);
	sq->count++;

	if (sq->count > GNET_PROPERTY(search_queue_size))
		cap_queue(sq);
}
Example #18
/**
 * RX data indication callback used to give us some new Gnet traffic in a
 * low-level message structure (which can contain several Gnet messages).
 *
 * @return FALSE if an error occurred.
 */
static gboolean
browse_data_ind(rxdrv_t *rx, pmsg_t *mb)
{
	struct browse_ctx *bc = rx_owner(rx);
	struct download *d;
	gboolean error = FALSE;

	while (browse_data_read(bc, mb)) {
		if (!browse_data_process(bc)) {
			error = TRUE;
			break;
		}
	}

	/*
	 * When we receive browse-host data with an advertised size, the remote
 * end will simply stop emitting data when we're done and could keep
 * the HTTP connection alive.  Therefore, since we don't intend to
 * issue any more requests on that connection, we must check for completion.
	 *
	 * When chunked data is received (unknown size), the last chunk will
	 * trigger completion via an RX-callback invoked from the dechunking
	 * layer, but in that case it is harmless to make the call anyway.
	 */

	d = bc->owner;
	download_check(d);

	if (!error) {
		download_maybe_finished(d);
		download_check(d);
	}

	pmsg_free(mb);
	return !error && DOWNLOAD_IS_RUNNING(d);
}
Example #19
/**
 * Writes the browse host data of the context ``ctx'' to the buffer
 * ``dest''. This must be called multiple times to retrieve the complete
 * data until zero is returned i.e., the end of file is reached.
 *
 * This routine deals with query hit data generation.
 *
 * @param ctx an initialized browse host context.
 * @param dest the destination buffer.
 * @param size the amount of bytes ``dest'' can hold.
 *
 * @return -1 on failure, zero at the end-of-file condition or if size
 *         was zero. On success, the amount of bytes copied to ``dest''
 *         is returned.
 */
static ssize_t
browse_host_read_qhits(struct special_upload *ctx,
	void *const dest, size_t size)
{
	struct browse_host_upload *bh = cast_to_browse_host_upload(ctx);
	size_t remain = size;
	char *p = dest;

	/*
	 * If we have no hit pending that we can send, build some more.
	 */

	if (NULL == bh->hits) {
		GSList *files = NULL;
		int i;

		for (i = 0; i < BH_SCAN_AHEAD; i++) {
			const shared_file_t *sf;

			do {
				/* Skip holes in indices */
				bh->file_index++;
				sf = shared_file_sorted(bh->file_index);
			} while (NULL == sf && bh->file_index <= shared_files_scanned());

			if (SHARE_REBUILDING == sf || NULL == sf)
				break;
			
			files = g_slist_prepend(files, deconstify_pointer(sf));
		}

		if (NULL == files)		/* Did not find any more files to include */
			return 0;			/* We're done */

		/*
		 * Now build the query hits containing the files we selected.
		 */

		files = g_slist_reverse(files);			/* Preserve order */

		qhit_build_results(files, i, BH_MAX_QHIT_SIZE,
			browse_host_record_hit, bh, &blank_guid, FALSE, &zero_array);

		g_assert(bh->hits != NULL);		/* At least 1 hit enqueued */

		bh->hits = g_slist_reverse(bh->hits);	/* Preserve order */
		gm_slist_free_null(&files);
	}

	/*
	 * Read each query hit in turn.
	 */

	while (remain > 0 && NULL != bh->hits) {
		pmsg_t *mb = bh->hits->data;
		int r;

		r = pmsg_read(mb, p, remain);
		p += r;
		remain -= r;

		if (r == 0 || 0 == pmsg_size(mb)) {
			bh->hits = g_slist_remove(bh->hits, mb);
			pmsg_free(mb);
		}
	}

	return size - remain;
}
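The comment above says the routine must be called repeatedly until zero is returned; a hedged sketch of such a driving loop, assuming a `struct special_upload *ctx' obtained from the browse-host setup code.

/* Hypothetical read loop pulling all the query hit data. */
static void
read_all_qhits(struct special_upload *ctx)
{
	char buf[4096];
	ssize_t r;

	while ((r = browse_host_read_qhits(ctx, buf, sizeof buf)) > 0) {
		/* Hand the `r' bytes in buf[] to the outgoing TX stack here */
	}

	if (r < 0)
		g_warning("browse host query hit generation failed");
}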
Example #20
/**
 * Enqueue message, which becomes owned by the queue.
 *
 * The data held in `to' is copied, so the structure can be reclaimed
 * immediately by the caller.
 */
void
mq_udp_putq(mqueue_t *q, pmsg_t *mb, const gnet_host_t *to)
{
	size_t size;
	char *mbs;
	uint8 function;
	pmsg_t *mbe = NULL;		/* Extended message with destination info */
	bool error = FALSE;

	mq_check_consistency(q);

	dump_tx_udp_packet(to, mb);

again:
	mq_check_consistency(q);
	g_assert(mb);
	g_assert(!pmsg_was_sent(mb));
	g_assert(pmsg_is_unread(mb));
	g_assert(q->ops == &mq_udp_ops);	/* Is an UDP queue */

	/*
	 * Trap messages enqueued whilst in the middle of an mq_clear() operation
	 * by marking them as sent and dropping them.  Idem if queue was
	 * put in "discard" mode.
	 */

	if (q->flags & (MQ_CLEAR | MQ_DISCARD)) {
		pmsg_mark_sent(mb);	/* Let them think it was sent */
		pmsg_free(mb);		/* Drop message */
		return;
	}

	mq_check(q, 0);

	size = pmsg_size(mb);

	if (size == 0) {
		g_carp("%s: called with empty message", G_STRFUNC);
		goto cleanup;
	}

	/*
	 * Protect against recursion: we must not invoke puthere() whilst in
	 * the middle of another putq() or we would corrupt the qlink array:
	 * Messages received during recursion are inserted into the qwait list
	 * and will be stuffed back into the queue when the initial putq() ends.
	 *		--RAM, 2006-12-29
	 */

	if (q->putq_entered > 0) {
		pmsg_t *extended;

		if (debugging(20))
			g_warning("%s: %s recursion detected (%u already pending)",
				G_STRFUNC, mq_info(q), slist_length(q->qwait));

		/*
		 * We insert extended messages into the waiting queue since we need
		 * the destination information as well.
		 */

		extended = mq_udp_attach_metadata(mb, to);
		slist_append(q->qwait, extended);
		return;
	}
	q->putq_entered++;

	mbs = pmsg_start(mb);
	function = gmsg_function(mbs);

	gnet_stats_count_queued(q->node, function, mbs, size);

	/*
	 * If the queue is empty, attempt a write immediately.
	 */

	if (q->qhead == NULL) {
		ssize_t written;

		if (pmsg_check(mb, q)) {
			written = tx_sendto(q->tx_drv, mb, to);
		} else {
			gnet_stats_count_flowc(mbs, FALSE);
			node_inc_txdrop(q->node);		/* Dropped during TX */
			written = (ssize_t) -1;
		}

		if ((ssize_t) -1 == written)
			goto cleanup;

		node_add_tx_given(q->node, written);

		if ((size_t) written == size) {
			if (GNET_PROPERTY(mq_udp_debug) > 5)
				g_debug("MQ UDP sent %s",
					gmsg_infostr_full(pmsg_start(mb), pmsg_written_size(mb)));

			goto cleanup;
		}

		/*
		 * Since UDP respects write boundaries, the following can never
		 * happen in practice: either we write the whole datagram, or none
		 * of it.
		 */

		if (written > 0) {
			g_warning(
				"partial UDP write (%zu bytes) to %s for %zu-byte datagram",
				written, gnet_host_to_string(to), size);
			goto cleanup;
		}

		/* FALL THROUGH */
	}

	if (GNET_PROPERTY(mq_udp_debug) > 5)
		g_debug("MQ UDP queued %s",
			gmsg_infostr_full(pmsg_start(mb), pmsg_written_size(mb)));

	/*
	 * Attach the destination information as metadata to the message, unless
	 * it is already known (possible only during unfolding of the queued data
	 * during re-entrant calls).
	 *
	 * This is later extracted via pmsg_get_metadata() on the extended
	 * message by the message queue to get the destination information.
	 *
	 * Then enqueue the extended message.
	 */

	if (NULL == mbe)
		mbe = mq_udp_attach_metadata(mb, to);

	q->cops->puthere(q, mbe, size);
	mb = NULL;

	/* FALL THROUGH */

cleanup:

	if (mb) {
		pmsg_free(mb);
		mb = NULL;
	}

	/*
	 * When reaching that point with a zero putq_entered counter, it means
	 * we triggered an early error condition.  Bail out.
	 */

	g_assert(q->putq_entered >= 0);

	if (q->putq_entered == 0)
		error = TRUE;
	else
		q->putq_entered--;

	mq_check(q, 0);

	/*
	 * If we're exiting here with no other putq() registered, then we must
	 * pop an item off the head of the list and iterate again.
	 */

	if (0 == q->putq_entered && !error) {
		mbe = slist_shift(q->qwait);
		if (mbe) {
			struct mq_udp_info *mi = pmsg_get_metadata(mbe);

			mb = mbe;		/* An extended message "is-a" message */
			to = &mi->to;

			if (debugging(20))
				g_warning(
					"%s: %s flushing waiting to %s (%u still pending)",
					G_STRFUNC, mq_info(q), gnet_host_to_string(to),
					slist_length(q->qwait));

			goto again;
		}
	}

	return;
}
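A short sketch of the enqueuing side, reusing gnet_host_set() from example #22 to build the destination; the queue, message, address and port are assumed to come from the surrounding node management code, and host_addr_t / uint16 are the codebase's own types.

/* Hypothetical enqueue of a datagram for a known UDP destination. */
static void
enqueue_udp_example(mqueue_t *q, pmsg_t *mb, host_addr_t addr, uint16 port)
{
	gnet_host_t to;

	gnet_host_set(&to, addr, port);		/* Copied by mq_udp_putq() */
	mq_udp_putq(q, mb, &to);			/* The queue now owns `mb' */
}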
Example #21
/**
 * Decides if the queue can send a message. Currently uses a simple fixed
 * time-base heuristic. May add burst control later...
 */
void
sq_process(squeue_t *sq, time_t now)
{
    time_delta_t spacing = GNET_PROPERTY(search_queue_spacing);
	GList *item;
	smsg_t *sb;
	struct gnutella_node *n;
	bool sent;

	g_assert(sq->node == NULL || sq->node->outq != NULL);

retry:
	/*
	 * We don't need to do anything if either:
	 *
	 * 1. The queue is empty.
	 * 2. We sent our last search less than "search_queue_spacing" seconds ago.
	 * 3. We never got a packet from that node.
	 * 4. The node activated hops-flow to shut all queries
	 * 5. We activated flow-control on the node locally.
	 *
	 *		--RAM, 01/05/2002
	 */

	if (sq->count == 0)
		return;

    if (delta_time(now, sq->last_sent) < spacing)
		return;

	n = sq->node;					/* Will be NULL for the global SQ */

	if (n != NULL) {
		if (n->received == 0)		/* RX = 0, wait for handshaking ping */
			return;

		if (!node_query_hops_ok(n, 0))		/* Cannot send hops=0 query */
			return;

		if (!NODE_IS_WRITABLE(n))
			return;

		if (NODE_IN_TX_FLOW_CONTROL(n))		/* Don't add to the mqueue yet */
			return;
	} else {
		/*
		 * Processing the global SQ.
		 */

		if (settings_is_leaf())
			return;

		if (3*UNSIGNED(node_keep_missing()) > 2*GNET_PROPERTY(up_connections))
			return;		/* Not enough nodes for querying */
	}

	/*
	 * Queue is managed as a LIFO: we extract the first message, i.e. the last
	 * one enqueued, and pass it along to the node's message queue.
	 */

	g_assert(sq->searches);

	item = g_list_first(sq->searches);
	sb = item->data;

	g_assert(sq->count > 0);
	sq->count--;
	sent = TRUE;			/* Assume we're going to send/initiate it */

	if (n == NULL) {
		g_assert(sb->qhv != NULL);		/* Enqueued via sq_global_putq() */

		if (GNET_PROPERTY(sq_debug) > 2)
			g_debug("sq GLOBAL, queuing \"%s\" (%u left, %d sent)",
				gnutella_msg_search_get_text(pmsg_start(sb->mb)),
				sq->count, sq->n_sent);

		dq_launch_local(sb->shandle, sb->mb, sb->qhv);

	} else if (search_query_allowed(sb->shandle)) {
		/*
		 * Must log before sending, in case the queue discards the message
		 * buffer immediately.
		 */

		g_assert(sb->qhv == NULL);		/* Enqueued via sq_putq() */

		if (GNET_PROPERTY(sq_debug) > 2)
			g_debug("sq for node %s, queuing \"%s\" (%u left, %d sent)",
				node_addr(n), gnutella_msg_search_get_text(pmsg_start(sb->mb)),
				sq->count, sq->n_sent);

		/*
		 * If we're a leaf node, we're doing a leaf-guided dynamic query.
		 * In order to be able to report hits we get to the UPs to whom
		 * we sent our searches, we need to be notified of all the physical
		 * queries that go out.
		 */

		if (settings_is_leaf())
			smsg_mutate(sb, n);

		mq_tcp_putq(n->outq, sb->mb, NULL);

	} else {
		if (GNET_PROPERTY(sq_debug) > 4)
			g_debug("sq for node %s, ignored \"%s\" (%u left, %d sent)",
				node_addr(n), gnutella_msg_search_get_text(pmsg_start(sb->mb)),
				sq->count, sq->n_sent);
		pmsg_free(sb->mb);
		if (sb->qhv)
			qhvec_free(sb->qhv);
		sent = FALSE;
	}

	if (sent) {
		sq->n_sent++;
		sq->last_sent = now;
	}

	sqh_remove(sq, sb->shandle);
	smsg_free(sb);
	sq->searches = g_list_remove_link(sq->searches, item);
	g_list_free_1(item);

	/*
	 * If we ignored the query, retry with the next in the queue.
	 * We don't use a do/while() loop to avoid indenting the whole body.
	 */

	if (!sent)
		goto retry;
}
Example #22
/**
 * Route query hits from one node to the other.
 */
void
dh_route(gnutella_node_t *src, gnutella_node_t *dest, int count)
{
	pmsg_t *mb;
	struct dh_pmsg_info *pmi;
	const struct guid *muid;
	dqhit_t *dh;
	mqueue_t *mq;

	g_assert(
		gnutella_header_get_function(&src->header) == GTA_MSG_SEARCH_RESULTS);
	g_assert(count >= 0);

	if (!NODE_IS_WRITABLE(dest))
		goto drop_shutdown;

	muid = gnutella_header_get_muid(&src->header);
	dh = dh_locate(muid);

	g_assert(dh != NULL);		/* Must have called dh_got_results() first! */

	if (GNET_PROPERTY(dh_debug) > 19) {
		g_debug("DH #%s got %d hit%s: "
			"msg=%u, hits_recv=%u, hits_sent=%u, hits_queued=%u",
			guid_hex_str(muid), count, plural(count),
			dh->msg_recv, dh->hits_recv, dh->hits_sent,
			dh->hits_queued);
	}

	mq = dest->outq;

	/*
	 * Can we forward the message?
	 */

	switch (dh_can_forward(dh, mq, FALSE)) {
	case DH_DROP_FC:
		goto drop_flow_control;
	case DH_DROP_THROTTLE:
		goto drop_throttle;
	case DH_DROP_TRANSIENT:
		goto drop_transient;
	case DH_FORWARD:
	default:
		break;
	}

	/*
	 * Allow message through.
	 */

	WALLOC(pmi);
	pmi->hits = count;

	dh->hits_queued += count;
	dh->msg_queued++;

	g_assert(dh->hits_queued >= UNSIGNED(count));

	/*
	 * Magic: we create an extended version of a pmsg_t that contains a
	 * free routine, which will be invoked when the message queue frees
	 * the message.
	 *
	 * This enables us to track how many results we have already queued/sent.
	 */

	if (NODE_IS_UDP(dest)) {
		gnet_host_t to;
		pmsg_t *mbe;

		gnet_host_set(&to, dest->addr, dest->port);

		/*
		 * With GUESS we may route back a query hit to an UDP node.
		 */

		if (GNET_PROPERTY(guess_server_debug) > 19) {
			g_debug("GUESS sending %d hit%s (%s) for #%s to %s",
				count, plural(count),
				NODE_CAN_SR_UDP(dest) ? "reliably" :
				NODE_CAN_INFLATE(dest) ? "possibly deflated" : "uncompressed",
				guid_hex_str(muid), node_infostr(dest));
		}

		/*
		 * Attempt to compress query hit if the destination supports it.
		 *
		 * If we're going to send the hit using semi-reliable UDP, there's
		 * no need to compress beforehand, since the transport layer will
		 * attempt its own compression anyway.
		 */

		if (!NODE_CAN_SR_UDP(dest) && NODE_CAN_INFLATE(dest)) {
			mb = gmsg_split_to_deflated_pmsg(&src->header, src->data,
					src->size + GTA_HEADER_SIZE);

			if (gnutella_header_get_ttl(pmsg_start(mb)) & GTA_UDP_DEFLATED)
				gnet_stats_inc_general(GNR_UDP_TX_COMPRESSED);
		} else {
			mb = gmsg_split_to_pmsg(&src->header, src->data,
					src->size + GTA_HEADER_SIZE);
		}

		mbe = pmsg_clone_extend(mb, dh_pmsg_free, pmi);
		pmsg_free(mb);

		if (NODE_CAN_SR_UDP(dest))
			pmsg_mark_reliable(mbe);

		mq_udp_putq(mq, mbe, &to);
	} else {
		mb = gmsg_split_to_pmsg_extend(&src->header, src->data,
				src->size + GTA_HEADER_SIZE, dh_pmsg_free, pmi);
		mq_tcp_putq(mq, mb, src);

		if (GNET_PROPERTY(dh_debug) > 19) {
			g_debug("DH enqueued %d hit%s for #%s to %s",
				count, plural(count), guid_hex_str(muid),
				node_infostr(dest));
		}
	}

	return;

drop_shutdown:
	gnet_stats_count_dropped(src, MSG_DROP_SHUTDOWN);
	return;

drop_flow_control:
	gnet_stats_count_dropped(src, MSG_DROP_FLOW_CONTROL);
	gnet_stats_count_flowc(&src->header, TRUE);
	return;

drop_throttle:
	gnet_stats_count_dropped(src, MSG_DROP_THROTTLE);
	return;

drop_transient:
	gnet_stats_count_dropped(src, MSG_DROP_TRANSIENT);
	return;
}
Example #23
/**
 * Initiate a SOAP remote procedure call.
 *
 * Call will be launched asynchronously, not immediately upon return so that
 * callbacks are never called on the same stack frame and to allow further
 * options to be set on the handle before the call begins.
 *
 * Initially the request is sent as a regular POST.  It is possible to force
 * the usage of the HTTP Extension Framework by using the SOAP_RPC_O_MAN_FORCE
 * option, in which case an M-POST will be sent with the proper SOAP Man:
 * header.  Finally, automatic retry of the request can be requested via the
 * SOAP_RPC_O_MAN_RETRY option: it will start with POST and switch to M-POST
 * on 405 or 510 errors.
 *
 * @param url		the HTTP URL to contact for the RPC
 * @param action	the SOAP action to perform
 * @param maxlen	maximum length of data we accept to receive
 * @param options	user-supplied options
 * @param xn		SOAP RPC data payload (XML tree root, will be freed)
 * @param soap_ns	requested SOAP namespace prefix, NULL to use default
 * @param reply_cb	callback to invoke when we get a reply
 * @param error_cb	callback to invoke on error
 * @param arg		additional user-defined callback parameter
 *
 * @return a SOAP RPC handle, NULL if the request cannot be initiated (XML
 * payload too large).  In any case, the XML tree is freed.
 */
soap_rpc_t *
soap_rpc(const char *url, const char *action, size_t maxlen, guint32 options,
	xnode_t *xn, const char *soap_ns,
	soap_reply_cb_t reply_cb, soap_error_cb_t error_cb, void *arg)
{
	soap_rpc_t *sr;
	xnode_t *root, *body;
	pmsg_t *mb;
	ostream_t *os;
	gboolean failed = FALSE;

	g_assert(url != NULL);
	g_assert(action != NULL);

	/*
	 * Create the SOAP XML request.
	 */

	root = xnode_new_element(NULL, SOAP_NAMESPACE, SOAP_X_ENVELOPE);
	xnode_add_namespace(root, soap_ns ? soap_ns : "SOAP", SOAP_NAMESPACE);
	xnode_prop_ns_set(root, SOAP_NAMESPACE, SOAP_X_ENC_STYLE, SOAP_ENCODING);

	body = xnode_new_element(root, SOAP_NAMESPACE, SOAP_X_BODY);
	xnode_add_child(body, xn);

	/*
	 * Serialize the XML tree to a PDU message buffer.
	 */

	mb = pmsg_new(PMSG_P_DATA, NULL, SOAP_MAX_PAYLOAD);
	os = ostream_open_pmsg(mb);
	xfmt_tree(root, os, XFMT_O_NO_INDENT);

	if (!ostream_close(os)) {
		failed = TRUE;
		g_warning("SOAP unable to serialize payload within %d bytes",
			SOAP_MAX_PAYLOAD);
		if (GNET_PROPERTY(soap_debug) > 1)
			xfmt_tree_dump(root, stderr);
	}

	/*
	 * Free the XML tree, including the supplied user nodes.
	 */

	xnode_tree_free(root);

	if (failed) {
		pmsg_free(mb);
		return NULL;
	}

	/*
	 * Serialization of the XML payload was successful, prepare the
	 * asynchronous SOAP request.
	 */

	sr = soap_rpc_alloc();
	sr->url = atom_str_get(url);
	sr->action = atom_str_get(action);
	sr->maxlen = maxlen;
	sr->content_len = maxlen;		/* Until we see a Content-Length */
	sr->options = options;
	sr->mb = mb;
	sr->reply_cb = reply_cb;
	sr->error_cb = error_cb;
	sr->arg = arg;

	/*
	 * Make sure the error callback is not called synchronously, and give
	 * the caller time to supply other options after creating the request
	 * before it starts.
	 */

	sr->delay_ev = cq_main_insert(1, soap_rpc_launch, sr);

	return sr;
}