/**
 * Dump relayed or locally-emitted packet.
 * If ``from'' is NULL, packet was emitted locally.
 *
 * @param dump	the dump stream to write to
 * @param from	originating node, or NULL if the packet is locally emitted
 * @param to	destination node (never NULL)
 * @param mb	the message block holding the packet (must be unread)
 */
static void
dump_packet_from_to(struct dump *dump,
	const struct gnutella_node *from,
	const struct gnutella_node *to, const pmsg_t *mb)
{
	struct dump_header dh_to;
	struct dump_header dh_from;

	g_assert(to != NULL);
	g_assert(mb != NULL);
	g_assert(pmsg_read_base(mb) == pmsg_start(mb));

	if (!dump_initialize(dump))
		return;

	/*
	 * This is only for Gnutella packets, leave DHT messages out.
	 */

	if (GTA_MSG_DHT == gnutella_header_get_function(pmsg_start(mb)))
		return;

	/* Destination must match the configured TX dump filter */
	if (!ipset_contains_addr(&dump_tx_to_addrs, to->addr, TRUE))
		return;

	if (NULL == from) {
		/*
		 * Locally-emitted packet: fabricate a transient node describing
		 * ourselves so that the "from" header can be filled in.
		 */
		struct gnutella_node local;
		local.peermode = NODE_IS_UDP(to) ? NODE_P_UDP : NODE_P_NORMAL;
		local.addr = listen_addr();
		local.port = GNET_PROPERTY(listen_port);
		if (!ipset_contains_addr(&dump_tx_from_addrs, local.addr, TRUE))
			return;
		dump_header_set(&dh_from, &local);
	} else {
		/* Source must also match the "from" TX dump filter */
		if (!ipset_contains_addr(&dump_tx_from_addrs, from->addr, TRUE))
			return;
		dump_header_set(&dh_from, from);
	}

	dump_header_set(&dh_to, to);
	dh_to.data[0] |= DH_F_TO;
	if (pmsg_prio(mb) != PMSG_P_DATA)
		dh_to.data[0] |= DH_F_CTRL;	/* Flag prioritary (control) messages */

	/* Emit both headers followed by the raw packet payload */
	dump_append(dump, dh_to.data, sizeof dh_to.data);
	dump_append(dump, dh_from.data, sizeof dh_from.data);
	dump_append(dump, pmsg_read_base(mb), pmsg_size(mb));
	dump_flush(dump);
}
/** * Fetch the query text from a /Q2 message. * * @param mb a message block containing a serialized /Q2 * * @return a pointer to the search text string (as static data), NULL if * is no text in the query or the message is not a /Q2. */ const char * g2_msg_search_get_text(const pmsg_t *mb) { str_t *s = str_private(G_STRFUNC, 64); const g2_tree_t *t; t = g2_frame_deserialize( pmsg_start(mb), pmsg_written_size(mb), NULL, FALSE); if (NULL == t) { return NULL; } else { const char *payload; size_t paylen; payload = g2_tree_payload(t, "/Q2/DN", &paylen); if (NULL == payload) { g2_tree_free_null_const(&t); return NULL; } str_cpy_len(s, payload, paylen); } g2_tree_free_null_const(&t); return str_2c(s); }
/**
 * Signals the search queue that a search was closed.
 * Any query for that search still in the queue is dropped.
 *
 * @param sq	the search queue to purge
 * @param sh	handle of the search that was closed
 */
void
sq_search_closed(squeue_t *sq, gnet_search_t sh)
{
	GList *l;
	GList *next;

	/* Grab the next link up-front since the current one may be freed below */
	for (l = sq->searches; l; l = next) {
		smsg_t *sb = l->data;

		next = g_list_next(l);

		if (sb->shandle != sh)
			continue;

		g_assert(sq->count > 0);

		sq->count--;
		sq->searches = g_list_remove_link(sq->searches, l);

		if (GNET_PROPERTY(sq_debug) > 4)
			g_debug("sq for node %s, dropped \"%s\" on search close (%u left)",
				sq->node ? node_addr(sq->node) : "GLOBAL",
				gnutella_msg_search_get_text(pmsg_start(sb->mb)), sq->count);

		sqh_remove(sq, sb->shandle);
		smsg_discard(sb);
		g_list_free_1(l);
	}

	/* An empty list implies a zero count (but not the converse) */
	g_assert(sq->searches || sq->count == 0);
}
/**
 * Log a dropped message.
 *
 * @param mb	the message block that was dropped
 * @param fmt	printf-like format string describing the drop reason
 */
void
g2_msg_log_dropped_pmsg(const pmsg_t *mb, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	/* NOTE(review): passes the va_list down -- assumes g2_msg_log_dropped()
	 * takes a va_list (not "...") as its last parameter; confirm prototype. */
	g2_msg_log_dropped(pmsg_start(mb), pmsg_size(mb), fmt, args);
	va_end(args);
}
/**
 * Delayed RPC start.
 *
 * Callout queue callback: launches the asynchronous HTTP POST carrying
 * the serialized SOAP request once the scheduled delay has expired.
 */
static void
soap_rpc_launch(cqueue_t *unused_cq, gpointer obj)
{
	soap_rpc_t *sr = obj;
	http_post_data_t post;

	(void) unused_cq;
	soap_rpc_check(sr);

	sr->delay_ev = NULL;	/* Event has fired, forget the stale handle */

	if (GNET_PROPERTY(soap_debug) > 4) {
		g_debug("SOAP \"%s\" at \"%s\": launching (%s)",
			sr->action, sr->url, sr->retry ? "retry" : "initial");
	}

	sr->reply_len = 0;		/* In case we retry, clear out older data */

	/*
	 * Launch the asynchronous POST request.
	 */

	post.content_type = SOAP_CONTENT_TYPE;
	post.data = pmsg_start(sr->mb);
	post.datalen = pmsg_size(sr->mb);
	post.data_free = NULL;		/* No free routine: sr->mb owns the data */
	post.data_free_arg = NULL;

	sr->ha = http_async_post(sr->url, &post, soap_header_ind,
		soap_data_ind, soap_error_ind);

	/*
	 * If we cannot create the HTTP request, it can be the URL is wrong,
	 * or no connection can be established to the host.  Hence it's a
	 * contacting error, not an I/O error at this stage.
	 */

	if (sr->ha == NULL) {
		if (GNET_PROPERTY(soap_debug)) {
			g_warning("SOAP cannot contact \"%s\": %s",
				sr->url, http_async_strerror(http_async_errno));
		}
		soap_error(sr, SOAP_E_CONTACT);
		return;
	}

	/*
	 * Customize the HTTP layer.
	 */

	http_async_set_opaque(sr->ha, sr, NULL);
	http_async_set_op_post_request(sr->ha, soap_build_request);
	http_async_set_op_headsent(sr->ha, soap_sent_head);
	http_async_set_op_datasent(sr->ha, soap_sent_data);
	http_async_set_op_gotreply(sr->ha, soap_got_reply);
	http_async_option_ctl(sr->ha, HTTP_O_READ_REPLY, HTTP_CTL_ADD);
}
/**
 * Create new message holding serialized tree.
 *
 * @param t			the tree to serialize
 * @param prio		priority of the message
 * @param freecb	if non-NULL, the free routine to attach to message
 * @param arg		additional argument for the free routine
 *
 * @return a message containing the serialized tree.
 */
static pmsg_t *
g2_build_pmsg_prio(const g2_tree_t *t, int prio, pmsg_free_t freecb, void *arg)
{
	pmsg_t *m;
	size_t serlen;

	/* First pass with a NULL buffer computes the serialized length */
	serlen = g2_frame_serialize(t, NULL, 0);

	m = (freecb != NULL)
		? pmsg_new_extend(prio, NULL, serlen, freecb, arg)
		: pmsg_new(prio, NULL, serlen);

	/* Second pass fills the freshly allocated message block */
	g2_frame_serialize(t, pmsg_start(m), serlen);
	pmsg_seek(m, serlen);

	g_assert(UNSIGNED(pmsg_size(m)) == serlen);

	return m;
}
/**
 * Creates an iovec from a singly-linked list of pmsg_t buffers.
 * It should be freed via hfree().
 *
 * NOTE: The iovec will hold no more than MAX_IOV_COUNT items. That means
 * the iovec might not cover the whole buffered data. This limit
 * is applied because writev() could fail with EINVAL otherwise
 * which would simply add more unnecessary complexity.
 *
 * @param slist			the list of message blocks to map
 * @param iovcnt_ptr	if non-NULL, written with the amount of iovec entries
 * @param size_ptr		if non-NULL, written with the total bytes covered
 *
 * @return a newly allocated iovec array, or NULL when the list is empty.
 */
iovec_t *
pmsg_slist_to_iovec(slist_t *slist, int *iovcnt_ptr, size_t *size_ptr)
{
	iovec_t *iov;
	size_t held = 0;
	int n;

	g_assert(slist);

	n = slist_length(slist);

	if (n > 0) {
		slist_iter_t *iter;
		int i;

		/* Cap the vector length so writev() never sees too many entries */
		n = MIN(n, MAX_IOV_COUNT);
		HALLOC_ARRAY(iov, n);

		iter = slist_iter_before_head(slist);
		for (i = 0; i < n; i++) {
			pmsg_t *mb;
			size_t size;

			mb = slist_iter_next(iter);
			pmsg_check(mb);

			size = pmsg_size(mb);
			g_assert(size > 0);
			held += size;

			/* deconstify: iovec_t has no const member, data is not written */
			iovec_set(&iov[i], deconstify_pointer(pmsg_start(mb)), size);
		}
		slist_iter_free(&iter);
	} else {
		iov = NULL;
	}
	if (iovcnt_ptr) {
		*iovcnt_ptr = MAX(0, n);
	}
	if (size_ptr) {
		*size_ptr = held;
	}
	return iov;
}
/**
 * Free routine for query hit message.
 *
 * Invoked when the (extended) message is reclaimed by the message queue:
 * updates the per-MUID hit tracking statistics, if the entry still exists.
 *
 * @param mb	the extended message being freed
 * @param arg	the dh_pmsg_info recorded at enqueue time
 */
static void
dh_pmsg_free(pmsg_t *mb, void *arg)
{
	struct dh_pmsg_info *pmi = arg;
	const struct guid *muid;
	dqhit_t *dh;

	g_assert(pmsg_is_extended(mb));

	muid = gnutella_header_get_muid(pmsg_start(mb));
	dh = dh_locate(muid);

	if (dh == NULL)
		goto cleanup;	/* Tracking entry expired, nothing to update */

	/*
	 * It can happen that an initial query hit comes and is queued for
	 * transmission, but the node is so clogged we don't actually send
	 * it before the entry expires in our tracking tables. When we later
	 * get the ACK that it was sent, we can therefore get obsolete data.
	 * Hence we're very careful updating the stats, and we can't assert
	 * that we're tracking everything correctly.
	 *		--RAM, 2004-09-04
	 */

	if (pmsg_was_sent(mb))
		dh->hits_sent += pmi->hits;

	if (dh->msg_queued == 0)	/* We did not expect this ACK */
		goto cleanup;

	dh->msg_queued--;

	/* Guard the subtraction: counters may be stale (see note above) */
	if (dh->hits_queued >= pmi->hits)
		dh->hits_queued -= pmi->hits;

	/* FALL THROUGH */
cleanup:
	WFREE(pmi);
}
/**
 * Main RPC iteration loop.
 *
 * Callout queue callback: (re)sends the NAT-PMP request to the gateway,
 * doubling the timeout on each attempt, until the retry budget runs out.
 */
static void
natpmp_rpc_iterate(cqueue_t *unused_cq, void *obj)
{
	struct natpmp_rpc *rd = obj;
	int ret;

	natpmp_rpc_check(rd);
	(void) unused_cq;

	if (rd->count++ > rd->retries)
		goto finished;		/* Too many attempts, give up */

	ret = urpc_send("NAT-PMP", rd->gateway, NATPMP_SRV_PORT,
		pmsg_start(rd->mb), pmsg_size(rd->mb), rd->timeout,
		natpmp_rpc_reply, rd);

	if (0 != ret) {
		if (GNET_PROPERTY(natpmp_debug)) {
			g_warning("NATPMP could not send \"%s\" #%u to %s: %m",
				natpmp_op_to_string(rd->op), rd->count,
				host_addr_port_to_string(rd->gateway, NATPMP_SRV_PORT));
		}
		goto finished;		/* Sending failed, abort the RPC */
	} else {
		if (GNET_PROPERTY(natpmp_debug) > 4) {
			g_debug("NATPMP sent \"%s\" #%u to %s, with %u ms timeout",
				natpmp_op_to_string(rd->op), rd->count,
				host_addr_port_to_string(rd->gateway, NATPMP_SRV_PORT),
				rd->timeout);
		}
	}

	/* Exponential back-off (saturating multiply avoids overflow) */
	rd->timeout = uint_saturate_mult(rd->timeout, 2);	/* For next time */
	return;

finished:
	natpmp_rpc_error(rd);
}
/**
 * Decides if it needs to drop the oldest messages on the
 * search queue based on the search count.
 *
 * Trims the tail of the queue (the oldest enqueued searches, since the
 * queue is managed as a LIFO) until the configured limit is respected.
 */
static void
cap_queue(squeue_t *sq)
{
	while (sq->count > GNET_PROPERTY(search_queue_size)) {
		GList *item = g_list_last(sq->searches);	/* Oldest enqueued */
		smsg_t *sb = item->data;

		sq->searches = g_list_remove_link(sq->searches, item);

		g_assert(sq->count > 0);

		sq->count--;
		sq->n_dropped++;

		if (GNET_PROPERTY(sq_debug) > 4)
			g_debug("sq for node %s, dropped \"%s\" (%u left, %d dropped)",
				node_addr(sq->node),
				gnutella_msg_search_get_text(pmsg_start(sb->mb)),
				sq->count, sq->n_dropped);

		sqh_remove(sq, sb->shandle);
		smsg_discard(sb);
		g_list_free_1(item);
	}
}
/**
 * Write back cached value to disk.
 *
 * @param dw	the DBM wrapper
 * @param key	the key to which the cached value belongs
 * @param value	the dirty cached value to flush (or delete, when absent)
 *
 * @return TRUE on success.
 */
static gboolean
write_back(dbmw_t *dw, gconstpointer key, struct cached *value)
{
	dbmap_datum_t dval;
	gboolean ok;

	g_assert(value->dirty);

	if (value->absent) {
		/* Key not present, value is null item */
		dval.data = NULL;
		dval.len = 0;
	} else {
		/*
		 * Serialize value into our reused message block if a
		 * serialization routine was provided.
		 */

		if (dw->pack) {
			pmsg_reset(dw->mb);
			(*dw->pack)(dw->mb, value->data);

			dval.data = pmsg_start(dw->mb);
			dval.len = pmsg_size(dw->mb);

			/*
			 * We allocated the message block one byte larger than the
			 * maximum size, in order to detect unexpected serialization
			 * overflows.
			 */

			if (dval.len > dw->value_data_size) {
				/* Don't g_carp() as this is asynchronous wrt data change */
				/*
				 * Fixed dead code: this branch is only reachable when
				 * !value->absent, so the operation is always "flushing";
				 * the former "deleting"/"flushing" ternary could never
				 * yield "deleting" here.
				 */
				g_warning("DBMW \"%s\" serialization overflow in %s() "
					"whilst flushing dirty entry",
					dw->name,
					stacktrace_routine_name(
						func_to_pointer(dw->pack), FALSE));
				return FALSE;
			}
		} else {
			dval.data = value->data;
			dval.len = value->len;
		}
	}

	/*
	 * If cached entry is absent, delete the key.
	 * Otherwise store the serialized value.
	 *
	 * Dirty bit is cleared on success.
	 */

	if (common_dbg > 4)
		g_debug("DBMW \"%s\" %s dirty value (%lu byte%s)",
			dw->name, value->absent ? "deleting" : "flushing",
			(unsigned long) dval.len, 1 == dval.len ? "" : "s");

	dw->ioerr = FALSE;
	ok = value->absent ?
		dbmap_remove(dw->dm, key) : dbmap_insert(dw->dm, key, dval);

	if (ok) {
		value->dirty = FALSE;
	} else if (dbmap_has_ioerr(dw->dm)) {
		dw->ioerr = TRUE;
		dw->error = errno;		/* Save errno before any further calls */
		g_warning("DBMW \"%s\" I/O error whilst %s dirty entry: %s",
			dw->name, value->absent ? "deleting" : "flushing",
			dbmap_strerror(dw->dm));
	} else {
		g_warning("DBMW \"%s\" error whilst %s dirty entry: %s",
			dw->name, value->absent ? "deleting" : "flushing",
			dbmap_strerror(dw->dm));
	}

	return ok;
}
/**
 * Send message block to IP:port.
 *
 * @param us the UDP scheduler
 * @param mb the message to send
 * @param to the IP:port destination of the message
 * @param tx the TX stack sending the message
 * @param cb callback actions on the datagram
 *
 * @return TRUE if message was sent or dropped, FALSE if there is no more
 * bandwidth to send anything.
 */
static bool
udp_sched_mb_sendto(udp_sched_t *us, pmsg_t *mb, const gnet_host_t *to,
	const txdrv_t *tx, const struct tx_dgram_cb *cb)
{
	ssize_t r;
	int len = pmsg_size(mb);
	bio_source_t *bio = NULL;

	if (0 == gnet_host_get_port(to))
		return TRUE;	/* Port-less destination, silently ignored */

	/*
	 * Check whether message still needs to be sent.
	 */

	if (!pmsg_hook_check(mb))
		return TRUE;		/* Dropped */

	/*
	 * Select the proper I/O source depending on the network address type.
	 */

	switch (gnet_host_get_net(to)) {
	case NET_TYPE_IPV4:
		bio = us->bio[UDP_SCHED_IPv4];
		break;
	case NET_TYPE_IPV6:
		bio = us->bio[UDP_SCHED_IPv6];
		break;
	case NET_TYPE_NONE:
	case NET_TYPE_LOCAL:
		g_assert_not_reached();
	}

	/*
	 * If there is no I/O source, then the socket to send that type of traffic
	 * was cleared, hence we simply need to discard the message.
	 */

	if (NULL == bio) {
		udp_sched_log(4, "%p: discarding mb=%p (%d bytes) to %s",
			us, mb, pmsg_size(mb), gnet_host_to_string(to));
		return udp_tx_drop(tx, cb);	/* TRUE, for "sent" */
	}

	/*
	 * OK, proceed if we have bandwidth.
	 */

	r = bio_sendto(bio, to, pmsg_start(mb), len);

	if (r < 0) {		/* Error, or no bandwidth */
		if (udp_sched_write_error(us, to, mb, G_STRFUNC)) {
			udp_sched_log(4, "%p: dropped mb=%p (%d bytes): %m",
				us, mb, pmsg_size(mb));
			return udp_tx_drop(tx, cb);	/* TRUE, for "sent" */
		}
		udp_sched_log(3, "%p: no bandwidth for mb=%p (%d bytes)",
			us, mb, pmsg_size(mb));
		us->used_all = TRUE;	/* Remember bandwidth exhaustion */
		return FALSE;
	}

	if (r != len) {
		/* Should not happen for UDP: datagrams are all-or-nothing */
		g_warning("%s: partial UDP write (%zd bytes) to %s "
			"for %d-byte datagram",
			G_STRFUNC, r, gnet_host_to_string(to), len);
	} else {
		udp_sched_log(5, "%p: sent mb=%p (%d bytes) prio=%u",
			us, mb, pmsg_size(mb), pmsg_prio(mb));
		pmsg_mark_sent(mb);
		if (cb->msg_account != NULL)
			(*cb->msg_account)(tx->owner, mb);
		inet_udp_record_sent(gnet_host_get_addr(to));
	}

	return TRUE;		/* Message sent */
}
/**
 * Enqueue message, which becomes owned by the queue.
 *
 * The data held in `to' is copied, so the structure can be reclaimed
 * immediately by the caller.
 *
 * @param q		the UDP message queue
 * @param mb	the message block (ownership is transferred)
 * @param to	destination IP:port (copied)
 */
void
mq_udp_putq(mqueue_t *q, pmsg_t *mb, const gnet_host_t *to)
{
	size_t size;
	char *mbs;
	uint8 function;
	pmsg_t *mbe = NULL;		/* Extended message with destination info */
	bool error = FALSE;

	mq_check_consistency(q);

	dump_tx_udp_packet(to, mb);

again:
	mq_check_consistency(q);

	g_assert(mb);
	g_assert(!pmsg_was_sent(mb));
	g_assert(pmsg_is_unread(mb));
	g_assert(q->ops == &mq_udp_ops);	/* Is an UDP queue */

	/*
	 * Trap messages enqueued whilst in the middle of an mq_clear() operation
	 * by marking them as sent and dropping them.  Idem if queue was
	 * put in "discard" mode.
	 */

	if (q->flags & (MQ_CLEAR | MQ_DISCARD)) {
		pmsg_mark_sent(mb);	/* Let them think it was sent */
		pmsg_free(mb);		/* Drop message */
		return;
	}

	mq_check(q, 0);

	size = pmsg_size(mb);

	if (size == 0) {
		g_carp("%s: called with empty message", G_STRFUNC);
		goto cleanup;
	}

	/*
	 * Protect against recursion: we must not invoke puthere() whilst in
	 * the middle of another putq() or we would corrupt the qlink array:
	 * Messages received during recursion are inserted into the qwait list
	 * and will be stuffed back into the queue when the initial putq() ends.
	 *		--RAM, 2006-12-29
	 */

	if (q->putq_entered > 0) {
		pmsg_t *extended;

		if (debugging(20))
			g_warning("%s: %s recursion detected (%u already pending)",
				G_STRFUNC, mq_info(q), slist_length(q->qwait));

		/*
		 * We insert extended messages into the waiting queue since we need
		 * the destination information as well.
		 */

		extended = mq_udp_attach_metadata(mb, to);
		slist_append(q->qwait, extended);
		return;
	}

	q->putq_entered++;		/* Re-entrancy guard for this invocation */

	mbs = pmsg_start(mb);
	function = gmsg_function(mbs);

	gnet_stats_count_queued(q->node, function, mbs, size);

	/*
	 * If queue is empty, attempt a write immediately.
	 */

	if (q->qhead == NULL) {
		ssize_t written;

		if (pmsg_check(mb, q)) {
			written = tx_sendto(q->tx_drv, mb, to);
		} else {
			gnet_stats_count_flowc(mbs, FALSE);
			node_inc_txdrop(q->node);		/* Dropped during TX */
			written = (ssize_t) -1;
		}

		if ((ssize_t) -1 == written)
			goto cleanup;

		node_add_tx_given(q->node, written);

		if ((size_t) written == size) {
			if (GNET_PROPERTY(mq_udp_debug) > 5)
				g_debug("MQ UDP sent %s",
					gmsg_infostr_full(pmsg_start(mb), pmsg_written_size(mb)));
			goto cleanup;
		}

		/*
		 * Since UDP respects write boundaries, the following can never
		 * happen in practice: either we write the whole datagram, or none
		 * of it.
		 */

		if (written > 0) {
			g_warning(
				"partial UDP write (%zu bytes) to %s for %zu-byte datagram",
				written, gnet_host_to_string(to), size);
			goto cleanup;
		}

		/* FALL THROUGH */
	}

	if (GNET_PROPERTY(mq_udp_debug) > 5)
		g_debug("MQ UDP queued %s",
			gmsg_infostr_full(pmsg_start(mb), pmsg_written_size(mb)));

	/*
	 * Attach the destination information as metadata to the message, unless
	 * it is already known (possible only during unfolding of the queued data
	 * during re-entrant calls).
	 *
	 * This is later extracted via pmsg_get_metadata() on the extended
	 * message by the message queue to get the destination information.
	 *
	 * Then enqueue the extended message.
	 */

	if (NULL == mbe)
		mbe = mq_udp_attach_metadata(mb, to);

	q->cops->puthere(q, mbe, size);
	mb = NULL;		/* Ownership passed to the queue */

	/* FALL THROUGH */

cleanup:
	if (mb) {
		pmsg_free(mb);	/* Message was consumed (sent or dropped) */
		mb = NULL;
	}

	/*
	 * When reaching that point with a zero putq_entered counter, it means
	 * we triggered an early error condition.  Bail out.
	 */

	g_assert(q->putq_entered >= 0);

	if (q->putq_entered == 0)
		error = TRUE;
	else
		q->putq_entered--;

	mq_check(q, 0);

	/*
	 * If we're exiting here with no other putq() registered, then we must
	 * pop an item off the head of the list and iterate again.
	 */

	if (0 == q->putq_entered && !error) {
		mbe = slist_shift(q->qwait);
		if (mbe) {
			struct mq_udp_info *mi = pmsg_get_metadata(mbe);

			mb = mbe;		/* An extended message "is-a" message */
			to = &mi->to;

			if (debugging(20))
				g_warning(
					"%s: %s flushing waiting to %s (%u still pending)",
					G_STRFUNC, mq_info(q), gnet_host_to_string(to),
					slist_length(q->qwait));

			goto again;
		}
	}

	return;
}
/**
 * Write back cached value to disk.
 *
 * @param dw	the DBM wrapper
 * @param key	the key to which the cached value belongs
 * @param value	the dirty cached value to flush (or delete, when absent)
 *
 * @return TRUE on success.
 */
static bool
write_back(dbmw_t *dw, const void *key, struct cached *value)
{
	dbmap_datum_t dval;
	bool ok;

	g_assert(value->dirty);

	if (value->absent) {
		/* Key not present, value is null item */
		dval.data = NULL;
		dval.len = 0;
	} else {
		/*
		 * Serialize value into our reused message block if a
		 * serialization routine was provided.
		 */

		if (dw->pack) {
			pmsg_reset(dw->mb);
			(*dw->pack)(dw->mb, value->data);

			dval.data = pmsg_start(dw->mb);
			dval.len = pmsg_size(dw->mb);

			/*
			 * We allocated the message block one byte larger than the
			 * maximum size, in order to detect unexpected serialization
			 * overflows.
			 */

			if (dval.len > dw->value_data_size) {
				/* Don't s_carp() as this is asynchronous wrt data change */
				s_critical("DBMW \"%s\" serialization overflow in %s() "
					"whilst flushing dirty entry",
					dw->name, stacktrace_function_name(dw->pack));
				return FALSE;
			}
		} else {
			dval.data = value->data;
			dval.len = value->len;
		}
	}

	/*
	 * If cached entry is absent, delete the key.
	 * Otherwise store the serialized value.
	 *
	 * Dirty bit is cleared on success.
	 */

	if (
		dbg_ds_debugging(dw->dbg, 1,
			DBG_DSF_CACHING | DBG_DSF_UPDATE | DBG_DSF_INSERT | DBG_DSF_DELETE)
	) {
		dbg_ds_log(dw->dbg, dw, "%s: %s dirty value (%zu byte%s) key=%s",
			G_STRFUNC, value->absent ? "deleting" : "flushing",
			dval.len, plural(dval.len),
			dbg_ds_keystr(dw->dbg, key, (size_t) -1));
	}

	dw->ioerr = FALSE;
	ok = value->absent ?
		dbmap_remove(dw->dm, key) : dbmap_insert(dw->dm, key, dval);

	if (ok) {
		value->dirty = FALSE;
	} else if (dbmap_has_ioerr(dw->dm)) {
		dw->ioerr = TRUE;
		dw->error = errno;		/* Save errno before any further calls */
		s_warning("DBMW \"%s\" I/O error whilst %s dirty entry: %s",
			dw->name, value->absent ? "deleting" : "flushing",
			dbmap_strerror(dw->dm));
	} else {
		s_warning("DBMW \"%s\" error whilst %s dirty entry: %s",
			dw->name, value->absent ? "deleting" : "flushing",
			dbmap_strerror(dw->dm));
	}

	return ok;
}
/**
 * Route query hits from one node to the other.
 *
 * @param src	the node the hits come from
 * @param dest	the node the hits must be routed to
 * @param count	amount of hits held in the message
 */
void
dh_route(gnutella_node_t *src, gnutella_node_t *dest, int count)
{
	pmsg_t *mb;
	struct dh_pmsg_info *pmi;
	const struct guid *muid;
	dqhit_t *dh;
	mqueue_t *mq;

	g_assert(
		gnutella_header_get_function(&src->header) == GTA_MSG_SEARCH_RESULTS);
	g_assert(count >= 0);

	if (!NODE_IS_WRITABLE(dest))
		goto drop_shutdown;

	muid = gnutella_header_get_muid(&src->header);
	dh = dh_locate(muid);

	g_assert(dh != NULL);	/* Must have called dh_got_results() first! */

	if (GNET_PROPERTY(dh_debug) > 19) {
		g_debug("DH #%s got %d hit%s: "
			"msg=%u, hits_recv=%u, hits_sent=%u, hits_queued=%u",
			guid_hex_str(muid), count, plural(count),
			dh->msg_recv, dh->hits_recv, dh->hits_sent,
			dh->hits_queued);
	}

	mq = dest->outq;

	/*
	 * Can we forward the message?
	 */

	switch (dh_can_forward(dh, mq, FALSE)) {
	case DH_DROP_FC:
		goto drop_flow_control;
	case DH_DROP_THROTTLE:
		goto drop_throttle;
	case DH_DROP_TRANSIENT:
		goto drop_transient;
	case DH_FORWARD:
	default:
		break;
	}

	/*
	 * Allow message through.
	 */

	WALLOC(pmi);
	pmi->hits = count;

	dh->hits_queued += count;
	dh->msg_queued++;

	g_assert(dh->hits_queued >= UNSIGNED(count));

	/*
	 * Magic: we create an extended version of a pmsg_t that contains a
	 * free routine, which will be invoked when the message queue frees
	 * the message.
	 *
	 * This enables us to track how much results we already queued/sent.
	 */

	if (NODE_IS_UDP(dest)) {
		gnet_host_t to;
		pmsg_t *mbe;

		gnet_host_set(&to, dest->addr, dest->port);

		/*
		 * With GUESS we may route back a query hit to an UDP node.
		 */

		if (GNET_PROPERTY(guess_server_debug) > 19) {
			g_debug("GUESS sending %d hit%s (%s) for #%s to %s",
				count, plural(count),
				NODE_CAN_SR_UDP(dest) ? "reliably" :
				NODE_CAN_INFLATE(dest) ? "possibly deflated" : "uncompressed",
				guid_hex_str(muid), node_infostr(dest));
		}

		/*
		 * Attempt to compress query hit if the destination supports it.
		 *
		 * If we're going to send the hit using semi-reliable UDP, there's
		 * no need to compress beforehand, since the transport layer will
		 * attempt its own compression anyway.
		 */

		if (!NODE_CAN_SR_UDP(dest) && NODE_CAN_INFLATE(dest)) {
			mb = gmsg_split_to_deflated_pmsg(&src->header,
				src->data, src->size + GTA_HEADER_SIZE);

			/* TTL carries the "deflated" flag when compression succeeded */
			if (gnutella_header_get_ttl(pmsg_start(mb)) & GTA_UDP_DEFLATED)
				gnet_stats_inc_general(GNR_UDP_TX_COMPRESSED);
		} else {
			mb = gmsg_split_to_pmsg(&src->header,
				src->data, src->size + GTA_HEADER_SIZE);
		}

		/* Clone as extended message with free routine, drop original */
		mbe = pmsg_clone_extend(mb, dh_pmsg_free, pmi);
		pmsg_free(mb);

		if (NODE_CAN_SR_UDP(dest))
			pmsg_mark_reliable(mbe);

		mq_udp_putq(mq, mbe, &to);
	} else {
		mb = gmsg_split_to_pmsg_extend(&src->header,
			src->data, src->size + GTA_HEADER_SIZE,
			dh_pmsg_free, pmi);
		mq_tcp_putq(mq, mb, src);

		if (GNET_PROPERTY(dh_debug) > 19) {
			g_debug("DH enqueued %d hit%s for #%s to %s",
				count, plural(count), guid_hex_str(muid),
				node_infostr(dest));
		}
	}

	return;

drop_shutdown:
	gnet_stats_count_dropped(src, MSG_DROP_SHUTDOWN);
	return;

drop_flow_control:
	gnet_stats_count_dropped(src, MSG_DROP_FLOW_CONTROL);
	gnet_stats_count_flowc(&src->header, TRUE);
	return;

drop_throttle:
	gnet_stats_count_dropped(src, MSG_DROP_THROTTLE);
	return;

drop_transient:
	gnet_stats_count_dropped(src, MSG_DROP_TRANSIENT);
	return;
}
/**
 * Decides if the queue can send a message.  Currently use simple fixed
 * time base heuristics.  May add bursty control later...
 *
 * @param sq	the search queue to process
 * @param now	current time, used against the query spacing setting
 */
void
sq_process(squeue_t *sq, time_t now)
{
	time_delta_t spacing = GNET_PROPERTY(search_queue_spacing);
	GList *item;
	smsg_t *sb;
	struct gnutella_node *n;
	bool sent;

	g_assert(sq->node == NULL || sq->node->outq != NULL);

retry:

	/*
	 * We don't need to do anything if either:
	 *
	 * 1. The queue is empty.
	 * 2. We sent our last search less than "search_queue_spacing" seconds ago.
	 * 3. We never got a packet from that node.
	 * 4. The node activated hops-flow to shut all queries
	 * 5. We activated flow-control on the node locally.
	 *
	 *		--RAM, 01/05/2002
	 */

	if (sq->count == 0)
		return;

	if (delta_time(now, sq->last_sent) < spacing)
		return;

	n = sq->node;		/* Will be NULL for the global SQ */

	if (n != NULL) {
		if (n->received == 0)	/* RX = 0, wait for handshaking ping */
			return;

		if (!node_query_hops_ok(n, 0))	/* Cannot send hops=0 query */
			return;

		if (!NODE_IS_WRITABLE(n))
			return;

		if (NODE_IN_TX_FLOW_CONTROL(n))	/* Don't add to the mqueue yet */
			return;
	} else {
		/*
		 * Processing the global SQ.
		 */

		if (settings_is_leaf())
			return;

		if (3*UNSIGNED(node_keep_missing()) > 2*GNET_PROPERTY(up_connections))
			return;		/* Not enough nodes for querying */
	}

	/*
	 * Queue is managed as a LIFO: we extract the first message, i.e. the last
	 * one enqueued, and pass it along to the node's message queue.
	 */

	g_assert(sq->searches);

	item = g_list_first(sq->searches);
	sb = item->data;

	g_assert(sq->count > 0);
	sq->count--;
	sent = TRUE;	/* Assume we're going to send/initiate it */

	if (n == NULL) {
		/* Global queue entry: hand over to the dynamic query layer */
		g_assert(sb->qhv != NULL);	/* Enqueued via sq_global_putq() */

		if (GNET_PROPERTY(sq_debug) > 2)
			g_debug("sq GLOBAL, queuing \"%s\" (%u left, %d sent)",
				gnutella_msg_search_get_text(pmsg_start(sb->mb)),
				sq->count, sq->n_sent);

		dq_launch_local(sb->shandle, sb->mb, sb->qhv);

	} else if (search_query_allowed(sb->shandle)) {
		/*
		 * Must log before sending, in case the queue discards the message
		 * buffer immediately.
		 */

		g_assert(sb->qhv == NULL);	/* Enqueued via sq_putq() */

		if (GNET_PROPERTY(sq_debug) > 2)
			g_debug("sq for node %s, queuing \"%s\" (%u left, %d sent)",
				node_addr(n),
				gnutella_msg_search_get_text(pmsg_start(sb->mb)),
				sq->count, sq->n_sent);

		/*
		 * If we're a leaf node, we're doing a leaf-guided dynamic query.
		 * In order to be able to report hits we get to the UPs to whom
		 * we sent our searches, we need to be notified of all the physical
		 * queries that go out.
		 */

		if (settings_is_leaf())
			smsg_mutate(sb, n);

		mq_tcp_putq(n->outq, sb->mb, NULL);

	} else {
		/* Search no longer allowed to query: drop the message silently */
		if (GNET_PROPERTY(sq_debug) > 4)
			g_debug("sq for node %s, ignored \"%s\" (%u left, %d sent)",
				node_addr(n),
				gnutella_msg_search_get_text(pmsg_start(sb->mb)),
				sq->count, sq->n_sent);

		pmsg_free(sb->mb);
		if (sb->qhv)
			qhvec_free(sb->qhv);

		sent = FALSE;
	}

	if (sent) {
		sq->n_sent++;
		sq->last_sent = now;
	}

	sqh_remove(sq, sb->shandle);
	smsg_free(sb);

	sq->searches = g_list_remove_link(sq->searches, item);
	g_list_free_1(item);

	/*
	 * If we ignored the query, retry with the next in the queue.
	 * We don't use a do/while() loop to avoid indenting the whole body.
	 */

	if (!sent)
		goto retry;
}