/**
 * Dump relayed or locally-emitted packet.
 *
 * If ``from'' is NULL, packet was emitted locally.
 *
 * Only Gnutella traffic is dumped (DHT messages are filtered out), and both
 * the source and the destination address must match the configured dump
 * address sets (dump_tx_from_addrs / dump_tx_to_addrs) for anything to be
 * written.
 *
 * @param dump	the dump context (lazily initialized here)
 * @param from	the relaying node, or NULL if the packet originates locally
 * @param to	the destination node (must not be NULL)
 * @param mb	the message block holding the full packet (must not be NULL)
 */
static void
dump_packet_from_to(struct dump *dump,
	const struct gnutella_node *from, const struct gnutella_node *to,
	const pmsg_t *mb)
{
	struct dump_header dh_to;
	struct dump_header dh_from;

	g_assert(to != NULL);
	g_assert(mb != NULL);
	/* The read pointer must be at the start: we dump the whole message. */
	g_assert(pmsg_read_base(mb) == pmsg_start(mb));

	if (!dump_initialize(dump))
		return;

	/*
	 * This is only for Gnutella packets, leave DHT messages out.
	 */

	if (GTA_MSG_DHT == gnutella_header_get_function(pmsg_start(mb)))
		return;

	if (!ipset_contains_addr(&dump_tx_to_addrs, to->addr, TRUE))
		return;

	if (NULL == from) {
		/*
		 * Locally-emitted packet: fabricate a pseudo-node describing this
		 * host so that the "from" header can be filled in.
		 *
		 * NOTE(review): ``local'' is only partially initialized (peermode,
		 * addr, port) — presumably dump_header_set() reads just those
		 * fields; verify before touching this code.
		 */
		struct gnutella_node local;
		local.peermode = NODE_IS_UDP(to) ? NODE_P_UDP : NODE_P_NORMAL;
		local.addr = listen_addr();
		local.port = GNET_PROPERTY(listen_port);
		if (!ipset_contains_addr(&dump_tx_from_addrs, local.addr, TRUE))
			return;
		dump_header_set(&dh_from, &local);
	} else {
		if (!ipset_contains_addr(&dump_tx_from_addrs, from->addr, TRUE))
			return;
		dump_header_set(&dh_from, from);
	}

	dump_header_set(&dh_to, to);
	dh_to.data[0] |= DH_F_TO;		/* Mark header as the "to" side */
	if (pmsg_prio(mb) != PMSG_P_DATA)
		dh_to.data[0] |= DH_F_CTRL;	/* Flag prioritary (control) traffic */

	/* Emit both headers followed by the raw packet payload, then flush. */
	dump_append(dump, dh_to.data, sizeof dh_to.data);
	dump_append(dump, dh_from.data, sizeof dh_from.data);
	dump_append(dump, pmsg_read_base(mb), pmsg_size(mb));
	dump_flush(dump);
}
/** * Send message (eslist iterator callback). * * @return TRUE if message was sent and freed up. */ static bool udp_tx_desc_send(void *data, void *udata) { struct udp_tx_desc *txd = data; udp_sched_t *us = udata; unsigned prio; udp_sched_check(us); udp_tx_desc_check(txd); if (us->used_all) return FALSE; /* * Avoid flushing consecutive queued messages to the same destination, * for regular (non-prioritary) messages. * * This serves two purposes: * * 1- It makes sure one single host does not capture all the available * outgoing bandwidth. * * 2- It somehow delays consecutive packets to a given host thereby reducing * flooding and hopefully avoiding saturation of its RX flow. */ prio = pmsg_prio(txd->mb); if (PMSG_P_DATA == prio && hset_contains(us->seen, txd->to)) { udp_sched_log(2, "%p: skipping mb=%p (%d bytes) to %s", us, txd->mb, pmsg_size(txd->mb), gnet_host_to_string(txd->to)); return FALSE; } if (udp_sched_mb_sendto(us, txd->mb, txd->to, txd->tx, txd->cb)) { if (PMSG_P_DATA == prio && pmsg_was_sent(txd->mb)) hset_insert(us->seen, atom_host_get(txd->to)); } else { return FALSE; /* Unsent, leave it in the queue */ } us->buffered = size_saturate_sub(us->buffered, pmsg_size(txd->mb)); udp_tx_desc_flag_release(txd, us); return TRUE; }
/** * Remove expired messages (eslist iterator). * * @return TRUE if message has expired and was freed up. */ static bool udp_tx_desc_expired(void *data, void *udata) { struct udp_tx_desc *txd = data; udp_sched_t *us = udata; udp_sched_check(us); udp_tx_desc_check(txd); if (delta_time(tm_time(), txd->expire) > 0) { udp_sched_log(1, "%p: expiring mb=%p (%d bytes) prio=%u", us, txd->mb, pmsg_size(txd->mb), pmsg_prio(txd->mb)); if (txd->cb->add_tx_dropped != NULL) (*txd->cb->add_tx_dropped)(txd->tx->owner, 1); /* Dropped in TX */ return udp_tx_desc_drop(data, udata); /* Returns TRUE */ } return FALSE; }
/** * Send datagram. * * @param us the UDP scheduler responsible for sending the datagram * @param mb the message to send * @param to the IP:port destination of the message * @param tx the TX stack sending the message * @param cb callback actions on the datagram * * @return 0 if message was unsent, length of message if sent, queued or * dropped. */ size_t udp_sched_send(udp_sched_t *us, pmsg_t *mb, const gnet_host_t *to, const txdrv_t *tx, const struct tx_dgram_cb *cb) { int len; struct udp_tx_desc *txd; uint prio; len = pmsg_size(mb); /* * Try to send immediately if we have bandwidth. */ if (!us->used_all && udp_sched_mb_sendto(us, mb, to, tx, cb)) return len; /* Message "sent" */ /* * If we already have enough data enqueued, flow-control the upper * layer by acting as if we do not have enough bandwidth. * * However, we now always accept traffic sent with the highest priority * since it is important to send those as soon as possible, i.e. ahead * of any other pending data we would otherwise flush locally before * servicing upper queues. * --RAM, 2012-10-12 */ prio = pmsg_prio(mb); if ( PMSG_P_HIGHEST != prio && us->buffered >= UDP_SCHED_FACTOR * udp_sched_bw_per_second(us) ) { udp_sched_log(1, "%p: flow-controlled", us); us->flow_controlled = TRUE; return 0; /* Flow control upper layers */ } /* * Message is going to be enqueued. * * However, from the upper layers (the message queue in particular), * the message is considered as being sent, and therefore these layers * are going to call pmsg_free() on the message. * * We do not want to pmsg_clone() the message because that would render * uses of pmsg_was_sent() useless in free routines, and upper layers * would think the message was dropped if they installed a free routine * on the message. * * Hence we use pmsg_ref(). 
*/ txd = palloc(us->txpool); txd->magic = UDP_TX_DESC_MAGIC; txd->mb = pmsg_ref(mb); /* Take ownership of message */ txd->to = atom_host_get(to); txd->tx = tx; txd->cb = cb; txd->expire = time_advance(tm_time(), UDP_SCHED_EXPIRE); udp_sched_log(4, "%p: queuing mb=%p (%d bytes) prio=%u", us, mb, pmsg_size(mb), pmsg_prio(mb)); /* * The queue used is a LIFO to avoid buffering delaying all the messages. * Since UDP traffic is unordered, it's better to send the most recent * datagrams first, to reduce the perceived average latency. */ g_assert(prio < N_ITEMS(us->lifo)); eslist_prepend(&us->lifo[prio], txd); us->buffered = size_saturate_add(us->buffered, len); return len; /* Message queued, but tell upper layers it's sent */ }
/**
 * Send message block to IP:port.
 *
 * @param us		the UDP scheduler
 * @param mb		the message to send
 * @param to		the IP:port destination of the message
 * @param tx		the TX stack sending the message
 * @param cb		callback actions on the datagram
 *
 * @return TRUE if message was sent or dropped, FALSE if there is no more
 * bandwidth to send anything.
 */
static bool
udp_sched_mb_sendto(udp_sched_t *us, pmsg_t *mb, const gnet_host_t *to,
	const txdrv_t *tx, const struct tx_dgram_cb *cb)
{
	bio_source_t *bio = NULL;
	int length = pmsg_size(mb);
	ssize_t written;

	if (0 == gnet_host_get_port(to))
		return TRUE;		/* Silently discard traffic to port 0 */

	/*
	 * Check whether message still needs to be sent.
	 */

	if (!pmsg_hook_check(mb))
		return TRUE;		/* Dropped */

	/*
	 * Select the proper I/O source depending on the network address type.
	 */

	switch (gnet_host_get_net(to)) {
	case NET_TYPE_IPV4:
		bio = us->bio[UDP_SCHED_IPv4];
		break;
	case NET_TYPE_IPV6:
		bio = us->bio[UDP_SCHED_IPv6];
		break;
	case NET_TYPE_NONE:
	case NET_TYPE_LOCAL:
		g_assert_not_reached();
	}

	/*
	 * If there is no I/O source, then the socket to send that type of traffic
	 * was cleared, hence we simply need to discard the message.
	 */

	if (NULL == bio) {
		udp_sched_log(4, "%p: discarding mb=%p (%d bytes) to %s",
			us, mb, pmsg_size(mb), gnet_host_to_string(to));
		return udp_tx_drop(tx, cb);	/* TRUE, for "sent" */
	}

	/*
	 * OK, proceed if we have bandwidth.
	 */

	written = bio_sendto(bio, to, pmsg_start(mb), length);

	if (written < 0) {		/* Error, or no bandwidth */
		if (udp_sched_write_error(us, to, mb, G_STRFUNC)) {
			udp_sched_log(4, "%p: dropped mb=%p (%d bytes): %m",
				us, mb, pmsg_size(mb));
			return udp_tx_drop(tx, cb);	/* TRUE, for "sent" */
		}
		udp_sched_log(3, "%p: no bandwidth for mb=%p (%d bytes)",
			us, mb, pmsg_size(mb));
		us->used_all = TRUE;
		return FALSE;
	}

	if (written != length) {
		/* Short write: datagram was truncated, do not mark it sent */
		g_warning("%s: partial UDP write (%zd bytes) to %s "
			"for %d-byte datagram",
			G_STRFUNC, written, gnet_host_to_string(to), length);
	} else {
		udp_sched_log(5, "%p: sent mb=%p (%d bytes) prio=%u",
			us, mb, pmsg_size(mb), pmsg_prio(mb));
		pmsg_mark_sent(mb);
		if (cb->msg_account != NULL)
			(*cb->msg_account)(tx->owner, mb);
		inet_udp_record_sent(gnet_host_get_addr(to));
	}

	return TRUE;		/* Message sent */
}