/** * Compare function which returns TRUE if the host addresses are equivalent. */ bool gnet_host_addr_equiv(const void *v1, const void *v2) { const gnet_host_t *h1 = v1, *h2 = v2; return host_addr_equiv(gnet_host_get_addr(h1), gnet_host_get_addr(h2)); }
/** * Compare function which returns TRUE if the hosts are equal. * * @note For use in hash tables and sets. */ bool gnet_host_equal(const void *v1, const void *v2) { const gnet_host_t *h1 = v1, *h2 = v2; return gnet_host_get_port(h1) == gnet_host_get_port(h2) && host_addr_equal(gnet_host_get_addr(h1), gnet_host_get_addr(h2)); }
/**
 * Creates a new THEX upload context.  The context must be freed with
 * thex_upload_close().
 *
 * @param owner		the owner of the TX stack (the upload)
 * @param host		the host to which we're talking to
 * @param sf		the shared file whose TTH / file size seed the context
 * @param writable	service routine registered with the TX stack, invoked
 *					when the stack becomes writable (stack is in eager mode)
 * @param link_cb	callbacks for the link layer
 * @param wio		wrapped I/O object handed to the link layer of the stack
 * @param flags		opening flags (THEX_UPLOAD_F_CHUNKED adds a chunking layer)
 *
 * @return An initialized THEX upload context.
 */
struct special_upload *
thex_upload_open(
	void *owner,
	const struct gnutella_host *host,
	const shared_file_t *sf,
	special_upload_writable_t writable,
	const struct tx_link_cb *link_cb,
	struct wrap_io *wio,
	int flags)
{
	struct thex_upload *ctx;

	WALLOC(ctx);

	/* Fill the "virtual method table" of the generic special upload */
	ctx->special_upload.read = thex_upload_read;
	ctx->special_upload.write = thex_upload_write;
	ctx->special_upload.flush = thex_upload_flush;
	ctx->special_upload.close = thex_upload_close;

	ctx->tth = atom_tth_get(shared_file_tth(sf));	/* Refcounted TTH atom */
	ctx->filesize = shared_file_size(sf);
	ctx->data = NULL;		/* THEX data generated lazily */
	ctx->size = 0;
	ctx->offset = 0;
	ctx->state = THEX_STATE_INITIAL;

	/*
	 * Instantiate the TX stack.
	 */

	{
		struct tx_link_args args;

		args.cb = link_cb;
		args.wio = wio;
		/* Select outgoing bandwidth scheduler based on remote address */
		args.bws = bsched_out_select_by_addr(gnet_host_get_addr(host));

		ctx->tx = tx_make(owner, host, tx_link_get_ops(), &args);
	}

	if (flags & THEX_UPLOAD_F_CHUNKED) {
		/* Add a chunking layer on top of the link layer */
		ctx->tx = tx_make_above(ctx->tx, tx_chunk_get_ops(), 0);
	}

	/*
	 * Put stack in "eager" mode: we want to be notified whenever
	 * we can write something.
	 */

	tx_srv_register(ctx->tx, writable, owner);
	tx_eager_mode(ctx->tx, TRUE);

	/*
	 * Update statistics.
	 */

	gnet_prop_incr_guint32(PROP_THEX_FILES_REQUESTED);

	return &ctx->special_upload;
}
/**
 * Prepare reception of query hit data by building an appropriate RX stack.
 *
 * @param bc		the browse host context
 * @param host		the host we are receiving from (copied into the context)
 * @param wio		wrapped I/O object handed to the link layer of the stack
 * @param vendor	remote vendor string (kept as a string atom)
 * @param flags		BH_DL_* flags selecting optional RX layers
 *
 * @return TRUE if we may continue with the download, FALSE if the search
 * was already closed in the GUI.
 */
gboolean
browse_host_dl_receive(
	struct browse_ctx *bc, gnet_host_t *host, wrap_io_t *wio,
	const char *vendor, guint32 flags)
{
	g_assert(bc != NULL);

	if (bc->closed)
		return FALSE;

	gnet_host_copy(&bc->host, host);
	bc->vendor = atom_str_get(vendor);

	/*
	 * Freeing of the RX stack must be asynchronous: each time we establish
	 * a new connection, dismantle the previous stack.  Otherwise the RX
	 * stack will be freed when the corresponding download structure is
	 * reclaimed.
	 */

	if (bc->rx != NULL) {
		rx_free(bc->rx);
		bc->rx = NULL;
	}

	{
		struct rx_link_args args;

		args.cb = &browse_rx_link_cb;
		/* Select incoming bandwidth scheduler based on remote address */
		args.bws = bsched_in_select_by_addr(gnet_host_get_addr(&bc->host));
		args.wio = wio;

		bc->rx = rx_make(bc, &bc->host, rx_link_get_ops(), &args);
	}

	if (flags & BH_DL_CHUNKED) {
		/* De-chunking layer, for "chunked" transfer encoding */
		struct rx_chunk_args args;

		args.cb = &browse_rx_chunk_cb;

		bc->rx = rx_make_above(bc->rx, rx_chunk_get_ops(), &args);
	}

	if (flags & BH_DL_INFLATE) {
		/* Decompression layer, for deflated payloads */
		struct rx_inflate_args args;

		args.cb = &browse_rx_inflate_cb;

		bc->rx = rx_make_above(bc->rx, rx_inflate_get_ops(), &args);
	}

	/* Route inbound data to the browse-host data indication routine */
	rx_set_data_ind(bc->rx, browse_data_ind);
	rx_enable(bc->rx);

	return TRUE;
}
/**
 * Alternative hash function for use in hash table and sets.
 *
 * Mixes both the address and the port of the host.
 */
uint G_HOT
gnet_host_hash2(const void *key)
{
	const gnet_host_t *h = key;

	return host_addr_port_hash2(gnet_host_get_addr(h), gnet_host_get_port(h));
}
/** * Compute maximum delay before we can issue an RPC to the specified host. * * @param host the host with whom we want to issue an RPC * @param type the type of RPC message we wish to send * * @return 0 if we can launch the RPC, the amount of seconds to wait * (conservative) if there is already a similar RPC pending. The amount * is conservative in the sense that it is the time up to the final * timeout, but if a reply comes back, we could launch it earlier. */ time_delta_t g2_rpc_launch_delay(const gnet_host_t *host, enum g2_msg type) { struct g2_rpc_key key; struct g2_rpc *gr; key.type = type; key.addr = gnet_host_get_addr(host); gr = hevset_lookup(g2_rpc_pending, &key); if (NULL == gr) return 0; /* Can issue RPC immediately */ return cq_remaining(gr->timeout_ev) / 1000; /* Seconds */ }
/** * Dump locally-emitted message block sent via UDP. */ void dump_tx_udp_packet(const gnet_host_t *to, const pmsg_t *mb) { if (GNET_PROPERTY(dump_transmitted_gnutella_packets)) { struct gnutella_node udp; g_assert(to != NULL); g_assert(mb != NULL); /* * Fill only the fields which will be perused by * dump_packet_from_to(). */ udp.peermode = NODE_P_UDP; udp.addr = gnet_host_get_addr(to); udp.port = gnet_host_get_port(to); dump_packet_from_to(&dump_tx, NULL, &udp, mb); } else if (dump_tx.initialized) { dump_disable(&dump_tx); } }
/**
 * Send buffer datagram to specified destination `to'.
 *
 * @returns amount of bytes written, or -1 on error with errno set.
 */
static ssize_t
tx_dgram_sendto(txdrv_t *tx, const gnet_host_t *to,
	gconstpointer data, size_t len)
{
	struct attr *attr = tx->opaque;
	ssize_t written;

	/* A zero port cannot be a valid destination */
	if (0 == gnet_host_get_port(to)) {
		errno = EINVAL;
		written = -1;
	} else {
		written = bio_sendto(attr->bio, to, data, len);
	}

	if ((ssize_t) -1 == written)
		return tx_dgram_write_error(tx, to, "tx_dgram_sendto");

	/* Account transmitted bytes with the upper layer, if requested */
	if (attr->cb->add_tx_written != NULL)
		attr->cb->add_tx_written(tx->owner, written);

	inet_udp_record_sent(gnet_host_get_addr(to));

	return written;
}
/* * @return stringified host vector as newly allocated string via halloc() */ char * gnet_host_vec_to_string(const gnet_host_vec_t *hvec) { str_t *s; uint i, n; g_return_val_if_fail(hvec, NULL); s = str_new(0); n = gnet_host_vec_count(hvec); for (i = 0; i < n; i++) { gnet_host_t host; gchar buf[128]; if (i > 0) { STR_CAT(s, ", "); } host = gnet_host_vec_get(hvec, i); host_addr_port_to_string_buf(gnet_host_get_addr(&host), gnet_host_get_port(&host), buf, sizeof buf); str_cat(s, buf); } return str_s2c_null(&s); }
/** * Periodic host heartbeat timer. */ void host_timer(void) { guint count; int missing; host_addr_t addr; guint16 port; host_type_t htype; guint max_nodes; gboolean empty_cache = FALSE; if (in_shutdown || !GNET_PROPERTY(online_mode)) return; max_nodes = settings_is_leaf() ? GNET_PROPERTY(max_ultrapeers) : GNET_PROPERTY(max_connections); count = node_count(); /* Established + connecting */ missing = node_keep_missing(); if (GNET_PROPERTY(host_debug) > 1) g_debug("host_timer - count %u, missing %u", count, missing); /* * If we are not connected to the Internet, apparently, make sure to * connect to at most one host, to avoid using all our hostcache. * Also, we don't connect each time we are called. */ if (!GNET_PROPERTY(is_inet_connected)) { static time_t last_try; if (last_try && delta_time(tm_time(), last_try) < 20) return; last_try = tm_time(); if (GNET_PROPERTY(host_debug)) g_debug("host_timer - not connected, trying to connect"); } /* * Allow more outgoing connections than the maximum amount of * established Gnet connection we can maintain, but not more * than quick_connect_pool_size This is the "greedy mode". */ if (count >= GNET_PROPERTY(quick_connect_pool_size)) { if (GNET_PROPERTY(host_debug) > 1) g_debug("host_timer - count %u >= pool size %u", count, GNET_PROPERTY(quick_connect_pool_size)); return; } if (count < max_nodes) missing -= whitelist_connect(); /* * If we are under the number of connections wanted, we add hosts * to the connection list */ htype = HOST_ULTRA; if ( settings_is_ultra() && GNET_PROPERTY(node_normal_count) < GNET_PROPERTY(normal_connections) && GNET_PROPERTY(node_ultra_count) >= (GNET_PROPERTY(up_connections) - GNET_PROPERTY(normal_connections)) ) { htype = HOST_ANY; } if (hcache_size(htype) == 0) htype = HOST_ANY; if (hcache_size(htype) == 0) empty_cache = TRUE; if (GNET_PROPERTY(host_debug) && missing > 0) g_debug("host_timer - missing %d host%s%s", missing, missing == 1 ? "" : "s", empty_cache ? 
" [empty caches]" : ""); if (!GNET_PROPERTY(stop_host_get)) { if (missing > 0) { static time_t last_try; unsigned fan, max_pool, to_add; max_pool = MAX(GNET_PROPERTY(quick_connect_pool_size), max_nodes); fan = (missing * GNET_PROPERTY(quick_connect_pool_size))/ max_pool; fan = MAX(1, fan); to_add = GNET_PROPERTY(is_inet_connected) ? fan : (guint) missing; /* * Every so many calls, attempt to ping all our neighbours to * get fresh pongs, in case our host cache is not containing * sufficiently fresh hosts and we keep getting connection failures. */ if ( 0 == last_try || delta_time(tm_time(), last_try) >= HOST_PINGING_PERIOD ) { ping_all_neighbours(); last_try = tm_time(); } /* * Make sure that we never use more connections then the * quick pool or the maximum number of hosts allow. */ if (to_add + count > max_pool) to_add = max_pool - count; if (GNET_PROPERTY(host_debug) > 2) { g_debug("host_timer - connecting - " "add: %d fan:%d miss:%d max_hosts:%d count:%d extra:%d", to_add, fan, missing, max_nodes, count, GNET_PROPERTY(quick_connect_pool_size)); } missing = to_add; if (missing > 0 && (0 == connected_nodes() || host_low_on_pongs)) { gnet_host_t host[HOST_DHT_MAX]; int hcount; int i; hcount = dht_fill_random(host, MIN(UNSIGNED(missing), G_N_ELEMENTS(host))); missing -= hcount; for (i = 0; i < hcount; i++) { addr = gnet_host_get_addr(&host[i]); port = gnet_host_get_port(&host[i]); if (!hcache_node_is_bad(addr)) { if (GNET_PROPERTY(host_debug) > 3) { g_debug("host_timer - UHC pinging and connecting " "to DHT node at %s", host_addr_port_to_string(addr, port)); } /* Try to use the host as an UHC before connecting */ udp_send_ping(NULL, addr, port, TRUE); if (!host_gnutella_connect(addr, port)) { missing++; /* Did not use entry */ } } else { missing++; /* Did not use entry */ } } } while (hcache_size(htype) && missing-- > 0) { if (hcache_get_caught(htype, &addr, &port)) { if (!(hostiles_check(addr) || hcache_node_is_bad(addr))) { if (!host_gnutella_connect(addr, port)) 
{ missing++; /* Did not use entry */ } } else { missing++; /* Did not use entry */ } } } if (missing > 0 && (empty_cache || host_cache_allow_bypass())) { if (!uhc_is_waiting()) { if (GNET_PROPERTY(host_debug)) g_debug("host_timer - querying UDP host cache"); uhc_get_hosts(); /* Get new hosts from UHCs */ } } } } else if (GNET_PROPERTY(use_netmasks)) { /* Try to find better hosts */ if (hcache_find_nearby(htype, &addr, &port)) { if (node_remove_worst(TRUE)) node_add(addr, port, 0); else hcache_add_caught(htype, addr, port, "nearby host"); } } }
/**
 * Creates a new browse host context.  The context must be freed with
 * browse_host_close().
 *
 * @param owner		the owner of the TX stack (the upload)
 * @param host		the host to which we're talking to
 * @param writable	service routine registered with the TX stack, invoked
 *					when the stack becomes writable (stack is in eager mode)
 * @param deflate_cb	callbacks for the deflate layer
 * @param link_cb	callbacks for the link layer
 * @param wio		wrapped I/O object handed to the link layer of the stack
 * @param flags		opening flags: exactly one of BH_F_HTML / BH_F_QHITS,
 *					plus optional BH_F_CHUNKED, BH_F_DEFLATE or BH_F_GZIP
 *
 * @return An initialized browse host context, or NULL if the compressing
 * TX stack could not be set up.
 */
struct special_upload *
browse_host_open(
	void *owner,
	struct gnutella_host *host,
	special_upload_writable_t writable,
	const struct tx_deflate_cb *deflate_cb,
	const struct tx_link_cb *link_cb,
	struct wrap_io *wio,
	int flags)
{
	struct browse_host_upload *bh;

	/* BH_HTML xor BH_QHITS set */
	g_assert(flags & (BH_F_HTML|BH_F_QHITS));
	g_assert((flags & (BH_F_HTML|BH_F_QHITS)) != (BH_F_HTML|BH_F_QHITS));

	WALLOC(bh);

	/* Fill the "virtual method table"; read routine depends on output type */
	bh->special.read = (flags & BH_F_HTML)
		? browse_host_read_html : browse_host_read_qhits;
	bh->special.write = browse_host_write;
	bh->special.flush = browse_host_flush;
	bh->special.close = browse_host_close;

	browse_host_next_state(bh, BH_STATE_HEADER);
	bh->hits = NULL;
	bh->file_index = 0;
	bh->flags = flags;

	/*
	 * Instantiate the TX stack.
	 */

	{
		struct tx_link_args args;

		args.cb = link_cb;
		args.wio = wio;
		/* Select outgoing bandwidth scheduler based on remote address */
		args.bws = bsched_out_select_by_addr(gnet_host_get_addr(host));

		bh->tx = tx_make(owner, host, tx_link_get_ops(), &args);
	}

	if (flags & BH_F_CHUNKED) {
		/* Add a chunking layer on top of the link layer */
		bh->tx = tx_make_above(bh->tx, tx_chunk_get_ops(), 0);
	}

	if (flags & (BH_F_DEFLATE | BH_F_GZIP)) {
		/* Add a compression layer (deflate or gzip) on top of the stack */
		struct tx_deflate_args args;
		txdrv_t *tx;

		args.cq = cq_main();
		args.cb = deflate_cb;
		args.nagle = FALSE;
		args.reduced = FALSE;
		args.gzip = 0 != (flags & BH_F_GZIP);
		args.buffer_flush = INT_MAX;	/* Flush only at the end */
		args.buffer_size = BH_BUFSIZ;

		tx = tx_make_above(bh->tx, tx_deflate_get_ops(), &args);
		if (tx == NULL) {
			/* Could not stack the compression layer: clean up and bail out */
			tx_free(bh->tx);
			link_cb->eof_remove(owner, "Cannot setup compressing TX stack");
			WFREE(bh);
			return NULL;
		}

		bh->tx = tx;
	}

	/*
	 * Put stack in "eager" mode: we want to be notified whenever
	 * we can write something.
	 */

	tx_srv_register(bh->tx, writable, owner);
	tx_eager_mode(bh->tx, TRUE);

	/*
	 * Update statistics.
	 */

	if (flags & BH_F_HTML) {
		gnet_prop_incr_guint32(PROP_HTML_BROWSE_COUNT);
	} else if (flags & BH_F_QHITS) {
		gnet_prop_incr_guint32(PROP_QHITS_BROWSE_COUNT);
	}

	return &bh->special;
}
/**
 * Add file to the current query hit.
 *
 * Builds the "H" (hit) sub-tree describing the shared file `sf' and
 * attaches it to the query hit tree under construction in `ctx', emitting
 * the children (URN, URL, CSC, PART, CT, DN, SZ, ALT) requested through
 * ctx->flags.
 *
 * @param ctx	the query hit builder (tree, dedup set, flags, running size)
 * @param sf	the shared file to describe in the hit
 *
 * @return TRUE if we kept the file, FALSE if we did not include it in the hit.
 */
static bool
g2_build_qh2_add(struct g2_qh2_builder *ctx, const shared_file_t *sf)
{
	const sha1_t *sha1;
	g2_tree_t *h, *c;

	shared_file_check(sf);

	/*
	 * Make sure the file is still in the library.
	 */

	if (0 == shared_file_index(sf))
		return FALSE;

	/*
	 * On G2, the H/URN child is required, meaning we need the SHA1 at least.
	 */

	if (!sha1_hash_available(sf))
		return FALSE;

	/*
	 * Do not send duplicates, as determined by the SHA1 of the resource.
	 *
	 * A user may share several files with different names but the same SHA1,
	 * and if all of them are hits, we only want to send one instance.
	 *
	 * When generating hits for host-browsing, we do not care about duplicates
	 * and ctx->hs is NULL then.
	 */

	sha1 = shared_file_sha1(sf);		/* This is an atom */

	if (ctx->hs != NULL) {
		if (hset_contains(ctx->hs, sha1))
			return FALSE;

		hset_insert(ctx->hs, sha1);
	}

	/*
	 * Create the "H" child and attach it to the current tree.
	 */

	if (NULL == ctx->t)
		g2_build_qh2_start(ctx);	/* Lazily open the enclosing /QH2 tree */

	h = g2_tree_alloc_empty("H");
	g2_tree_add_child(ctx->t, h);

	/*
	 * URN -- Universal Resource Name
	 *
	 * If there is a known TTH, then we can generate a bitprint, otherwise
	 * we just convey the SHA1.
	 */

	{
		const tth_t * const tth = shared_file_tth(sf);
		/* Large enough for prefix + SHA1 + TTH in the bitprint case */
		char payload[SHA1_RAW_SIZE + TTH_RAW_SIZE + sizeof G2_URN_BITPRINT];
		char *p = payload;

		if (NULL == tth) {
			p = mempcpy(p, G2_URN_SHA1, sizeof G2_URN_SHA1);
			p += clamp_memcpy(p, sizeof payload - ptr_diff(p, payload),
				sha1, SHA1_RAW_SIZE);
		} else {
			p = mempcpy(p, G2_URN_BITPRINT, sizeof G2_URN_BITPRINT);
			p += clamp_memcpy(p, sizeof payload - ptr_diff(p, payload),
				sha1, SHA1_RAW_SIZE);
			p += clamp_memcpy(p, sizeof payload - ptr_diff(p, payload),
				tth, TTH_RAW_SIZE);
		}

		g_assert(ptr_diff(p, payload) <= sizeof payload);

		c = g2_tree_alloc_copy("URN", payload, ptr_diff(p, payload));
		g2_tree_add_child(h, c);
	}

	/*
	 * URL -- empty to indicate that we share the file via uri-res.
	 */

	if (ctx->flags & QHIT_F_G2_URL) {
		uint known;
		uint16 csc;

		c = g2_tree_alloc_empty("URL");
		g2_tree_add_child(h, c);

		/*
		 * CSC -- if we know alternate sources, indicate how many in "CSC".
		 *
		 * This child is only emitted when they requested "URL".
		 */

		known = dmesh_count(sha1);
		csc = MIN(known, MAX_INT_VAL(uint16));	/* Clamp to 16-bit count */

		if (csc != 0) {
			char payload[2];

			poke_le16(payload, csc);
			c = g2_tree_alloc_copy("CSC", payload, sizeof payload);
			g2_tree_add_child(h, c);
		}

		/*
		 * PART -- if we only have a partial file, indicate how much we have.
		 *
		 * This child is only emitted when they requested "URL".
		 */

		if (shared_file_is_partial(sf) && !shared_file_is_finished(sf)) {
			filesize_t available = shared_file_available(sf);
			char payload[8];	/* If we have to encode file size as 64-bit */
			uint32 av32;
			time_t mtime = shared_file_modification_time(sf);

			c = g2_tree_alloc_empty("PART");
			g2_tree_add_child(h, c);

			av32 = available;
			if (av32 == available) {
				/* Fits within a 32-bit quantity */
				poke_le32(payload, av32);
				g2_tree_set_payload(c, payload, sizeof av32, TRUE);
			} else {
				/* Encode as a 64-bit quantity then */
				poke_le64(payload, available);
				g2_tree_set_payload(c, payload, sizeof payload, TRUE);
			}

			/*
			 * GTKG extension: encode the last modification time of the
			 * partial file in an "MT" child.  This lets the other party
			 * determine whether the host is still able to actively complete
			 * the file.
			 */

			poke_le32(payload, (uint32) mtime);
			g2_tree_add_child(c,
				g2_tree_alloc_copy("MT", payload, sizeof(uint32)));
		}

		/*
		 * CT -- creation time of the resource (GTKG extension).
		 */

		{
			time_t create_time = shared_file_creation_time(sf);

			if ((time_t) -1 != create_time) {
				char payload[8];
				int n;

				create_time = MAX(0, create_time);
				n = vlint_encode(create_time, payload);

				g2_tree_add_child(h,
					g2_tree_alloc_copy("CT", payload, n));	/* No trailing 0s */
			}
		}
	}

	/*
	 * DN -- distinguished name.
	 *
	 * Note that the presence of DN also governs the presence of SZ if the
	 * file length does not fit a 32-bit unsigned quantity.
	 */

	if (ctx->flags & QHIT_F_G2_DN) {
		char payload[8];	/* If we have to encode file size as 64-bit */
		uint32 fs32;
		filesize_t fs = shared_file_size(sf);
		const char *name;
		const char *rp;

		c = g2_tree_alloc_empty("DN");

		fs32 = fs;
		if (fs32 == fs) {
			/* Fits within a 32-bit quantity */
			poke_le32(payload, fs32);
			g2_tree_set_payload(c, payload, sizeof fs32, TRUE);
		} else {
			/* Does not fit a 32-bit quantity, emit a SZ child */
			poke_le64(payload, fs);
			g2_tree_add_child(h,
				g2_tree_alloc_copy("SZ", payload, sizeof payload));
		}

		name = shared_file_name_nfc(sf);
		g2_tree_append_payload(c, name, shared_file_name_nfc_len(sf));
		g2_tree_add_child(h, c);

		/*
		 * GTKG extension: if there is a file path, expose it as a "P" child
		 * under the DN node.
		 */

		rp = shared_file_relative_path(sf);
		if (rp != NULL) {
			g2_tree_add_child(c, g2_tree_alloc_copy("P", rp, strlen(rp)));
		}
	}

	/*
	 * GTKG extension: if they requested alt-locs in the /Q2/I with "A", then
	 * send them some known alt-locs in an "ALT" child.
	 *
	 * Note that these alt-locs can be for Gnutella hosts: since both Gnutella
	 * and G2 share a common HTTP-based file transfer mechanism with compatible
	 * extra headers, there is no need to handle them separately.
	 */

	if (ctx->flags & QHIT_F_G2_ALT) {
		gnet_host_t hvec[G2_BUILD_QH2_MAX_ALT];
		int hcnt = 0;

		hcnt = dmesh_fill_alternate(sha1, hvec, N_ITEMS(hvec));

		if (hcnt > 0) {
			int i;

			c = g2_tree_alloc_empty("ALT");

			for (i = 0; i < hcnt; i++) {
				host_addr_t addr;
				uint16 port;

				addr = gnet_host_get_addr(&hvec[i]);
				port = gnet_host_get_port(&hvec[i]);

				/* Only IPv4 alt-locs are serialized (6 bytes each) */
				if (host_addr_is_ipv4(addr)) {
					char payload[6];

					host_ip_port_poke(payload, addr, port, NULL);
					g2_tree_append_payload(c, payload, sizeof payload);
				}
			}

			/*
			 * If the payload is still empty, then drop the "ALT" child.
			 * Otherwise, attach it to the "H" node.
			 */

			if (NULL == g2_tree_node_payload(c, NULL)) {
				g2_tree_free_null(&c);
			} else {
				g2_tree_add_child(h, c);
			}
		}
	}

	/*
	 * Update the size of the query hit we're generating.
	 */

	ctx->current_size += g2_frame_serialize(h, NULL, 0);

	return TRUE;
}
/**
 * Send message block to IP:port.
 *
 * @param us	the UDP scheduler
 * @param mb	the message to send
 * @param to	the IP:port destination of the message
 * @param tx	the TX stack sending the message
 * @param cb	callback actions on the datagram
 *
 * @return TRUE if message was sent or dropped, FALSE if there is no more
 * bandwidth to send anything.
 */
static bool
udp_sched_mb_sendto(udp_sched_t *us, pmsg_t *mb, const gnet_host_t *to,
	const txdrv_t *tx, const struct tx_dgram_cb *cb)
{
	ssize_t r;
	int len = pmsg_size(mb);
	bio_source_t *bio = NULL;

	/* A zero port cannot be a valid destination: claim success silently */
	if (0 == gnet_host_get_port(to))
		return TRUE;

	/*
	 * Check whether message still needs to be sent.
	 */

	if (!pmsg_hook_check(mb))
		return TRUE;		/* Dropped */

	/*
	 * Select the proper I/O source depending on the network address type.
	 */

	switch (gnet_host_get_net(to)) {
	case NET_TYPE_IPV4:
		bio = us->bio[UDP_SCHED_IPv4];
		break;
	case NET_TYPE_IPV6:
		bio = us->bio[UDP_SCHED_IPv6];
		break;
	case NET_TYPE_NONE:
	case NET_TYPE_LOCAL:
		g_assert_not_reached();
	}

	/*
	 * If there is no I/O source, then the socket to send that type of traffic
	 * was cleared, hence we simply need to discard the message.
	 */

	if (NULL == bio) {
		udp_sched_log(4, "%p: discarding mb=%p (%d bytes) to %s",
			us, mb, pmsg_size(mb), gnet_host_to_string(to));
		return udp_tx_drop(tx, cb);		/* TRUE, for "sent" */
	}

	/*
	 * OK, proceed if we have bandwidth.
	 */

	r = bio_sendto(bio, to, pmsg_start(mb), len);

	if (r < 0) {		/* Error, or no bandwidth */
		/* Distinguish a real write error from mere bandwidth shortage */
		if (udp_sched_write_error(us, to, mb, G_STRFUNC)) {
			udp_sched_log(4, "%p: dropped mb=%p (%d bytes): %m",
				us, mb, pmsg_size(mb));
			return udp_tx_drop(tx, cb);	/* TRUE, for "sent" */
		}
		udp_sched_log(3, "%p: no bandwidth for mb=%p (%d bytes)",
			us, mb, pmsg_size(mb));
		us->used_all = TRUE;
		return FALSE;
	}

	if (r != len) {
		/* Partial datagram writes should not happen: log loudly */
		g_warning("%s: partial UDP write (%zd bytes) to %s "
			"for %d-byte datagram",
			G_STRFUNC, r, gnet_host_to_string(to), len);
	} else {
		udp_sched_log(5, "%p: sent mb=%p (%d bytes) prio=%u",
			us, mb, pmsg_size(mb), pmsg_prio(mb));
		pmsg_mark_sent(mb);
		/* Account the sent message with the owner of the TX stack */
		if (cb->msg_account != NULL)
			(*cb->msg_account)(tx->owner, mb);

		inet_udp_record_sent(gnet_host_get_addr(to));
	}

	return TRUE;		/* Message sent */
}
/**
 * Start a G2 RPC with the specified host.
 *
 * @param host		the host to which message is sent
 * @param mb		the message to send
 * @param cb		if non-NULL, callback to invoke on reply or timeout
 * @param arg		additional callback argument
 * @param timeout	amount of seconds before timeout
 *
 * @return TRUE if we initiated the RPC, FALSE if another of the same
 * kind was already in progress with the host.
 */
bool
g2_rpc_launch(const gnet_host_t *host, pmsg_t *mb,
	g2_rpc_cb_t cb, void *arg, unsigned timeout)
{
	struct g2_rpc *gr;
	struct g2_rpc_key key;
	gnutella_node_t *n;

	key.type = g2_msg_type_mb(mb);
	key.addr = gnet_host_get_addr(host);

	/*
	 * Because there is no MUID in /PI and /QKR messages, we cannot use that
	 * as a key to detect the RPC reply.  Therefore, we use the message type
	 * and the IP address of the host.  When a /PO or /QKA comes back, we'll
	 * be able to see whether we had a pending RPC from that host for that
	 * type of transaction.
	 *
	 * The downside is that we can only have one pending RPC at a time of
	 * a given kind towards a given IP address.  We don't use the port in
	 * the key because we cannot assume the reply will come from the same port
	 * we sent the message to, if the remote host is behind NAT or does not
	 * use its listening UDP socket to reply.
	 */

	if (hevset_contains(g2_rpc_pending, &key)) {
		if (GNET_PROPERTY(g2_rpc_debug)) {
			g_debug("%s(): cannot issue /%s RPC to %s: concurrent request",
				G_STRFUNC, g2_msg_type_name(key.type),
				gnet_host_to_string(host));
		}
		return FALSE;
	}

	/*
	 * Make sure the node is valid.
	 */

	n = node_udp_g2_get_addr_port(key.addr, gnet_host_get_port(host));

	if (NULL == n) {
		if (GNET_PROPERTY(g2_rpc_debug)) {
			g_debug("%s(): cannot issue /%s RPC to %s: cannot get G2 node",
				G_STRFUNC, g2_msg_type_name(key.type),
				gnet_host_to_string(host));
		}
		return FALSE;	/* Invalid node, or G2 disabled */
	}

	/*
	 * Good, we can issue the RPC.
	 */

	WALLOC(gr);
	gr->magic = G2_RPC_MAGIC;
	gr->key = key;			/* struct copy */
	gr->cb = cb;
	gr->arg = arg;
	gr->timeout_ev = cq_main_insert(timeout * 1000, g2_rpc_timeout, gr);

	hevset_insert(g2_rpc_pending, gr);

	if (GNET_PROPERTY(g2_rpc_debug) > 1) {
		g_debug("%s(): issuing /%s RPC to %s, timeout %u sec%s",
			G_STRFUNC, g2_msg_type_name(key.type),
			gnet_host_to_string(host), timeout, plural(timeout));
	}

	/*
	 * Do not send RPCs reliably: this can cause problems if we don't receive
	 * the ACK back, yet the message was received and processed remotely: the
	 * remote host will send a reply back and the message will still appear to
	 * be "unsent" locally.
	 *
	 * Furthermore, this alleviates the need for the remote side to actually
	 * acknowledge the request: targeted hosts can be busy so it's best to
	 * make the RPC "unreliable" to limit processing and bandwidth requirements.
	 */

	g2_node_send(n, mb);

	return TRUE;
}
/**
 * Prepare reception of THEX data by building an appropriate RX stack.
 *
 * @param ctx			the THEX download context
 * @param content_length	the Content-Length from the HTTP reply, or 0 when
 *						unknown (e.g. when the output is chunked)
 * @param host			the host we receive from (copied into the context)
 * @param wio			wrapped I/O object handed to the link layer
 * @param flags			THEX_DOWNLOAD_F_* flags selecting optional RX layers
 *
 * @return TRUE if we may continue with the download.
 */
bool
thex_download_receive(struct thex_download *ctx,
	filesize_t content_length,
	gnet_host_t *host, struct wrap_io *wio, uint32 flags)
{
	g_assert(ctx != NULL);

	gnet_host_copy(&ctx->host, host);

	/*
	 * Freeing of the RX stack must be asynchronous: each time we establish
	 * a new connection, dismantle the previous stack.  Otherwise the RX
	 * stack will be freed when the corresponding download structure is
	 * reclaimed.
	 */

	if (ctx->rx != NULL) {
		rx_free(ctx->rx);
		ctx->rx = NULL;
	}

	/*
	 * If there is a Content-Length indication in the HTTP reply, it is
	 * supplied here and will be used as a limit of the data we'll read.
	 *
	 * If there was none (for instance if the output is chunked), then 0
	 * is given and we'll use a hardwired maximum.
	 */

	if (content_length > MAX_INT_VAL(size_t))
		return FALSE;		/* Too large for a size_t on this platform */

	ctx->max_size = content_length
		? (size_t) content_length : THEX_DOWNLOAD_MAX_SIZE;

	{
		struct rx_link_args args;

		args.cb = &thex_rx_link_cb;
		/* Select incoming bandwidth scheduler based on remote address */
		args.bws = bsched_in_select_by_addr(gnet_host_get_addr(&ctx->host));
		args.wio = wio;

		ctx->rx = rx_make(ctx, &ctx->host, rx_link_get_ops(), &args);
	}

	if (flags & THEX_DOWNLOAD_F_CHUNKED) {
		/* De-chunking layer, for "chunked" transfer encoding */
		struct rx_chunk_args args;

		args.cb = &thex_rx_chunk_cb;

		ctx->rx = rx_make_above(ctx->rx, rx_chunk_get_ops(), &args);
	}

	if (flags & THEX_DOWNLOAD_F_INFLATE) {
		/* Decompression layer, for deflated payloads */
		struct rx_inflate_args args;

		args.cb = &thex_rx_inflate_cb;

		ctx->rx = rx_make_above(ctx->rx, rx_inflate_get_ops(), &args);
	}

	/* Route inbound data to the THEX data indication routine */
	rx_set_data_ind(ctx->rx, thex_download_data_ind);
	rx_enable(ctx->rx);

	return TRUE;
}
/**
 * Create a new Gnutella host vector out of a sequence of gnet_host_t items.
 *
 * Only IPv4 and IPv6 hosts are retained, and the amount kept per address
 * family is capped at 255 (extra hosts of that family are silently dropped).
 *
 * @param s		the sequence of gnet_host_t items to convert
 *
 * @return the new host vector, or NULL if the sequence is empty or contains
 * no IPv4/IPv6 host.
 */
static gnet_host_vec_t *
gnet_host_vec_from_sequence(sequence_t *s)
{
	sequence_iter_t *iter;
	gnet_host_vec_t *vec;
	uint n_ipv6 = 0, n_ipv4 = 0, hcnt;

	if (sequence_is_empty(s))
		return NULL;

	/*
	 * First pass: count IPv4 and IPv6 hosts so that the vector arrays
	 * can be allocated with the right sizes.
	 */

	hcnt = 0;
	iter = sequence_forward_iterator(s);
	while (sequence_iter_has_next(iter)) {
		const gnet_host_t *host = sequence_iter_next(iter);

		switch (gnet_host_get_net(host)) {
		case NET_TYPE_IPV4:
			n_ipv4++;
			hcnt++;
			break;
		case NET_TYPE_IPV6:
			n_ipv6++;
			hcnt++;
			break;
		case NET_TYPE_LOCAL:
		case NET_TYPE_NONE:
			break;		/* Not kept in the vector */
		}
	}
	sequence_iterator_release(&iter);
	if (0 == hcnt)
		return NULL;

	vec = gnet_host_vec_alloc();
	/* Counts capped at 255 — presumably stored in a byte; TODO confirm */
	vec->n_ipv4 = MIN(n_ipv4, 255);
	vec->n_ipv6 = MIN(n_ipv6, 255);

	if (vec->n_ipv4 > 0)
		WALLOC_ARRAY(vec->hvec_v4, vec->n_ipv4);
	if (vec->n_ipv6 > 0)
		WALLOC_ARRAY(vec->hvec_v6, vec->n_ipv6);

	/*
	 * Second pass: serialize each retained host into the vector arrays,
	 * address first (IPv4 in big-endian) then port in little-endian.
	 */

	n_ipv4 = 0;		/* Reused as fill indices for the second pass */
	n_ipv6 = 0;
	iter = sequence_forward_iterator(s);
	while (sequence_iter_has_next(iter)) {
		const gnet_host_t *host = sequence_iter_next(iter);
		host_addr_t addr = gnet_host_get_addr(host);
		uint16 port = gnet_host_get_port(host);

		switch (gnet_host_get_net(host)) {
		case NET_TYPE_IPV4:
			if (n_ipv4 < vec->n_ipv4) {
				char *dest = cast_to_pointer(&vec->hvec_v4[n_ipv4++]);
				poke_be32(&dest[0], host_addr_ipv4(addr));
				poke_le16(&dest[4], port);
			}
			break;
		case NET_TYPE_IPV6:
			if (n_ipv6 < vec->n_ipv6) {
				char *dest = cast_to_pointer(&vec->hvec_v6[n_ipv6++]);
				dest = mempcpy(dest, host_addr_ipv6(&addr), 16);
				poke_le16(dest, port);
			}
			break;
		case NET_TYPE_LOCAL:
		case NET_TYPE_NONE:
			break;
		}
	}
	sequence_iterator_release(&iter);

	return vec;
}