Example #1
/**
 * Checks whether an entry exists in the search queue for the given
 * search handle.
 */
static bool
sqh_exists(squeue_t *sq, gnet_search_t sh)
{
	g_assert(sq != NULL);

	return hset_contains(sq->handles, uint_to_pointer(sh));
}
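All of these examples revolve around gtk-gnutella's hset API. As a reference point, here is a minimal sketch (ours, not from the source) of the self-keyed idiom Example #1 relies on: small integers are packed into pointer keys with uint_to_pointer(), and the set is created with HASH_KEY_SELF as in Example #6 further down.
/*
 * Minimal sketch, not from the gtk-gnutella source: a self-keyed set of
 * small integers, using only primitives shown elsewhere on this page.
 */
static void
sketch_uint_set(void)
{
	hset_t *hs = hset_create(HASH_KEY_SELF, 0);	/* Keys hashed by value */

	hset_insert(hs, uint_to_pointer(42));

	if (hset_contains(hs, uint_to_pointer(42)))
		hset_remove(hs, uint_to_pointer(42));

	hset_free_null(&hs);	/* Frees the set and NULL-ifies the pointer */
}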
Example #2
/**
 * DBMW foreach iterator to reload keyinfo.
 *
 * @return TRUE if persisted entry can be deleted.
 */
static G_GNUC_COLD bool
reload_ki(void *key, void *value, size_t u_len, void *data)
{
	struct keys_create_context *ctx = data;
	const struct keydata *kd = value;
	const kuid_t *id = key;
	struct keyinfo *ki;
	size_t common;
	time_t next_expire = TIME_T_MAX, last_expire = 0, now = tm_time();
	unsigned i;

	(void) u_len;

	if (0 == kd->values)
		return TRUE;

	/*
	 * Check whether key has expired: if all the values associated with it
	 * have expired, there will be no need to recreate the keyinfo and
	 * retrieve the associated value data.
	 */

	for (i = 0; i < kd->values; i++) {
		last_expire = MAX(last_expire, kd->expire[i]);
	}

	if (delta_time(now, last_expire) >= 0)
		return TRUE;

	/*
	 * We're going to keep at least one of the values, prepare keyinfo.
	 */

	common = kuid_common_prefix(ctx->our_kuid, id);
	ki = allocate_keyinfo(id, common);
	hikset_insert_key(keys, &ki->kuid);

	/*
	 * Values will be inserted later, just prepare the DB keys we need to
	 * load to restore them.
	 */

	for (i = 0; i < kd->values; i++) {
		uint64 dbkey = kd->dbkeys[i];

		if (delta_time(now, kd->expire[i]) >= 0)
			continue;		/* This value has already expired */

		next_expire = MIN(next_expire, kd->expire[i]);

		if (!hset_contains(ctx->dbkeys, &dbkey)) {
			const uint64 *dbatom = atom_uint64_get(&dbkey);
			hset_insert(ctx->dbkeys, dbatom);
		}
	}

	ki->next_expire = next_expire;

	return FALSE;		/* Keep keydata */
}
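The expiry tests in reload_ki() follow one convention: delta_time(now, stamp) is non-negative once stamp is no longer in the future. A one-line helper isolating that convention (the helper name is hypothetical, not from the source):
/*
 * Sketch: the expiration convention used by reload_ki() above.
 */
static inline bool
sketch_value_expired(time_t expire, time_t now)
{
	return delta_time(now, expire) >= 0;	/* Expired unless still in the future */
}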
Example #3
static inline void
nodes_gui_reverse_lookup_selected_helper(GtkTreeModel *model,
		GtkTreePath *unused_path, GtkTreeIter *iter, gpointer unused_data)
{
	struct node_data *data;
	gnet_node_info_t info;

	(void) unused_path;
	(void) unused_data;

	gtk_tree_model_get(model, iter, 0, &data, (-1));
	g_assert(NULL != find_node(data->node_id));

	if (hset_contains(ht_pending_lookups, data->node_id))
		return;

	guc_node_fill_info(data->node_id, &info);
	g_assert(data->node_id == info.node_id);

	if (!info.is_pseudo) {
		const struct nid *key = nid_ref(data->node_id);

		WFREE_NULL(data->host, data->host_size);
		data->host_size = w_concat_strings(&data->host,
				_("Reverse lookup in progress..."),
				" (", host_addr_port_to_string(info.addr, info.port), ")",
				(void *) 0);

		hset_insert(ht_pending_lookups, key);
		adns_reverse_lookup(info.addr, host_lookup_callback,
			deconstify_gpointer(nid_ref(key)));
	}
	guc_node_clear_info(&info);
}
Example #4
/**
 * Callback invoked when the node's user-visible flags are changed.
 *
 * This schedules an update of the node information in the GUI at the
 * next tick.
 */
static void
nodes_gui_node_flags_changed(const struct nid *node_id)
{
	if (!hset_contains(hs_node_flags_changed, node_id)) {
		const struct nid *key = nid_ref(node_id);
		hset_insert(hs_node_flags_changed, key);
	}
}
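Here the set doubles as a per-node "update pending" flag, and each inserted key holds a reference taken with nid_ref(). A hedged sketch of what the consuming tick could look like, assuming nid_unref() is the counterpart of nid_ref() and update_node_flags() stands in for the real refresh routine (both names are ours):
/*
 * Hedged sketch, not from the source: draining hs_node_flags_changed at
 * tick time.  nid_unref() and update_node_flags() are assumed names.
 */
static void
sketch_flush_flag_change(void *key, void *unused_data)
{
	const struct nid *node_id = key;

	(void) unused_data;
	update_node_flags(node_id);	/* Hypothetical per-node GUI refresh */
	nid_unref(node_id);			/* Balances the nid_ref() done at insert time */
}
The real tick handler would then empty the set, for which something like the hset_foreach_remove() mentioned in Example #15's comments would fit.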
Example #5
/**
 * Unregister object from the hash table.
 */
static void
cq_unregister_object(hset_t *h, void *o)
{
	g_assert(h != NULL);
	g_assert(o != NULL);
	g_assert(hset_contains(h, o));

	hset_remove(h, o);
}
Example #6
/**
 * Register object in the supplied hash table (passed by reference, created
 * if it does not already exist).
 */
static void
cq_register_object(hset_t **hptr, void *o)
{
	hset_t *h = *hptr;

	g_assert(o != NULL);

	if (NULL == h)
		*hptr = h = hset_create(HASH_KEY_SELF, 0);

	g_assert(!hset_contains(h, o));

	hset_insert(h, o);
}
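Examples #5 and #6 form a pair: a lazily created membership set whose assertions enforce balanced calls. A usage sketch under that reading:
/*
 * Usage sketch (assumed, not from the source): balanced registration
 * around an object's lifetime.
 */
static hset_t *cq_objects;	/* Lazily created on first registration */

static void
sketch_object_lifetime(void *o)
{
	cq_register_object(&cq_objects, o);		/* Asserts o was absent */
	/* ... o is live and tracked here ... */
	cq_unregister_object(cq_objects, o);	/* Asserts o was present */
}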
Example #7
static void
result_data_free(search_t *search, struct result_data *rd)
{
	record_check(rd->record);

	g_assert(hset_contains(search->dups, rd->record));
	hset_remove(search->dups, rd->record);
	search_gui_unref_record(rd->record);	/* Drops the dups set's reference */

	search_gui_unref_record(rd->record);	/* Drops this row's own reference */
	/*
	 * rd->record may point to freed memory now if this was the last reference
	 */

	WFREE(rd);
}
Example #8
static GSList *
resolve_hostname(const char *host, enum net_type net)
#ifdef HAS_GETADDRINFO
{
	static const struct addrinfo zero_hints;
	struct addrinfo hints, *ai, *ai0 = NULL;
	hset_t *hs;
	GSList *sl_addr;
	int error;

	g_assert(host);

	hints = zero_hints;
	hints.ai_family = net_type_to_pf(net);

	error = getaddrinfo(host, NULL, &hints, &ai0);
	if (error) {
		g_message("getaddrinfo() failed for \"%s\": %s",
				host, gai_strerror(error));
		return NULL;
	}

	sl_addr = NULL;
	hs = hset_create_any(host_addr_hash_func, NULL, host_addr_eq_func);
	for (ai = ai0; ai; ai = ai->ai_next) {
		host_addr_t addr;

		if (!ai->ai_addr)
			continue;

		addr = addrinfo_to_addr(ai);

		if (is_host_addr(addr) && !hset_contains(hs, &addr)) {
			host_addr_t *addr_copy;

			addr_copy = wcopy(&addr, sizeof addr);
			sl_addr = g_slist_prepend(sl_addr, addr_copy);
			hset_insert(hs, addr_copy);
		}
	}
	hset_free_null(&hs);

	if (ai0)
		freeaddrinfo(ai0);

	return g_slist_reverse(sl_addr);
}
#endif	/* HAS_GETADDRINFO */
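A hedged usage sketch for resolve_hostname(): the list entries are host_addr_t copies obtained through wcopy(), so each is released with wfree(), its assumed counterpart; NET_TYPE_IPV4 is one plausible net_type value, and both assumptions are flagged in the code.
/*
 * Usage sketch, not from the source: resolve a name, walk the unique
 * addresses, then release everything.  wfree() as the counterpart of
 * wcopy() and the NET_TYPE_IPV4 constant are assumptions.
 */
static void
sketch_resolve(void)
{
	GSList *sl, *addrs = resolve_hostname("www.example.com", NET_TYPE_IPV4);

	for (sl = addrs; sl != NULL; sl = g_slist_next(sl)) {
		host_addr_t *addr = sl->data;

		/* ... use *addr ... */
		wfree(addr, sizeof *addr);	/* Each entry came from wcopy() */
	}
	g_slist_free(addrs);
}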
Example #9
/**
 * Send message (eslist iterator callback).
 *
 * @return TRUE if message was sent and freed up.
 */
static bool
udp_tx_desc_send(void *data, void *udata)
{
	struct udp_tx_desc *txd = data;
	udp_sched_t *us = udata;
	unsigned prio;

	udp_sched_check(us);
	udp_tx_desc_check(txd);

	if (us->used_all)
		return FALSE;

	/*
	 * Avoid flushing consecutive queued messages to the same destination,
	 * for regular (non-priority) messages.
	 *
	 * This serves two purposes:
	 *
	 * 1- It makes sure one single host does not capture all the available
	 *    outgoing bandwidth.
	 *
	 * 2- It somewhat delays consecutive packets to a given host, thereby
	 *    reducing flooding and hopefully avoiding saturation of its RX flow.
	 */

	prio = pmsg_prio(txd->mb);

	if (PMSG_P_DATA == prio && hset_contains(us->seen, txd->to)) {
		udp_sched_log(2, "%p: skipping mb=%p (%d bytes) to %s",
			us, txd->mb, pmsg_size(txd->mb), gnet_host_to_string(txd->to));
		return FALSE;
	}

	if (udp_sched_mb_sendto(us, txd->mb, txd->to, txd->tx, txd->cb)) {
		if (PMSG_P_DATA == prio && pmsg_was_sent(txd->mb))
			hset_insert(us->seen, atom_host_get(txd->to));
	} else {
		return FALSE;		/* Unsent, leave it in the queue */
	}

	us->buffered = size_saturate_sub(us->buffered, pmsg_size(txd->mb));
	udp_tx_desc_flag_release(txd, us);
	return TRUE;
}
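Between two scheduling rounds, us->seen must be emptied and the host atoms taken with atom_host_get() released. A hedged sketch of the per-entry cleanup, assuming atom_host_free() mirrors atom_host_get() and that hset_foreach() callbacks receive the key plus a user-data pointer:
/*
 * Hedged sketch, not from the source: releasing one "seen" host atom.
 * atom_host_free() and the callback signature are assumptions.
 */
static void
sketch_seen_free(void *key, void *unused_data)
{
	(void) unused_data;
	atom_host_free(key);	/* Balances the atom_host_get() done when sending */
}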
Example #10
/**
 * Emit namespace declarations.
 */
static void
xfmt_pass2_declare_ns(struct xfmt_pass2 *xp2, GSList *ns)
{
    GSList *sl;

    GM_SLIST_FOREACH(ns, sl) {
        const char *prefix = sl->data;
        const char *uri;
        int c;

        /*
         * Do not declare the "xml" namespace.
         */

        if (0 == strcmp(prefix, VXS_XML))
            continue;

        /*
         * We don't need to declare the default namespace though, unless
         * it is used in attributes (since there is no default namespace
         * for attributes).
         */

        uri = symtab_lookup(xp2->prefixes, prefix);

        if (
            xp2->default_ns != NULL && 0 == strcmp(uri, xp2->default_ns) &&
            !hset_contains(xp2->attr_uris, xp2->default_ns)
        )
            continue;

        c = xfmt_quoting_char(uri);
        g_assert(c != '\0');
        ostream_printf(xp2->os, " xmlns:%s=%c%s%c", prefix, c, uri, c);
    }
}
Example #11
/**
 * Add file to the current query hit.
 *
 * @return TRUE if we kept the file, FALSE if we did not include it in the hit.
 */
static bool
g2_build_qh2_add(struct g2_qh2_builder *ctx, const shared_file_t *sf)
{
	const sha1_t *sha1;
	g2_tree_t *h, *c;

	shared_file_check(sf);

	/*
	 * Make sure the file is still in the library.
	 */

	if (0 == shared_file_index(sf))
		return FALSE;

	/*
	 * On G2, the H/URN child is required, meaning we need the SHA1 at least.
	 */

	if (!sha1_hash_available(sf))
		return FALSE;

	/*
	 * Do not send duplicates, as determined by the SHA1 of the resource.
	 *
	 * A user may share several files with different names but the same SHA1,
	 * and if all of them are hits, we only want to send one instance.
	 *
	 * When generating hits for host-browsing, we do not care about duplicates
	 * and ctx->hs is NULL then.
	 */

	sha1 = shared_file_sha1(sf);		/* This is an atom */

	if (ctx->hs != NULL) {
		if (hset_contains(ctx->hs, sha1))
			return FALSE;

		hset_insert(ctx->hs, sha1);
	}

	/*
	 * Create the "H" child and attach it to the current tree.
	 */

	if (NULL == ctx->t)
		g2_build_qh2_start(ctx);

	h = g2_tree_alloc_empty("H");
	g2_tree_add_child(ctx->t, h);

	/*
	 * URN -- Universal Resource Name
	 *
	 * If there is a known TTH, then we can generate a bitprint, otherwise
	 * we just convey the SHA1.
	 */

	{
		const tth_t * const tth = shared_file_tth(sf);
		char payload[SHA1_RAW_SIZE + TTH_RAW_SIZE + sizeof G2_URN_BITPRINT];
		char *p = payload;

		if (NULL == tth) {
			p = mempcpy(p, G2_URN_SHA1, sizeof G2_URN_SHA1);
			p += clamp_memcpy(p, sizeof payload - ptr_diff(p, payload),
				sha1, SHA1_RAW_SIZE);
		} else {
			p = mempcpy(p, G2_URN_BITPRINT, sizeof G2_URN_BITPRINT);
			p += clamp_memcpy(p, sizeof payload - ptr_diff(p, payload),
				sha1, SHA1_RAW_SIZE);
			p += clamp_memcpy(p, sizeof payload - ptr_diff(p, payload),
				tth, TTH_RAW_SIZE);
		}

		g_assert(ptr_diff(p, payload) <= sizeof payload);

		c = g2_tree_alloc_copy("URN", payload, ptr_diff(p, payload));
		g2_tree_add_child(h, c);
	}

	/*
	 * URL -- empty to indicate that we share the file via uri-res.
	 */

	if (ctx->flags & QHIT_F_G2_URL) {
		uint known;
		uint16 csc;

		c = g2_tree_alloc_empty("URL");
		g2_tree_add_child(h, c);

		/*
		 * CSC -- if we know alternate sources, indicate how many in "CSC".
		 *
		 * This child is only emitted when they requested "URL".
		 */

		known = dmesh_count(sha1);
		csc = MIN(known, MAX_INT_VAL(uint16));

		if (csc != 0) {
			char payload[2];

			poke_le16(payload, csc);
			c = g2_tree_alloc_copy("CSC", payload, sizeof payload);
			g2_tree_add_child(h, c);
		}

		/*
		 * PART -- if we only have a partial file, indicate how much we have.
		 *
		 * This child is only emitted when they requested "URL".
		 */

		if (shared_file_is_partial(sf) && !shared_file_is_finished(sf)) {
			filesize_t available = shared_file_available(sf);
			char payload[8];	/* If we have to encode file size as 64-bit */
			uint32 av32;
			time_t mtime = shared_file_modification_time(sf);

			c = g2_tree_alloc_empty("PART");
			g2_tree_add_child(h, c);

			av32 = available;
			if (av32 == available) {
				/* Fits within a 32-bit quantity */
				poke_le32(payload, av32);
				g2_tree_set_payload(c, payload, sizeof av32, TRUE);
			} else {
				/* Encode as a 64-bit quantity then */
				poke_le64(payload, available);
				g2_tree_set_payload(c, payload, sizeof payload, TRUE);
			}

			/*
			 * GTKG extension: encode the last modification time of the
			 * partial file in an "MT" child.  This lets the other party
			 * determine whether the host is still able to actively complete
			 * the file.
			 */

			poke_le32(payload, (uint32) mtime);
			g2_tree_add_child(c,
				g2_tree_alloc_copy("MT", payload, sizeof(uint32)));
		}

		/*
		 * CT -- creation time of the resource (GTKG extension).
		 */

		{
			time_t create_time = shared_file_creation_time(sf);

			if ((time_t) -1 != create_time) {
				char payload[8];
				int n;

				create_time = MAX(0, create_time);
				n = vlint_encode(create_time, payload);
				g2_tree_add_child(h,
					g2_tree_alloc_copy("CT", payload, n));	/* No trailing 0s */
			}
		}
	}

	/*
	 * DN -- distinguished name.
	 *
	 * Note that the presence of DN also governs the presence of SZ if the
	 * file length does not fit a 32-bit unsigned quantity.
	 */

	if (ctx->flags & QHIT_F_G2_DN) {
		char payload[8];		/* If we have to encode file size as 64-bit */
		uint32 fs32;
		filesize_t fs = shared_file_size(sf);
		const char *name;
		const char *rp;

		c = g2_tree_alloc_empty("DN");

		fs32 = fs;
		if (fs32 == fs) {
			/* Fits within a 32-bit quantity */
			poke_le32(payload, fs32);
			g2_tree_set_payload(c, payload, sizeof fs32, TRUE);
		} else {
			/* Does not fit a 32-bit quantity, emit a SZ child */
			poke_le64(payload, fs);
			g2_tree_add_child(h,
				g2_tree_alloc_copy("SZ", payload, sizeof payload));
		}

		name = shared_file_name_nfc(sf);
		g2_tree_append_payload(c, name, shared_file_name_nfc_len(sf));
		g2_tree_add_child(h, c);

		/*
		 * GTKG extension: if there is a file path, expose it as a "P" child
		 * under the DN node.
		 */

		rp = shared_file_relative_path(sf);
		if (rp != NULL) {
			g2_tree_add_child(c, g2_tree_alloc_copy("P", rp, strlen(rp)));
		}
	}

	/*
	 * GTKG extension: if they requested alt-locs in the /Q2/I with "A", then
	 * send them some known alt-locs in an "ALT" child.
	 *
	 * Note that these alt-locs can be for Gnutella hosts: since both Gnutella
	 * and G2 share a common HTTP-based file transfer mechanism with compatible
	 * extra headers, there is no need to handle them separately.
	 */

	if (ctx->flags & QHIT_F_G2_ALT) {
		gnet_host_t hvec[G2_BUILD_QH2_MAX_ALT];
		int hcnt = 0;

		hcnt = dmesh_fill_alternate(sha1, hvec, N_ITEMS(hvec));

		if (hcnt > 0) {
			int i;

			c = g2_tree_alloc_empty("ALT");

			for (i = 0; i < hcnt; i++) {
				host_addr_t addr;
				uint16 port;

				addr = gnet_host_get_addr(&hvec[i]);
				port = gnet_host_get_port(&hvec[i]);

				if (host_addr_is_ipv4(addr)) {
					char payload[6];

					host_ip_port_poke(payload, addr, port, NULL);
					g2_tree_append_payload(c, payload, sizeof payload);
				}
			}

			/*
			 * If the payload is still empty, then drop the "ALT" child.
			 * Otherwise, attach it to the "H" node.
			 */

			if (NULL == g2_tree_node_payload(c, NULL)) {
				g2_tree_free_null(&c);
			} else {
				g2_tree_add_child(h, c);
			}
		}
	}

	/*
	 * Update the size of the query hit we're generating.
	 */

	ctx->current_size += g2_frame_serialize(h, NULL, 0);

	return TRUE;
}
Example #12
/**
 * Removes the URL from the set of known URLs, but does not free its
 * memory: the URL is kept in the set of failed URLs for the session.
 */
static void
gwc_forget_url(const char *url)
{
    struct gwc url_tmp[MAX_GWC_URLS];			/* Temporary copy */
    int count = hset_count(gwc_known_url);
    int i;
    int j = 0;

    g_assert(count > 0);
    g_assert(count <= MAX_GWC_URLS);
    g_assert(count == MAX_GWC_URLS || gwc_url_slot < count);
    g_assert(gwc_url_slot >= 0);
    STATIC_ASSERT(sizeof(url_tmp) == sizeof(gwc_url));

    if (GNET_PROPERTY(bootstrap_debug))
        g_warning("forgetting GWC URL \"%s\"", url);

    /*
     * It is possible that the URL we're trying to forget was
     * already removed from the cache if it was at a slot overridden
     * in the round-robin buffer, should we have received new GWC URLs
     * since it was selected.
     */

    if (hset_contains(gwc_known_url, url))
        hset_remove(gwc_known_url, url);
    else {
        if (GNET_PROPERTY(bootstrap_debug))
            g_warning("URL was already gone from GWC");
        return;
    }
    hset_insert(gwc_failed_url, url);

    /*
     * Because we have a round-robin buffer, removing something in the
     * middle of the buffer is not straightforward.  The `gwc_url_slot'
     * variable points to the last filled value in the buffer.
     *
     * We're going to build a copy in url_tmp[], filled from 0 to "count - 1",
     * and we'll move that copy back into the regular gwc_url[] cache.
     * The reason is that since there will be fewer entries in the cache
     * than the maximum amount, the round-robin buffer must be linearly
     * filled from 0 upwards.
     */

    memset(url_tmp, 0, sizeof(url_tmp));

    if (count == MAX_GWC_URLS) {		/* Buffer was full */
        for (i = gwc_url_slot;;) {
            if (gwc_url[i].url != url)	/* Atoms: we can compare addresses */
                url_tmp[j++] = gwc_url[i];
            i++;
            if (i == MAX_GWC_URLS)
                i = 0;
            if (i == gwc_url_slot)		/* Back to where we started */
                break;
        }
    } else {							/* Buffer was partially filled */
        for (i = 0; i <= gwc_url_slot; i++) {
            if (gwc_url[i].url != url)	/* Atoms: we can compare addresses */
                url_tmp[j++] = gwc_url[i];
        }
    }

    count--;							/* New amount of data in cache */
    gwc_url_slot = j - 1;				/* Last position we filled */
    gwc_url_slot = MAX(0, gwc_url_slot);	/* If we removed ALL entries */
    g_assert(gwc_url_slot == MAX(0, count - 1));
    memcpy(gwc_url, url_tmp, sizeof(gwc_url));
    g_assert(gwc_url_slot >= 0 && gwc_url_slot < MAX_GWC_URLS);

    gwc_file_dirty = TRUE;
}
Example #13
/**
 * Add new URL to cache, possibly pushing off an older one if cache is full.
 *
 * @return TRUE if the URL was added, FALSE otherwise.
 */
static bool
gwc_add(const char *new_url)
{
    const char *url_atom;
    const char *old_url;
    char *url, *ret;

    url = h_strdup(new_url); /* url_normalize() can modify the URL */

    ret = url_normalize(url, URL_POLICY_GWC_RULES);
    if (!ret) {
        g_warning("%s(): ignoring bad web cache URL \"%s\"",
                  G_STRFUNC, new_url);
        HFREE_NULL(url);
        return FALSE;
    }
    if (ret != url) {
        HFREE_NULL(url);
        url = ret;
    }

    /*
     * Don't add duplicates to the cache.
     */

    if (
        hset_contains(gwc_known_url, url) ||
        hset_contains(gwc_failed_url, url)
    ) {
        HFREE_NULL(url);
        return FALSE;
    }

    /*
     * OK, record new entry at the `gwc_url_slot'.
     */

    if (++gwc_url_slot >= MAX_GWC_URLS)
        gwc_url_slot = 0;

    g_assert(url != NULL);
    url_atom = atom_str_get(url);
    HFREE_NULL(url);

    /*
     * Expire any entry present at the slot we're about to write into.
     */

    old_url = gwc_url[gwc_url_slot].url;

    if (old_url != NULL) {
        g_assert(hset_contains(gwc_known_url, old_url));
        hset_remove(gwc_known_url, old_url);
        atom_str_free_null(&old_url);
        gwc_url[gwc_url_slot].url = NULL;
    }

    hset_insert(gwc_known_url, url_atom);

    gwc_url[gwc_url_slot].url = url_atom;
    gwc_url[gwc_url_slot].stamp = 0;
    gwc_file_dirty = TRUE;

    if (GNET_PROPERTY(bootstrap_debug)) {
        g_debug("%s(): loaded GWC URL %s", G_STRFUNC, url_atom);
    }

    return TRUE;
}
Example #14
/**
 * ftw_foreach() callback to remove obsolete / spurious files.
 */
static ftw_status_t
tth_cache_cleanup_unlink(
	const ftw_info_t *info, const filestat_t *sb, void *data)
{
	const hset_t *shared = data;

	if (FTW_F_DIR & info->flags)
		return FTW_STATUS_OK;

	if ((FTW_F_OTHER | FTW_F_SYMLINK) & info->flags) {
		tth_cache_file_remove(info->fpath, "alien");
		return FTW_STATUS_OK;
	}

	if (FTW_F_FILE & info->flags) {
		char **path;
		struct tth tth;
		char b32[TTH_BASE32_SIZE + 2];
		size_t len;

		if (FTW_F_NOSTAT & info->flags) {
			g_warning("%s(): ignoring unaccessible cached TTH %s",
				G_STRFUNC, info->fpath);
			return FTW_STATUS_OK;
		}

		if (info->level != 2) {
			tth_cache_file_remove(info->fpath, "spurious");
			return FTW_STATUS_OK;
		}

		path = g_strsplit(info->rpath, "/", 2);

		if (NULL == path)
			return FTW_STATUS_ABORT;	/* Weird, empty relative path? */

		len = str_bprintf(b32, sizeof b32, "%s", path[0]);
		if (len != 2)		/* Expected first path component is 2-char long */
			len = 0;
		len += str_bprintf(&b32[len], sizeof b32 - len, "%s", path[1]);

		if (
			TTH_BASE32_SIZE != len ||
			TTH_RAW_SIZE !=
				base32_decode(&tth, sizeof tth, b32, TTH_BASE32_SIZE)
		) {
			tth_cache_file_remove(info->fpath, "invalid");
			goto done;
		}

		/*
		 * At this point, we have a valid TTH cache filename.
		 *
		 * We want to only process files created before the session started.
		 *
		 * The rationale is that users could start unsharing directories,
		 * moving files around, adding new files, etc.  Each time a new library
		 * rescan occurs, we're going to create new TTH cache files, or some
		 * cached files could become unused for a while and then files will
		 * reappear in the library.
		 *
		 * By only ever cleaning up files created before the current session,
		 * we have a higher likelihood of processing an obsolete cache entry.
		 */

		if (delta_time(sb->st_mtime, GNET_PROPERTY(session_start_stamp)) >= 0)
			goto done;		/* Created after session started, skip */

		if (!hset_contains(shared, &tth)) {
			if (debugging(0))
				g_debug("%s(): unshared TTH (%s)", G_STRFUNC, info->rpath);
			(void) tth_cache_file_unlink(info->fpath, "unshared");
		}

		/* FALL THROUGH */

	done:
		g_strfreev(path);
		return FTW_STATUS_OK;
	}

	g_assert_not_reached();
	return FTW_STATUS_ERROR;
}
Example #15
/**
 * Closes all file descriptors greater than or equal to ``first_fd'',
 * skipping preserved ones if ``preserve'' is TRUE.
 */
static void
fd_close_from_internal(const int first_fd, bool preserve)
{
	int fd;

	g_return_if_fail(first_fd >= 0);

	if (!preserve && try_close_from(first_fd))
		return;

	fd = getdtablesize() - 1;
	while (fd >= first_fd) {
		if (preserve && hset_contains(fd_preserved, int_to_pointer(fd)))
			goto next;

#ifdef HAVE_GTKOSXAPPLICATION
		/* OS X doesn't allow us to close fds that we did not open. During
		 * GUI initialisation a new kqueue fd is created for UI events. This
		 * is visible to us as a FIFO which we are not allowed to close.
		 * Set close-on-exec on all FIFOs so we won't leak any of our other
		 * FIFOs.
		 *	-- JA 2011-11-28 */
		if (is_a_fifo(fd))
			fd_set_close_on_exec(fd);
		else
#endif
		/* OS X frowns upon random fds being closed --RAM 2011-11-13  */
		if (fd_is_opened(fd)) {
			if (close(fd)) {
#if defined(F_MAXFD)
				fd = fcntl(0, F_MAXFD);
				continue;
#endif	/* F_MAXFD */
			}
		}
	next:
		fd--;
	}

	/*
	 * When called with a first_fd of 3, and we are on Windows, also make
	 * sure we close all the known sockets we have.  This lets the process
	 * safely auto-restart, avoiding multiple listening sockets on the same
	 * port.
	 *		--RAM, 2015-04-05
	 */

	if (
		is_running_on_mingw() && !preserve &&
		3 == first_fd && NULL != fd_sockets
	) {
		hset_t *fds = fd_sockets;

		/*
		 * We're about to exec() another process, and we may be crashing,
		 * hence do not bother using hset_foreach_remove() to ensure minimal
		 * processing.  We also reset the fd_sockets pointer to NULL to
		 * make sure s_close() will do nothing when fd_notify_socket_closed()
		 * is called.
		 */

		fd_sockets = NULL;		/* We don't expect race conditions here */
		hset_foreach(fds, fd_socket_close, NULL);

		/* Don't bother freeing / clearing set, we're about to exec() */
	}
}
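For completeness, a sketch of how a descriptor would enter fd_preserved in the first place, given the int_to_pointer() keying used above; the real registration wrapper, if any, may differ.
/*
 * Sketch (assumed): marking a descriptor as preserved so that
 * fd_close_from_internal() skips it when ``preserve'' is TRUE.
 */
static void
sketch_fd_preserve(int fd)
{
	if (!hset_contains(fd_preserved, int_to_pointer(fd)))
		hset_insert(fd_preserved, int_to_pointer(fd));
}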