Example #1
/**
 * Retry publishing after some delay.
 *
 * @param pe		the entry to publish
 * @param delay		delay in seconds
 * @param msg		if non-NULL, logging message explaining the delay
 */
static void
publisher_retry(struct publisher_entry *pe, int delay, const char *msg)
{
	struct pubdata *pd;

	publisher_check(pe);
	g_assert(NULL == pe->publish_ev);
	g_assert(delay > 0);

	pd = get_pubdata(pe->sha1);
	if (pd != NULL) {
		pd->next_enqueue = time_advance(tm_time(), UNSIGNED(delay));
		dbmw_write(db_pubdata, pe->sha1, pd, sizeof *pd);
	}

	pe->publish_ev = cq_insert(publish_cq, delay * 1000, handle_entry, pe);
	pe->last_delayed = tm_time();

	if (GNET_PROPERTY(publisher_debug) > 3) {
		shared_file_t *sf = shared_file_by_sha1(pe->sha1);
		g_debug("PUBLISHER will retry SHA-1 %s %s\"%s\" in %s: %s",
			sha1_to_string(pe->sha1),
			(sf && sf != SHARE_REBUILDING && shared_file_is_partial(sf)) ?
				"partial " : "",
			(sf && sf != SHARE_REBUILDING) ? shared_file_name_nfc(sf) : "",
			compact_time(delay), msg != NULL ? msg : "<no reason>");
		shared_file_unref(&sf);
	}
}
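
The function above pairs an absolute timestamp persisted in the pubdata record (via time_advance() and dbmw_write()) with a relative callout delay expressed in milliseconds (via cq_insert()). The stand-alone sketch below restates that pattern only; time_advance() is assumed here to simply add a second-granularity delta to a time_t, and every name prefixed with my_ or sketch_ is invented for illustration.

#include <time.h>

/* Hypothetical stand-in for time_advance(): add `delta' seconds to `t'. */
static time_t
my_time_advance(time_t t, unsigned long delta)
{
	return t + (time_t) delta;	/* the real helper presumably guards overflow */
}

/*
 * Sketch: persist the absolute wakeup time, then arm a timer with the
 * equivalent relative delay expressed in milliseconds.
 */
static void
sketch_schedule_retry(time_t now, int delay_secs)
{
	time_t next_enqueue = my_time_advance(now, (unsigned long) delay_secs);
	long timer_ms = (long) delay_secs * 1000;

	/* ... write next_enqueue to the persistent store (cf. dbmw_write) ... */
	/* ... insert a callout event firing after timer_ms (cf. cq_insert) ... */
	(void) next_enqueue;
	(void) timer_ms;
}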
Example #2
/*
  update the last_letter simulation by one time step
 */
void last_letter::update(const struct sitl_input &input)
{
    send_servos(input);
    recv_fdm(input);
    sync_frame_time();

    update_position();
    time_advance();
    // update magnetic field
    update_mag_field_bf();
}
Example #3
/**
 * Send datagram.
 *
 * @param us		the UDP scheduler responsible for sending the datagram
 * @param mb		the message to send
 * @param to		the IP:port destination of the message
 * @param tx		the TX stack sending the message
 * @param cb		callback actions on the datagram
 *
 * @return 0 if the message was not sent, or the length of the message if it
 * was sent, queued or dropped.
 */
size_t
udp_sched_send(udp_sched_t *us, pmsg_t *mb, const gnet_host_t *to,
	const txdrv_t *tx, const struct tx_dgram_cb *cb)
{
	int len;
	struct udp_tx_desc *txd;
	uint prio;

	len = pmsg_size(mb);

	/*
	 * Try to send immediately if we have bandwidth.
	 */

	if (!us->used_all && udp_sched_mb_sendto(us, mb, to, tx, cb))
		return len;		/*  Message "sent" */

	/*
	 * If we already have enough data enqueued, flow-control the upper
	 * layer by acting as if we do not have enough bandwidth.
	 *
	 * However, we now always accept traffic sent with the highest priority,
	 * since it is important to send such messages as soon as possible, i.e.
	 * ahead of any other pending data we would otherwise flush locally
	 * before servicing upper queues.
	 *		--RAM, 2012-10-12
	 */

	prio = pmsg_prio(mb);

	if (
		PMSG_P_HIGHEST != prio &&
		us->buffered >= UDP_SCHED_FACTOR * udp_sched_bw_per_second(us)
	) {
		udp_sched_log(1, "%p: flow-controlled", us);
		us->flow_controlled = TRUE;
		return 0;		/* Flow control upper layers */
	}

	/*
	 * Message is going to be enqueued.
	 *
	 * However, from the upper layers (the message queue in particular),
	 * the message is considered as being sent, and therefore these layers
	 * are going to call pmsg_free() on the message.
	 *
	 * We do not want to pmsg_clone() the message because that would render
	 * uses of pmsg_was_sent() useless in free routines, and upper layers
	 * would think the message was dropped if they installed a free routine
	 * on the message.
	 *
	 * Hence we use pmsg_ref().
	 */

	txd = palloc(us->txpool);
	txd->magic = UDP_TX_DESC_MAGIC;
	txd->mb = pmsg_ref(mb);		/* Take ownership of message */
	txd->to = atom_host_get(to);
	txd->tx = tx;
	txd->cb = cb;
	txd->expire = time_advance(tm_time(), UDP_SCHED_EXPIRE);

	udp_sched_log(4, "%p: queuing mb=%p (%d bytes) prio=%u",
		us, mb, pmsg_size(mb), pmsg_prio(mb));

	/*
	 * The queue used is a LIFO so that buffering does not delay all the messages.
	 * Since UDP traffic is unordered, it's better to send the most recent
	 * datagrams first, to reduce the perceived average latency.
	 */

	g_assert(prio < N_ITEMS(us->lifo));
	eslist_prepend(&us->lifo[prio], txd);
	us->buffered = size_saturate_add(us->buffered, len);

	return len;		/* Message queued, but tell upper layers it's sent */
}
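
The last comment in udp_sched_send() describes the queueing discipline: one LIFO per priority level, with new datagrams prepended so that the freshest traffic goes out first once bandwidth becomes available again. The sketch below reproduces that discipline with a plain intrusive singly linked list; it is an illustration only and does not use the real eslist API (struct dgram, lifo_head and N_PRIO are invented names).

#include <stddef.h>

#define N_PRIO 4	/* assumed number of priority levels, for illustration */

struct dgram {
	struct dgram *next;	/* intrusive link */
	size_t len;
	/* ... payload, destination, expiration time ... */
};

static struct dgram *lifo_head[N_PRIO];	/* one LIFO per priority */

/*
 * Prepend, so the most recently produced datagram is sent first: UDP is
 * unordered anyway, and serving fresh traffic first keeps the perceived
 * latency low even when the queue builds up.
 */
static void
lifo_enqueue(struct dgram *d, unsigned prio)
{
	d->next = lifo_head[prio];
	lifo_head[prio] = d;
}

/* Dequeue from the highest non-empty priority. */
static struct dgram *
lifo_dequeue(void)
{
	unsigned prio = N_PRIO;

	while (prio-- != 0) {
		if (lifo_head[prio] != NULL) {
			struct dgram *d = lifo_head[prio];
			lifo_head[prio] = d->next;
			return d;
		}
	}
	return NULL;
}

Note also that the real code takes an extra reference on the message (pmsg_ref()) rather than cloning it, as explained in the comment above, so that the upper layers, which already consider the message sent, keep meaningful pmsg_was_sent() semantics in their free routines.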
Example #4
/**
 * Publishing callback invoked when asynchronous publication is completed,
 * or ended with an error.
 *
 * @return TRUE if we accept the publishing, FALSE otherwise to get the
 * publishing layer to continue attempts to failed STORE roots and report
 * on progress using the same callback.
 */
static bool
publisher_done(void *arg, pdht_error_t code, const pdht_info_t *info)
{
	struct publisher_entry *pe = arg;
	struct pubdata *pd;
	int delay = PUBLISH_BUSY;
	bool expired = FALSE;
	bool accepted = TRUE;

	publisher_check(pe);

	pd = get_pubdata(pe->sha1);

	/*
	 * Update stats on republishing before value expiration.
	 */

	if (PDHT_E_OK == code) {
		if (pe->last_publish && info->roots > 0) {
			if (pd != NULL) {
				if (pd->expiration && delta_time(tm_time(), pd->expiration) > 0)
					expired = TRUE;
			} else {
				time_delta_t elapsed = delta_time(tm_time(), pe->last_publish);
				if (elapsed > DHT_VALUE_ALOC_EXPIRE)
					expired = TRUE;
			}
			if (expired)
				gnet_stats_inc_general(GNR_DHT_REPUBLISHED_LATE);
		}
	}

	/*
	 * Compute retry delay.
	 */

	switch (code) {
	case PDHT_E_OK:
		/*
		 * If we were not able to publish to KDA_K nodes, decrease the
		 * delay before republishing.  We use a non-linear decimation of
		 * the republish time, as a function of the number of nodes to which
		 * we could publish.
		 */

		delay = publisher_delay(info, DHT_VALUE_ALOC_EXPIRE);
		accepted = publisher_is_acceptable(info);
		break;
	case PDHT_E_POPULAR:
		/*
		 * Compute the suitable delay: the first time, we use PUBLISH_POPULAR,
		 * and then we double each time until we reach PUBLISH_POPULAR_MAX.
		 *
		 * If we already tried to publish the entry, pe->last_delayed will
		 * be non-zero.
		 */
		if (0 != pe->last_delayed) {
			time_delta_t elapsed = delta_time(tm_time(), pe->last_delayed);
			if (elapsed < PUBLISH_POPULAR) {
				delay = PUBLISH_POPULAR;
			} else if (elapsed >= PUBLISH_POPULAR_MAX / 2) {
				delay = PUBLISH_POPULAR_MAX;
			} else {
				delay = elapsed * 2;
			}
		} else {
			delay = PUBLISH_POPULAR;
		}
		break;
	case PDHT_E_NOT_SHARED:
	case PDHT_E_LOOKUP_EXPIRED:
	case PDHT_E_LOOKUP:
	case PDHT_E_UDP_CLOGGED:
	case PDHT_E_PUBLISH_EXPIRED:
	case PDHT_E_PUBLISH_ERROR:
	case PDHT_E_SHA1:
	case PDHT_E_PENDING:
	case PDHT_E_CANCELLED:
	case PDHT_E_GGEP:
	case PDHT_E_NONE:
		delay = PUBLISH_BUSY;
		break;
	case PDHT_E_MAX:
		g_assert_not_reached();
	}

	/*
	 * For the publishing of a backgrounded entry, we need to adjust the
	 * computed delay by the time that has already elapsed.
	 */

	g_assert(!pe->backgrounded == !(pe->publish_ev != NULL));

	if (pe->backgrounded) {
		time_delta_t elapsed = delta_time(tm_time(), pe->last_delayed);
		g_assert(pe->last_delayed > 0);
		cq_cancel(&pe->publish_ev);
		if (delay > elapsed) {
			delay -= elapsed;
		} else {
			delay = 1;
		}
	}

	/*
	 * Logging.
	 */

	if (GNET_PROPERTY(publisher_debug) > 1) {
		shared_file_t *sf = shared_file_by_sha1(pe->sha1);
		char retry[80];
		char after[80];
		const char *late = "";

		after[0] = '\0';
		if (pe->last_publish) {
			time_delta_t elapsed = delta_time(tm_time(), pe->last_publish);

			str_bprintf(after, sizeof after,
				" after %s", compact_time(elapsed));

			if (pd != NULL) {
				if (expired)
					late = "late, ";
			} else {
				late = "no data, ";
			}
		}

		str_bprintf(retry, sizeof retry, "%s", compact_time(delay));

		g_debug("PUBLISHER SHA-1 %s %s%s\"%s\" %spublished to %u node%s%s: %s"
			" (%stook %s, total %u node%s, proba %.3f%%, retry in %s,"
			" %s bg, path %u) [%s]",
			sha1_to_string(pe->sha1),
			pe->backgrounded ? "[bg] " : "",
			(sf && sf != SHARE_REBUILDING && shared_file_is_partial(sf)) ?
				"partial " : "",
			(sf && sf != SHARE_REBUILDING) ? shared_file_name_nfc(sf) : "",
			pe->last_publish ? "re" : "",
			info->roots, plural(info->roots),
			after, pdht_strerror(code), late,
			compact_time(delta_time(tm_time(), pe->last_enqueued)),
			info->all_roots, plural(info->all_roots),
			info->presence * 100.0, retry,
			info->can_bg ? "can" : "no", info->path_len,
			accepted ? "OK" : "INCOMPLETE");

		shared_file_unref(&sf);
	}

	/*
	 * Update last publishing time and remember expiration time.
	 */

	if (PDHT_E_OK == code && info->roots > 0) {
		pe->last_publish = tm_time();
		if (pd != NULL) {
			pd->expiration =
				time_advance(pe->last_publish, DHT_VALUE_ALOC_EXPIRE);
			dbmw_write(db_pubdata, pe->sha1, pd, sizeof *pd);
		}
	}

	/*
	 * If entry was deemed popular, we're going to delay its republishing
	 * by a larger amount of time and any data we published already about
	 * it will surely expire.  Since this is our decision, we do not want
	 * to be told that republishing, if it occurs again, was done later than
	 * required.  Hence call publisher_hold() to mark that we don't care.
	 */

	if (PDHT_E_POPULAR == code)
		publisher_hold(pe, delay, "popular entry");
	else
		publisher_retry(pe, delay, accepted ? "accepted publish" : "published");

	pe->backgrounded = !accepted;

	return accepted;
}
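
The PDHT_E_POPULAR branch above implements a capped doubling backoff keyed on the time elapsed since the previous attempt: start at PUBLISH_POPULAR, double the elapsed time, and clamp at PUBLISH_POPULAR_MAX. Isolated as a helper, it looks like the sketch below; the constants are illustrative placeholders, not the values actually used by the publisher.

#include <time.h>

#define POPULAR_DELAY_MIN	(1 * 3600)	/* illustrative: 1 hour */
#define POPULAR_DELAY_MAX	(24 * 3600)	/* illustrative: 1 day */

/*
 * First attempt: wait the minimum delay.  Afterwards, double the time we
 * already waited, clamped to the maximum.
 */
static long
popular_backoff(time_t now, time_t last_delayed)
{
	long elapsed;

	if (0 == last_delayed)
		return POPULAR_DELAY_MIN;

	elapsed = (long) (now - last_delayed);

	if (elapsed < POPULAR_DELAY_MIN)
		return POPULAR_DELAY_MIN;
	if (elapsed >= POPULAR_DELAY_MAX / 2)
		return POPULAR_DELAY_MAX;

	return elapsed * 2;
}

For backgrounded entries, the routine then subtracts the time already spent waiting from the computed delay, with a floor of one second, before rescheduling.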
Example #5
/**
 * Called after the GUI is initialized to warn the user about an ancient
 * version (over a year old).
 *
 * If the version being run is not a stable one, warn after 60 days; otherwise
 * warn after a year.  If we're not "expired" yet but are approaching the
 * deadline, start to remind them.
 */
void
version_ancient_warn(void)
{
	time_t now = tm_time();
	time_delta_t lifetime, remain, elapsed;
	time_t s;

	g_assert(our_version.timestamp != 0);	/* version_init() called */

	/*
	 * Must reset the property to FALSE so that if it changes and becomes
	 * TRUE, the necessary GUI callbacks will get triggered.  Indeed,
	 * setting a property to its previous value is not considered a change,
	 * and rightfully so!
	 */

	gnet_prop_set_boolean_val(PROP_ANCIENT_VERSION, FALSE);

	elapsed = delta_time(now, our_version.timestamp);

	if (elapsed > VERSION_ANCIENT_WARN || tok_is_ancient(now)) {
		static bool warned = FALSE;
		if (GNET_PROPERTY(version_debug)) {
			g_debug("VERSION our_version = %s (elapsed = %ld, token %s)",
				timestamp_to_string(our_version.timestamp),
				(long) elapsed, tok_is_ancient(now) ? "ancient" : "ok");
		}
		if (!warned) {
			g_warning("version of gtk-gnutella is too old, please upgrade!");
			warned = TRUE;
		}
		gnet_prop_set_boolean_val(PROP_ANCIENT_VERSION, TRUE);
		return;
	}

	/*
	 * Check whether we're nearing ancient version status, to warn them
	 * beforehand that the version will become old soon.
	 */

	lifetime = VERSION_ANCIENT_WARN;
	remain = delta_time(lifetime, elapsed);

	g_assert(remain >= 0);		/* None of the checks above have fired */

	/*
	 * Try to see whether the token will expire within the next
	 * VERSION_ANCIENT_REMIND secs, looking for the minimum cutoff date.
	 *
	 * Indeed, it is possible to emit new versions without issuing a
	 * new set of token keys, thereby constraining the lifetime of the
	 * version.  This is usually what happens for bug-fixing releases
	 * that do not introduce significant Gnutella features.
	 */

	s = time_advance(now, VERSION_ANCIENT_REMIND);
	for (/* NOTHING */; delta_time(s, now) > 0; s -= SECS_PER_DAY) {
		if (!tok_is_ancient(s))
			break;
	}

	remain = MIN(remain, delta_time(s, now));

	/*
	 * Let them know when version will expire soon...
	 */

	if (remain < VERSION_ANCIENT_REMIND) {
		gnet_prop_set_guint32_val(PROP_ANCIENT_VERSION_LEFT_DAYS,
			remain / SECS_PER_DAY);
	}
}
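
The loop near the end of version_ancient_warn() performs a backward daily scan: starting at now + VERSION_ANCIENT_REMIND, it steps back one day at a time until it finds a moment at which the token keys are not yet ancient, and the distance from now to that moment bounds the remaining lifetime. A self-contained sketch of the same scan, with tok_is_ancient() replaced by a hypothetical stub, follows.

#include <time.h>
#include <stdbool.h>

/*
 * Hypothetical predicate: would the token keys be expired at time `t'?
 * A real implementation would compare `t' against the newest key's lifetime.
 */
static bool
token_expired_at(time_t t)
{
	(void) t;
	return false;	/* stub, for illustration only */
}

/*
 * Return how many seconds, within the next `horizon' seconds, the token
 * keys remain usable: scan backwards from now + horizon, one day at a
 * time, until a time at which the tokens are still valid is found.
 */
static long
token_lifetime_within(time_t now, long horizon)
{
	time_t s = now + horizon;

	while (s > now && token_expired_at(s))
		s -= 24 * 3600;		/* one day, cf. SECS_PER_DAY */

	return (long) (s - now);	/* <= 0: already expired, to day granularity */
}

The caller then takes the minimum of this bound and the version-based remaining lifetime before deciding whether to start the countdown reminder.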