Example #1
/**
 * Trigger the Nagle timeout immediately, if registered.
 */
static void
tx_deflate_flush(txdrv_t *tx)
{
	struct attr *attr = tx->opaque;

	if (attr->flags & DF_NAGLE) {
		g_assert(NULL != attr->tm_ev);
		cq_expire(attr->tm_ev);
	} else if (!(attr->flags & DF_FLOWC))
		deflate_flush_send(tx);
}
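
In this first example, cq_expire() forces a registered Nagle event to fire at once instead of waiting for its delay to elapse. For context, here is a minimal sketch of how such an event could have been armed in the first place; the cq_insert() call, the deflate_nagle_timeout() callback, the attr->cq field and the BUFFER_NAGLE delay are assumptions made for illustration, not the actual tx_deflate code.

/*
 * Illustrative sketch only: arming the Nagle event that tx_deflate_flush()
 * may later force via cq_expire().  The cq_insert() signature, the callback
 * prototype, BUFFER_NAGLE and attr->cq are assumptions.
 */
static void
deflate_nagle_timeout(cqueue_t *cq, void *arg)	/* hypothetical callback */
{
	txdrv_t *tx = arg;
	struct attr *attr = tx->opaque;

	(void) cq;
	attr->flags &= ~DF_NAGLE;	/* Timer fired, no longer registered */
	attr->tm_ev = NULL;
	deflate_flush_send(tx);		/* Flush the data that was held back */
}

static void
deflate_nagle_start(txdrv_t *tx)	/* hypothetical helper */
{
	struct attr *attr = tx->opaque;

	g_assert(!(attr->flags & DF_NAGLE));
	attr->tm_ev = cq_insert(attr->cq, BUFFER_NAGLE, deflate_nagle_timeout, tx);
	attr->flags |= DF_NAGLE;
}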
Example #2
/**
 * The heartbeat of our callout queue.
 *
 * Called to notify us about the elapsed "time" so that we can expire timeouts
 * and maintain our notion of "current time".
 *
 * NB: The time maintained by the callout queue is "virtual".  It's the
 * elapsed delay given by regular calls to cq_clock() that defines its unit.
 * For gtk-gnutella, the time unit is the millisecond.
 */
static void
cq_clock(cqueue_t *cq, int elapsed)
{
	int bucket;
	int last_bucket, old_last_bucket;
	struct chash *ch, *old_current;
	cevent_t *ev;
	cq_time_t now;
	int processed = 0;

	cqueue_check(cq);
	g_assert(elapsed >= 0);
	g_assert(mutex_is_owned(&cq->cq_lock));

	/*
	 * Recursive calls are possible: in the middle of an event, we could
	 * trigger something that calls cq_dispatch() manually, for instance.
	 *
	 * Therefore, we save the cq_current and cq_last_bucket fields upon
	 * entry and restore them at the end as appropriate. If cq_current is
	 * NULL initially, it means we were not in the middle of any recursion
	 * so we won't have to restore cq_last_bucket.
	 *
	 * Note that the mutex forces recursive calls to cq_clock() to run on
	 * the same thread.  However, each initial run of cq_clock() may happen
	 * on a different thread.
	 */

	old_current = cq->cq_current;
	old_last_bucket = cq->cq_last_bucket;

	cq->cq_ticks++;
	cq->cq_time += elapsed;
	now = cq->cq_time;

	bucket = cq->cq_last_bucket;		/* Bucket we traversed last time */
	ch = &cq->cq_hash[bucket];
	last_bucket = EV_HASH(now);			/* Last bucket to traverse now */

	/*
	 * If `elapsed' has overflowed the hash size, then we'll need to look at
	 * all the buckets in the table (wrap around).
	 */

	if (EV_OVER(elapsed))
		last_bucket = bucket;

	/*
	 * Since the hashed time is not a strictly monotonic function of time,
	 * we have to rescan the last bucket, in case its earliest events have
	 * now expired, before moving forward.
	 */

	cq->cq_current = ch;

	while ((ev = ch->ch_head) && ev->ce_time <= now) {
		cq_expire(ev);
		processed++;
	}

	/*
	 * If we don't have to move forward (elapsed is too small), we're done.
	 */

	if (cq->cq_last_bucket == last_bucket && !EV_OVER(elapsed))
		goto done;

	cq->cq_last_bucket = last_bucket;

	do {
		ch++;
		if (++bucket >= HASH_SIZE) {
			bucket = 0;
			ch = cq->cq_hash;
		}

		/*
		 * Since each bucket is sorted, we can stop our walkthrough as
		 * soon as we reach an event scheduled after `now'.
		 */

		cq->cq_current = ch;

		while ((ev = ch->ch_head) && ev->ce_time <= now) {
			cq_expire(ev);
			processed++;
		}

	} while (bucket != last_bucket);

done:
	cq->cq_current = old_current;

	if G_UNLIKELY(old_current != NULL)
		cq->cq_last_bucket = old_last_bucket;	/* Was in recursive call */

	if (cq_debug() > 5) {
		s_debug("CQ: %squeue \"%s\" %striggered %d event%s (%d item%s)",
			cq->cq_magic == CSUBQUEUE_MAGIC ? "sub" : "",
			cq->cq_name, NULL == old_current ? "" : "recursively ",
			processed, 1 == processed ? "" : "s",
			cq->cq_items, 1 == cq->cq_items ? "" : "s");
	}

	mutex_unlock(&cq->cq_lock);

	/*
	 * Run idle callbacks if nothing was processed.
	 *
	 * Note that we released the mutex before running idle callbacks, to let
	 * concurrent threads register callout events.
	 */

	if (0 == processed)
		cq_run_idle(cq);
}
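
The traversal in cq_clock() relies on HASH_SIZE, EV_HASH() and EV_OVER(), which are not part of this excerpt. Below is a minimal sketch of what they could look like, assuming a power-of-two bucket count and a fixed number of virtual-time units per bucket; the actual constants and shift amount are assumptions, not the project's values.

/*
 * Illustrative sketch of the hashing macros assumed by cq_clock().
 * Only the invariants matter:
 *  - EV_HASH(t) maps a virtual time to a bucket index in [0, HASH_SIZE - 1]
 *  - EV_OVER(d) is true when delay `d' spans the whole table, forcing
 *    cq_clock() to walk every bucket (the wrap-around case).
 */
#define HASH_SIZE	2048			/* Bucket count, power of two (assumed) */
#define HASH_MASK	(HASH_SIZE - 1)
#define EV_HASH(x)	((int) ((x) >> 5) & HASH_MASK)	/* 32 time units per bucket (assumed) */
#define EV_OVER(x)	((x) >= (1 << 5) * HASH_SIZE)

With such a layout, an event due at virtual time t lives in bucket EV_HASH(t) and each bucket keeps its events sorted by ce_time. That is why cq_clock() can stop scanning a bucket at the first event scheduled after `now', and why it only needs to visit the buckets between cq_last_bucket and EV_HASH(now), rescanning the former because distinct times can hash to the same bucket.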