Example #1
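/**
 * Invoked when the selected row of the ctree changes: drop the reference
 * held on the previously selected record and take a reference on the
 * record of the row now under the cursor, if any.
 */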
static void
selected_row_changed(GtkCTree *ctree)
{
	int row;
	
	if (selected_record) {
		search_gui_unref_record(selected_record);
		selected_record = NULL;
	}

	row = clist_get_cursor_row(GTK_CLIST(ctree));
	if (row >= 0) {
		GtkCTreeNode *node;

		node = gtk_ctree_node_nth(GTK_CTREE(ctree), row);
		selected_record = search_gui_get_record(ctree, GTK_CTREE_NODE(node));
		if (selected_record) {
			search_gui_ref_record(selected_record);
		}
	}

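	/*
	 * Debounce the selection: if a callout event is already pending, push
	 * it back, otherwise insert a new one in the main callout queue so that
	 * row_selected_expire() only fires once the selection has settled.
	 */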
	if (row_selected_ev) {
		cq_resched(row_selected_ev, ROW_SELECT_TIMEOUT);
	} else {
		row_selected_ev = cq_main_insert(ROW_SELECT_TIMEOUT,
							row_selected_expire, NULL);
	}
}
Example #2
/**
 * Add value to the table.
 *
 * If it was already present, its lifetime is augmented by the aging delay.
 *
 * The key argument is freed immediately if there is a free routine for
 * keys and the key was present in the table.
 *
 * The previous value is freed and replaced by the new one if there is
 * an insertion conflict and the value pointers are different.
 */
void
aging_insert(aging_table_t *ag, const void *key, void *value)
{
	gboolean found;
	void *okey, *ovalue;
	time_t now = tm_time();
	struct aging_value *aval;

	g_assert(ag->magic == AGING_MAGIC);

	found = g_hash_table_lookup_extended(ag->table, key, &okey, &ovalue);
	if (found) {
		aval = ovalue;

		g_assert(aval->key == okey);

		if (aval->key != key && ag->kvfree != NULL) {
			/*
			 * We discard the new and keep the old key instead.
			 * That way, we don't have to update the hash table.
			 */

			(*ag->kvfree)(deconstify_gpointer(key), aval->value);
		}

		g_assert(aval->cq_ev != NULL);

		/*
		 * Value existed for this key, prolong its life.
		 */

		aval->value = value;
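
		/*
		 * Recompute the TTL: take what was left of the previous lifetime,
		 * add the aging delay, and clamp the result so that the conversion
		 * to milliseconds below cannot overflow an int.
		 */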
		aval->ttl -= delta_time(now, aval->last_insert);
		aval->ttl += ag->delay;
		aval->ttl = MAX(aval->ttl, 1);
		aval->ttl = MIN(aval->ttl, INT_MAX / 1000);
		aval->last_insert = now;

		cq_resched(aval->cq_ev, 1000 * aval->ttl);
	} else {
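		/*
		 * First time we see this key: allocate a new aging value and arm
		 * its expiration event in the aging callout queue.
		 */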
		WALLOC(aval);
		aval->value = value;
		aval->key = deconstify_gpointer(key);
		aval->ttl = ag->delay;
		aval->ttl = MAX(aval->ttl, 1);
		aval->ttl = MIN(aval->ttl, INT_MAX / 1000);
		aval->last_insert = now;
		aval->ag = ag;

		aval->cq_ev = cq_insert(aging_cq, 1000 * aval->ttl, aging_expire, aval);
		gm_hash_table_insert_const(ag->table, key, aval);
	}
}
Example #3
/**
 * Change the period of a registered periodic event.
 */
void
cq_periodic_resched(cperiodic_t *cp, int period)
{
	cperiodic_check(cp);

	cp->period = period;

	/*
	 * If the event is NULL, we're in the middle of cq_periodic_trampoline(),
	 * so the event will be rescheduled once the callback returns.
	 */

	if (cp->ev != NULL)
		cq_resched(cp->ev, period);
}
Example #4
/**
 * Look up the value in the table and, if found, revitalize the entry, restoring the
 * initial lifetime the key/value pair had at insertion time.
 */
void *
aging_lookup_revitalise(const aging_table_t *ag, gconstpointer key)
{
	struct aging_value *aval;

	aging_check(ag);

	aval = g_hash_table_lookup(ag->table, key);

	if (aval != NULL) {
		g_assert(aval->cq_ev != NULL);
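		/*
		 * Grant the entry its full lifetime again: push the expiration
		 * event back by ttl seconds (the callout queue counts milliseconds).
		 */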
		aval->last_insert = tm_time();
		cq_resched(aval->cq_ev, 1000 * aval->ttl);
	}

	return aval == NULL ? NULL : aval->value;
}
Example #5
/**
 * Delay the Nagle timer when more data is coming.
 */
static void
deflate_nagle_delay(txdrv_t *tx)
{
	struct attr *attr = tx->opaque;

	g_assert(attr->flags & DF_NAGLE);
	g_assert(NULL != attr->tm_ev);
	g_assert(attr->nagle);				/* Nagle is allowed */

	/*
	 * We push back the initial delay a little while when more data comes,
	 * hoping that enough will be output so that we end up sending the TX
	 * buffer without having to trigger a flush too soon, since that would
	 * degrade compression performance.
	 *
	 * If too much time has elapsed since the Nagle timer started, do not
	 * postpone the flush, otherwise we might delay time-sensitive messages.
	 */

	if (delta_time(tm_time(), attr->nagle_start) < BUFFER_DELAY) {
		int delay = cq_remaining(attr->tm_ev);
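		/* Never shorten the timer; extend it to at least half the Nagle period */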
		cq_resched(attr->tm_ev, MAX(delay, BUFFER_NAGLE / 2));
	}
}
Example #6
/**
 * An update was accepted due to a lower-precision entry; reschedule the
 * expiration timeout.
 */
static void
val_reused(struct used_val *v, int precision)
{
	v->precision = precision;
	cq_resched(v->cq_ev, REUSE_DELAY * 1000);
}