Example #1
static void
search_gui_flush_queue(search_t *search)
{
	g_return_if_fail(search);
	g_return_if_fail(search->tree);
	
	if (slist_length(search->queue) > 0) {
		GtkTreeModel *model;
		guint max_items;
		struct result_data *data;
		bool stopped;

		stopped = search_gui_start_massive_update(search);

		model = gtk_tree_view_get_model(GTK_TREE_VIEW(search->tree));
		max_items = search_gui_is_sorted(search) ? 100 : 500;

		while (max_items-- > 0 && NULL != (data = slist_shift(search->queue))) {
			search_gui_flush_queue_data(search, model, data);
		}

		if (stopped)
			search_gui_end_massive_update(search);
	}
}
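
This example drains search->queue in bounded batches: 100 entries per flush when the view is sorted (each insertion then has to keep the rows in order), 500 otherwise, so the GUI stays responsive while results keep arriving. A hedged sketch of the producer side such a flush implies; record_result() and gui_tick() are hypothetical names, and only slist_append() (which also appears in Example #4) is assumed from the slist API:

/*
 * Sketch only: a producer appends freshly received results and relies on a
 * periodic GUI tick to flush them into the tree view in bounded batches.
 */
static void
record_result(search_t *search, struct result_data *data)
{
	slist_append(search->queue, data);	/* The queue owns `data' from now on */
}

static void
gui_tick(search_t *search)
{
	search_gui_flush_queue(search);		/* Moves at most 100/500 rows per call */
}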
Example #2
static void
search_gui_clear_queue(search_t *search)
{
	struct result_data *rd;

	while (NULL != (rd = slist_shift(search->queue))) {
		result_data_free(search, rd);
	}
}
Example #3
/**
 * Free all items from the pmsg list, keeping the list container.
 */
void
pmsg_slist_discard_all(slist_t *slist)
{
	pmsg_t *mb;

	while ((mb = slist_shift(slist)) != NULL) {
		pmsg_free(mb);
	}
}
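
The loop above relies on slist_shift() returning NULL once the list is empty, so a single while condition both walks and empties the list while the slist_t container stays allocated for reuse. A typical holder of such a list empties it on reset and refills it later; a hedged sketch of that usage, where struct tx_context and its pending field are hypothetical:

/*
 * Sketch only: reset drops all queued messages but keeps the container,
 * so the next round can refill `pending' without reallocating it.
 */
struct tx_context {
	slist_t *pending;		/* Hypothetical: messages awaiting transmission */
};

static void
tx_context_reset(struct tx_context *ctx)
{
	pmsg_slist_discard_all(ctx->pending);
}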
Example #4
/**
 * Enqueue message, which becomes owned by the queue.
 *
 * The data held in `to' is copied, so the structure can be reclaimed
 * immediately by the caller.
 */
void
mq_udp_putq(mqueue_t *q, pmsg_t *mb, const gnet_host_t *to)
{
	size_t size;
	char *mbs;
	uint8 function;
	pmsg_t *mbe = NULL;		/* Extended message with destination info */
	bool error = FALSE;

	mq_check_consistency(q);

	dump_tx_udp_packet(to, mb);

again:
	mq_check_consistency(q);
	g_assert(mb);
	g_assert(!pmsg_was_sent(mb));
	g_assert(pmsg_is_unread(mb));
	g_assert(q->ops == &mq_udp_ops);	/* Is a UDP queue */

	/*
	 * Trap messages enqueued whilst in the middle of an mq_clear() operation
	 * by marking them as sent and dropping them.  Idem if queue was
	 * put in "discard" mode.
	 */

	if (q->flags & (MQ_CLEAR | MQ_DISCARD)) {
		pmsg_mark_sent(mb);	/* Let them think it was sent */
		pmsg_free(mb);		/* Drop message */
		return;
	}

	mq_check(q, 0);

	size = pmsg_size(mb);

	if (size == 0) {
		g_carp("%s: called with empty message", G_STRFUNC);
		goto cleanup;
	}

	/*
	 * Protect against recursion: we must not invoke puthere() whilst in
	 * the middle of another putq() or we would corrupt the qlink array:
	 * Messages received during recursion are inserted into the qwait list
	 * and will be stuffed back into the queue when the initial putq() ends.
	 *		--RAM, 2006-12-29
	 */

	if (q->putq_entered > 0) {
		pmsg_t *extended;

		if (debugging(20))
			g_warning("%s: %s recursion detected (%u already pending)",
				G_STRFUNC, mq_info(q), slist_length(q->qwait));

		/*
		 * We insert extended messages into the waiting queue since we need
		 * the destination information as well.
		 */

		extended = mq_udp_attach_metadata(mb, to);
		slist_append(q->qwait, extended);
		return;
	}
	q->putq_entered++;

	mbs = pmsg_start(mb);
	function = gmsg_function(mbs);

	gnet_stats_count_queued(q->node, function, mbs, size);

	/*
	 * If queue is empty, attempt a write immediately.
	 */

	if (q->qhead == NULL) {
		ssize_t written;

		if (pmsg_check(mb, q)) {
			written = tx_sendto(q->tx_drv, mb, to);
		} else {
			gnet_stats_count_flowc(mbs, FALSE);
			node_inc_txdrop(q->node);		/* Dropped during TX */
			written = (ssize_t) -1;
		}

		if ((ssize_t) -1 == written)
			goto cleanup;

		node_add_tx_given(q->node, written);

		if ((size_t) written == size) {
			if (GNET_PROPERTY(mq_udp_debug) > 5)
				g_debug("MQ UDP sent %s",
					gmsg_infostr_full(pmsg_start(mb), pmsg_written_size(mb)));

			goto cleanup;
		}

		/*
		 * Since UDP respects write boundaries, the following can never
		 * happen in practice: either we write the whole datagram, or none
		 * of it.
		 */

		if (written > 0) {
			g_warning(
				"partial UDP write (%zu bytes) to %s for %zu-byte datagram",
				(size_t) written, gnet_host_to_string(to), size);
			goto cleanup;
		}

		/* FALL THROUGH */
	}

	if (GNET_PROPERTY(mq_udp_debug) > 5)
		g_debug("MQ UDP queued %s",
			gmsg_infostr_full(pmsg_start(mb), pmsg_written_size(mb)));

	/*
	 * Attach the destination information as metadata to the message, unless
	 * it is already known (possible only during unfolding of the queued data
	 * during re-entrant calls).
	 *
	 * This is later extracted via pmsg_get_metadata() on the extended
	 * message by the message queue to get the destination information.
	 *
	 * Then enqueue the extended message.
	 */

	if (NULL == mbe)
		mbe = mq_udp_attach_metadata(mb, to);

	q->cops->puthere(q, mbe, size);
	mb = NULL;

	/* FALL THROUGH */

cleanup:

	if (mb) {
		pmsg_free(mb);
		mb = NULL;
	}

	/*
	 * When reaching that point with a zero putq_entered counter, it means
	 * we triggered an early error condition.  Bail out.
	 */

	g_assert(q->putq_entered >= 0);

	if (q->putq_entered == 0)
		error = TRUE;
	else
		q->putq_entered--;

	mq_check(q, 0);

	/*
	 * If we're exiting here with no other putq() registered, then we must
	 * pop an item off the head of the list and iterate again.
	 */

	if (0 == q->putq_entered && !error) {
		mbe = slist_shift(q->qwait);
		if (mbe) {
			struct mq_udp_info *mi = pmsg_get_metadata(mbe);

			mb = mbe;		/* An extended message "is-a" message */
			to = &mi->to;

			if (debugging(20))
				g_warning(
					"%s: %s flushing waiting to %s (%u still pending)",
					G_STRFUNC, mq_info(q), gnet_host_to_string(to),
					slist_length(q->qwait));

			goto again;
		}
	}

	return;
}
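
The ownership rules stated in the function comment matter for callers: the pmsg_t belongs to the queue once mq_udp_putq() returns (even on the drop paths it is freed internally), while the gnet_host_t destination is copied, so a stack variable is fine. A hedged caller-side sketch; build_ping() is a hypothetical message constructor, and the initialization of `dest' is deliberately left abstract rather than guessing at the host API:

	/*
	 * Sketch only: enqueue a datagram for `dest'.
	 */
	gnet_host_t dest;			/* Stack copy is fine: the queue copies it */
	pmsg_t *mb = build_ping();	/* Hypothetical constructor */

	/* ... fill `dest' with the target address and port ... */

	mq_udp_putq(q, mb, &dest);
	/* Do NOT pmsg_free(mb) here: the queue owns the message now */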
Example #5
/**
 * Get a new index in the cache, and update LRU data structures.
 *
 * @param db	the database
 * @param num	page number in the DB for which we want a cache index
 *
 * @return -1 on error, or the allocated cache index.
 */
static int
getidx(DBM *db, long num)
{
	struct lru_cache *cache = db->cache;
	long n;		/* Cache index */

	/*
	 * If we invalidated pages, reuse their indices.
	 * If we have not used all the pages yet, get the next one.
	 * Otherwise, use the least-recently requested page.
	 */

	if (slist_length(cache->available)) {
		void *v = slist_shift(cache->available);
		n = pointer_to_int(v);
		g_assert(n >= 0 && n < cache->pages);
		g_assert(!cache->dirty[n]);
		g_assert(-1 == cache->numpag[n]);
		hash_list_prepend(cache->used, int_to_pointer(n));
	} else if (cache->next < cache->pages) {
		n = cache->next++;
		cache->dirty[n] = FALSE;
		hash_list_prepend(cache->used, int_to_pointer(n));
	} else {
		void *last = hash_list_tail(cache->used);
		long oldnum;
		gboolean had_ioerr = booleanize(db->flags & DBM_IOERR_W);

		hash_list_moveto_head(cache->used, last);
		n = pointer_to_int(last);

		/*
		 * This page is no longer cached as its cache index is being reused.
		 * Flush it to disk if dirty before discarding it.
		 */

		g_assert(n >= 0 && n < cache->pages);

		oldnum = cache->numpag[n];

		if (cache->dirty[n] && !writebuf(db, oldnum, n)) {
			hash_list_iter_t *iter;
			void *item;
			gboolean found = FALSE;

			/*
			 * Cannot flush dirty page now, probably because we ran out of
			 * disk space.  Look through the cache to see whether we can reuse a
			 * non-dirty page instead, which would let us keep the dirty
			 * page a little longer in the cache, in the hope it can then
			 * be properly flushed later.
			 */

			iter = hash_list_iterator_tail(cache->used);

			while (NULL != (item = hash_list_iter_previous(iter))) {
				long i = pointer_to_int(item);

				g_assert(i >= 0 && i < cache->pages);

				if (!cache->dirty[i]) {
					found = TRUE;	/* OK, reuse cache slot #i then */
					n = i;
					oldnum = cache->numpag[i];
					break;
				}
			}

			hash_list_iter_release(&iter);

			if (found) {
				g_assert(item != NULL);
				hash_list_moveto_head(cache->used, item);

				/*
				 * Clear error condition if we had none prior to the flush
				 * attempt, since we can do without it for now.
				 */

				if (!had_ioerr)
					db->flags &= ~DBM_IOERR_W;

				g_warning("sdbm: \"%s\": "
					"reusing cache slot used by clean page #%ld instead",
					sdbm_name(db), oldnum);
			} else {
				g_warning("sdbm: \"%s\": cannot discard dirty page #%ld",
					sdbm_name(db), oldnum);
				return -1;
			}
		}

		g_hash_table_remove(cache->pagnum, ulong_to_pointer(oldnum));
		cache->dirty[n] = FALSE;
	}

	/*
	 * Record the association between the cache index and the page number.
	 */

	g_assert(n >= 0 && n < cache->pages);

	cache->numpag[n] = num;
	g_hash_table_insert(cache->pagnum,
		ulong_to_pointer(num), int_to_pointer(n));

	return n;
}
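
getidx() maintains cache->used in most- to least-recently-used order: hash_list_prepend() and hash_list_moveto_head() put the active slot at the head, hash_list_tail() therefore yields the eviction victim, and cache->pagnum maps a page number back to its slot. A hedged sketch of the complementary hit path this bookkeeping enables; it is an illustration rather than the actual SDBM routine, and it uses only the calls visible above plus g_hash_table_lookup_extended():

/*
 * Sketch only: return the cache slot holding page `num', promoting it to
 * the head of the LRU list on a hit, or allocate one via getidx() on a miss.
 * lookup_extended() is used so that slot 0 can be told apart from a miss.
 */
static int
lookup_or_allocate(DBM *db, long num)
{
	struct lru_cache *cache = db->cache;
	void *value;

	if (
		g_hash_table_lookup_extended(cache->pagnum,
			ulong_to_pointer(num), NULL, &value)
	) {
		int n = pointer_to_int(value);

		hash_list_moveto_head(cache->used, int_to_pointer(n));
		return n;
	}

	return getidx(db, num);		/* Miss: allocate, possibly evicting a page */
}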