Esempio n. 1
0
/**
 * Display the data gathered during the last time period.
 * Perhaps it would be better to have this done on a button click(?)
 */
static void
search_stats_gui_update_display(void)
{
    gboolean sorting_disabled;      /* TRUE if we turned sorting off below */
    tm_t start_time, end_time;
    time_delta_t elapsed;           /* milliseconds spent rebuilding the view */

    stat_count = 0;
    /* Suppress property-change notifications whilst the store is rebuilt */
    g_object_freeze_notify(G_OBJECT(treeview_search_stats));
    gtk_list_store_clear(store_search_stats);

    /*
     * Temporarily disable sorting while inserting the updated table.
     * Otherwise, CPU is overloaded with sorting every addition
     *  to the hash table.
     */
    sorting_disabled = FALSE;
    tm_now_exact(&start_time);
    if (store_search_stats->sort_column_id >= 0) {
        /* A sort column is active: save it and switch sorting off */
        sorting_disabled = TRUE;
        search_stats_gui_sort_save();
    }
    /* insert the hash table contents into the sorted treeview */
    /* NOTE: the callback also removes each entry from stat_hash */
    htable_foreach_remove(stat_hash, stats_hash_to_treeview, NULL);

    tm_now_exact(&end_time);
    elapsed = tm_elapsed_ms(&end_time, &start_time);

    /*
     * Re-enable sorting if previously disabled.
     * If too much time has elapsed, leave sorting disabled.
     */
    if (sorting_disabled && elapsed < 100) {
        search_stats_gui_restore_sort();
        search_stats_gui_sort_restore();
    } else if (!sorting_disabled && elapsed > 200) {
        /*
         * Sorting was already off for this refresh, yet rebuilding the
         * view still took too long: the stats collection itself is
         * overloading the system, so reset and discontinue it entirely.
         */
        search_stats_gui_reset();
        search_stats_gui_disable();
        search_stats_gui_overload = TRUE;
    }

    if (search_stats_gui_overload) {
        /* update status bar message */
        gtk_label_set_text(GTK_LABEL(label_search_stats_count),
                           "Disabling Search Stats due to system load" );
    } else {
        /* update the status bar counter */
        gtk_label_printf(GTK_LABEL(label_search_stats_count),
                         NG_("%u term counted", "%u terms counted", stat_count),
                         stat_count);
    }
    /* Re-enable notifications; pending ones are emitted now */
    g_object_thaw_notify(G_OBJECT(treeview_search_stats));
}
Esempio n. 2
0
/**
 * Called every period to heartbeat the callout queue.
 */
/**
 * Periodic callback driving the callout queue's internal clock.
 *
 * Measures the real time elapsed since the previous heartbeat and
 * advances the queue by that amount.
 */
static void
cq_heartbeat(cqueue_t *cq)
{
	tm_t now;
	time_delta_t elapsed;

	cqueue_check(cq);

	/*
	 * Compute how many milliseconds went by since the last heartbeat.
	 */

	mutex_lock(&cq->cq_lock);

	tm_now_exact(&now);
	elapsed = tm_elapsed_ms(&now, &cq->cq_last_heartbeat);
	cq->cq_last_heartbeat = now;	/* struct copy */

	/*
	 * A negative delta, or one exceeding ten nominal periods, suggests
	 * the system clock was adjusted behind our back: fall back to a
	 * single period instead.
	 */

	if (elapsed < 0 || elapsed > 10 * cq->cq_period)
		elapsed = cq->cq_period;

	/*
	 * cq_clock() is entered with the mutex held and releases it itself.
	 */

	cq_clock(cq, elapsed);
}
Esempio n. 3
0
/**
 * Send time synchronization request to specified node.
 *
 * When node_id is non-zero, it refers to the connected node to which
 * we're sending the time synchronization request.
 */
/**
 * Send a time synchronization request to the specified node.
 *
 * @param n        the node the request is sent to
 * @param node_id  when non-zero, the ID of the connected node to which
 *                 we're sending the time synchronization request
 */
void
tsync_send(struct gnutella_node *n, const struct nid *node_id)
{
	struct tsync *ts;

	g_return_if_fail(n->port != 0);
	if (!NODE_IS_WRITABLE(n))
		return;

	/* Record the pending request, keyed by its (GMT-adjusted) send time */
	WALLOC(ts);
	ts->magic = TSYNC_MAGIC;
	ts->udp = booleanize(NODE_IS_UDP(n));
	ts->node_id = nid_ref(node_id);
	tm_now_exact(&ts->sent);
	ts->sent.tv_sec = clock_loc2gmt(ts->sent.tv_sec);

	/*
	 * As far as time synchronization goes, we must get the reply within
	 * the next TSYNC_EXPIRE_MS millisecs.
	 */

	ts->expire_ev = cq_main_insert(TSYNC_EXPIRE_MS, tsync_expire, ts);

	hevset_insert(tsync_by_time, ts);

	vmsg_send_time_sync_req(n, GNET_PROPERTY(ntp_detected), &ts->sent);
}
Esempio n. 4
0
/**
 * Resume task execution.
 */
/**
 * Resume task execution.
 *
 * Removes the task from the scheduler's wait structures, flags it as
 * running and timestamps the start of this scheduling slice.
 */
static void
bg_task_resume(struct bgtask *bt)
{
	bg_task_check(bt);
	g_assert(!(bt->flags & TASK_F_RUNNING));

	bg_sched_remove(bt);
	bt->flags |= TASK_F_RUNNING;

	tm_now_exact(&bt->start);	/* Slice start, read back at suspend time */
}
Esempio n. 5
0
/**
 * Benchmark ``f'' by running it ``iter'' times over the same key set.
 *
 * @param f        routine under test, invoked as (*f)(o, keys, count)
 * @param o        opaque context handed to ``f''
 * @param keys     the SHA1 keys processed on each iteration
 * @param count    amount of keys in ``keys''
 * @param iter     how many times to invoke ``f''
 * @param what     label used in the verbose report
 * @param verbose  whether to log timing results
 *
 * @return total wall-clock time in seconds for all iterations.
 */
static double G_COLD
timeit(
	void (*f)(void *, sha1_t *, size_t),
	void *o, sha1_t *keys, size_t count, size_t iter, const char *what,
	bool verbose)
{
	tm_t t0, t1;
	double secs;
	size_t round;

	tm_now_exact(&t0);
	for (round = 0; round < iter; round++)
		(*f)(o, keys, count);
	tm_now_exact(&t1);

	secs = tm_elapsed_f(&t1, &t0);

	if (verbose)
		g_debug("%s (%zu items, %zu loop%s): %F s (average: %F s)", what,
			count, iter, plural(iter), secs, secs / iter);

	return secs;
}
Esempio n. 6
0
/**
 * Check which of qsort() or xqsort() is best for sorting aligned arrays with
 * a native item size of OPSIZ.
 */
/**
 * Check which of qsort() or xqsort() is best for sorting aligned arrays with
 * a native item size of OPSIZ.
 *
 * @param verbose  whether to log progress and total benchmarking time
 */
void
vsort_init(int verbose)
{
	tm_t t_begin, t_end;
	bool already_blockable = TRUE;

	STATIC_ASSERT(VSORT_HUGE_ITEMS > VSORT_ITEMS);
	STATIC_ASSERT(VSORT_ITEMS > VSORT_SMALL_ITEMS);

	if (verbose)
		s_info("benchmarking sort routines to select the best one...");

	/*
	 * The main thread must be allowed to block for the duration of the
	 * tests: tqsort() can spawn threads and wait on them.
	 */

	if (thread_is_main() && !thread_main_is_blockable()) {
		thread_set_main(TRUE);
		already_blockable = FALSE;	/* Remember to undo this at the end */
	}

	tm_now_exact(&t_begin);
	vsort_init_items(VSORT_HUGE_ITEMS, VSORT_HUGE, verbose, "huge");
	vsort_init_items(VSORT_ITEMS, VSORT_LARGE, verbose, "large");
	vsort_init_items(VSORT_SMALL_ITEMS, VSORT_SMALL, verbose, "small");
	tm_now_exact(&t_end);

	if (verbose)
		s_info("vsort() benchmarking took %F secs",
			tm_elapsed_f(&t_end, &t_begin));

	/*
	 * Restore the non-blockable main thread if we changed it above.
	 */

	if (!already_blockable)
		thread_set_main(FALSE);
}
Esempio n. 7
0
/**
 * Time sorting routine.
 *
 * @param f		the function we call and compute the average execution time for
 * @param vt	describes the data we're sorting, given as input to ``f''
 * @param loops	amount of loops to perform, possibly updated (increased)
 *
 * @return real clock-time in seconds for one single iteration.
 */
/**
 * Time sorting routine.
 *
 * @param f		the function we call and compute the average execution time for
 * @param vt	describes the data we're sorting, given as input to ``f''
 * @param loops	amount of loops to perform, possibly updated (increased)
 *
 * @return real clock-time in seconds for one single iteration.
 */
static double
vsort_timeit(vsort_timer_t f, struct vsort_timing *vt, size_t *loops)
{
	size_t n = *loops;
	double elapsed = 0.0, telapsed = 0.0;
	uint attempts = 0;

	for (;;) {
		double start, end;
		tm_t tstart, tend;

		/*
		 * Safety against broken clocks which would stall the process
		 * forever if we were to continue doubling the workload.
		 */

		if (attempts++ >= VSORT_ATTEMPTS) {
			s_critical("%s(): "
				"either CPU is too fast or kernel clock resultion too low: "
				"elapsed time is %F secs after %zu loops",
				G_STRFUNC, elapsed, n);
			return elapsed / n;	/* Give up: skip the CPU/wall-clock MIN */
		}

		/*
		 * This is a pure CPU grinding algorithm, hence we monitor the
		 * amount of CPU used and not the wall clock: if the process gets
		 * suspended in the middle of the test, that would completely
		 * taint the results.
		 *
		 * However, in multi-threaded processes, the accounted CPU time is
		 * for the whole process, and this is not fair for tqsort() which
		 * uses multiple threads in order to minimize the overall elapsed
		 * time.
		 *
		 * Hence we measure both the CPU time and the wall-clock time and
		 * pick the lowest figure.
		 */

		(*f)(vt, 1);	/* Blank run to offset effect of memory caching */

		tm_now_exact(&tstart);
		tm_cputime(&start, NULL);
		(*f)(vt, n);
		tm_cputime(&end, NULL);
		tm_now_exact(&tend);

		elapsed = end - start;
		telapsed = tm_elapsed_f(&tend, &tstart);

		if (elapsed >= VSORT_MIN_SECS)
			break;		/* Measurement is significant enough */

		/*
		 * The machine is too powerful (or the clock granularity too low):
		 * double the amount of loops and retry.
		 */

		*loops = n = n * 2;
	}

	elapsed = MIN(elapsed, telapsed);

	return elapsed / n;
}
Esempio n. 8
0
/**
 * Suspend task.
 *
 * As a side effect, update the tick cost statistics and elapsed time
 * information for the last scheduling period.
 *
 * @param bt		the task to suspend
 * @param target	the runtime target of the task (0 if unknown)
 */
static void
bg_task_suspend(struct bgtask *bt, int target)
{
	tm_t end;
	time_delta_t elapsed;		/* Microseconds spent in this slice */

	bg_task_check(bt);
	g_assert(bt->flags & TASK_F_RUNNING);

	/* Put the task back on the scheduler's list and clear its running flag */
	bg_sched_add(bt);
	bt->flags &= ~TASK_F_RUNNING;

	/*
	 * Update task running time.
	 */

	tm_now_exact(&end);
	elapsed = tm_elapsed_us(&end, &bt->start); 

	/*
	 * Compensate any clock adjustment by reusing the previous value we
	 * measured when we last run that task, taking into accound the fact
	 * that the number of ticks used then might have been different.
	 */

	if (elapsed < 0) {			/* Clock adjustment whilst we ran */
		elapsed = bt->elapsed;	/* Adjust value from last run */
		if (bt->prev_ticks != 0)
			elapsed = elapsed * bt->ticks_used / bt->prev_ticks;
	}

	bt->elapsed = elapsed;
	bt->wtime += (elapsed + 500) / 1000;	/* wtime is in ms; round to nearest */
	bt->prev_ticks = bt->ticks_used;

	/*
	 * Now update the tick cost, if elapsed is not null.
	 *
	 * If task is flagged TASK_F_NOTICK, it was scheduled only to deliver
	 * a signal and we cannot really update the tick cost.
	 */

	if (!(bt->flags & TASK_F_NOTICK)) {
		double new_cost;

		/*
		 * If the task spent more than its target, then the tick cost
		 * was severely under-estimated and we compute a new one.
		 * Otherwise, we use a slow EMA to update the tick cost, in order
		 * to smooth variations.
		 *
		 * NOTE(review): both branches divide by bt->ticks_used — presumably
		 * it is guaranteed non-zero whenever TASK_F_NOTICK is clear; confirm
		 * with the scheduler.  Also note the division is integer division
		 * (elapsed and ticks_used are integral), so sub-microsecond-per-tick
		 * precision is lost before the double assignment — verify intended.
		 */

		if (target != 0 && elapsed > target) {
			if (bg_debug > 4)
				g_message("BGTASK \"%s\" resetting tick_cost", bt->name);
			new_cost = elapsed / bt->ticks_used;
		} else {
			/* Slow EMA: 4 parts old cost, 1 part new measurement */
			new_cost = (4 * bt->tick_cost + (elapsed / bt->ticks_used)) / 5.0;
		}

		if (bg_debug > 4) {
			g_debug("BGTASK \"%s\" total=%d msecs, "
				"elapsed=%lu usecs (targeted %d), "
				"ticks=%d, used=%d, tick_cost=%f usecs (was %f)",
				bt->name, bt->wtime, (gulong) elapsed, target,
				bt->ticks, bt->ticks_used,
				new_cost, bt->tick_cost);
		}

		bt->tick_cost = new_cost;
	}
}