Example no. 1
static void
ctl_refresh(void)
{
	unsigned i;
	VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);

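	/*
	 * If stats are enabled, snapshot the global chunk and huge
	 * allocation counters under their respective mutexes.
	 */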
	if (config_stats) {
		malloc_mutex_lock(&chunks_mtx);
		ctl_stats.chunks.current = stats_chunks.curchunks;
		ctl_stats.chunks.total = stats_chunks.nchunks;
		ctl_stats.chunks.high = stats_chunks.highchunks;
		malloc_mutex_unlock(&chunks_mtx);

		malloc_mutex_lock(&huge_mtx);
		ctl_stats.huge.allocated = huge_allocated;
		ctl_stats.huge.nmalloc = huge_nmalloc;
		ctl_stats.huge.ndalloc = huge_ndalloc;
		malloc_mutex_unlock(&huge_mtx);
	}

	/*
	 * Clear the sum stats, since ctl_arena_refresh() will merge into
	 * them.
	 */
	ctl_stats.arenas[ctl_stats.narenas].nthreads = 0;
	ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);

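	/*
	 * Snapshot the arena pointers and per-arena thread counts while
	 * holding arenas_lock, so that the refresh loop below can run
	 * without the lock.
	 */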
	malloc_mutex_lock(&arenas_lock);
	memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
	for (i = 0; i < ctl_stats.narenas; i++) {
		if (arenas[i] != NULL)
			ctl_stats.arenas[i].nthreads = arenas[i]->nthreads;
		else
			ctl_stats.arenas[i].nthreads = 0;
	}
	malloc_mutex_unlock(&arenas_lock);
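	/*
	 * Refresh each initialized arena's stats; ctl_arena_refresh()
	 * merges them into the summary element cleared above.
	 */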
	for (i = 0; i < ctl_stats.narenas; i++) {
		bool initialized = (tarenas[i] != NULL);

		ctl_stats.arenas[i].initialized = initialized;
		if (initialized)
			ctl_arena_refresh(tarenas[i], i);
	}

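	/*
	 * Compute allocator-wide totals: allocated sums the merged small
	 * and large counts plus huge allocations; active converts the
	 * merged active page count to bytes and adds huge allocations;
	 * mapped derives from the current chunk count.
	 */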
	if (config_stats) {
		ctl_stats.allocated =
		    ctl_stats.arenas[ctl_stats.narenas].allocated_small
		    + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large
		    + ctl_stats.huge.allocated;
		ctl_stats.active =
		    (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE)
		    + ctl_stats.huge.allocated;
		ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
	}

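	/* Advance the epoch to mark this refresh. */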
	ctl_epoch++;
}
Example no. 2
static void
ctl_refresh(void)
{
	unsigned i;
	arena_t *tarenas[narenas];

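	/*
	 * With statistics support compiled in, snapshot the global chunk
	 * and huge allocation counters under their respective mutexes.
	 */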
#ifdef JEMALLOC_STATS
	malloc_mutex_lock(&chunks_mtx);
	ctl_stats.chunks.current = stats_chunks.curchunks;
	ctl_stats.chunks.total = stats_chunks.nchunks;
	ctl_stats.chunks.high = stats_chunks.highchunks;
	malloc_mutex_unlock(&chunks_mtx);

	malloc_mutex_lock(&huge_mtx);
	ctl_stats.huge.allocated = huge_allocated;
	ctl_stats.huge.nmalloc = huge_nmalloc;
	ctl_stats.huge.ndalloc = huge_ndalloc;
	malloc_mutex_unlock(&huge_mtx);
#endif

	/*
	 * Clear the sum stats, since ctl_arena_refresh() will merge into
	 * them.
	 */
	ctl_stats.arenas[narenas].nthreads = 0;
	ctl_arena_clear(&ctl_stats.arenas[narenas]);

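	/*
	 * Snapshot the arena pointers and per-arena thread counts while
	 * holding arenas_lock, so that the refresh loop below can run
	 * without the lock.
	 */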
	malloc_mutex_lock(&arenas_lock);
	memcpy(tarenas, arenas, sizeof(arena_t *) * narenas);
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			ctl_stats.arenas[i].nthreads = arenas[i]->nthreads;
		else
			ctl_stats.arenas[i].nthreads = 0;
	}
	malloc_mutex_unlock(&arenas_lock);
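	/*
	 * Refresh each initialized arena's stats; ctl_arena_refresh()
	 * merges them into the summary element cleared above.
	 */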
	for (i = 0; i < narenas; i++) {
		bool initialized = (tarenas[i] != NULL);

		ctl_stats.arenas[i].initialized = initialized;
		if (initialized)
			ctl_arena_refresh(tarenas[i], i);
	}

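	/*
	 * Compute allocator-wide totals from the merged arena sums, the
	 * huge allocation stats, and the current chunk count.
	 */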
#ifdef JEMALLOC_STATS
	ctl_stats.allocated = ctl_stats.arenas[narenas].allocated_small
	    + ctl_stats.arenas[narenas].astats.allocated_large
	    + ctl_stats.huge.allocated;
	ctl_stats.active = (ctl_stats.arenas[narenas].pactive << PAGE_SHIFT)
	    + ctl_stats.huge.allocated;
	ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);

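	/* With swap support, also snapshot the available swap space. */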
#  ifdef JEMALLOC_SWAP
	malloc_mutex_lock(&swap_mtx);
	ctl_stats.swap_avail = swap_avail;
	malloc_mutex_unlock(&swap_mtx);
#  endif
#endif

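	/* Advance the epoch to mark this refresh. */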
	ctl_epoch++;
}