/* Call perfctr_cpu_ireload() just before perfctr_cpu_resume() to
   bypass internal caching and force a reload of the I-mode PMCs. */
void perfctr_cpu_ireload(struct perfctr_cpu_state *state)
{
#ifdef CONFIG_SMP
    /* SMP: forget which CPU this state was last suspended on, so the
       resume path cannot take its cached fast path. */
    clear_isuspend_cpu(state);
#else
    /* UP: invalidate the per-cpu cache id so mips_write_control() sees
       a mismatch and rewrites the control registers.  Assumes 0 never
       matches a live state id — TODO confirm id allocation scheme. */
    get_cpu_cache()->k1.id = 0;
#endif
}
/* Reset this CPU's perfctr software cache and hardware counters to a
   pristine state. */
static void perfctr_cpu_clear_counters(void)
{
    struct per_cpu_cache *cache = get_cpu_cache();

    /* Wipe every cached control-register value and mark the cache as
       holding no valid state id (-1 matches no state). */
    memset(cache, 0, sizeof(*cache));
    cache->k1.id = -1;

    mips_clear_counters();
}
/*
 * Program the PMC control registers for 'state'.
 *
 * The per-cpu cache remembers the id of the state whose control values
 * were last written on this CPU.  If a thread is rescheduled on the
 * same processor and no intervening thread touched the counters, the
 * cache is still "hot" and all control-register writes are skipped.
 *
 * Fix vs. original: get_cpu_cache() and the cache-hit early return ran
 * before preempt_disable(), so a migration in between could check one
 * CPU's cache and then program a different CPU's registers.  Preemption
 * is now disabled around all per-cpu accesses, including the hit path.
 */
static void mips_write_control(const struct perfctr_cpu_state *state)
{
    struct per_cpu_cache *cache;
    unsigned int nrctrs, i;

    preempt_disable();

    cache = get_cpu_cache();
    if (cache->k1.id == state->k1.id) {
        /* Cache hit: this CPU's registers already match 'state'. */
        preempt_enable();
        return;
    }

    nrctrs = perfctr_cstatus_nrctrs(state->cstatus);
    for (i = 0; i < nrctrs; ++i) {
        /* Assumes 'state' was already populated from the user-supplied
           control values — TODO confirm against the setup path. */
        unsigned int ctrl_reg = state->control.pmc[i].ctrl_reg;
        unsigned int pmc = state->pmc[i].map;

        if (ctrl_reg != cache->ctrl_regs[pmc]) {
            if (!perfctr_cntmode) {
                /* Per-thread mode: count only on the current hw thread. */
                MIPS_XLR_UNSET_CNT_ALL_THREADS(ctrl_reg);
                MIPS_XLR_SET_THREADID(ctrl_reg, netlogic_thr_id());
            } else {
                /* Count events from all hardware threads. */
                MIPS_XLR_SET_CNT_ALL_THREADS(ctrl_reg);
            }
            cache->ctrl_regs[pmc] = ctrl_reg;
            write_pmctrl(pmc, ctrl_reg);
        }
    }
    cache->k1.id = state->k1.id;
    preempt_enable();
}
/* Beispiel #4 ("Example #4"), 0 — stray separator text left over from
   concatenating snippets; the code below comes from a different source
   file (stress-ng). Kept as a comment so the file stays compilable. */
/*
 *  stress_cache_alloc()
 *	allocate shared cache buffer
 *
 *  Sizes shared->mem_cache to match the CPU cache level selected by
 *  shared->mem_cache_level (Linux only; other platforms fall back to
 *  the built-in MEM_CACHE_SIZE default), then allocates the buffer.
 *  Returns 0 on success, -1 if the allocation fails.
 */
int stress_cache_alloc(const char *name)
{
#if defined(__linux__)
	cpus_t *cpu_caches = NULL;
	cpu_cache_t *cache = NULL;
	uint16_t max_cache_level = 0;
#endif

#if !defined(__linux__)
	/* No cache-probing support on this platform: use the default. */
	shared->mem_cache_size = MEM_CACHE_SIZE;
#else
	cpu_caches = get_all_cpu_cache_details();
	if (!cpu_caches) {
		pr_inf(stderr, "%s: using built-in defaults as unable to "
			"determine cache details\n", name);
		shared->mem_cache_size = MEM_CACHE_SIZE;
		/* NOTE(review): cpu_caches is NULL on this path, so this
		   relies on free_cpu_caches(NULL) being a no-op — confirm. */
		goto init_done;
	}

	max_cache_level = get_max_cache_level(cpu_caches);

	/* Clamp the requested cache level to what the CPU actually has. */
	if (shared->mem_cache_level > max_cache_level) {
		pr_dbg(stderr, "%s: reducing cache level from L%d (too high) "
			"to L%d\n", name,
			shared->mem_cache_level, max_cache_level);
		shared->mem_cache_level = max_cache_level;
	}

	cache = get_cpu_cache(cpu_caches, shared->mem_cache_level);
	if (!cache) {
		pr_inf(stderr, "%s: using built-in defaults as no suitable "
			"cache found\n", name);
		shared->mem_cache_size = MEM_CACHE_SIZE;
		goto init_done;
	}

	if (shared->mem_cache_ways > 0) {
		uint64_t way_size;

		/* Clamp the requested way count to the cache's associativity. */
		if (shared->mem_cache_ways > cache->ways) {
			pr_inf(stderr, "%s: cache way value too high - "
				"defaulting to %d (the maximum)\n",
				name, cache->ways);
			shared->mem_cache_ways = cache->ways;
		}

		way_size = cache->size / cache->ways;

		/* only fill the specified number of cache ways */
		shared->mem_cache_size = way_size * shared->mem_cache_ways;
	} else {
		/* fill the entire cache */
		shared->mem_cache_size = cache->size;
	}

	/* A zero size can still result (e.g. sysfs reported 0) — fall
	   back to the built-in default rather than calloc'ing nothing. */
	if (!shared->mem_cache_size) {
		pr_inf(stderr, "%s: using built-in defaults as unable to "
			"determine cache size\n", name);
		shared->mem_cache_size = MEM_CACHE_SIZE;
	}
init_done:
	free_cpu_caches(cpu_caches);
#endif
	/* calloc zero-fills the buffer and overflow-checks n * size. */
	shared->mem_cache = calloc(shared->mem_cache_size, 1);
	if (!shared->mem_cache) {
		pr_err(stderr, "%s: failed to allocate shared cache buffer\n",
			name);
		return -1;
	}
	pr_inf(stderr, "%s: default cache size: %" PRIu64 "K\n",
		name, shared->mem_cache_size / 1024);

	return 0;
}