// Sampling only a-mode registers
void perfctr_cpu_sample(struct perfctr_cpu_state *state)
{
    unsigned int i, cstatus, nractrs;
    struct perfctr_low_ctrs now;
	int   cpu_id;

	cpu_id = hard_smp_processor_id() / 4;
	spin_lock (&pmc_resource[cpu_id].lock);
	if ( pmc_resource[cpu_id].current_thread != netlogic_thr_id() ) {
		// printk (KERN_INFO "PMCounters do not belong to this process[%d]\n", current->pid);
		spin_unlock (&pmc_resource[cpu_id].lock);
		return;
	}
	spin_unlock (&pmc_resource[cpu_id].lock);

    perfctr_cpu_read_counters(state, &now);        // reads only a-mode registers
    cstatus = state->cstatus;
    if (perfctr_cstatus_has_tsc(cstatus)) {
        state->tsc_sum += now.tsc - state->tsc_start;
        // update the start value so that we can keep gathering
        // statistics without interruption
        state->tsc_start = now.tsc;
    }
    nractrs = perfctr_cstatus_nractrs(cstatus);
    for (i = 0; i < nractrs; ++i) {
        state->pmc[i].sum += now.pmc[i] - state->pmc[i].start;
        state->pmc[i].start = now.pmc[i];
    }
}
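
// Illustration (not part of the driver): the accumulate-and-restart idiom
// used by perfctr_cpu_sample() above, shown in isolation. The field widths
// are assumptions; the point is that the unsigned 32-bit subtraction
// 'now - start' yields the correct delta even if the hardware counter
// wrapped once between two samples.
static inline void accumulate_and_restart(unsigned long long *sum,
                                          unsigned int *start,
                                          unsigned int now)
{
    *sum += now - *start;   // modulo-2^32 delta since the last sample
    *start = now;           // restart the interval at the current reading
}
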
inline void perfctr_cpu_suspend(struct perfctr_cpu_state *state)
{
    unsigned int i, cstatus, nractrs;
    struct perfctr_low_ctrs now;
	int   cpu_id;

	cpu_id = hard_smp_processor_id() / 4;
	spin_lock (&pmc_resource[cpu_id].lock);
	if ( pmc_resource[cpu_id].current_thread != netlogic_thr_id() ) {
		// printk (KERN_INFO "PMCounters do not belong to this process[%d]\n", current->pid);
		spin_unlock (&pmc_resource[cpu_id].lock);
		return;
	}
	pmc_resource[cpu_id].current_thread = -1;
	spin_unlock (&pmc_resource[cpu_id].lock);

    // To avoid polluting the numbers, could we freeze the counters
    // here, as early as possible?

    if (perfctr_cstatus_has_ictrs(state->cstatus)) {
        perfctr_cpu_isuspend(state);
    }
    perfctr_cpu_read_counters(state, &now);
    cstatus = state->cstatus;
    if (perfctr_cstatus_has_tsc(cstatus)) {
        state->tsc_sum += now.tsc - state->tsc_start;
    }
    nractrs = perfctr_cstatus_nractrs(cstatus);
    for(i = 0; i < nractrs; ++i) {
        state->pmc[i].sum += now.pmc[i] - state->pmc[i].start;
    }
}
static int nlm_common_pmc_owned(void)
{
	int cpu_id ;
	unsigned long flags;
	int thr_id;

	/* Allow only thread0 in each core to set perfcounter events */
	spin_lock_irqsave(&nlm_common_perf_lock, flags);
	thr_id = netlogic_thr_id();
	if(thr_id) {
		spin_unlock_irqrestore(&nlm_common_perf_lock, flags);
		return 1;
	}
	cpu_id = netlogic_cpu_id();
	if(nlm_common_perf_core_setup[cpu_id] == -1) {
		nlm_common_perf_core_setup[cpu_id] = hard_smp_processor_id();
		spin_unlock_irqrestore(&nlm_common_perf_lock, flags);
		return 0;
	}
	if(nlm_common_perf_core_setup[cpu_id] == hard_smp_processor_id()) {
		spin_unlock_irqrestore(&nlm_common_perf_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&nlm_common_perf_lock, flags);
	return 1;
}
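
// Hypothetical usage sketch (not from the driver): a setup path would
// typically bail out when this hardware thread does not get ownership of
// the core's performance counters (a nonzero return above means "not owned").
static int nlm_example_setup_events(void)
{
    if (nlm_common_pmc_owned())
        return -EBUSY;  // not thread 0, or another CPU already set up this core
    // ... program the performance counter control registers here ...
    return 0;
}
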
inline void perfctr_cpu_resume(struct perfctr_cpu_state *state)
{
	int   cpu_id;

	cpu_id = hard_smp_processor_id() / 4;
	spin_lock (&pmc_resource[cpu_id].lock);
	if ( pmc_resource[cpu_id].current_thread != -1 ) {
		// printk (KERN_INFO "PMCounters unavailable for process %d\n", current->pid);
		spin_unlock (&pmc_resource[cpu_id].lock);
		return;
	}
	pmc_resource[cpu_id].current_thread = netlogic_thr_id();
	spin_unlock (&pmc_resource[cpu_id].lock);

    if (perfctr_cstatus_has_ictrs(state->cstatus)) {
        perfctr_cpu_iresume(state);
    }

    // The counters are started here, having been left frozen by
    // _iresume() above. The model is to trigger the registers via
    // write_control(), then read them and record the start state,
    // which completes the 'resume' process.

    perfctr_cpu_write_control(state);
    {
        struct perfctr_low_ctrs now;
        unsigned int i, cstatus, nrctrs;
        perfctr_cpu_read_counters(state, &now);
        cstatus = state->cstatus;

        // The start state of the registers has to be recorded in
        // resume(), and that is what is done here.

        if (perfctr_cstatus_has_tsc(cstatus)) {
            state->tsc_start = now.tsc;
        }
        nrctrs = perfctr_cstatus_nractrs(cstatus);
        for (i = 0; i < nrctrs; ++i) {
            state->pmc[i].start = now.pmc[i];
        }
    }
    /* XXX: if (SMP && start.tsc == now.tsc) ++now.tsc; */
}
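
// Simplified illustration (hypothetical helpers, not from the driver): the
// ownership hand-off that perfctr_cpu_resume() and perfctr_cpu_suspend()
// implement around pmc_resource[]. A core's counters are free when
// current_thread == -1; resume claims them for the running hardware thread
// and suspend releases them.
struct pmc_owner {
    spinlock_t lock;
    int current_thread;                 // -1 when the core's counters are free
};

static int pmc_try_claim(struct pmc_owner *res, int thr_id)
{
    int claimed = 0;

    spin_lock(&res->lock);
    if (res->current_thread == -1) {
        res->current_thread = thr_id;   // claim for this hardware thread
        claimed = 1;
    }
    spin_unlock(&res->lock);
    return claimed;
}

static void pmc_release(struct pmc_owner *res, int thr_id)
{
    spin_lock(&res->lock);
    if (res->current_thread == thr_id)  // only the owner may release
        res->current_thread = -1;
    spin_unlock(&res->lock);
}
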
static void mips_write_control(const struct perfctr_cpu_state *state)
{
    struct per_cpu_cache *cache;
    unsigned int nrctrs, i;

    // The per-CPU cache stores the control state of the last id that
    // was programmed. Under what conditions does that cached state
    // remain intact? Some processes may ask that their statistics not
    // be recorded; if a thread is then rescheduled on the same
    // processor without an intervening thread recording statistics,
    // the cache will still be hot and the writes below can be skipped.

    cache = get_cpu_cache();
    if (cache->k1.id == state->k1.id) {
        return;
    }
    nrctrs = perfctr_cstatus_nrctrs(state->cstatus);

    preempt_disable();
    for (i = 0; i < nrctrs; ++i) {
        unsigned int ctrl_reg = state->control.pmc[i].ctrl_reg;
        unsigned int pmc = state->pmc[i].map;    // assuming that the 'state' values have been
                                                 // updated from control values specified by users
        if (ctrl_reg != cache->ctrl_regs[pmc]) {
            if (!perfctr_cntmode) {
                MIPS_XLR_UNSET_CNT_ALL_THREADS(ctrl_reg);
                MIPS_XLR_SET_THREADID(ctrl_reg, netlogic_thr_id());
            } else {
                MIPS_XLR_SET_CNT_ALL_THREADS(ctrl_reg);
            }
            cache->ctrl_regs[pmc] = ctrl_reg;
            write_pmctrl(pmc, ctrl_reg);
        }
    }
    cache->k1.id = state->k1.id;
    preempt_enable();
}
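
// Illustration (not from the driver): the write-avoidance pattern used by
// mips_write_control(). A per-CPU shadow copy of each control register is
// compared against the requested value so that unchanged registers are not
// rewritten; hw_write stands in for write_pmctrl().
static void shadow_write_ctrl(unsigned int *shadow, unsigned int reg,
                              unsigned int val,
                              void (*hw_write)(unsigned int, unsigned int))
{
    if (shadow[reg] != val) {   // skip the redundant hardware access
        shadow[reg] = val;      // keep the shadow copy in sync
        hw_write(reg, val);
    }
}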