Пример #1
0
/*
 * Timer-based sampling ISR (A9 OS-timer variant).
 *
 * Captures one call-stack sample for the interrupted task while
 * time-based sampling (TBS) is active.
 *
 * @pid/@tid: process/thread that was interrupted
 * @regs:     trapped register state used to unwind the call stack
 * @cpu:      CPU this interrupt fired on (selects the per-CPU buffer)
 * @ts:       timestamp stored in the sample header
 *
 * Returns IRQ_HANDLED unconditionally.  Sampling is paused (tbs_running
 * cleared) as soon as the sample buffer reports full.
 */
static irqreturn_t px_timer_isr(unsigned int pid,
                                unsigned int tid,
                                struct pt_regs * const regs,
                                unsigned int cpu,
                                unsigned long long ts)
{
	if (tbs_running)
	{
		char ** bt_buffer = &per_cpu(g_bt_buffer, cpu);
		PXD32_CSS_Call_Stack_V2 *css_data = (PXD32_CSS_Call_Stack_V2 *)*bt_buffer;

		/* unwind the call stack into the per-CPU scratch buffer */
		backtrace(regs, cpu, pid, tid, css_data);

		fill_css_data_head(css_data, pid, tid, COUNTER_A9_OS_TIMER, ts);

		/* keep sampling only while the sample buffer still has room */
		tbs_running = !write_css_data(cpu, css_data);
	}

	return IRQ_HANDLED;
}
Пример #2
0
/*
 * Timer-based sampling ISR (PXA2 OS-timer variant, channel 5).
 *
 * Checks whether our OS-timer channel raised the interrupt; if so,
 * disables the counter, acknowledges the interrupt, records one
 * call-stack sample and re-arms the counter unless the sample buffer
 * is full.
 *
 * @regs:    trapped register state used to unwind the call stack
 * @pid/@tid: process/thread that was interrupted
 * @cpu:     CPU this interrupt fired on (selects the per-CPU buffer)
 * @ts:      timestamp stored in the sample header
 *
 * Returns IRQ_HANDLED if the interrupt was ours, IRQ_NONE otherwise
 * (shared-IRQ protocol).
 */
static irqreturn_t px_timer_isr(struct pt_regs * const regs,
                                unsigned int pid,
                                unsigned int tid,
                                unsigned int cpu,
                                unsigned long long ts)
{
	bool buffer_full = false;

	if (OSSR & OSSR_PX_INTERRUPT)
	{
		char ** bt_buffer = &per_cpu(g_bt_buffer, cpu);
		PXD32_CSS_Call_Stack_V2 *css_data = (PXD32_CSS_Call_Stack_V2 *)*bt_buffer;

		timer_int_count++;

		/* disable the counter while we take the sample */
		OMCR5 &= ~0x7;

		/* clear the interrupt flag (write-1-to-clear) */
		OSSR = OSSR_PX_INTERRUPT;

		/* write the call-stack record to the sample buffer */
		backtrace(regs, cpu, pid, tid, css_data);

		fill_css_data_head(css_data, pid, tid, COUNTER_PXA2_OS_TIMER, ts);

		buffer_full = write_css_data(cpu, css_data);

		/*
		 * The counter was already disabled above, so when the buffer
		 * is full we simply leave it off (sampling paused); otherwise
		 * re-enable it at the 32 kHz rate.
		 */
		if (!buffer_full)
		{
			OMCR5 |= OMCR_FREQ_32K;
		}

		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
Пример #3
0
/*
 * PMU overflow ISR (PJ4 variant).
 *
 * Stops the PMU, acknowledges all overflow flags, then emits one
 * call-stack sample (or bumps a calibration counter) for every counter
 * that overflowed: CCNT first, then PMN0..PMN3.  Counters are reloaded
 * with their reset values and the PMU is restarted unless the sample
 * buffer filled up.
 *
 * @regs:     trapped register state used to unwind the call stack
 * @pid/@tid: process/thread that was interrupted
 * @cpu:      CPU this interrupt fired on (selects the per-CPU buffer)
 * @ts:       timestamp stored in the sample header
 *
 * Returns IRQ_HANDLED unconditionally.
 */
static irqreturn_t px_pmu_isr(struct pt_regs * const regs,
                              unsigned int pid,
                              unsigned int tid,
                              unsigned int cpu,
                              unsigned long long ts)
{
	u32 pmnc_value;
	u32 flag_value;
	unsigned int i;
	/* initialized like the sibling ISRs: avoids reading an
	 * indeterminate value if a flag bit has no matching switch case */
	unsigned int reg_id = 0;
	bool buffer_full = false;

	char ** bt_buffer = &per_cpu(g_bt_buffer, cpu);
	PXD32_CSS_Call_Stack_V2 *css_data = (PXD32_CSS_Call_Stack_V2 *)*bt_buffer;

	/* disable the counters while we process the overflow(s) */
	pmnc_value = PJ4_Read_PMNC();
	pmnc_value &= ~0x1;
	PJ4_Write_PMNC(pmnc_value);

	/* latch, then clear, all overflow flags (write-1-to-clear) */
	flag_value = PJ4_Read_FLAG();
	PJ4_Write_FLAG(0xffffffff);

	/* unwind once; the same stack is shared by every overflowed counter */
	if (flag_value != 0)
		backtrace(regs, cpu, pid, tid, css_data);

	/* cycle counter overflow (bit 31) */
	if ((flag_value & 0x80000000) && es[COUNTER_PJ4_PMU_CCNT].enabled)
	{
		reg_id = COUNTER_PJ4_PMU_CCNT;

		if (es[reg_id].calibration == false)
		{
			/* write css data in non-calibration mode */
			if (!buffer_full)
			{
				fill_css_data_head(css_data, pid, tid, reg_id, ts);
				buffer_full |= write_css_data(cpu, css_data);
			}
		}
		else
		{
			/* calibration mode: just count the overflows */
			es[reg_id].overflow++;
		}

		PJ4_WritePMUCounter(reg_id, es[reg_id].reset_value);
	}

	/* event counters PMN0..PMN3 (bits 0..3) */
	for (i=0; i<PJ4_PMN_NUM; i++)
	{
		if (flag_value & (0x1 << i))
		{
			switch (i)
			{
			case 0:	reg_id = COUNTER_PJ4_PMU_PMN0; break;
			case 1:	reg_id = COUNTER_PJ4_PMU_PMN1; break;
			case 2:	reg_id = COUNTER_PJ4_PMU_PMN2; break;
			case 3:	reg_id = COUNTER_PJ4_PMU_PMN3; break;
			default: break;
			}

			if (es[reg_id].calibration == false)
			{
				/* write css data in non-calibration mode */
				if (!buffer_full)
				{
					fill_css_data_head(css_data, pid, tid, reg_id, ts);
					buffer_full |= write_css_data(cpu, css_data);
				}
			}
			else
			{
				/* calibration mode: just count the overflows */
				es[reg_id].overflow++;
			}

			PJ4_WritePMUCounter(reg_id, es[reg_id].reset_value);
		}

	}

	if (!buffer_full)
	{
		/* re-enable the counters; if the buffer is full stay paused */
		pmnc_value |= 0x1;
		PJ4_Write_PMNC(pmnc_value);
	}

	return IRQ_HANDLED;
}
Пример #4
0
/*
 * PMU overflow ISR (PJ1 variant).
 *
 * Disables all four PMU counters via their control registers (COR0..3),
 * acknowledges the overflow flags, then emits one call-stack sample
 * (or bumps a calibration counter) for each PMN counter that
 * overflowed.  Counters are reloaded with their reset values and
 * re-enabled unless the sample buffer filled up.
 *
 * @regs:     trapped register state used to unwind the call stack
 * @pid/@tid: process/thread that was interrupted
 * @cpu:      CPU this interrupt fired on (selects the per-CPU buffer)
 * @ts:       timestamp stored in the sample header
 *
 * Returns IRQ_HANDLED unconditionally.
 */
static irqreturn_t px_pmu_isr(struct pt_regs * const regs,
                              unsigned int pid,
                              unsigned int tid,
                              unsigned int cpu,
                              unsigned long long ts)
{
	unsigned int cor_value[PJ1_PMN_NUM];
	unsigned int flag_value;
	unsigned int i;
	unsigned int reg = 0;
	unsigned int reg_id = 0;
	bool buffer_full = false;
	char ** bt_buffer = &per_cpu(g_bt_buffer, cpu);
	PXD32_CSS_Call_Stack_V2 *css_data = (PXD32_CSS_Call_Stack_V2 *)*bt_buffer;

	/* disable the counters: clear the enable bit in each COR */
	cor_value[0] = PJ1_ReadCOR(PJ1_PMU_COR0);
	cor_value[1] = PJ1_ReadCOR(PJ1_PMU_COR1);
	cor_value[2] = PJ1_ReadCOR(PJ1_PMU_COR2);
	cor_value[3] = PJ1_ReadCOR(PJ1_PMU_COR3);

	for (i=0; i<PJ1_PMN_NUM; i++)
	{
		cor_value[i] &= ~0x1;
	}

	PJ1_WriteCOR(PJ1_PMU_COR0, cor_value[0]);
	PJ1_WriteCOR(PJ1_PMU_COR1, cor_value[1]);
	PJ1_WriteCOR(PJ1_PMU_COR2, cor_value[2]);
	PJ1_WriteCOR(PJ1_PMU_COR3, cor_value[3]);

	/* latch, then clear, all four overflow flags */
	flag_value = PJ1_ReadFLAG();
	PJ1_WriteFLAG(0xf);

	/* unwind once; the same stack is shared by every overflowed counter */
	backtrace(regs, cpu, pid, tid, css_data);

	for (i=0; i<PJ1_PMN_NUM; i++)
	{
		if (flag_value & (0x1 << i))
		{
			switch (i)
			{
			case 0:	reg_id = COUNTER_PJ1_PMU_PMN0; reg = PJ1_PMU_PMN0; break;
			case 1:	reg_id = COUNTER_PJ1_PMU_PMN1; reg = PJ1_PMU_PMN1; break;
			case 2:	reg_id = COUNTER_PJ1_PMU_PMN2; reg = PJ1_PMU_PMN2; break;
			case 3:	reg_id = COUNTER_PJ1_PMU_PMN3; reg = PJ1_PMU_PMN3; break;
			default: break;
			}

			if (es[reg_id].calibration == false)
			{
				/* write css data in non-calibration mode */
				if (!buffer_full)
				{
					fill_css_data_head(css_data, pid, tid, reg_id, ts);

					buffer_full |= write_css_data(cpu, css_data);
				}
			}
			else
			{
				/* calibration mode: just count the overflows */
				es[reg_id].overflow++;
			}

			PJ1_WriteCounter(reg, es[reg_id].reset_value);
		}

	}

	if (!buffer_full)
	{
		/* re-enable the counters if the sample buffer is not full */
		cor_value[0] = PJ1_ReadCOR(PJ1_PMU_COR0);
		cor_value[1] = PJ1_ReadCOR(PJ1_PMU_COR1);
		cor_value[2] = PJ1_ReadCOR(PJ1_PMU_COR2);
		cor_value[3] = PJ1_ReadCOR(PJ1_PMU_COR3);

		for (i=0; i<PJ1_PMN_NUM; i++)
		{
			cor_value[i] |= 0x1;
		}

		PJ1_WriteCOR(PJ1_PMU_COR0, cor_value[0]);
		PJ1_WriteCOR(PJ1_PMU_COR1, cor_value[1]);
		PJ1_WriteCOR(PJ1_PMU_COR2, cor_value[2]);
		PJ1_WriteCOR(PJ1_PMU_COR3, cor_value[3]);
	}

	return IRQ_HANDLED;
}
Пример #5
0
/*
 * PMU overflow ISR (PXA2 variant).
 *
 * Disables the PMU via PMNC, acknowledges the overflow flags, then
 * emits one call-stack sample (or bumps a calibration counter) for
 * every counter that overflowed: CCNT first, then PMN0..PMN3.
 * Counters are reloaded with their reset values and the PMU is
 * re-enabled unless the sample buffer filled up.
 *
 * @regs:     trapped register state used to unwind the call stack
 * @pid/@tid: process/thread that was interrupted
 * @cpu:      CPU this interrupt fired on (selects the per-CPU buffer)
 * @ts:       timestamp stored in the sample header
 *
 * Returns IRQ_HANDLED unconditionally.
 */
static irqreturn_t px_pmu_isr(struct pt_regs * const regs,
                              unsigned int pid,
                              unsigned int tid,
                              unsigned int cpu,
                              unsigned long long ts)
{
	int i;
	int reg = 0;
	unsigned long flag_value;
	unsigned long pmnc_value;
	bool buffer_full = false;
	char ** bt_buffer = &per_cpu(g_bt_buffer, cpu);
	PXD32_CSS_Call_Stack_V2 *css_data = (PXD32_CSS_Call_Stack_V2 *)*bt_buffer;

	/* disable the counters while we process the overflow(s) */
	pmnc_value = ReadPMUReg(PXA2_PMU_PMNC);
	WritePMUReg(PXA2_PMU_PMNC, pmnc_value & ~PMNC_ENABLE_BIT);

	/* latch, then clear, all overflow flags */
	flag_value = ReadPMUReg(PXA2_PMU_FLAG);
	WritePMUReg(PXA2_PMU_FLAG, FLAG_OVERFLOW_BITS);

	/* unwind once; the same stack is shared by every overflowed counter */
	backtrace(regs, cpu, pid, tid, css_data);

	/* cycle counter overflow */
	if ((flag_value & CCNT_OVERFLAG_BIT) && es[COUNTER_PXA2_PMU_CCNT].enabled)
	{
		if (es[COUNTER_PXA2_PMU_CCNT].calibration == false)
		{
			/* write sample record in non-calibration mode */
			if (!buffer_full)
			{
				fill_css_data_head(css_data, pid, tid, COUNTER_PXA2_PMU_CCNT, ts);

				buffer_full |= write_css_data(cpu, css_data);
			}
		}
		else
		{
			/* calibration mode: just count the overflows */
			es[COUNTER_PXA2_PMU_CCNT].overflow++;
		}

		/* reset the counter value */
		WritePMUReg(PXA2_PMU_CCNT, es[COUNTER_PXA2_PMU_CCNT].reset_value);
	}

	/* event counters PMN0..PMN3 */
	for (i=0; i<PXA2_PMN_NUM; i++)
	{
		if (flag_value & (PMN0_OVERFLAG_BIT << i))
		{
			unsigned int reg_id = 0;

			switch (i)
			{
			case 0: reg_id = COUNTER_PXA2_PMU_PMN0; reg = PXA2_PMU_PMN0; break;
			case 1: reg_id = COUNTER_PXA2_PMU_PMN1; reg = PXA2_PMU_PMN1; break;
			case 2: reg_id = COUNTER_PXA2_PMU_PMN2; reg = PXA2_PMU_PMN2; break;
			case 3: reg_id = COUNTER_PXA2_PMU_PMN3; reg = PXA2_PMU_PMN3; break;
			default: break;
			}

			if (es[reg_id].calibration == false)
			{
				/* write sample record in non-calibration mode */
				if (!buffer_full)
				{
					fill_css_data_head(css_data, pid, tid, reg_id, ts);

					buffer_full |= write_css_data(cpu, css_data);
				}
			}
			else
			{
				/* calibration mode: just count the overflows */
				es[reg_id].overflow++;
			}

			/* reset the counter value */
			WritePMUReg(reg, es[reg_id].reset_value);
		}
	}

	if (!buffer_full)
	{
		/* re-enable the counters if the buffer is not full */
		WritePMUReg(PXA2_PMU_PMNC, pmnc_value | PMNC_ENABLE_BIT);
	}

	return IRQ_HANDLED;
}