void mt_trace_ISR_end(int irq)
{
	struct sched_block_event *b;
#ifdef CONFIG_MTK_SCHED_TRACERS
	struct task_struct *tsk = __raw_get_cpu_var(mtk_next_task);

	if (unlikely(!sched_stopped))
		trace_int_switch(tsk, irq, 0);
	__mt_irq_exit(irq);
#endif
	b = &__raw_get_cpu_var(ISR_mon);

	/* close out the current ISR event and check how long it ran */
	WARN_ON(b->cur_event != irq);
	b->last_event = b->cur_event;
	b->last_ts = b->cur_ts;
	b->last_te = sched_clock();
	b->cur_event = 0;
	b->cur_ts = 0;
	event_duration_check(b);
	aee_rr_rec_last_irq_exit(smp_processor_id(), irq, b->last_te);

	/* reset HRTimer function counter */
	b = &__raw_get_cpu_var(hrt_mon);
	reset_event_count(b);
}
void mt_trace_sft_end(void *func)
{
	struct sched_block_event *b;

	b = &__raw_get_cpu_var(sft_mon);
	WARN_ON(b->cur_event != (unsigned long)func);
	b->last_event = b->cur_event;
	b->last_ts = b->cur_ts;
	b->last_te = sched_clock();
	b->cur_event = 0;
	b->cur_ts = 0;
	event_duration_check(b);
}
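/*
 * Illustration only: a minimal sketch of what event_duration_check()
 * is expected to do with the fields filled in above (last_ts, last_te,
 * last_event).  The threshold value, its name (SKETCH_WARN_DUR_NS) and
 * the pr_warn() reporting are assumptions made for this sketch; the
 * real helper may use per-event-type thresholds and report differently.
 */
#define SKETCH_WARN_DUR_NS	(10 * 1000 * 1000)	/* 10 ms, illustrative */

static void event_duration_check_sketch(struct sched_block_event *b)
{
	u64 dur = b->last_te - b->last_ts;	/* duration of the event that just ended */

	if (dur > SKETCH_WARN_DUR_NS)
		pr_warn("event %lu ran too long: %llu ns\n",
			b->last_event, (unsigned long long)dur);
}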
void mt_trace_SoftIRQ_end(int sq_num)
{
	struct sched_block_event *b;

	b = &__raw_get_cpu_var(SoftIRQ_mon);

	/* close out the current SoftIRQ event and check how long it ran */
	WARN_ON(b->cur_event != sq_num);
	b->last_event = b->cur_event;
	b->last_ts = b->cur_ts;
	b->last_te = sched_clock();
	b->cur_event = 0;
	b->cur_ts = 0;
	event_duration_check(b);

	/* reset soft timer function counter */
	b = &__raw_get_cpu_var(sft_mon);
	reset_event_count(b);

	/* reset tasklet function counter */
	b = &__raw_get_cpu_var(tasklet_mon);
	reset_event_count(b);
}
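/*
 * For context: a minimal sketch of the start-side hook these *_end()
 * functions pair with.  It only shows how cur_event/cur_ts get
 * populated so that the WARN_ON() and duration check on exit make
 * sense; the body of the real mt_trace_ISR_start() may record more
 * state, so treat this as an assumption, not the actual implementation.
 */
void mt_trace_ISR_start_sketch(int irq)
{
	struct sched_block_event *b;

	b = &__raw_get_cpu_var(ISR_mon);
	b->cur_ts = sched_clock();		/* entry timestamp, paired with last_te on exit */
	b->cur_event = (unsigned long)irq;	/* matched against irq in mt_trace_ISR_end() */
}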