/*
 * Tracepoint probe handling manual start/stop requests for the MT65xx
 * monitor.  manual_start == 1 enables the monitor; manual_start == 0
 * stops it and logs the collected monitor data into the trace buffer.
 * Any other value is ignored.
 */
static void
probe_mt65xx_mon_manual_tracepoint(void *ignore, unsigned int manual_start)
{
	struct trace_array_cpu *cpu_data;
	unsigned long irq_flags;
	int this_cpu;
	int preempt;

	/* Monitor must be referenced, enabled and not stopped. */
	if (unlikely(!mt65xx_mon_ref))
		return;
	if (!mt65xx_mon_enabled || mt65xx_mon_stopped)
		return;

	/* Only 0 (stop) and 1 (start) are valid requests. */
	if (manual_start != 0 && manual_start != 1)
		return;

	tracing_record_cmdline(current);

	/* No-op if the requested state is already in effect. */
	if (manual_start == is_manual_start)
		return;

	if (manual_start == 1) {
		/* START: just enable the MT65xx monitor. */
		mtk_mon->enable();
		return;
	}

	/* STOP: record the monitor data into the trace buffer. */
	preempt = preempt_count();
	local_irq_save(irq_flags);
	this_cpu = raw_smp_processor_id();
	/*
	 * trace_array dropped its per-cpu "data" array in v3.10; newer
	 * kernels keep per-cpu data behind trace_buffer.data.
	 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
	cpu_data = mt65xx_mon_trace->data[this_cpu];
#else
	cpu_data = per_cpu_ptr(mt65xx_mon_trace->trace_buffer.data, this_cpu);
#endif
	if (likely(!atomic_read(&cpu_data->disabled)))
		tracing_mt65xx_mon_manual_stop(mt65xx_mon_trace, irq_flags,
					       preempt);
	local_irq_restore(irq_flags);
}
static void probe_mt65xx_mon_manual_tracepoint(void *ignore, unsigned int manual_start)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	if (unlikely(!mt65xx_mon_ref))
		return;

	if (!mt65xx_mon_enabled || mt65xx_mon_stopped)
		return;

	if ((manual_start != 0) && (manual_start != 1))
		return;

	tracing_record_cmdline(current);

	if (manual_start == is_manual_start)/* if already started or stopped */
		return;

	if (manual_start == 1) {
		//for START operation, only enable mt65xx monitor
		mtk_mon->enable();
		return;
	} else {
		//for STOP operation. log monitor data into buffer
		pc = preempt_count();
		local_irq_save(flags);
		cpu = raw_smp_processor_id();
		data = mt65xx_mon_trace->data[cpu];
		if (likely(!atomic_read(&data->disabled)))
			tracing_mt65xx_mon_manual_stop(mt65xx_mon_trace, flags, pc);
		local_irq_restore(flags);
	}
}