static irqreturn_t cp_to_ap_irq_handle(int irq, void *handle)
{
	struct ipc_spi_dev *dev = (struct ipc_spi_dev *)handle;

	if (cp2ap_sts()) {
		//irq_set_irq_type(irq, IRQF_TRIGGER_LOW);
		dev->rx_ctl = CP2AP_RASING;
		dev->bneedrcv = true;
		/* just for debug */
		dev->cp2ap[dev->irq_num % 100].status = 1;
		dev->cp2ap[dev->irq_num % 100].time = cpu_clock(0);
	} else {
		//irq_set_irq_type(irq, IRQF_TRIGGER_HIGH);
		dev->rx_ctl = CP2AP_FALLING;
		dev->bneedrcv = false;
		/* just for debug */
		dev->cp2ap[dev->irq_num % 100].status = 0;
		dev->cp2ap[dev->irq_num % 100].time = cpu_clock(0);
	}
	dev->irq_num++;
	wake_up(&(dev->wait));
	return IRQ_HANDLED;
}
Example #2
static irqreturn_t fastlogo_devices_vpp_isr(int irq, void *dev_id)
{
    int instat;
    HDL_semaphore *pSemHandle;
    u64 cpcb0_isr_time_current;

    ++fastlogo_ctx.count;
    logo_isr_count++;

    cpcb0_isr_time_current = cpu_clock(smp_processor_id());
    last_isr_interval = (unsigned) (cpcb0_isr_time_current - last_isr_time);
    last_isr_time = cpcb0_isr_time_current;

#if LOGO_TIME_PROFILE
    {
        u64 curr_interval;
        if (cpcb0_isr_time_previous) {
            curr_interval = cpcb0_isr_time_current - cpcb0_isr_time_previous;
            if (logo_tp_count < LOGO_TP_MAX_COUNT)
                lat[logo_tp_count++] = curr_interval;
        }
        cpcb0_isr_time_previous = cpcb0_isr_time_current;
    }
#endif

    /* VPP interrupt handling  */
    pSemHandle = dhub_semaphore(&VPP_dhubHandle.dhub);
    instat = semaphore_chk_full(pSemHandle, -1);

    if (bTST(instat, avioDhubSemMap_vpp_vppCPCB0_intr)) {
        /* our CPCB interrupt */
        /* clear interrupt */
        semaphore_pop(pSemHandle, avioDhubSemMap_vpp_vppCPCB0_intr, 1);
        semaphore_clr_full(pSemHandle, avioDhubSemMap_vpp_vppCPCB0_intr);

        if (logo_isr_count > 1)
            THINVPP_CPCB_ISR_service(thinvpp_obj, CPCB_1);
    }

#if LOGO_TIME_PROFILE
    if (logo_tp_count) {
        u64 curr_interval = cpu_clock(0) - cpcb0_isr_time_current;
        if ((logo_tp_count-1) < LOGO_TP_MAX_COUNT)
            lat2[logo_tp_count-1] = curr_interval;
    }
#endif

    return IRQ_HANDLED;
}
Example #3
/**
 * jz_nor_wait_busy - wait for the SPI NOR Write In Progress (WIP) flag to clear
 * @flash:		flash handle passed to jz_nor_read_status().
 * @max_busytime:	maximum time to wait, in ms.
 *
 * The SPI NOR WIP bit is set only while a WRSR, PP, CE, SE or BE
 * operation is in progress.
 */
static int jz_nor_wait_busy(struct jz_nor_local *flash, u32 max_busytime)
{
	int this_cpu = smp_processor_id();
	u64 t_start = cpu_clock(this_cpu); /* nanosecond timestamp */
	u64 m_sec = 0;
	while ((jz_nor_read_status(flash, 0) & STATUS_WIP)) {
		m_sec = cpu_clock(this_cpu) - t_start;
		do_div(m_sec, 1000000);
		if ((u32)m_sec >= max_busytime) {
			dev_dbg(&flash->spi->dev, "WIP wait busy timeout, max_busytime: %u ms\n", max_busytime);
			break;
		}
	}
	return (u32)m_sec < max_busytime ? 0 : -EIO;
}
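
The poll-until-clear timeout pattern above ports cleanly to userspace. A minimal sketch, assuming a hypothetical read_status() stub in place of jz_nor_read_status() and CLOCK_MONOTONIC standing in for cpu_clock():

#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define STATUS_WIP 0x01u

/* Hypothetical stand-in for jz_nor_read_status(): pretend the
 * device stays busy for the first few polls, then goes idle. */
static unsigned int read_status(void)
{
	static int polls;
	return (++polls < 5) ? STATUS_WIP : 0;
}

static uint64_t monotonic_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Returns 0 on success, -1 if the busy bit never cleared in time. */
static int wait_busy(uint32_t max_busytime_ms)
{
	uint64_t t_start = monotonic_ns();

	while (read_status() & STATUS_WIP) {
		uint64_t elapsed_ms = (monotonic_ns() - t_start) / 1000000;
		if (elapsed_ms >= max_busytime_ms) {
			fprintf(stderr, "WIP wait timeout after %u ms\n",
				(unsigned int)max_busytime_ms);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	return wait_busy(100) ? 1 : 0;
}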
Example #4
/*
 * handle_IRQ handles all hardware IRQs.  Decoded IRQs should
 * not come via this function.  Instead, they should provide their
 * own 'handler'.  Used by platform code implementing C-based 1st
 * level decoding.
 */
void handle_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

#ifdef CONFIG_SEC_DEBUG
	int cpu = smp_processor_id();
	unsigned long long start_time = cpu_clock(cpu);
#endif

	perf_mon_interrupt_in();
	irq_enter();

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (unlikely(irq >= nr_irqs)) {
		if (printk_ratelimit())
			printk(KERN_WARNING "Bad IRQ%u\n", irq);
		ack_bad_irq(irq);
	} else {
		generic_handle_irq(irq);
	}

	/* AT91 specific workaround */
	irq_finish(irq);

	irq_exit();
#ifdef CONFIG_SEC_DEBUG
	sec_debug_irq_enterexit_log(irq, start_time);
#endif

	set_irq_regs(old_regs);
	perf_mon_interrupt_out();
}
Example #5
void __sec_debug_task_sched_log(int cpu, struct task_struct *task,
						char *msg)
{
	unsigned i;

	if (!secdbg_log)
		return;

	if (!task && !msg)
		return;

	i = atomic_inc_return(&(secdbg_log->idx_sched[cpu]))
		& (SCHED_LOG_MAX - 1);
	secdbg_log->sched[cpu][i].time = cpu_clock(cpu);
	if (task) {
		strlcpy(secdbg_log->sched[cpu][i].comm, task->comm,
			sizeof(secdbg_log->sched[cpu][i].comm));
		secdbg_log->sched[cpu][i].pid = task->pid;
		secdbg_log->sched[cpu][i].pTask = task;
	} else {
		strlcpy(secdbg_log->sched[cpu][i].comm, msg,
			sizeof(secdbg_log->sched[cpu][i].comm));
		secdbg_log->sched[cpu][i].pid = -1;
		secdbg_log->sched[cpu][i].pTask = NULL;
	}
}
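
The atomic_inc_return(...) & (SCHED_LOG_MAX - 1) idiom above is a lock-free ring log: it works whenever the size is a power of two, because masking the free-running counter always lands in a valid slot. A minimal userspace model using C11 atomics (the entry layout and names are illustrative, not the sec_debug structures):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define LOG_MAX 8	/* must be a power of two for the mask trick */

struct entry {
	uint64_t time;
	int pid;
};

static atomic_uint log_idx;
static struct entry ring[LOG_MAX];

/* Atomically claim a slot, then mask the counter into the ring.
 * The kernel's atomic_inc_return() yields the new value while
 * atomic_fetch_add() yields the old one; either cycles all slots. */
static void log_event(uint64_t time, int pid)
{
	unsigned int i = atomic_fetch_add(&log_idx, 1) & (LOG_MAX - 1);

	ring[i].time = time;
	ring[i].pid = pid;
}

int main(void)
{
	for (int n = 0; n < 10; n++)
		log_event(n * 100, n);	/* oldest entries get overwritten */
	for (int i = 0; i < LOG_MAX; i++)
		printf("slot %d: time=%llu pid=%d\n", i,
		       (unsigned long long)ring[i].time, ring[i].pid);
	return 0;
}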
Example #6
u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now;

	local_irq_save(flags);

	this_cpu = raw_smp_processor_id();
	now = cpu_clock(this_cpu);
	if (unlikely(in_nmi()))
		goto out;

	arch_spin_lock(&trace_clock_struct.lock);

	if ((s64)(now - trace_clock_struct.prev_time) < 0)
		now = trace_clock_struct.prev_time + 1;

	trace_clock_struct.prev_time = now;

	arch_spin_unlock(&trace_clock_struct.lock);

 out:
	local_irq_restore(flags);

	return now;
}
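
What makes this clock globally monotonic is the clamp against prev_time: a reading that races behind another CPU's is nudged one tick forward rather than appearing to go backwards. A rough userspace model, with a pthread mutex standing in for arch_spin_lock() and CLOCK_MONOTONIC_RAW for cpu_clock():

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t clk_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t prev_time;

static uint64_t raw_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Never returns a value smaller than one previously handed out,
 * regardless of which thread asks. */
static uint64_t global_clock(void)
{
	uint64_t now = raw_ns();

	pthread_mutex_lock(&clk_lock);
	if ((int64_t)(now - prev_time) < 0)	/* raced behind: clamp */
		now = prev_time + 1;
	prev_time = now;
	pthread_mutex_unlock(&clk_lock);

	return now;
}

int main(void)
{
	printf("%llu\n%llu\n", (unsigned long long)global_clock(),
	       (unsigned long long)global_clock());
	return 0;	/* build with -pthread */
}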
Example #7
GED_ERROR ged_monitor_3D_fence_add(int fence_fd)
{
    int err;
    GED_MONITOR_3D_FENCE* psMonitor;

    psMonitor = (GED_MONITOR_3D_FENCE*)ged_alloc(sizeof(GED_MONITOR_3D_FENCE));
    if (!psMonitor)
    {
        return GED_ERROR_OOM;
    }

    sync_fence_waiter_init(&psMonitor->sSyncWaiter, ged_sync_cb);
    INIT_WORK(&psMonitor->sWork, ged_monitor_3D_fence_work_cb);
    psMonitor->psSyncFence = sync_fence_fdget(fence_fd);
    if (NULL == psMonitor->psSyncFence)
    {
        ged_free(psMonitor, sizeof(GED_MONITOR_3D_FENCE));
        return GED_ERROR_INVALID_PARAMS;
    }

    err = sync_fence_wait_async(psMonitor->psSyncFence, &psMonitor->sSyncWaiter);

    if ((1 == err) || (0 > err))
    {
        sync_fence_put(psMonitor->psSyncFence);
        ged_free(psMonitor, sizeof(GED_MONITOR_3D_FENCE));
    }
    else if (0 == err)
    {
        int iCount = atomic_add_return(1, &g_i32Count);
        if (iCount > 1)
        {
            if (0 == ged_monitor_3D_fence_disable)
            {
                //unsigned int uiFreqLevelID;
                //if (mtk_get_bottom_gpu_freq(&uiFreqLevelID))
                {
                    //if (uiFreqLevelID != 4)
                    {
#ifdef CONFIG_GPU_TRACEPOINTS
                        if (ged_monitor_3D_fence_systrace)
                        {
                            unsigned long long t = cpu_clock(smp_processor_id());
                            trace_gpu_sched_switch("Smart Boost", t, 1, 0, 1);
                        }
#endif
                        mtk_set_bottom_gpu_freq(4);
                    }
                }
            }
        }
    }

    if (ged_monitor_3D_fence_debug > 0)
    {
        GED_LOGI("[+]3D fences count = %d\n", atomic_read(&g_i32Count));
    }

    return GED_OK;
}
Example #8
static void ged_monitor_3D_fence_work_cb(struct work_struct *psWork)
{
    GED_MONITOR_3D_FENCE *psMonitor;

    if (atomic_sub_return(1, &g_i32Count) < 1)
    {
        if (0 == ged_monitor_3D_fence_disable)
        {
            //unsigned int uiFreqLevelID;
            //if (mtk_get_bottom_gpu_freq(&uiFreqLevelID))
            {
                //if (uiFreqLevelID > 0)
                {
                    mtk_set_bottom_gpu_freq(0);
#ifdef CONFIG_GPU_TRACEPOINTS
                    if (ged_monitor_3D_fence_systrace)
                    {
                        unsigned long long t = cpu_clock(smp_processor_id());
                        trace_gpu_sched_switch("Smart Boost", t, 0, 0, 1);
                    }
#endif
                }
            }
        }
    }

    if (ged_monitor_3D_fence_debug > 0)
    {
        GED_LOGI("[-]3D fences count = %d\n", atomic_read(&g_i32Count));
    }

    psMonitor = GED_CONTAINER_OF(psWork, GED_MONITOR_3D_FENCE, sWork);
    sync_fence_put(psMonitor->psSyncFence);
    ged_free(psMonitor, sizeof(GED_MONITOR_3D_FENCE));
}
Example #9
static inline
void _enqueue(struct aio_threadinfo *tinfo, struct aio_mref_aspect *mref_a, int prio, bool at_end)
{
	unsigned long flags;
#if 1
	prio++;
	if (unlikely(prio < 0)) {
		prio = 0;
	} else if (unlikely(prio >= MARS_PRIO_NR)) {
		prio = MARS_PRIO_NR - 1;
	}
#else
	prio = 0;
#endif

	mref_a->enqueue_stamp = cpu_clock(raw_smp_processor_id());

	traced_lock(&tinfo->lock, flags);

	if (at_end) {
		list_add_tail(&mref_a->io_head, &tinfo->mref_list[prio]);
	} else {
		list_add(&mref_a->io_head, &tinfo->mref_list[prio]);
	}
	tinfo->queued[prio]++;
	atomic_inc(&tinfo->queued_sum);

	traced_unlock(&tinfo->lock, flags);

	atomic_inc(&tinfo->total_enqueue_count);

	wake_up_interruptible_all(&tinfo->event);
}
Example #10
void wq_get_sample(struct work_struct *work)
{
	int cpu;
	unsigned long long stamp;
	unsigned int value[11];
	//rnd_num[0]: Total High, rnd_num[1]: Total Low
	//rnd_num[2]: Good Duration, rnd_num[3]: Bad Duration
	unsigned char rnd_num[4];

	get_random_bytes(rnd_num, 4);

	cpu = smp_processor_id();
	stamp = cpu_clock(cpu);

	value[0] = rnd_num[0] + rnd_num[1];		//Total Time
	value[1] = rnd_num[0];					//Total High Time
	value[2] = rnd_num[0]/10;				//Longest High Time
	value[3] = rnd_num[2] + rnd_num[3];		//Count of Low to High
	value[4] = rnd_num[2];					//Count of Good Duration
	value[5] = 0;
	value[6] = 0;
	value[7] = 0;
	value[8] = 0;
	value[9] = value[0] ? (value[1]*100)/value[0] : 0;	//Total High Percentage
	value[10] = value[0] ? (value[2]*100)/value[0] : 0;	//Longest High Percentage
	mt_dcm(stamp, 11, value);
}
Example #11
void trac_logs(char *s)
{
	unsigned long long t;
	unsigned long nanosec_rem;

	t = cpu_clock(UINT_MAX);
	nanosec_rem = do_div(t, 1000000000);
	ptrac_buf += sprintf(ptrac_buf, "[%5lu.%06lu] %s\n",
			     (unsigned long)t, nanosec_rem / 1000, s);
}
Example #12
void mars_trace(struct mref_object *mref, const char *info)
{
	int index = mref->ref_traces;
	if (likely(index < MAX_TRACES)) {
		mref->ref_trace_stamp[index] = cpu_clock(raw_smp_processor_id());
		mref->ref_trace_info[index] = info;
		mref->ref_traces++;
	}
}
Example #13
static int shrink_set(const char *arg, const struct kernel_param *kp)
{
	int cpu = smp_processor_id();
	unsigned long long t1, t2;
	int total_pages, available_pages;

	param_set_bool(arg, kp);

	if (shrink_pp) {
		t1 = cpu_clock(cpu);
		shrink_page_pools(&total_pages, &available_pages);
		t2 = cpu_clock(cpu);
		pr_info("shrink page pools: time=%lldns, "
			"total_pages_released=%d, free_pages_available=%d\n",
			t2-t1, total_pages, available_pages);
	}
	return 0;
}
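
Bracketing a call with two clock reads, as shrink_set() does, is the simplest latency probe. A self-contained userspace sketch of the same pattern, with clock_gettime() standing in for cpu_clock():

#include <stdio.h>
#include <stdint.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main(void)
{
	volatile unsigned long sink = 0;	/* keep the loop alive */
	uint64_t t1, t2;

	t1 = now_ns();
	for (unsigned long i = 0; i < 1000000; i++)
		sink += i;
	t2 = now_ns();

	printf("loop took %llu ns (sink=%lu)\n",
	       (unsigned long long)(t2 - t1), sink);
	return 0;
}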
Example #14
/**
 * wait until the next period.
 */
static void dag_wait_period(resch_task_t *rt)
{
    struct timespec ts_period;
    if (rt->release_time > jiffies) {
        jiffies_to_timespec(rt->release_time - jiffies, &ts_period);
    }
    else {
        ts_period.tv_sec = 0;
        ts_period.tv_nsec = 0;
    }

    if (rt->task->dl.flags & SCHED_EXHAUSTIVE) {
        rt->task->dl.deadline = cpu_clock(smp_processor_id());
    }
    sched_wait_interval(!TIMER_ABSTIME, &ts_period, NULL);
    rt->dl_sched_release_time = cpu_clock(smp_processor_id());
}
Example #15
/**
 *	read CPU time
 *	@return	unsigned int		CPU time in milliseconds
 **/
unsigned int MTTLOG_GetTime(void)
{
	unsigned long long t;

	t = cpu_clock(0);
	do_div(t, 1000000);

	return (unsigned int)t;
}
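
do_div() is used here instead of a plain / because 64-bit division on a 32-bit kernel would emit calls to libgcc helpers the kernel does not link; the macro divides the 64-bit lvalue in place and returns the remainder. A userspace model of those semantics (a GCC statement expression, not the kernel implementation):

#include <stdio.h>
#include <stdint.h>

/* Divides the 64-bit lvalue n in place by base and evaluates
 * to the 32-bit remainder, mirroring the kernel's do_div(). */
#define do_div_model(n, base) ({			\
	uint32_t __rem = (uint32_t)((n) % (base));	\
	(n) /= (base);					\
	__rem;						\
})

int main(void)
{
	uint64_t t = 5123456789ull;	/* pretend nanosecond timestamp */
	uint32_t rem = do_div_model(t, 1000000000u);

	/* t now holds whole seconds, rem the leftover nanoseconds;
	 * the same split produces the "[%5lu.%06lu]" stamps used
	 * elsewhere in these examples. */
	printf("[%5lu.%06lu]\n", (unsigned long)t,
	       (unsigned long)(rem / 1000));
	return 0;
}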
Example #16
unsigned long long ged_get_time(void)
{
	unsigned long long temp;

	preempt_disable();
	temp = cpu_clock(smp_processor_id());
	preempt_enable();

	return temp;
}
Example #17
static int MTKPP_PrintTime(char *buf, int n)
{
	/* copied & modified from kernel/printk.c */
	unsigned long long t;
	unsigned long nanosec_rem;

	t = cpu_clock(smp_processor_id());
	nanosec_rem = do_div(t, 1000000000);
	
	return snprintf(buf, n, "[%5lu.%06lu] ", (unsigned long) t, nanosec_rem / 1000);
}
Example #18
static char *rmnet_usb_ctrl_get_timestamp(char *tbuf)
{
	unsigned long long t;
	unsigned long nanosec_rem;

	t = cpu_clock(smp_processor_id());
	nanosec_rem = do_div(t, 1000000000)/1000;
	scnprintf(tbuf, TIME_BUF_LEN, "[%5lu.%06lu] ", (unsigned long)t,
		nanosec_rem);
	return tbuf;
}
Example #19
/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from cpu_clock().
 */
static unsigned long
rcu_random(struct rcu_random_state *rrsp)
{
	if (--rrsp->rrs_count < 0) {
		rrsp->rrs_state +=
			(unsigned long)cpu_clock(raw_smp_processor_id());
		rrsp->rrs_count = RCU_RANDOM_REFRESH;
	}
	rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
	return swahw32(rrsp->rrs_state);
}
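
A minimal userspace version of the same crude generator, reseeding from a clock the way rcu_random() leans on cpu_clock(); the constants here are illustrative (the classic C library LCG pair, not the RCU_RANDOM_* values), and the kernel's final swahw32() word swap is omitted:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define REFRESH	10000		/* draws between reseeds */
#define MULT	1103515245UL	/* LCG multiplier */
#define ADD	12345UL		/* LCG increment */

struct rand_state {
	unsigned long state;
	long count;
};

static unsigned long crude_random(struct rand_state *rs)
{
	if (--rs->count < 0) {
		/* occasional help from a clock, as with cpu_clock() */
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		rs->state += (unsigned long)ts.tv_nsec;
		rs->count = REFRESH;
	}
	rs->state = rs->state * MULT + ADD;
	return rs->state;
}

int main(void)
{
	struct rand_state rs = { 0, 0 };

	for (int i = 0; i < 4; i++)
		printf("%lu\n", crude_random(&rs));
	return 0;
}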
Example #20
void trac_log(long lba, int len, int *pbuf, int mod)
{
	unsigned long long t;
	unsigned long nanosec_rem;

	t = cpu_clock(UINT_MAX);
	nanosec_rem = do_div(t, 1000000000);
	if (mod)
		ptrac_buf += sprintf(ptrac_buf, "[%5lu.%06lu] W %ld %d %8x %8x\n",
				     (unsigned long)t, nanosec_rem / 1000, lba, len, pbuf[0], pbuf[1]);
	else
		ptrac_buf += sprintf(ptrac_buf, "[%5lu.%06lu] R %ld %d %8x %8x\n",
				     (unsigned long)t, nanosec_rem / 1000, lba, len, pbuf[0], pbuf[1]);
}
Example #21
void __sprd_debug_task_log(int cpu, struct task_struct *task)
{
	unsigned i;

	i = atomic_inc_return(&task_log_idx[cpu]) &
	    (ARRAY_SIZE(psprd_debug_log->task[0]) - 1);
	psprd_debug_log->task[cpu][i].time = cpu_clock(cpu);
#ifdef CP_DEBUG
	psprd_debug_log->task[cpu][i].sys_cnt = get_sys_cnt();
#endif
	strlcpy(psprd_debug_log->task[cpu][i].comm, task->comm,
		sizeof(psprd_debug_log->task[cpu][i].comm));
	psprd_debug_log->task[cpu][i].pid = task->pid;
}
Example #22
void sec_debug_dcvs_log(int cpu_no, unsigned int prev_freq,
						unsigned int new_freq)
{
	unsigned int i;
	if (!secdbg_log)
		return;

	i = atomic_inc_return(&(secdbg_log->dcvs_log_idx[cpu_no]))
		& (DCVS_LOG_MAX - 1);
	secdbg_log->dcvs_log[cpu_no][i].cpu_no = cpu_no;
	secdbg_log->dcvs_log[cpu_no][i].prev_freq = prev_freq;
	secdbg_log->dcvs_log[cpu_no][i].new_freq = new_freq;
	secdbg_log->dcvs_log[cpu_no][i].time = cpu_clock(cpu_no);
}
Example #23
void __sprd_debug_irq_log(unsigned int irq, void *fn, int en)
{
	int cpu = raw_smp_processor_id();
	unsigned i;

	i = atomic_inc_return(&irq_log_idx[cpu]) &
	    (ARRAY_SIZE(psprd_debug_log->irq[0]) - 1);
	psprd_debug_log->irq[cpu][i].time = cpu_clock(cpu);
#ifdef CP_DEBUG
	psprd_debug_log->irq[cpu][i].sys_cnt = get_sys_cnt();
#endif
	psprd_debug_log->irq[cpu][i].irq = irq;
	psprd_debug_log->irq[cpu][i].fn = (void *)fn;
	psprd_debug_log->irq[cpu][i].en = en;
}
Example #24
void sec_debug_timer_log(unsigned int type, int int_lock, void *fn)
{
	int cpu = smp_processor_id();
	unsigned i;

	if (!secdbg_log)
		return;

	i = atomic_inc_return(&(secdbg_log->idx_timer[cpu]))
		& (SCHED_LOG_MAX - 1);
	secdbg_log->timer_log[cpu][i].time = cpu_clock(cpu);
	secdbg_log->timer_log[cpu][i].type = type;
	secdbg_log->timer_log[cpu][i].int_lock = int_lock;
	secdbg_log->timer_log[cpu][i].fn = (void *)fn;
}
Example #25
void timer_irq_handler(struct irq_action_s *action)
{
	register struct cpu_s *cpu;
	register struct device_s *timer;

	cpu = current_cpu;

	cpu_trace_write(cpu, timer_irq_handler);

	cpu_clock(cpu);
	timer = action->dev;
	timer_reset_irq(timer);
	timer_set_period(timer, CPU_CLOCK_TICK);
	timer_run(timer, 1);
}
Example #26
void __sprd_debug_hrtimer_log(struct hrtimer *timer,
		     enum hrtimer_restart (*fn) (struct hrtimer *), int en)
{
	int cpu = raw_smp_processor_id();
	unsigned i;

	i = atomic_inc_return(&hrtimer_log_idx[cpu]) &
	    (ARRAY_SIZE(psprd_debug_log->hrtimers[0]) - 1);
	psprd_debug_log->hrtimers[cpu][i].time = cpu_clock(cpu);
#ifdef CP_DEBUG
	psprd_debug_log->hrtimers[cpu][i].sys_cnt = get_sys_cnt();
#endif
	psprd_debug_log->hrtimers[cpu][i].timer = timer;
	psprd_debug_log->hrtimers[cpu][i].fn = fn;
	psprd_debug_log->hrtimers[cpu][i].en = en;
}
Example #27
void __sprd_debug_work_log(struct worker *worker,
			  struct work_struct *work, work_func_t f)
{
	int cpu = raw_smp_processor_id();
	unsigned i;

	i = atomic_inc_return(&work_log_idx[cpu]) &
	    (ARRAY_SIZE(psprd_debug_log->work[0]) - 1);
	psprd_debug_log->work[cpu][i].time = cpu_clock(cpu);
#ifdef CP_DEBUG
	psprd_debug_log->work[cpu][i].sys_cnt = get_sys_cnt();
#endif
	psprd_debug_log->work[cpu][i].worker = worker;
	psprd_debug_log->work[cpu][i].work = work;
	psprd_debug_log->work[cpu][i].f = f;
}
Example #28
void *pvr_trcmd_alloc(unsigned type, int pid, const char *pname, size_t size)
{
	struct tbuf_frame *f;
	size_t total_size;

	size = ALIGN(size, __alignof__(*f));
	total_size = sizeof(*f) + size;
	f = tbuf_get_space(total_size);
	f->size = total_size;
	f->type = type;
	f->pid = pid;
	f->time = cpu_clock(smp_processor_id());
	strlcpy(f->pname, pname, sizeof(f->pname));

	return f + 1;
}
Example #29
void sec_debug_fuelgauge_log(unsigned int voltage, unsigned short soc,
				unsigned short charging_status)
{
	unsigned int i;
	int cpu = smp_processor_id();

	if (!secdbg_log)
		return;

	i = atomic_inc_return(&(secdbg_log->fg_log_idx))
		& (FG_LOG_MAX - 1);
	secdbg_log->fg_log[i].time = cpu_clock(cpu);
	secdbg_log->fg_log[i].voltage = voltage;
	secdbg_log->fg_log[i].soc = soc;
	secdbg_log->fg_log[i].charging_status = charging_status;
}
Example #30
static unsigned long long get_kernel_time(void)
{
	int this_cpu;
	unsigned long flags;
	unsigned long long time;

	preempt_disable();
	raw_local_irq_save(flags);

	this_cpu = smp_processor_id();
	time = cpu_clock(this_cpu);

	/* release in reverse order of acquisition */
	raw_local_irq_restore(flags);
	preempt_enable();

	return time;
}