unsigned long measure_bw_and_set_irq(void)
{
	long r_mbps, w_mbps, mbps;
	ktime_t ts;
	unsigned int us;

	/*
	 * Since we are stopping the counters, we don't want this short work
	 * to be interrupted by other tasks and cause the measurements to be
	 * wrong. Not blocking interrupts to avoid affecting interrupt
	 * latency and since they should be short anyway because they run in
	 * atomic context.
	 */
	preempt_disable();

	ts = ktime_get();
	us = ktime_to_us(ktime_sub(ts, prev_ts));
	if (!us)
		us = 1;

	mon_disable(RD_MON);
	mon_disable(WR_MON);

	r_mbps = mon_get_count(RD_MON, prev_r_start_val);
	r_mbps = beats_to_mbps(r_mbps, us);
	w_mbps = mon_get_count(WR_MON, prev_w_start_val);
	w_mbps = beats_to_mbps(w_mbps, us);

	prev_r_start_val = mon_set_limit_mbyte(RD_MON, to_limit(r_mbps));
	prev_w_start_val = mon_set_limit_mbyte(WR_MON, to_limit(w_mbps));
	prev_ts = ts;

	mon_enable(RD_MON);
	mon_enable(WR_MON);

	preempt_enable();

	mbps = r_mbps + w_mbps;
	pr_debug("R/W/BW/us = %ld/%ld/%ld/%d\n", r_mbps, w_mbps, mbps, us);

	return mbps;
}
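Nearly every example on this page uses the same elapsed-time idiom: snapshot a ktime_t with ktime_get(), subtract the earlier snapshot with ktime_sub(), and convert the delta to microseconds with ktime_to_us(). The sketch below shows that pattern as a self-contained userspace analogue, with clock_gettime(CLOCK_MONOTONIC) standing in for the kernel-only ktime_get(); it is illustrative only, not code from any of the drivers shown here.

#include <stdio.h>
#include <stdint.h>
#include <time.h>
#include <unistd.h>

/* Userspace stand-in for ktime_to_us(ktime_sub(end, start)). */
static int64_t elapsed_us(const struct timespec *start, const struct timespec *end)
{
	return (int64_t)(end->tv_sec - start->tv_sec) * 1000000 +
	       (end->tv_nsec - start->tv_nsec) / 1000;
}

int main(void)
{
	struct timespec start, end;

	clock_gettime(CLOCK_MONOTONIC, &start);	/* plays the role of prev_ts = ktime_get() */
	usleep(1000);				/* the work being measured */
	clock_gettime(CLOCK_MONOTONIC, &end);

	printf("elapsed = %lld us\n", (long long)elapsed_us(&start, &end));
	return 0;
}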
Example #2
static int poll_idle(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	ktime_t	t1, t2;
	s64 diff;

	t1 = ktime_get();
	local_irq_enable();
	while (!need_resched())
		cpu_relax();

	t2 = ktime_get();
	diff = ktime_to_us(ktime_sub(t2, t1));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	return index;
}
Example #3
/**
 * cpuidle_enter_state - enter the state and update stats
 * @dev: cpuidle device for this cpu
 * @drv: cpuidle driver for this cpu
 * @index: index into drv->states of the state to enter
 */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
			int index)
{
	int entered_state;

	struct cpuidle_state *target_state = &drv->states[index];
	ktime_t time_start, time_end;
	s64 diff;

	cpuidle_set_current_state(dev->cpu, target_state->exit_latency);

	time_start = ktime_get();

	entered_state = target_state->enter(dev, drv, index);

	time_end = ktime_get();

	cpuidle_set_current_state(dev->cpu, 0);

	local_irq_enable();

	diff = ktime_to_us(ktime_sub(time_end, time_start));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	if (entered_state >= 0) {
		/* Update cpuidle counters */
		/* This can be moved to within driver enter routine
		 * but that results in multiple copies of same code.
		 */
		dev->states_usage[entered_state].time +=
				(unsigned long long)dev->last_residency;
		dev->states_usage[entered_state].usage++;
	} else {
		dev->last_residency = 0;
	}

	return entered_state;
}
Example #4
/* Do rtt sampling needed for Veno. */
static void tcp_veno_pkts_acked(struct sock *sk, u32 cnt, ktime_t last)
{
	struct veno *veno = inet_csk_ca(sk);
	u32 vrtt;

	if (ktime_equal(last, net_invalid_timestamp()))
		return;

	/* Never allow zero rtt or baseRTT */
	vrtt = ktime_to_us(net_timedelta(last)) + 1;

	/* Filter to find propagation delay: */
	if (vrtt < veno->basertt)
		veno->basertt = vrtt;

	/* Find the min rtt during the last rtt to find
	 * the current prop. delay + queuing delay:
	 */
	veno->minrtt = min(veno->minrtt, vrtt);
	veno->cntrtt++;
}
static int cpuboost_cpu_callback(struct notifier_block *cpu_nb,
				 unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		break;
	case CPU_ONLINE:
		if (!hotplug_boost || !input_boost_enabled ||
		     work_pending(&input_boost_work))
			break;
		pr_debug("Hotplug boost for CPU%d\n", (int)hcpu);
		queue_work(cpu_boost_wq, &input_boost_work);
		last_input_time = ktime_to_us(ktime_get());
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
void kgsl_pwrscale_wake(struct kgsl_device *device)
{
	struct kgsl_power_stats stats;
	BUG_ON(!mutex_is_locked(&device->mutex));

	if (!device->pwrscale.enabled)
		return;
	/* clear old stats before waking */
	memset(&device->pwrscale.accum_stats, 0,
		sizeof(device->pwrscale.accum_stats));

	/* and any hw activity from waking up */
	device->ftbl->power_stats(device, &stats);

	device->pwrscale.time = ktime_to_us(ktime_get());

	device->pwrscale.next_governor_call = 0;

	/* to call devfreq_resume_device() from a kernel thread */
	queue_work(device->pwrscale.devfreq_wq,
		&device->pwrscale.devfreq_resume_ws);
}
Example #7
void MonitorSignalEvent        (monitor_event_code_t    EventCode,
                                unsigned int            Parameters[MONITOR_PARAMETER_COUNT],
                                const char*             Description)
{
    unsigned int                DeviceId        = 0;
    struct DeviceContext_s*     Context         = GetDeviceContext (DeviceId);

    // No context means the driver has not been installed.
    if (!Context)
    {
            //MONITOR_ERROR("Invalid monitor device %d\n", DeviceId);
        return;
    }

    MonitorRecordEvent         (Context,
                                0,
                                EventCode,
                                (unsigned long long)ktime_to_us (ktime_get ()),
                                Parameters,
                                Description);

}
Example #8
static int bl_cpuidle_simple_enter(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	ktime_t time_start, time_end;
	s64 diff;

	time_start = ktime_get();

	cpu_do_idle();

	time_end = ktime_get();

	local_irq_enable();

	diff = ktime_to_us(ktime_sub(time_end, time_start));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	return index;
}
Example #9
/*
 * kgsl_pwrscale_wake - notify governor that device is going on
 * @device: The device
 *
 * Called when the device is returning to an active state.
 */
void kgsl_pwrscale_wake(struct kgsl_device *device)
{
	struct kgsl_power_stats stats;
	BUG_ON(!mutex_is_locked(&device->mutex));

	if (!device->pwrscale.enabled)
		return;
	/* clear old stats before waking */
	memset(&device->pwrscale.accum_stats, 0,
		sizeof(device->pwrscale.accum_stats));

	/* and any hw activity from waking up */
	device->ftbl->power_stats(device, &stats);

	device->pwrscale.time = ktime_to_us(ktime_get());

	device->pwrscale.next_governor_call = 0;

	/* to call devfreq_resume_device() from a kernel thread */
	queue_work(device->pwrscale.devfreq_wq,
		&device->pwrscale.devfreq_resume_ws);
}
Example #10
/**
 * mei_txe_aliveness_poll - waits for aliveness to settle
 *
 * @dev: the device structure
 * @expected: expected aliveness value
 *
 * Polls for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be set
 *
 * Return: 0 if the expected value was received, -ETIME otherwise
 */
static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	ktime_t stop, start;

	start = ktime_get();
	stop = ktime_add(start, ms_to_ktime(SEC_ALIVENESS_WAIT_TIMEOUT));
	do {
		hw->aliveness = mei_txe_aliveness_get(dev);
		if (hw->aliveness == expected) {
			dev->pg_event = MEI_PG_EVENT_IDLE;
			dev_dbg(dev->dev, "aliveness settled after %lld usecs\n",
				ktime_to_us(ktime_sub(ktime_get(), start)));
			return 0;
		}
		usleep_range(20, 50);
	} while (ktime_before(ktime_get(), stop));

	dev->pg_event = MEI_PG_EVENT_IDLE;
	dev_err(dev->dev, "aliveness timed out\n");
	return -ETIME;
}
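The aliveness poll above is a deadline-based busy-poll: compute stop = start + timeout once, then re-check the condition and sleep briefly until either the condition holds or ktime_before(ktime_get(), stop) fails. Below is a minimal, self-contained userspace sketch of that structure; the condition stub and the 30 µs sleep are placeholders, and clock_gettime() stands in for ktime_get(). It illustrates the pattern only, not the mei driver's code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static int64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

/* Placeholder condition; the real driver reads a hardware register here. */
static bool condition_reached(void)
{
	static int calls;

	return ++calls > 100;	/* pretend the hardware settles after ~100 polls */
}

/* Returns 0 if the condition was seen before the deadline, -1 on timeout. */
static int poll_with_deadline(int64_t timeout_us)
{
	int64_t stop = now_us() + timeout_us;	/* stop = ktime_add(start, timeout) */

	do {
		if (condition_reached())
			return 0;
		usleep(30);			/* mirrors usleep_range(20, 50) */
	} while (now_us() < stop);		/* ktime_before(ktime_get(), stop) */

	return -1;
}

int main(void)
{
	printf("poll result: %d\n", poll_with_deadline(500000));
	return 0;
}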
static void cpuidle_profile_main_finish(void)
{
	if (!profile_ongoing) {
		pr_err("CPUIDLE profiling has not started yet\n");
		return;
	}

	pr_info("cpuidle profile finished\n");

	/* Wakeup all cpus to update own profile data to finish profile */
	preempt_disable();
	smp_call_function(call_cpu_finish_profile, NULL, 1);
	preempt_enable();

	profile_ongoing = 0;

	profile_finish_time = ktime_get();
	profile_time = ktime_to_us(ktime_sub(profile_finish_time,
						profile_start_time));

	show_result();
}
Example #12
static int tegra_idle_enter_lp3(struct cpuidle_device *dev,
	struct cpuidle_state *state)
{
	ktime_t enter, exit;
	s64 us;

	trace_power_start(POWER_CSTATE, 1, dev->cpu);

	local_irq_disable();
	local_fiq_disable();

	enter = ktime_get();

	tegra_cpu_wfi();

	exit = ktime_sub(ktime_get(), enter);
	us = ktime_to_us(exit);

	local_fiq_enable();
	local_irq_enable();
	return (int)us;
}
Example #13
/*{{{ MonitorMMEThread*/
static int MonitorMMEThread(void *Param)
{
	struct MMEContext_s *Context = (struct MMEContext_s *)Param;
	MME_ERROR MMEStatus;
	unsigned long long TimeStamp;
	unsigned int TimeValue;
	daemonize(MONITOR_MME_THREAD_NAME);
	MONITOR_DEBUG("Starting\n");
	while (Context->Monitoring)
	{
		MMEStatus = TransformerGetLogEvent(Context);
		if (MMEStatus != MME_SUCCESS)
			break;
		if (down_interruptible(&(Context->EventReceived)) != 0)
			break;
		TimeValue = *Context->DeviceContext->Timer;
		TimeStamp = ktime_to_us(ktime_get());
		if (Context->MMECommandStatus.TimeCode != 0)
		{
			unsigned long long TimeDiff;
			/* This assumes that the timer is counting down from ClockMaxValue to 0 */
			if (Context->MMECommandStatus.TimeCode > TimeValue)
				TimeDiff = (unsigned long long)(Context->MMECommandStatus.TimeCode - TimeValue);
			else
				TimeDiff = ((unsigned long long)Context->MMECommandStatus.TimeCode + Context->ClockMaxValue + 1) - (unsigned long long)TimeValue;
			TimeStamp -= ((unsigned long long)TimeDiff * 1000000ull) / Context->TicksPerSecond;
		}
		if (Context->Monitoring)
			MonitorRecordEvent(Context->DeviceContext,
							   Context->Id,
							   Context->MMECommandStatus.EventID,
							   TimeStamp,
							   Context->MMECommandStatus.Parameters,
							   Context->MMECommandStatus.Message);
	}
	MONITOR_DEBUG("Terminating\n");
	up(&(Context->ThreadTerminated));
	return 0;
}
Example #14
static int tegra_idle_enter_lp2(struct cpuidle_device *dev,
	struct cpuidle_state *state)
{
	ktime_t enter, exit;
	s64 us;

	if (!lp2_in_idle || lp2_disabled_by_suspend ||
	    !tegra_lp2_is_allowed(dev, state))
		return tegra_idle_enter_lp3(dev, state);

	local_irq_disable();
	enter = ktime_get();

	tegra_cpu_idle_stats_lp2_ready(dev->cpu);
	tegra_idle_lp2(dev, state);

	exit = ktime_sub(ktime_get(), enter);
	us = ktime_to_us(exit);

	local_irq_enable();

	/* cpu clockevents may have been reset by powerdown */
	hrtimer_peek_ahead_timers();

	smp_rmb();

	/* Update LP2 latency provided no fall back to LP3 */
	if (state == dev->last_state) {
		state->exit_latency = tegra_lp2_exit_latency;
		state->target_residency = tegra_lp2_exit_latency +
			tegra_lp2_power_off_time;
		if (state->target_residency < tegra_lp2_min_residency)
			state->target_residency = tegra_lp2_min_residency;
	}
	tegra_cpu_idle_stats_lp2_time(dev->cpu, us);

	return (int)us;
}
Example #15
/*
 * kgsl_devfreq_get_dev_status - devfreq_dev_profile.get_dev_status callback
 * @dev: see devfreq.h
 * @freq: see devfreq.h
 * @flags: see devfreq.h
 *
 * This function expects the device mutex to be unlocked.
 */
int kgsl_devfreq_get_dev_status(struct device *dev,
				struct devfreq_dev_status *stat)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrscale *pwrscale;
	s64 tmp;

	if (device == NULL)
		return -ENODEV;
	if (stat == NULL)
		return -EINVAL;

	pwrscale = &device->pwrscale;
	memset(stat, 0, sizeof(*stat));	

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
	/*
	 * If the GPU clock is on grab the latest power counter
	 * values.  Otherwise the most recent ACTIVE values will
	 * already be stored in accum_stats.
	 */
	kgsl_pwrscale_update_stats(device);

	tmp = ktime_to_us(ktime_get());
	stat->total_time = tmp - pwrscale->time;
	pwrscale->time = tmp;

	stat->busy_time = pwrscale->accum_stats.busy_time;

	stat->current_frequency = kgsl_pwrctrl_active_freq(&device->pwrctrl);

	trace_kgsl_pwrstats(device, stat->total_time, &pwrscale->accum_stats);
	memset(&pwrscale->accum_stats, 0, sizeof(pwrscale->accum_stats));

	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

	return 0;
}
Example #16
static void msm_idle_stats_pre_idle(struct msm_idle_stats_device *stats_dev)
{
	int64_t now;
	int64_t interval;

	if (smp_processor_id() != stats_dev->cpu) {
		WARN_ON(1);
		return;
	}

	if (!atomic_read(&stats_dev->collecting))
		return;

	hrtimer_cancel(&stats_dev->timer);

	now = ktime_to_us(ktime_get());
	interval = now - stats_dev->stats.last_busy_start;
	interval = msm_idle_stats_bound_interval(interval);

	stats_dev->stats.busy_intervals[stats_dev->stats.nr_collected]
		= (__u32) interval;
	stats_dev->stats.last_idle_start = now;
}
Example #17
/* Do RTT sampling needed for Vegas.
 * Basically we:
 *   o min-filter RTT samples from within an RTT to get the current
 *     propagation delay + queuing delay (we are min-filtering to try to
 *     avoid the effects of delayed ACKs)
 *   o min-filter RTT samples from a much longer window (forever for now)
 *     to find the propagation delay (baseRTT)
 */
void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, ktime_t last)
{
	struct vegas *vegas = inet_csk_ca(sk);
	u32 vrtt;

	cnt = cnt;	/* cnt is intentionally unused; keep declarations first */

	if (ktime_equal(last, net_invalid_timestamp()))
		return;

	/* Never allow zero rtt or baseRTT */
	vrtt = ktime_to_us(net_timedelta(last)) + 1;

	/* Filter to find propagation delay: */
	if (vrtt < vegas->baseRTT)
		vegas->baseRTT = vrtt;

	/* Find the min RTT during the last RTT to find
	 * the current prop. delay + queuing delay:
	 */
	vegas->minRTT = min(vegas->minRTT, vrtt);
	vegas->cntRTT++;
}
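The comment above describes two filters over the same RTT samples: baseRTT is a min taken over the whole connection (propagation delay), while minRTT is a min taken only within the current RTT and is reset once per RTT (propagation plus current queuing delay). A small self-contained sketch of those two filters, with made-up sample values, is shown below; it only illustrates the filtering idea, not the rest of the Vegas algorithm.

#include <stdint.h>
#include <stdio.h>

struct rtt_filter {
	uint32_t base_rtt;	/* min over the lifetime of the connection */
	uint32_t min_rtt;	/* min within the current RTT, reset every RTT */
	uint32_t cnt_rtt;	/* samples seen in the current RTT */
};

static void rtt_sample(struct rtt_filter *f, uint32_t vrtt)
{
	if (vrtt < f->base_rtt)
		f->base_rtt = vrtt;	/* long-window filter: propagation delay */
	if (vrtt < f->min_rtt)
		f->min_rtt = vrtt;	/* short-window filter: prop. + queuing delay */
	f->cnt_rtt++;
}

static void rtt_new_round(struct rtt_filter *f)
{
	f->min_rtt = UINT32_MAX;	/* start a fresh per-RTT window */
	f->cnt_rtt = 0;
}

int main(void)
{
	struct rtt_filter f = { UINT32_MAX, UINT32_MAX, 0 };
	uint32_t samples[] = { 4200, 3900, 4500, 4100 };	/* made-up RTTs in microseconds */
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		rtt_sample(&f, samples[i]);

	printf("baseRTT=%u us, minRTT=%u us over %u samples\n",
	       f.base_rtt, f.min_rtt, f.cnt_rtt);

	rtt_new_round(&f);
	return 0;
}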
Example #18
static ssize_t hcsr04_read(struct file *filp, char __user *buff,size_t count, loff_t *offp){
//(struct class *class, struct class_attribute *attr, char *buf) {
	int counter;

	hcsr04_read_flag = 1;
	// Send a trigger pulse on the TRIGGER line (at least 10 us; held for 20 us here)
	gpio_set_value(HCSR04_TRIGGER,1);
	udelay(20);
	gpio_set_value(HCSR04_TRIGGER,0);
	valid_value=0;

	counter=0;
	while (valid_value==0) {
		// Out of range
		if (++counter>23200) {
			return sprintf(buff, "%d\n", -1);
		}
		udelay(1);
	}
	hcsr04_read_flag = 0;
	//printk(KERN_INFO "Sub: %lld\n", ktime_to_us(ktime_sub(echo_end,echo_start)));
	return sprintf(buff, "%lld\n", ktime_to_us(ktime_sub(echo_end,echo_start)));
}
Example #19
uint32 mdp_get_lcd_line_counter(struct msm_fb_data_type *mfd)
{
	uint32 elapsed_usec_time;
	uint32 lcd_line;
	ktime_t last_vsync_timetick_local;
	ktime_t curr_time;
	unsigned long flag;

	if ((!mfd->panel_info.lcd.vsync_enable) || (!vsync_mode))
		return 0;

	spin_lock_irqsave(&mdp_spin_lock, flag);
	last_vsync_timetick_local = mfd->last_vsync_timetick;
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	curr_time = ktime_get_real();
	elapsed_usec_time = ktime_to_us(ktime_sub(curr_time,
						last_vsync_timetick_local));

	elapsed_usec_time = elapsed_usec_time % mfd->lcd_ref_usec_time;

	/* lcd line calculation referenced to line counter = 0 */
	lcd_line =
	    (elapsed_usec_time * mfd->total_lcd_lines) / mfd->lcd_ref_usec_time;

	/* lcd line adjustment referenced to the actual line counter at vsync */
	lcd_line =
	    (mfd->total_lcd_lines - mfd->panel_info.lcd.v_back_porch +
	     lcd_line) % (mfd->total_lcd_lines + 1);

	if (lcd_line > mfd->total_lcd_lines) {
		MSM_FB_INFO
		    ("mdp_get_lcd_line_counter: mdp_lcd_rd_cnt >= mfd->total_lcd_lines error!\n");
	}

	return lcd_line;
}
Example #20
static enum hrtimer_restart msm_idle_stats_timer(struct hrtimer *timer)
{
	struct msm_idle_stats_device *stats_dev;
	unsigned int cpu;
	int64_t now;
	int64_t interval;

	stats_dev = container_of(timer, struct msm_idle_stats_device, timer);
	cpu = get_cpu();

	if (cpu != stats_dev->cpu) {
		if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_MIGRATION)
			pr_info("%s: timer migrated from cpu%u to cpu%u\n",
				__func__, stats_dev->cpu, cpu);

		stats_dev->stats.event = MSM_IDLE_STATS_EVENT_TIMER_MIGRATED;
		goto timer_exit;
	}

	now = ktime_to_us(ktime_get());
	interval = now - stats_dev->stats.last_busy_start;

	if (stats_dev->stats.busy_timer > 0 &&
			interval >= stats_dev->stats.busy_timer - 1)
		stats_dev->stats.event =
			MSM_IDLE_STATS_EVENT_BUSY_TIMER_EXPIRED;
	else
		stats_dev->stats.event =
			MSM_IDLE_STATS_EVENT_COLLECTION_TIMER_EXPIRED;

timer_exit:
	atomic_set(&stats_dev->collecting, 0);
	wake_up_interruptible(&stats_dev->wait_q);

	put_cpu();
	return HRTIMER_NORESTART;
}
/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @index: index of target state
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev, int index)
{
	ktime_t  kt1, kt2;
	s64 idle_time;
	struct acpi_processor *pr;
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);

	pr = __get_cpu_var(processors);
	dev->last_residency = 0;

	if (unlikely(!pr))
		return -EINVAL;

	local_irq_disable();

	if (acpi_idle_suspend) {
		local_irq_enable();
		cpu_relax();
		return -EBUSY;
	}

	lapic_timer_state_broadcast(pr, cx, 1);
	kt1 = ktime_get_real();
	acpi_idle_do_entry(cx);
	kt2 = ktime_get_real();
	idle_time =  ktime_to_us(ktime_sub(kt2, kt1));

	/* Update device last_residency*/
	dev->last_residency = (int)idle_time;

	local_irq_enable();
	cx->usage++;
	lapic_timer_state_broadcast(pr, cx, 0);

	return index;
}
static irqreturn_t mon_intr_handler(int irq, void *dev)
{
	struct devfreq *df = dev;
	ktime_t ts;
	unsigned int us;
	u32 regval;
	int ret;

	regval = get_l2_indirect_reg(L2PMOVSR);
	pr_debug("Got interrupt: %x\n", regval);

	devfreq_monitor_stop(df);

	/*
	 * Don't recalc bandwidth if the interrupt comes right after a
	 * previous bandwidth calculation.  This is done for two reasons:
	 *
	 * 1. Sampling the BW during a very short duration can result in a
	 *    very inaccurate measurement due to very short bursts.
	 * 2. This can only happen if the limit was hit very close to the end
	 *    of the previous sample period. Which means the current BW
	 *    estimate is not very off and doesn't need to be readjusted.
	 */
	ts = ktime_get();
	us = ktime_to_us(ktime_sub(ts, prev_ts));
	if (us > TOO_SOON_US) {
		mutex_lock(&df->lock);
		ret = update_devfreq(df);
		if (ret)
			pr_err("Unable to update freq on IRQ!\n");
		mutex_unlock(&df->lock);
	}

	devfreq_monitor_start(df);

	return IRQ_HANDLED;
}
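The comment in mon_intr_handler above explains a simple rate limit: recompute bandwidth only if at least TOO_SOON_US microseconds have passed since the previous sample, because a very short window gives a noisy estimate and implies the previous estimate is still roughly right. A self-contained userspace sketch of that guard follows; the threshold value and the recalculate() stub are placeholders, not values from the driver.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define TOO_SOON_US 5000	/* placeholder threshold; the driver defines its own */

static int64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

static int64_t prev_ts;

static void recalculate(void)
{
	printf("recalculating bandwidth estimate\n");
}

/* Called from the "interrupt": only recalculate if the last sample is old enough. */
static void maybe_recalculate(void)
{
	int64_t now = now_us();

	if (now - prev_ts > TOO_SOON_US) {
		recalculate();
		prev_ts = now;
	}
}

int main(void)
{
	prev_ts = now_us();
	maybe_recalculate();	/* called immediately: too soon, so skipped */
	return 0;
}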
Example #23
static int tegra_idle_enter_lp2(struct cpuidle_device *dev,
	struct cpuidle_state *state)
{
	ktime_t enter, exit;
	s64 us;

	mf_irq_leave(NULL);

	if (!lp2_in_idle || lp2_disabled_by_suspend ||
	    !tegra_lp2_is_allowed(dev, state)) {
		dev->last_state = &dev->states[0];
		return tegra_idle_enter_lp3(dev, state);
	}

	local_irq_disable();
	enter = ktime_get();

	tegra_cpu_idle_stats_lp2_ready(dev->cpu);
	tegra_idle_lp2(dev, state);

	exit = ktime_sub(ktime_get(), enter);
	us = ktime_to_us(exit);

	local_irq_enable();

	smp_rmb();

	/* Update LP2 latency provided no fall back to LP3 */
	if (state == dev->last_state) {
		tegra_lp2_set_global_latency(state);
		tegra_lp2_update_target_residency(state);
	}
	tegra_cpu_idle_stats_lp2_time(dev->cpu, us);

	return (int)us;
}
Example #24
int cpuidle_wrap_enter(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int index,
				int (*enter)(struct cpuidle_device *dev,
					struct cpuidle_driver *drv, int index))
{
	ktime_t time_start, time_end;
	s64 diff;

	time_start = ktime_get();

	index = enter(dev, drv, index);

	time_end = ktime_get();

	local_irq_enable();

	diff = ktime_to_us(ktime_sub(time_end, time_start));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	return index;
}
Example #25
/**
 * get_cpu_iowait_time_us - get the total iowait time of a cpu
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative iowait time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, iowait;

	if (!tick_nohz_enabled)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		update_ts_time_stats(cpu, ts, now, last_update_time);
		iowait = ts->iowait_sleeptime;
	} else {
		if (ts->idle_active && nr_iowait_cpu(cpu) > 0) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			iowait = ktime_add(ts->iowait_sleeptime, delta);
		} else {
			iowait = ts->iowait_sleeptime;
		}
	}

	return ktime_to_us(iowait);
}
static int tegra_idle_enter_lp3(struct cpuidle_device *dev,
	struct cpuidle_state *state)
{
	ktime_t enter, exit;
	s64 us;

	trace_power_start(POWER_CSTATE, 1, dev->cpu);

	local_irq_disable();
	local_fiq_disable();

	enter = ktime_get();

	tegra_cpu_wfi();

	exit = ktime_sub(ktime_get(), enter);
	us = ktime_to_us(exit);
	/* moved from drivers/cpuidle/cpuidle.c */
	dev->states[0].usage++;
	dev->states[0].time += (unsigned long long)us;
	local_fiq_enable();
	local_irq_enable();
	return (int)us;
}
static unsigned long compute_vsync_interval(void)
{
	ktime_t currtime_us;
	unsigned long diff_from_vsync, vsync_interval;
	/*
	 * Get interval between last vsync and current time
	 * Current time = CPU programming MDP for next Vsync
	 */
	currtime_us = ktime_get();
	diff_from_vsync =
		(ktime_to_us(ktime_sub(currtime_us, last_vsync_time_ns)));
	diff_from_vsync /= USEC_PER_MSEC;
	/*
	 * If the last Vsync occurred more than 64 ms ago, skip programming
	 * the timer
	 */
	if (diff_from_vsync < (VSYNC_INTERVAL*MAX_VSYNC_GAP)) {
		vsync_interval =
			(VSYNC_INTERVAL-diff_from_vsync)%VSYNC_INTERVAL;
	} else
		vsync_interval = VSYNC_INTERVAL+1;

	return vsync_interval;
}
static void mdp_dma_schedule(struct msm_fb_data_type *mfd, uint32 term)
{
	/*
	 * dma2 configure VSYNC block
	 * vsync supported on Primary LCD only for now
	 */
	int32 mdp_lcd_rd_cnt;
	uint32 usec_wait_time;
	uint32 start_y;

	/*
	 * ToDo: if we can move HRT timer callback to workqueue, we can
	 * move DMA2 power on under mdp_pipe_kickoff().
	 * This will save a power for hrt time wait.
	 * However if the latency for context switch (hrt irq -> workqueue)
	 * is too big, we will miss the vsync timing.
	 */
	if (term == MDP_DMA2_TERM)
		mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);

	mdp_dma2_update_time_in_usec = ktime_to_us(mdp_dma2_last_update_time);

	if ((!mfd->ibuf.vsync_enable) || (!mfd->panel_info.lcd.vsync_enable)
	    || (mfd->use_mdp_vsync)) {
		mdp_pipe_kickoff(term, mfd);
		return;
	}
	/* SW vsync logic starts here */

	/* get current rd counter */
	mdp_lcd_rd_cnt = mdp_get_lcd_line_counter(mfd);
	if (mdp_dma2_update_time_in_usec != 0) {
		uint32 num, den;

		/*
		 * roi width boundary calculation to know the size of pixel
		 * width that MDP can send faster or slower than LCD read
		 * pointer
		 */

		num = mdp_last_dma2_update_width * mdp_last_dma2_update_height;
		den =
		    (((mfd->panel_info.lcd.refx100 * mfd->total_lcd_lines) /
		      1000) * (mdp_dma2_update_time_in_usec / 100)) / 1000;

		if (den == 0)
			mfd->vsync_width_boundary[mdp_last_dma2_update_width] =
			    mfd->panel_info.xres + 1;
		else
			mfd->vsync_width_boundary[mdp_last_dma2_update_width] =
			    (int)(num / den);
	}

	if (mfd->vsync_width_boundary[mdp_last_dma2_update_width] >
	    mdp_curr_dma2_update_width) {
		/* MDP wrp is faster than LCD rdp */
		mdp_lcd_rd_cnt += mdp_lcd_rd_cnt_offset_fast;
	} else {
		/* MDP wrp is slower than LCD rdp */
		mdp_lcd_rd_cnt -= mdp_lcd_rd_cnt_offset_slow;
	}

	if (mdp_lcd_rd_cnt < 0)
		mdp_lcd_rd_cnt = mfd->total_lcd_lines + mdp_lcd_rd_cnt;
	else if (mdp_lcd_rd_cnt > mfd->total_lcd_lines)
		mdp_lcd_rd_cnt = mdp_lcd_rd_cnt - mfd->total_lcd_lines - 1;

	/* get wrt pointer position */
	start_y = mfd->ibuf.dma_y;

	/* measure line difference between start_y and rd counter */
	if (start_y > mdp_lcd_rd_cnt) {
		/*
		 * *100 for lcd_ref_hzx100 was already multiplied by 100
		 * *1000000 is for usec conversion
		 */

		if ((start_y - mdp_lcd_rd_cnt) <=
		    mdp_vsync_usec_wait_line_too_short)
			usec_wait_time = 0;
		else
			usec_wait_time =
			    ((start_y -
			      mdp_lcd_rd_cnt) * 1000000) /
			    ((mfd->total_lcd_lines *
			      mfd->panel_info.lcd.refx100) / 100);
	} else {
		if ((start_y + (mfd->total_lcd_lines - mdp_lcd_rd_cnt)) <=
		    mdp_vsync_usec_wait_line_too_short)
			usec_wait_time = 0;
		else
			usec_wait_time =
			    ((start_y +
			      (mfd->total_lcd_lines -
			       mdp_lcd_rd_cnt)) * 1000000) /
			    ((mfd->total_lcd_lines *
			      mfd->panel_info.lcd.refx100) / 100);
	}

	mdp_last_dma2_update_width = mdp_curr_dma2_update_width;
	mdp_last_dma2_update_height = mdp_curr_dma2_update_height;

	if (usec_wait_time == 0) {
		mdp_pipe_kickoff(term, mfd);
	} else {
		ktime_t wait_time;

		wait_time = ns_to_ktime(usec_wait_time * 1000);

		if (msm_fb_debug_enabled) {
			vt = ktime_get_real();
			mdp_expected_usec_wait = usec_wait_time;
		}
		hrtimer_start(&mfd->dma_hrtimer, wait_time, HRTIMER_MODE_REL);
	}
}
Example #29
static int msm_idle_stats_collect(struct file *filp,
				  unsigned int cmd, unsigned long arg)
{
	struct msm_idle_stats_device *stats_dev;
	struct msm_idle_stats *stats;
	int rc;

	stats_dev = (struct msm_idle_stats_device *) filp->private_data;
	stats = &stats_dev->stats;

	rc = mutex_lock_interruptible(&stats_dev->mutex);
	if (rc) {
		if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_SIGNAL)
			pr_info("%s: interrupted while waiting on device "
				"mutex\n", __func__);

		rc = -EINTR;
		goto collect_exit;
	}

	if (atomic_read(&stats_dev->collecting)) {
		pr_err("%s: inconsistent state\n", __func__);
		rc = -EBUSY;
		goto collect_unlock_exit;
	}

	rc = copy_from_user(stats, (void *)arg, sizeof(*stats));
	if (rc) {
		rc = -EFAULT;
		goto collect_unlock_exit;
	}

	if (stats->nr_collected >= MSM_IDLE_STATS_NR_MAX_INTERVALS ||
			stats->busy_timer > MSM_IDLE_STATS_MAX_TIMER ||
			stats->collection_timer > MSM_IDLE_STATS_MAX_TIMER) {
		rc = -EINVAL;
		goto collect_unlock_exit;
	}

	if (get_cpu() != stats_dev->cpu) {
		put_cpu();
		rc = -EACCES;
		goto collect_unlock_exit;
	}

	stats_dev->collection_expiration =
		ktime_to_us(ktime_get()) + stats->collection_timer;

	atomic_set(&stats_dev->collecting, 1);

	if (stats->busy_timer > 0) {
		rc = hrtimer_start(&stats_dev->timer,
			ktime_set(0, stats->busy_timer * 1000),
			HRTIMER_MODE_REL_PINNED);
		WARN_ON(rc);
	}

	put_cpu();
	if (wait_event_interruptible(stats_dev->wait_q,
			!atomic_read(&stats_dev->collecting))) {
		if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_SIGNAL)
			pr_info("%s: interrupted while waiting on "
				"collection\n", __func__);

		hrtimer_cancel(&stats_dev->timer);
		atomic_set(&stats_dev->collecting, 0);

		rc = -EINTR;
		goto collect_unlock_exit;
	}

	stats->return_timestamp = ktime_to_us(ktime_get());

	rc = copy_to_user((void *)arg, stats, sizeof(*stats));
	if (rc) {
		rc = -EFAULT;
		goto collect_unlock_exit;
	}

collect_unlock_exit:
	mutex_unlock(&stats_dev->mutex);

collect_exit:
	return rc;
}
Example #30
static int ipp_blit_sync_real(const struct rk29_ipp_req *req)
{
	int status;
	int wait_ret;
	
	//printk("ipp_blit_sync -------------------\n");

	// If IPP is busy now, wait until it becomes idle
	mutex_lock(&drvdata->mutex);
	{
		status = wait_event_interruptible(blit_wait_queue, idle_condition);
		
		if(status < 0)
		{
			printk("ipp_blit_sync_real wait_event_interruptible=%d\n",status);
			mutex_unlock(&drvdata->mutex);
			return status;
		}
		
		idle_condition = 0;
		
	}
	mutex_unlock(&drvdata->mutex);

	
  	drvdata->issync = true;
	drvdata->ipp_result = ipp_blit(req);
   
	if(drvdata->ipp_result == 0)
	{
		//wait_ret = wait_event_interruptible_timeout(hw_wait_queue, wq_condition, msecs_to_jiffies(req->timeout));
		wait_ret = wait_event_timeout(hw_wait_queue, wq_condition, msecs_to_jiffies(req->timeout));
#ifdef IPP_TEST
		irq_end = ktime_get(); 
		irq_end = ktime_sub(irq_end,irq_start);
		hw_end = ktime_sub(hw_end,hw_start);
		if((((int)ktime_to_us(hw_end)/1000)>10)||(((int)ktime_to_us(irq_end)/1000)>10))
		{
			//printk("hw time: %d ms, irq time: %d ms\n",(int)ktime_to_us(hw_end)/1000,(int)ktime_to_us(irq_end)/1000);
		}
#endif				
		if (wait_ret <= 0)
		{
			printk("%s wait_ret=%d,wq_condition =%d,wait_event_timeout:%dms! \n",__FUNCTION__,wait_ret,wq_condition,req->timeout);

			if(wq_condition==0)
			{
				//print all register's value
				printk("IPP_CONFIG: %x\n",ipp_read(IPP_CONFIG));
				printk("IPP_SRC_IMG_INFO: %x\n",ipp_read(IPP_SRC_IMG_INFO));
				printk("IPP_DST_IMG_INFO: %x\n",ipp_read(IPP_DST_IMG_INFO));
				printk("IPP_IMG_VIR: %x\n",ipp_read(IPP_IMG_VIR));
				printk("IPP_INT: %x\n",ipp_read(IPP_INT));
				printk("IPP_SRC0_Y_MST: %x\n",ipp_read(IPP_SRC0_Y_MST));
				printk("IPP_SRC0_CBR_MST: %x\n",ipp_read(IPP_SRC0_CBR_MST));
				printk("IPP_SRC1_Y_MST: %x\n",ipp_read(IPP_SRC1_Y_MST));
				printk("IPP_SRC1_CBR_MST: %x\n",ipp_read(IPP_SRC1_CBR_MST));
				printk("IPP_DST0_Y_MST: %x\n",ipp_read(IPP_DST0_Y_MST));
				printk("IPP_DST0_CBR_MST: %x\n",ipp_read(IPP_DST0_CBR_MST));
				printk("IPP_DST1_Y_MST: %x\n",ipp_read(IPP_DST1_Y_MST));
				printk("IPP_DST1_CBR_MST: %x\n",ipp_read(IPP_DST1_CBR_MST));
				printk("IPP_PRE_SCL_PARA: %x\n",ipp_read(IPP_PRE_SCL_PARA));
				printk("IPP_POST_SCL_PARA: %x\n",ipp_read(IPP_POST_SCL_PARA));
				printk("IPP_SWAP_CTRL: %x\n",ipp_read(IPP_SWAP_CTRL));
				printk("IPP_PRE_IMG_INFO: %x\n",ipp_read(IPP_PRE_IMG_INFO));
				printk("IPP_AXI_ID: %x\n",ipp_read(IPP_AXI_ID));
				printk("IPP_SRESET: %x\n",ipp_read(IPP_SRESET));
				printk("IPP_PROCESS_ST: %x\n",ipp_read(IPP_PROCESS_ST));
		
				ipp_soft_reset();
				drvdata->ipp_result = -EAGAIN;
			}
		}

		ipp_power_off(NULL);
	}
	drvdata->issync = false;

	//IPP is idle, wake up the wait queue
	//printk("ipp_blit_sync done ----------------\n");
	status = drvdata->ipp_result;
	idle_condition = 1;
	wake_up_interruptible_sync(&blit_wait_queue);
	
	return status;
}