void vsync_notify_handler(void *arg)
{
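	/* Record the vsync timestamp and notify userspace pollers via sysfs. */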
	struct mdp3_session_data *session = (struct mdp3_session_data *)arg;
	session->vsync_time = ktime_get();
	sysfs_notify_dirent(session->vsync_event_sd);
}
Example #2
0
/*
 * We rearm the timer until we get disabled by the idle code.
 * Called with interrupts disabled and timer->base->cpu_base->lock held.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the cpu in charge went
	 * into a long sleep. If two cpus happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * xtime_lock.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
		tick_do_timer_cpu = cpu;
#endif

	/* Check, if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	/*
	 * Do not call when we are not in irq context and have
	 * no valid regs pointer
	 */
	if (regs) {
		/*
		 * When we are idle and the tick is stopped, we have to touch
		 * the watchdog as we might not schedule for a really long
		 * time. This happens on complete idle SMP systems while
		 * waiting on the login prompt. We also increment the "start of
		 * idle" jiffy stamp so the idle accounting adjustment we do
		 * when we go busy again does not account for too many ticks.
		 */
		if (ts->tick_stopped) {
			touch_softlockup_watchdog();
			ts->idle_jiffies++;
		}
		update_process_times(user_mode(regs));
		profile_tick(CPU_PROFILING);

		if ((rq_info.init == 1) && (tick_do_timer_cpu == cpu)) {

			/*
			 * update run queue statistics
			 */
			update_rq_stats();

			/*
			 * wakeup user if needed
			 */
			wakeup_user();
		}
	}

	hrtimer_forward(timer, now, tick_period);

	return HRTIMER_RESTART;
}
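
/*
 * gcpu_plug: compare the current CPU frequency against the idle
 * thresholds, track how long the up/down condition has persisted, and
 * queue cpuplug_work to add or remove a core once up_time/down_time has
 * elapsed.
 */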
void gcpu_plug(unsigned int cpu_freq)
{
	unsigned long up_delay, top_freq, bottom_freq;
	static bool first_call = true;
	static cputime64_t total_time = 0;
	static cputime64_t last_time;
	cputime64_t current_time;
	cputime64_t this_time = 0;

	if (is_plugging) {
		return;
	}

	current_time = ktime_to_ms(ktime_get());
	if (first_call) {
		first_call = false;
	} else {
		this_time = current_time - last_time;
	}
	total_time += this_time;

	up_delay = up2gn_delay;
	top_freq = idle_bottom_freq;
	bottom_freq = idle_bottom_freq;

	if (smp_processor_id() == 0)
		mp_state = __cpu_plug();
	else
		mp_state = TEGRA_HP_IDLE;

	switch (hp_state) {
	case TEGRA_HP_DISABLED:
		total_time = 0;
		break;
	case TEGRA_HP_IDLE:
		if (cpu_freq > top_freq) {
			hp_state = TEGRA_HP_UP;
		} else if (cpu_freq <= bottom_freq) {
			hp_state = TEGRA_HP_DOWN;
		}
		total_time = 0;
		break;
	case TEGRA_HP_DOWN:
		if (cpu_freq > top_freq) {
			hp_state = TEGRA_HP_UP;
			total_time = 0;
		}
		break;
	case TEGRA_HP_UP:
		if (cpu_freq <= bottom_freq) {
			hp_state = TEGRA_HP_DOWN;
			total_time = 0;
		}
		break;
	}

	if (hp_state == TEGRA_HP_UP) {
		switch (tegra_cpu_speed_balance()) {
		/* cpu speed is up and balanced - one more on-line */
		case TEGRA_CPU_SPEED_BALANCED:
			if ((total_time >= up_time) && (mp_state == TEGRA_HP_UP)) {
				is_plugging = true;
				last_state = TEGRA_HP_UP;
				queue_work(cpuplug_wq, &cpuplug_work);
				total_time = 0;
			}
			break;
		/* cpu speed is up, but skewed - remove one core */
		case TEGRA_CPU_SPEED_SKEWED:
			if ((total_time >= down_time) && (mp_state == TEGRA_HP_DOWN)) {
				is_plugging = true;
				last_state = TEGRA_HP_DOWN;
				queue_work(cpuplug_wq, &cpuplug_work);
				total_time = 0;
			}
			break;
		/* cpu speed is up, but under-utilized - do nothing */
		case TEGRA_CPU_SPEED_BIASED:
			if (total_time >= up_time)
				total_time = 0;
		default:
			break;
		}
	} else if (hp_state == TEGRA_HP_DOWN) {
		if ((total_time >= down_time) && (mp_state == TEGRA_HP_DOWN))  {
			is_plugging = true;
			last_state = TEGRA_HP_DOWN;
			queue_work(cpuplug_wq, &cpuplug_work);
			total_time = 0;
		}
	}

	last_time = ktime_to_ms(ktime_get());
}
Example #4
0
static int nvhost_pod_estimate_freq(struct devfreq *df,
				    unsigned long *freq)
{
	struct podgov_info_rec *podgov = df->data;
	struct devfreq_dev_status dev_stat;
	struct nvhost_devfreq_ext_stat *ext_stat;
	long delay;
	int current_event;
	int stat;
	ktime_t now;

	stat = df->profile->get_dev_status(df->dev.parent, &dev_stat);
	if (stat < 0)
		return stat;

	/* Ensure maximal clock when scaling is disabled */
	if (!podgov->enable) {
		*freq = df->max_freq;
		return 0;
	}

	if (podgov->p_user) {
		*freq = podgov->p_freq_request;
		return 0;
	}

	current_event = DEVICE_IDLE;
	stat = 0;
	now = ktime_get();

	/* Local adjustments (i.e. requests from kernel threads) are
	 * handled here */

	if (podgov->adjustment_type == ADJUSTMENT_LOCAL) {

		podgov->adjustment_type = ADJUSTMENT_DEVICE_REQ;

		/* Do not do unnecessary scaling */
		scaling_limit(df, &podgov->adjustment_frequency);
		if (df->previous_freq == podgov->adjustment_frequency)
			return GET_TARGET_FREQ_DONTSCALE;

		trace_podgov_estimate_freq(df->previous_freq,
			podgov->adjustment_frequency);

		*freq = podgov->adjustment_frequency;
		return 0;
	}

	/* Retrieve extended data */
	ext_stat = dev_stat.private_data;
	if (!ext_stat)
		return -EINVAL;

	current_event = ext_stat->busy;
	*freq = dev_stat.current_frequency;
	df->min_freq = ext_stat->min_freq;
	df->max_freq = ext_stat->max_freq;

	/* Sustain local variables */
	podgov->last_event_type = current_event;
	podgov->total_idle += (dev_stat.total_time - dev_stat.busy_time);
	podgov->last_total_idle += (dev_stat.total_time - dev_stat.busy_time);

	/* update the load estimate based on idle time */
	update_load_estimate(df);

	/* if throughput hint enabled, and last hint is recent enough, return */
	if (podgov->p_use_throughput_hint &&
		ktime_us_delta(now, podgov->last_throughput_hint) < 1000000)
		return GET_TARGET_FREQ_DONTSCALE;

	switch (current_event) {

	case DEVICE_IDLE:
		/* delay idle_max % of 2 * fast_response time (given in
		 * microseconds) */
		*freq = scaling_state_check(df, now);
		delay = (podgov->idle_max * podgov->p_estimation_window)
			/ 500000;
		schedule_delayed_work(&podgov->idle_timer,
			msecs_to_jiffies(delay));
		break;
	case DEVICE_BUSY:
		cancel_delayed_work(&podgov->idle_timer);
		*freq = scaling_state_check(df, now);
		break;
	case DEVICE_UNKNOWN:
		*freq = scaling_state_check(df, now);
		break;

	}

	if (!(*freq) || (*freq == df->previous_freq))
		return GET_TARGET_FREQ_DONTSCALE;

	trace_podgov_estimate_freq(df->previous_freq, *freq);


	return 0;
}
Example #5
0
ssize_t mdp4_dsi_video_show_event(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	int cndx;
	struct vsycn_ctrl *vctrl;
	ssize_t ret = 0;
	unsigned long flags;
	u64 vsync_tick;
//yanghai add the iommu patch 2013.5.25
#ifdef CONFIG_VENDOR_EDIT
	ktime_t ctime;
	u32 ctick, ptick;
	int diff;
#endif
//yanghai add
	cndx = 0;
	vctrl = &vsync_ctrl_db[0];

	if (atomic_read(&vctrl->suspend) > 0 ||
		atomic_read(&vctrl->vsync_resume) == 0)
		return 0;
//yanghai add the iommu patch 2013.5.25
#ifdef CONFIG_VENDOR_EDIT
	/*
	 * The show_event thread keeps spinning on vctrl->vsync_comp:
	 * there is a race on x.done if multiple threads are blocked
	 * at wait_for_completion(&vctrl->vsync_comp).
	 *
	 * If the show_event thread wakes up first, it comes back and
	 * calls INIT_COMPLETION(vctrl->vsync_comp), which sets x.done = 0;
	 * the second thread then wakes up and sets x.done = 0x7ffffffd,
	 * after which wait_for_completion will never wait.
	 * To avoid this, force the show_event thread to sleep briefly here,
	 * since it has a full vsync period (16.6 ms) to wait.
	 */
	ctime = ktime_get();
	ctick = (u32)ktime_to_us(ctime);
	ptick = (u32)ktime_to_us(vctrl->vsync_time);
	ptick += 5000;  /* 5ms */
	diff = ptick - ctick;
	if (diff > 0) {
		if (diff > 1000) /* 1 ms */
			diff = 1000;
		usleep(diff);
	}
#endif
//yanghai add end
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (vctrl->wait_vsync_cnt == 0)
		INIT_COMPLETION(vctrl->vsync_comp);
	vctrl->wait_vsync_cnt++;
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
	ret = wait_for_completion_interruptible_timeout(&vctrl->vsync_comp,
		msecs_to_jiffies(VSYNC_PERIOD * 4));
	if (ret <= 0) {
		complete_all(&vctrl->vsync_comp);
		vctrl->wait_vsync_cnt = 0;
		vctrl->vsync_time = ktime_get();
	}

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	vsync_tick = ktime_to_ns(vctrl->vsync_time);
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	ret = snprintf(buf, PAGE_SIZE, "VSYNC=%llu", vsync_tick);
	buf[strlen(buf) + 1] = '\0';
	return ret;
}
Example #6
0
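/*
 * tplug_work_fn: sample the load on each core, average it with the
 * previous sample, and hotplug the next core up or down based on
 * load_threshold and the minimum time a core must stay online; the work
 * re-queues itself while hotplugging is enabled and not suspended.
 */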
static void __cpuinit tplug_work_fn(struct work_struct *work)
{
	int i;
	unsigned int load[8], avg_load[8];

	switch (endurance_level)
	{
	case 0:
		core_limit = 8;
		break;
	case 1:
		core_limit = 4;
		break;
	case 2:
		core_limit = 2;
		break;
	default:
		core_limit = 8;
		break;
	}

	for(i = 0 ; i < core_limit; i++)
	{
		if(cpu_online(i))
			load[i] = get_curr_load(i);
		else
			load[i] = 0;

		avg_load[i] = ((int) load[i] + (int) last_load[i]) / 2;
		last_load[i] = load[i];
	}

	for (i = 0; i < core_limit; i++)
	{
		if (cpu_online(i) && avg_load[i] > load_threshold && cpu_is_offline(i+1))
		{
			if (DEBUG)
				pr_info("%s : bringing back cpu%d\n", THUNDERPLUG, i);
			if (!((i+1) > 7)) {
				last_time[i+1] = ktime_to_ms(ktime_get());
				cpu_up(i+1);
			}
		}
		else if (cpu_online(i) && avg_load[i] < load_threshold && cpu_online(i+1))
		{
			if (DEBUG)
				pr_info("%s : offlining cpu%d\n", THUNDERPLUG, i);
			if ((i+1) != 0) {
				now[i+1] = ktime_to_ms(ktime_get());
				if ((now[i+1] - last_time[i+1]) > MIN_CPU_UP_TIME)
					cpu_down(i+1);
			}
		}
	}

	if(tplug_hp_enabled != 0 && !isSuspended)
		queue_delayed_work_on(0, tplug_wq, &tplug_work,
			msecs_to_jiffies(sampling_time));
	else {
		if(!isSuspended)
			cpus_online_all();
		else
			thunderplug_suspend();
	}

}
Example #7
0
static int nvhost_pod_init(struct devfreq *df)
{
	struct nvhost_device *d = to_nvhost_device(df->dev.parent);
	struct podgov_info_rec *podgov;
	ktime_t now = ktime_get();
	int error = 0;

	struct nvhost_devfreq_ext_stat *ext_stat;
	struct devfreq_dev_status dev_stat;
	int stat = 0;

	long rate;
	int freqs[MAX_FREQ_COUNT];

	podgov = kzalloc(sizeof(struct podgov_info_rec), GFP_KERNEL);
	if (!podgov)
		goto err_alloc_podgov;
	df->data = (void *)podgov;

	/* This should be removed after the governor include also the hint
	 * interface */
	local_podgov = podgov;

	/* Initialise workers */
	INIT_WORK(&podgov->work, podgov_clocks_handler);
	INIT_DELAYED_WORK(&podgov->idle_timer, podgov_idle_handler);

	/* Set scaling parameter defaults */
	podgov->enable = 1;
	podgov->p_adjust = 0;
	podgov->block = 0;
	podgov->p_use_throughput_hint = 1;
	podgov->idle_min = podgov->p_idle_min = 100;
	podgov->idle_max = podgov->p_idle_max = 150;
	podgov->p_hint_lo_limit = 800;
	podgov->p_hint_hi_limit = 1015;
	podgov->p_scaleup_limit = 1275;
	podgov->p_scaledown_limit = 1475;
	podgov->p_smooth = 7;
	
	podgov->p_estimation_window = 10000;
	podgov->adjustment_type = ADJUSTMENT_DEVICE_REQ;
	podgov->p_user = 0;

	/* Reset clock counters */
	podgov->last_throughput_hint = now;
	podgov->last_scale = now;
	podgov->last_adjust = now;
	podgov->last_estimation_window = now;
	podgov->estimation_window = now;
	podgov->last_notification = now;

	podgov->power_manager = df;

	/* Get the current status of the device */
	stat = df->profile->get_dev_status(df->dev.parent, &dev_stat);
	if (stat < 0)
		goto err_get_current_status;
	if (!dev_stat.private_data) {
		pr_err("podgov: device does not support ext_stat.\n");
		goto err_get_current_status;
	}
	ext_stat = dev_stat.private_data;
	df->previous_freq = dev_stat.current_frequency;
	df->min_freq = ext_stat->min_freq;
	df->max_freq = ext_stat->max_freq;

	podgov->p_freq_request = ext_stat->max_freq;

	/* Create sysfs entries for controlling this governor */
	error = device_create_file(&d->dev,
			&dev_attr_enable_3d_scaling);
	if (error)
		goto err_create_sysfs_entry;

	error = device_create_file(&d->dev,
			&dev_attr_user);
	if (error)
		goto err_create_sysfs_entry;

	error = device_create_file(&d->dev,
			&dev_attr_freq_request);
	if (error)
		goto err_create_sysfs_entry;

	rate = 0;
	podgov->freq_count = 0;
	while (rate <= df->max_freq) {
		long rounded_rate;
		if (unlikely(podgov->freq_count == MAX_FREQ_COUNT)) {
			pr_err("%s: too many frequencies\n", __func__);
			break;
		}
		rounded_rate = clk_round_rate(clk_get_parent(d->clk[0]), rate);
		if (podgov->freq_count &&
		    freqs[podgov->freq_count - 1] == rounded_rate)
			break;
		freqs[podgov->freq_count++] = rounded_rate;
		rate = rounded_rate + 2000;
	}

	podgov->freqlist =
		kmalloc(podgov->freq_count * sizeof(int), GFP_KERNEL);
	if (podgov->freqlist == NULL)
		goto err_allocate_freq_list;

	memcpy(podgov->freqlist, freqs,
		podgov->freq_count * sizeof(int));

	podgov->idle_avg = 0;
	podgov->hint_avg = 0;

	nvhost_scale3d_debug_init(df);

	return 0;

err_allocate_freq_list:
	device_remove_file(&d->dev, &dev_attr_enable_3d_scaling);
	device_remove_file(&d->dev, &dev_attr_user);
	device_remove_file(&d->dev, &dev_attr_freq_request);
err_create_sysfs_entry:
	dev_err(&df->dev, "failed to create sysfs attributes");
err_get_current_status:
	kfree(podgov);
err_alloc_podgov:
	return -ENOMEM;
}
Example #8
0
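/* Return the monotonic clock as a raw nanosecond count (ktime_t.tv64). */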
static unsigned long long _timestamp_kernel(void)
{
	ktime_t t = ktime_get();
	return (t.tv64);
}
Example #9
0
static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
	struct scatterlist *sg;
#endif

	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
		 mmc_hostname(host), mrq->cmd->opcode,
		 mrq->cmd->arg, mrq->cmd->flags);

	if (mrq->data) {
		pr_debug("%s:     blksz %d blocks %d flags %08x "
			"tsac %d ms nsac %d\n",
			mmc_hostname(host), mrq->data->blksz,
			mrq->data->blocks, mrq->data->flags,
			mrq->data->timeout_ns / 1000000,
			mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}

	WARN_ON(!host->claimed);

	led_trigger_event(host->led, LED_FULL);

	mrq->cmd->error = 0;
	mrq->cmd->mrq = mrq;
	if (mrq->data) {
		BUG_ON(mrq->data->blksz > host->max_blk_size);
		BUG_ON(mrq->data->blocks > host->max_blk_count);
		BUG_ON(mrq->data->blocks * mrq->data->blksz >
			host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
		sz = 0;
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

		mrq->cmd->data = mrq->data;
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}

#ifdef CONFIG_MMC_PERF_PROFILING
		host->perf.start = ktime_get();
#endif
	}
	host->ops->request(host, mrq);
}
Example #10
0
int jbd2_journal_stop(handle_t *handle)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	int err, wait_for_commit = 0;
	tid_t tid;
	pid_t pid;

	J_ASSERT(journal_current_handle() == handle);

	if (is_handle_aborted(handle))
		err = -EIO;
	else {
		J_ASSERT(atomic_read(&transaction->t_updates) > 0);
		err = 0;
	}

	if (--handle->h_ref > 0) {
		jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
			  handle->h_ref);
		return err;
	}

	jbd_debug(4, "Handle %p going down\n", handle);

	pid = current->pid;
	if (handle->h_sync && journal->j_last_sync_writer != pid) {
		u64 commit_time, trans_time;

		journal->j_last_sync_writer = pid;

		read_lock(&journal->j_state_lock);
		commit_time = journal->j_average_commit_time;
		read_unlock(&journal->j_state_lock);

		trans_time = ktime_to_ns(ktime_sub(ktime_get(),
						   transaction->t_start_time));

		commit_time = max_t(u64, commit_time,
				    1000*journal->j_min_batch_time);
		commit_time = min_t(u64, commit_time,
				    1000*journal->j_max_batch_time);

		if (trans_time < commit_time) {
			ktime_t expires = ktime_add_ns(ktime_get(),
						       commit_time);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
		}
	}

	if (handle->h_sync)
		transaction->t_synchronous_commit = 1;
	current->journal_info = NULL;
	atomic_sub(handle->h_buffer_credits,
		   &transaction->t_outstanding_credits);

	if (handle->h_sync ||
	    (atomic_read(&transaction->t_outstanding_credits) >
	     journal->j_max_transaction_buffers) ||
	    time_after_eq(jiffies, transaction->t_expires)) {

		jbd_debug(2, "transaction too old, requesting commit for "
					"handle %p\n", handle);
		
		jbd2_log_start_commit(journal, transaction->t_tid);

		if (handle->h_sync && !(current->flags & PF_MEMALLOC))
			wait_for_commit = 1;
	}

	tid = transaction->t_tid;
	if (atomic_dec_and_test(&transaction->t_updates)) {
		wake_up(&journal->j_wait_updates);
		if (journal->j_barrier_count)
			wake_up(&journal->j_wait_transaction_locked);
	}

	if (wait_for_commit)
		err = jbd2_log_wait_commit(journal, tid);

	lock_map_release(&handle->h_lockdep_map);

	jbd2_free_handle(handle);
	return err;
}
Example #11
0
/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	ktime_t expires_next, now, entry_time, delta;
	int i, retries = 0;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event.tv64 = KTIME_MAX;

	entry_time = now = ktime_get();
retry:
	expires_next.tv64 = KTIME_MAX;

	raw_spin_lock(&cpu_base->lock);
	/*
	 * We set expires_next to KTIME_MAX here with cpu_base->lock
	 * held to prevent that a timer is enqueued in our queue via
	 * the migration code. This does not affect enqueueing of
	 * timers which run their callback and need to be requeued on
	 * this CPU.
	 */
	cpu_base->expires_next.tv64 = KTIME_MAX;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		struct hrtimer_clock_base *base;
		struct timerqueue_node *node;
		ktime_t basenow;

		if (!(cpu_base->active_bases & (1 << i)))
			continue;

		base = cpu_base->clock_base + i;
		basenow = ktime_add(now, base->offset);

		while ((node = timerqueue_getnext(&base->active))) {
			struct hrtimer *timer;

			timer = container_of(node, struct hrtimer, node);

			/*
			 * The immediate goal for using the softexpires is
			 * minimizing wakeups, not running timers at the
			 * earliest interrupt after their soft expiration.
			 * This allows us to avoid using a Priority Search
			 * Tree, which can answer a stabbing query for
			 * overlapping intervals and instead use the simple
			 * BST we already have.
			 * We don't add extra wakeups by delaying timers that
			 * are right-of a not yet expired timer, because that
			 * timer will have to trigger a wakeup anyway.
			 */

			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
				ktime_t expires;

				expires = ktime_sub(hrtimer_get_expires(timer),
						    base->offset);
				if (expires.tv64 < expires_next.tv64)
					expires_next = expires;
				break;
			}

			__run_hrtimer(timer, &basenow);
		}
	}

	/*
	 * Store the new expiry value so the migration code can verify
	 * against it.
	 */
	cpu_base->expires_next = expires_next;
	raw_spin_unlock(&cpu_base->lock);

	/* Reprogramming necessary ? */
	if (expires_next.tv64 == KTIME_MAX ||
	    !tick_program_event(expires_next, 0)) {
		cpu_base->hang_detected = 0;
		return;
	}

	/*
	 * The next timer was already expired due to:
	 * - tracing
	 * - long lasting callbacks
	 * - being scheduled away when running in a VM
	 *
	 * We need to prevent that we loop forever in the hrtimer
	 * interrupt routine. We give it 3 attempts to avoid
	 * overreacting on some spurious event.
	 */
	now = ktime_get();
	cpu_base->nr_retries++;
	if (++retries < 3)
		goto retry;
	/*
	 * Give the system a chance to do something else than looping
	 * here. We stored the entry time, so we know exactly how long
	 * we spent here. We schedule the next event this amount of
	 * time away.
	 */
	cpu_base->nr_hangs++;
	cpu_base->hang_detected = 1;
	delta = ktime_sub(now, entry_time);
	if (delta.tv64 > cpu_base->max_hang_time.tv64)
		cpu_base->max_hang_time = delta;
	/*
	 * Limit it to a sensible value as we enforce a longer
	 * delay. Give the CPU at least 100ms to catch up.
	 */
	if (delta.tv64 > 100 * NSEC_PER_MSEC)
		expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
	else
		expires_next = ktime_add(now, delta);
	tick_program_event(expires_next, 1);
	printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
		    ktime_to_ns(delta));
}
Example #12
0
/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base;
	ktime_t expires_next, now;
	int nr_retries = 0;
	int i;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event.tv64 = KTIME_MAX;

 retry:
	/* 5 retries is enough to notice a hang */
	if (!(++nr_retries % 5))
		hrtimer_interrupt_hanging(dev, ktime_sub(ktime_get(), now));

	now = ktime_get();

	expires_next.tv64 = KTIME_MAX;

	base = cpu_base->clock_base;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		ktime_t basenow;
		struct rb_node *node;

		spin_lock(&cpu_base->lock);

		basenow = ktime_add(now, base->offset);

		while ((node = base->first)) {
			struct hrtimer *timer;

			timer = rb_entry(node, struct hrtimer, node);

			/*
			 * The immediate goal for using the softexpires is
			 * minimizing wakeups, not running timers at the
			 * earliest interrupt after their soft expiration.
			 * This allows us to avoid using a Priority Search
			 * Tree, which can answer a stabbing query for
			 * overlapping intervals and instead use the simple
			 * BST we already have.
			 * We don't add extra wakeups by delaying timers that
			 * are right-of a not yet expired timer, because that
			 * timer will have to trigger a wakeup anyway.
			 */

			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
				ktime_t expires;

				expires = ktime_sub(hrtimer_get_expires(timer),
						    base->offset);
				if (expires.tv64 < expires_next.tv64)
					expires_next = expires;
				break;
			}

			__run_hrtimer(timer);
		}
		spin_unlock(&cpu_base->lock);
		base++;
	}

	cpu_base->expires_next = expires_next;

	/* Reprogramming necessary ? */
	if (expires_next.tv64 != KTIME_MAX) {
		if (tick_program_event(expires_next, force_clock_reprogram))
			goto retry;
	}
}
Example #13
0
/*
 * Low level master read/write transaction.
 */
static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
			     struct i2c_msg *msg, int stop)
{
	struct omap_i2c_dev *dev = i2c_get_adapdata(adap);
	int r;
	u16 w;

	dev_dbg(dev->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n",
		msg->addr, msg->len, msg->flags, stop);

	if (msg->len == 0)
		return -EINVAL;

	omap_i2c_write_reg(dev, OMAP_I2C_SA_REG, msg->addr);

	/* REVISIT: Could the STB bit of I2C_CON be used with probing? */
	dev->buf = msg->buf;
	dev->buf_len = msg->len;

	omap_i2c_write_reg(dev, OMAP_I2C_CNT_REG, dev->buf_len);

	/* Clear the FIFO Buffers */
	w = omap_i2c_read_reg(dev, OMAP_I2C_BUF_REG);
	w |= OMAP_I2C_BUF_RXFIF_CLR | OMAP_I2C_BUF_TXFIF_CLR;
	omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, w);

	init_completion(&dev->cmd_complete);
	dev->cmd_err = 0;

	w = OMAP_I2C_CON_EN | OMAP_I2C_CON_MST | OMAP_I2C_CON_STT;

	/* High speed configuration */
	if (dev->speed > 400)
		w |= OMAP_I2C_CON_OPMODE_HS;

	if (msg->flags & I2C_M_TEN)
		w |= OMAP_I2C_CON_XA;
	if (!(msg->flags & I2C_M_RD))
		w |= OMAP_I2C_CON_TRX;

	if (!dev->b_hw && stop)
		w |= OMAP_I2C_CON_STP;

	omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w);

	/*
	 * Don't write stt and stp together on some hardware.
	 */
	if (dev->b_hw && stop) {
		ktime_t start = ktime_get();
		u16 con = omap_i2c_read_reg(dev, OMAP_I2C_CON_REG);
		while (con & OMAP_I2C_CON_STT) {
			con = omap_i2c_read_reg(dev, OMAP_I2C_CON_REG);

			/* Let the user know if i2c is in a bad state */
			if (ktime_us_delta(ktime_get(), start) > OMAP_I2C_TIMEOUT) {
				dev_err(dev->dev, "controller timed out "
				"waiting for start condition to finish\n");
				return -ETIMEDOUT;
			}
			cpu_relax();
		}

		w |= OMAP_I2C_CON_STP;
		w &= ~OMAP_I2C_CON_STT;
		omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w);
	}

	/*
	 * REVISIT: We should abort the transfer on signals, but the bus goes
	 * into arbitration and we're currently unable to recover from it.
	 */
	r = wait_for_completion_timeout(&dev->cmd_complete,
					usecs_to_jiffies(OMAP_I2C_TIMEOUT));
	dev->buf_len = 0;
	if (r < 0)
		return r;
	if (r == 0) {
		dev_err(dev->dev, "controller timed out\n");
		pr_info("addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n",
			msg->addr, msg->len, msg->flags, stop);
		omap_i2c_dump(dev);
		omap_i2c_init(dev);
		return -ETIMEDOUT;
	}

	if (likely(!dev->cmd_err))
		return 0;

	/* We have an error */
	if (dev->cmd_err & OMAP_I2C_STAT_AL) {
		omap_i2c_init(dev);
		return -EIO;
	}

	if (dev->cmd_err & OMAP_I2C_STAT_NACK) {
		if (msg->flags & I2C_M_IGNORE_NAK)
			return 0;
		if (stop) {
			w = omap_i2c_read_reg(dev, OMAP_I2C_CON_REG);
			w |= OMAP_I2C_CON_STP;
			omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w);
		}
		return -EREMOTEIO;
	}
	return -EIO;
}
Example #14
0
/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base;
	ktime_t expires_next, now;
	int i, raise = 0;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event.tv64 = KTIME_MAX;

 retry:
	now = ktime_get();

	expires_next.tv64 = KTIME_MAX;

	base = cpu_base->clock_base;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		ktime_t basenow;
		struct rb_node *node;

		spin_lock(&cpu_base->lock);

		basenow = ktime_add(now, base->offset);

		while ((node = base->first)) {
			struct hrtimer *timer;

			timer = rb_entry(node, struct hrtimer, node);

			if (basenow.tv64 < timer->expires.tv64) {
				ktime_t expires;

				expires = ktime_sub(timer->expires,
						    base->offset);
				if (expires.tv64 < expires_next.tv64)
					expires_next = expires;
				break;
			}

			/* Move softirq callbacks to the pending list */
			if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
				__remove_hrtimer(timer, base,
						 HRTIMER_STATE_PENDING, 0);
				list_add_tail(&timer->cb_entry,
					      &base->cpu_base->cb_pending);
				raise = 1;
				continue;
			}

			__run_hrtimer(timer);
		}
		spin_unlock(&cpu_base->lock);
		base++;
	}

	cpu_base->expires_next = expires_next;

	/* Reprogramming necessary ? */
	if (expires_next.tv64 != KTIME_MAX) {
		if (tick_program_event(expires_next, 0))
			goto retry;
	}

	/* Raise softirq ? */
	if (raise)
		raise_softirq(HRTIMER_SOFTIRQ);
}
Example #15
0
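/*
 * update_load_stats_state: compare the reported load against the
 * per-online-CPU-count thresholds and, once the condition has held for
 * the corresponding twts_threshold time, request an UP or DOWN hotplug
 * transition (an active input boost keeps the state at IDLE).
 */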
static void update_load_stats_state(void)
{
	unsigned int load;
	unsigned int nr_cpu_online;
	unsigned int max_cpus = cpq_max_cpus();
	unsigned int min_cpus = cpq_min_cpus();
	u64 current_time;
	u64 this_time = 0;
	int index;
		
	if (load_stats_state == DISABLED)
		return;

	current_time = ktime_to_ms(ktime_get());
	if (current_time <= start_delay) {
		load_stats_state = IDLE;
		return;
	}

	if (first_call) {
		first_call = false;
	} else {
		this_time = current_time - last_time;
	}
	total_time += this_time;
	load = report_load();
	nr_cpu_online = num_online_cpus();
	load_stats_state = IDLE;

	if (nr_cpu_online) {
		index = (nr_cpu_online - 1) * 2;
		if ((nr_cpu_online < CONFIG_NR_CPUS) && (load >= load_threshold[index])) {
			if (total_time >= twts_threshold[index]) {
				if (nr_cpu_online < max_cpus) {
					hotplug_info("UP load=%d total_time=%lld load_threshold[index]=%d twts_threshold[index]=%d nr_cpu_online=%d min_cpus=%d max_cpus=%d\n", load, total_time, load_threshold[index], twts_threshold[index], nr_cpu_online, min_cpus, max_cpus);
					load_stats_state = UP;
				}
			}
		} else if (load <= load_threshold[index+1]) {
			if (total_time >= twts_threshold[index+1]) {
				if ((nr_cpu_online > 1) && (nr_cpu_online > min_cpus)) {
					hotplug_info("DOWN load=%d total_time=%lld load_threshold[index+1]=%d twts_threshold[index+1]=%d nr_cpu_online=%d min_cpus=%d max_cpus=%d\n", load, total_time, load_threshold[index+1], twts_threshold[index+1], nr_cpu_online, min_cpus, max_cpus);
					load_stats_state = DOWN;
				}
			}
		} else {
			load_stats_state = IDLE;
			total_time = 0;
		}
	} else {
		total_time = 0;
	}

	if (input_boost_running && current_time > input_boost_end_time)
		input_boost_running = false;

	if (input_boost_running) {
		if (load_stats_state != UP) {
			load_stats_state = IDLE;
			hotplug_info("IDLE because of input boost\n");
		}
	}
	
	if (load_stats_state != IDLE)
		total_time = 0;

	last_time = ktime_to_ms(ktime_get());
}
Example #16
0
/* timer_arm: as in "arm the timer", not as in ARM the company */
static void timer_arm(struct arch_timer_cpu *timer, u64 ns)
{
	timer->armed = true;
	hrtimer_start(&timer->timer, ktime_add_ns(ktime_get(), ns),
		      HRTIMER_MODE_ABS);
}
/**
 *	suspend_enter - enter the desired system sleep state.
 *	@state:		state to enter
 *
 *	This function should be called after devices have been suspended.
 */
static int suspend_enter(suspend_state_t state)
{
	int error;

	if (suspend_ops->prepare) {
		error = suspend_ops->prepare();
		if (error)
			goto Platform_finish;
	}

	error = dpm_suspend_noirq(PMSG_SUSPEND);
	if (error) {
		printk(KERN_ERR "PM: Some devices failed to power down\n");
		goto Platform_finish;
	}

	if (suspend_ops->prepare_late) {
		error = suspend_ops->prepare_late();
		if (error)
			goto Platform_wake;
	}

	if (suspend_test(TEST_PLATFORM))
		goto Platform_wake;

	error = disable_nonboot_cpus();
	if (error || suspend_test(TEST_CPUS))
		goto Enable_cpus;

	arch_suspend_disable_irqs();
	BUG_ON(!irqs_disabled());

	error = sysdev_suspend(PMSG_SUSPEND);
	
	if (!error) {
		if (!suspend_test(TEST_CORE) && pm_check_wakeup_events()) {
			error = suspend_ops->enter(state);
			rtc_resume_fromtime = tegra_rtc_read_ms();
			events_check_enabled = false;
		}
		sysdev_resume();
	}

	arch_suspend_enable_irqs();
	BUG_ON(irqs_disabled());

 Enable_cpus:
	enable_nonboot_cpus();

	rtc_resume_endtime = tegra_rtc_read_ms();
	printk(KERN_ERR "<POWER>resume2[Enable CPU]take %llu ms\n", rtc_resume_endtime - rtc_resume_fromtime);
	rtc_resume_fromtime = rtc_resume_endtime;

 Platform_wake:
	if (suspend_ops->wake)
		suspend_ops->wake();
	rtc_resume_endtime = tegra_rtc_read_ms();
	printk(KERN_ERR "<POWER>resume3[CPU suspend_ops->wake]take %llu ms\n", rtc_resume_endtime - rtc_resume_fromtime);

	ktime_resume_fromtime = ktime_get();
	dpm_resume_noirq(PMSG_RESUME);
	ktime_resume_endtime = ktime_get();
	printk(KERN_ERR "<POWER>resume4[CPU resume_noirq]take %llu ns\n", ktime_to_ns(ktime_sub(ktime_resume_endtime, ktime_resume_fromtime)));
	ktime_resume_fromtime = ktime_resume_endtime;

 Platform_finish:
	if (suspend_ops->finish)
		suspend_ops->finish();
	ktime_resume_endtime = ktime_get();
	printk(KERN_ERR "<POWER>resume5[CPU suspend_ops->finish]take %llu ns\n", ktime_to_ns(ktime_sub(ktime_resume_endtime, ktime_resume_fromtime)));
	ktime_resume_fromtime = ktime_resume_endtime;

	return error;
}
Example #18
0
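/*
 * perf_move_data: copy 'total' bytes to the peer memory window in
 * 'buf_size' chunks, wrapping within 'win_size', then report throughput
 * from the ktime_get() delta around the transfer.
 */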
static int perf_move_data(struct pthr_ctx *pctx, char __iomem *dst, char *src,
			  u64 buf_size, u64 win_size, u64 total)
{
	int chunks, total_chunks, i;
	int copied_chunks = 0;
	u64 copied = 0, result;
	char __iomem *tmp = dst;
	u64 perf, diff_us;
	ktime_t kstart, kstop, kdiff;
	unsigned long last_sleep = jiffies;

	chunks = div64_u64(win_size, buf_size);
	total_chunks = div64_u64(total, buf_size);
	kstart = ktime_get();

	for (i = 0; i < total_chunks; i++) {
		result = perf_copy(pctx, tmp, src, buf_size);
		copied += result;
		copied_chunks++;
		if (copied_chunks == chunks) {
			tmp = dst;
			copied_chunks = 0;
		} else
			tmp += buf_size;

		/* Probably should schedule every 5s to prevent soft hang. */
		if (unlikely((jiffies - last_sleep) > 5 * HZ)) {
			last_sleep = jiffies;
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(1);
		}

		if (unlikely(kthread_should_stop()))
			break;
	}

	if (use_dma) {
		pr_debug("%s: All DMA descriptors submitted\n", current->comm);
		while (atomic_read(&pctx->dma_sync) != 0) {
			if (kthread_should_stop())
				break;
			msleep(20);
		}
	}

	kstop = ktime_get();
	kdiff = ktime_sub(kstop, kstart);
	diff_us = ktime_to_us(kdiff);

	pr_debug("%s: copied %llu bytes\n", current->comm, copied);

	pr_debug("%s: lasted %llu usecs\n", current->comm, diff_us);

	perf = div64_u64(copied, diff_us);

	pr_debug("%s: MBytes/s: %llu\n", current->comm, perf);

	pctx->copied = copied;
	pctx->diff_us = diff_us;

	return 0;
}
/* init a new touch */
static void new_touch(int x, int y) {
	tap_time_pre = ktime_to_ms(ktime_get());
	x_pre = x;
	y_pre = y;
	touch_nr++;
}
Example #20
0
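/*
 * aic3254_set_config: apply a TX, RX or miniDSP media configuration to
 * the codec; the miniDSP path powers down RX, writes the DSP table,
 * restores the original RX/TX paths and logs the elapsed time measured
 * with ktime_to_ms().
 */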
static int aic3254_set_config(int config_tbl, int idx, int en)
{
	int rc = 0, len = 0;
	int64_t t1, t2;
#if defined(CONFIG_ARCH_MSM7X30)
	struct ecodec_aic3254_state *drv = &codec_clk;
#endif

	mutex_lock(&lock);
	spi_aic3254_prevent_sleep();

#if defined(CONFIG_ARCH_MSM7X30)
	if (drv->enabled == 0) {
		/* enable MI2S RX master block */
		/* enable MI2S RX bit clock */
		clk_enable(drv->rx_mclk);
		clk_enable(drv->rx_sclk);
		printk("%s: enable CLK\n", __func__);
		drv->enabled = 1;
	}
#endif

	switch (config_tbl) {
	case AIC3254_CONFIG_TX:
		/* TX */
		pr_aud_info("%s: enable tx\n", __func__);
#if defined(CONFIG_SPI_AIC3254_SELF_POWER_DOWN)
		if (en && idx != UPLINK_OFF) {
#else
		if (en) {
#endif
			if (ctl_ops->tx_amp_enable)
				ctl_ops->tx_amp_enable(0);

			aic3254_tx_config(idx);
			aic3254_tx_mode = idx;

			if (ctl_ops->tx_amp_enable)
				ctl_ops->tx_amp_enable(1);
		} else {
			aic3254_tx_config(UPLINK_OFF);
			aic3254_tx_mode = UPLINK_OFF;
#if defined(CONFIG_SPI_AIC3254_SELF_POWER_DOWN)
			if (ctl_ops->tx_amp_enable)
				ctl_ops->tx_amp_enable(0);
			aic3254_powerdown();
#endif
		}
		break;
	case AIC3254_CONFIG_RX:
		/* RX */
		pr_aud_info("%s: enable rx\n", __func__);
#if defined(CONFIG_SPI_AIC3254_SELF_POWER_DOWN)
		if (en && idx != DOWNLINK_OFF) {
#else
		if (en) {
#endif
			if (ctl_ops->rx_amp_enable)
				ctl_ops->rx_amp_enable(0);

			aic3254_rx_config(idx);
			aic3254_rx_mode = idx;

			if (ctl_ops->rx_amp_enable)
				ctl_ops->rx_amp_enable(1);
		} else {
			aic3254_rx_config(DOWNLINK_OFF);
			aic3254_rx_mode = DOWNLINK_OFF;
#if defined(CONFIG_SPI_AIC3254_SELF_POWER_DOWN)
			if (ctl_ops->rx_amp_enable)
				ctl_ops->rx_amp_enable(0);
			aic3254_powerdown();
#endif
		}
		break;
	case AIC3254_CONFIG_MEDIA:
		if (aic3254_minidsp == NULL) {
			rc = -EFAULT;
			break;
		}

		len = (aic3254_minidsp[idx][0].reg << 8)
			| aic3254_minidsp[idx][0].data;

		pr_aud_info("%s: configure miniDSP index(%d) len = %d ++\n",
			__func__, idx, len);
		pr_aud_info("%s: rx mode %d, tx mode %d\n",
			__func__, aic3254_rx_mode, aic3254_tx_mode);

		t1 = ktime_to_ms(ktime_get());

		if (ctl_ops->rx_amp_enable)
			ctl_ops->rx_amp_enable(0);

		/* step 1: power off first */
		if (aic3254_rx_mode != DOWNLINK_OFF)
			aic3254_rx_config(DOWNLINK_OFF);

		/* step 2: config DSP */
		aic3254_config(&aic3254_minidsp[idx][1], len);

		/* step 3: switch back to original path */
		if (aic3254_rx_mode != DOWNLINK_OFF)
			aic3254_rx_config(aic3254_rx_mode);
		if (aic3254_tx_mode != UPLINK_OFF)
			aic3254_tx_config(aic3254_tx_mode);

		t2 = ktime_to_ms(ktime_get())-t1;

		if (ctl_ops->rx_amp_enable)
			ctl_ops->rx_amp_enable(1);

		pr_aud_info("%s: configure miniDSP index(%d) time: %lldms --\n",
			__func__, idx, (t2));
		break;
	}

	spi_aic3254_allow_sleep();
	mutex_unlock(&lock);
	return rc;
}

static int aic3254_open(struct inode *inode, struct file *pfile)
{
	int ret = 0;

	mutex_lock(&lock);
	if (aic3254_opend) {
		pr_aud_err("%s: busy\n", __func__);
		ret = -EBUSY;
	} else
		aic3254_opend = 1;
	mutex_unlock(&lock);

	return ret;
}

static int aic3254_release(struct inode *inode, struct file *pfile)
{
	mutex_lock(&lock);
	aic3254_opend = 0;
	mutex_unlock(&lock);

	return 0;
}
Example #21
0
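/*
 * nvhost_scale3d_set_throughput_hint: fold the throughput hint and the
 * idle estimate into exponential moving averages, pick the next entry
 * from the device's frequency list, and request a local devfreq
 * adjustment when the chosen target differs from the current clock.
 */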
void nvhost_scale3d_set_throughput_hint(int hint)
{
	struct podgov_info_rec *podgov = local_podgov;
	struct devfreq *df;

	long idle;
	long curr, target;
	int avg_idle, avg_hint, scale_score;
	unsigned int smooth;

	if (!podgov)
		return;
	df = podgov->power_manager;
	if (!df)
		return;

	mutex_lock(&podgov->power_manager->lock);

	podgov->block--;

	if (!podgov->enable ||
		!podgov->p_use_throughput_hint ||
		podgov->block > 0) {
		mutex_unlock(&podgov->power_manager->lock);
		return;
	}

	trace_podgov_hint(podgov->idle_estimate, hint);
	podgov->last_throughput_hint = ktime_get();

	curr = podgov->power_manager->previous_freq;
	idle = podgov->idle_estimate;
	smooth = podgov->p_smooth;

	/* compute averages usings exponential-moving-average */
	avg_idle = ((smooth*podgov->idle_avg + idle)/(smooth+1));
	podgov->idle_avg = avg_idle;
	avg_hint = ((smooth*podgov->hint_avg + hint)/(smooth+1));
	podgov->hint_avg = avg_hint;

	/* set the target using avg_hint and avg_idle */
	if (avg_hint < podgov->p_hint_lo_limit) {
		target = freqlist_up(podgov, curr, 1);
	} else if (avg_hint > podgov->p_hint_hi_limit) {
		target = freqlist_down(podgov, curr, 1);
	} else {
		scale_score = avg_idle + avg_hint;
		if (scale_score > podgov->p_scaledown_limit)
			target = freqlist_down(podgov, curr, 1);
		else if (scale_score < podgov->p_scaleup_limit)
			target = freqlist_up(podgov, curr, 1);
		else
			target = curr;
	}

	/* clamp and apply target */
	scaling_limit(df, &target);
	if (target != curr) {
		podgov->block = podgov->p_smooth;
		trace_podgov_do_scale(df->previous_freq, target);
		podgov->adjustment_frequency = target;
		podgov->adjustment_type = ADJUSTMENT_LOCAL;
		update_devfreq(df);
	}

	trace_podgov_print_target(idle, avg_idle, curr / 1000000, target, hint,
		avg_hint);

	mutex_unlock(&podgov->power_manager->lock);
}
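
/*
 * hisifb_vsync_register: one-time setup of the vsync control state for a
 * framebuffer device, including the waitqueue, locks, the vsync-ctrl
 * work item, an optional fake-vsync hrtimer, and either a vsync kthread
 * or the sysfs vsync attributes.
 */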
void hisifb_vsync_register(struct platform_device *pdev)
{
	struct hisi_fb_data_type *hisifd = NULL;
	struct hisifb_vsync *vsync_ctrl = NULL;
#if defined(CONFIG_HISI_FB_VSYNC_THREAD)
	char name[64] = {0};
#endif

	BUG_ON(pdev == NULL);
	hisifd = platform_get_drvdata(pdev);
	BUG_ON(hisifd == NULL);
	vsync_ctrl = &(hisifd->vsync_ctrl);
	BUG_ON(vsync_ctrl == NULL);

	if (vsync_ctrl->vsync_created)
		return;

	vsync_ctrl->hisifd = hisifd;
	vsync_ctrl->vsync_infinite = 0;
	vsync_ctrl->vsync_enabled = 0;
	vsync_ctrl->vsync_ctrl_offline_enabled = 0;
	vsync_ctrl->vsync_timestamp = ktime_get();
	init_waitqueue_head(&(vsync_ctrl->vsync_wait));
	spin_lock_init(&(vsync_ctrl->spin_lock));
	INIT_WORK(&vsync_ctrl->vsync_ctrl_work, hisifb_vsync_ctrl_workqueue_handler);

	mutex_init(&(vsync_ctrl->vsync_lock));

	atomic_set(&(vsync_ctrl->buffer_updated), 1);
#ifdef CONFIG_REPORT_VSYNC
	vsync_ctrl->vsync_report_fnc = mali_kbase_pm_report_vsync;
#else
	vsync_ctrl->vsync_report_fnc = NULL;
#endif

#ifdef CONFIG_FAKE_VSYNC_USED
	/* hrtimer for fake vsync timing */
	hisifd->fake_vsync_used = false;
	hrtimer_init(&hisifd->fake_vsync_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hisifd->fake_vsync_hrtimer.function = hisifb_fake_vsync;
	hrtimer_start(&hisifd->fake_vsync_hrtimer,
		ktime_set(0, NSEC_PER_SEC / 60), HRTIMER_MODE_REL);
#endif

#if defined(CONFIG_HISI_FB_VSYNC_THREAD)
	snprintf(name, sizeof(name), "hisifb%d_vsync", hisifd->index);
	vsync_ctrl->vsync_thread = kthread_run(wait_for_vsync_thread, hisifd, name);
	if (IS_ERR(vsync_ctrl->vsync_thread)) {
		vsync_ctrl->vsync_thread = NULL;
		HISI_FB_ERR("failed to run vsync thread!\n");
		return;
	}
#else
	if (hisifd->sysfs_attrs_append_fnc) {
		hisifd->sysfs_attrs_append_fnc(hisifd, &dev_attr_vsync_event.attr);
		hisifd->sysfs_attrs_append_fnc(hisifd, &dev_attr_vsync_timestamp.attr);
	}
#endif

	vsync_ctrl->vsync_created = 1;
}
Example #23
0
	flow_ctrl = IO_ADDRESS(TEGRA_FLOW_CTRL_BASE) + FLOW_CTRL_HALT_CPUx_EVENTS(dev->cpu);
	reg = FLOW_CTRL_WAITEVENT | FLOW_CTRL_JTAG_RESUME;
	/* 20100907  race condition fix [END] */

	smp_rmb();

	enter = ktime_get();
	if (!need_resched()) {
		__raw_writel(reg, flow_ctrl);
		reg = __raw_readl(flow_ctrl);
		dsb();
		__asm__ volatile ("wfi");
		__raw_writel(0, flow_ctrl);
		reg = __raw_readl(flow_ctrl);
	}
	exit = ktime_get();
	enter = ktime_sub(exit, enter);
	us = ktime_to_us(enter);
	local_irq_enable();
	return (int)us;
}

extern bool tegra_nvrm_lp2_allowed(void);
extern unsigned int tegra_suspend_lp2(unsigned int);

static int tegra_idle_enter_lp2(struct cpuidle_device *dev,
	struct cpuidle_state *state)
{
	ktime_t enter;
	s64 request, us, latency, idle_us;
	struct tick_sched *ts = tick_get_tick_sched(dev->cpu);
Example #24
0
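/*
 * wfd_stats_update: account buffer queue/dequeue events; encoder queue
 * events are timestamped with ktime_get() so that the client dequeue
 * path can update the running average encode latency.
 */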
int wfd_stats_update(struct wfd_stats *stats, enum wfd_stats_event event)
{
	int rc = 0;
	switch (event) {
	case WFD_STAT_EVENT_CLIENT_QUEUE:
		stats->v4l2_buf_count++;
		break;
	case WFD_STAT_EVENT_CLIENT_DEQUEUE: {
		struct wfd_stats_encode_sample *sample = NULL;

		stats->v4l2_buf_count--;

		if (!list_empty(&stats->enc_queue))
			sample = list_first_entry(&stats->enc_queue,
					struct wfd_stats_encode_sample,
					list);
		if (sample) {
			ktime_t kdiff = ktime_sub(ktime_get(),
						sample->encode_start_ts);
			uint32_t diff = ktime_to_ms(kdiff);

			stats->enc_cumulative_latency += diff;
			stats->enc_latency_samples++;
			stats->enc_avg_latency = stats->enc_cumulative_latency /
				stats->enc_latency_samples;

			list_del(&sample->list);
			kfree(sample);
			sample = NULL;
		}
		break;
	}
	case WFD_STAT_EVENT_MDP_QUEUE:
		stats->mdp_buf_count++;
		break;
	case WFD_STAT_EVENT_MDP_DEQUEUE:
		stats->mdp_buf_count--;
		stats->mdp_updates++;
		break;
	case WFD_STAT_EVENT_ENC_QUEUE: {
		struct wfd_stats_encode_sample *sample = NULL;

		stats->enc_buf_count++;
		stats->frames_encoded++;

		sample = kzalloc(sizeof(*sample), GFP_KERNEL);
		if (sample) {
			INIT_LIST_HEAD(&sample->list);
			sample->encode_start_ts = ktime_get();
			list_add_tail(&sample->list, &stats->enc_queue);
		} else {
			WFD_MSG_WARN("Unable to measure latency\n");
		}
		break;
	}
	case WFD_STAT_EVENT_ENC_DEQUEUE:
		stats->enc_buf_count--;
		break;
	case WFD_STAT_EVENT_VSG_QUEUE:
		stats->vsg_buf_count++;
		break;
	case WFD_STAT_EVENT_VSG_DEQUEUE:
		stats->vsg_buf_count--;
		break;
	default:
		rc = -ENOTSUPP;
	}

	return rc;
}
Example #25
0
static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
{
	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
	unsigned long rcu_delta_jiffies;
	ktime_t last_update, expires, now;
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
	u64 time_delta;
	int cpu;

	cpu = smp_processor_id();
	ts = &per_cpu(tick_cpu_sched, cpu);

	now = tick_nohz_start_idle(cpu, ts);

	/*
	 * If this cpu is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the cpu which runs the tick timer next. If we don't drop
	 * this here the jiffies might be stale and do_timer() never
	 * invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		return;

	if (need_resched())
		return;

	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
		static int ratelimit;

		if (ratelimit < 10) {
			printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
			       (unsigned int) local_softirq_pending());
			ratelimit++;
		}
		return;
	}

	ts->idle_calls++;
	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqbegin(&xtime_lock);
		last_update = last_jiffies_update;
		last_jiffies = jiffies;
		time_delta = timekeeping_max_deferment();
	} while (read_seqretry(&xtime_lock, seq));

	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) ||
	    arch_needs_cpu(cpu)) {
		next_jiffies = last_jiffies + 1;
		delta_jiffies = 1;
	} else {
		/* Get the next timer wheel timer */
		next_jiffies = get_next_timer_interrupt(last_jiffies);
		delta_jiffies = next_jiffies - last_jiffies;
		if (rcu_delta_jiffies < delta_jiffies) {
			next_jiffies = last_jiffies + rcu_delta_jiffies;
			delta_jiffies = rcu_delta_jiffies;
		}
	}
	/*
	 * Do not stop the tick, if we are only one off
	 * or if the cpu is required for rcu
	 */
	if (!ts->tick_stopped && delta_jiffies == 1)
		goto out;

	/* Schedule the tick, if we are at least one jiffie off */
	if ((long)delta_jiffies >= 1) {

		/*
		 * If this cpu is the one which updates jiffies, then
		 * give up the assignment and let it be taken by the
		 * cpu which runs the tick timer next, which might be
		 * this cpu as well. If we don't drop this here the
		 * jiffies might be stale and do_timer() never
		 * invoked. Keep track of the fact that it was the one
		 * which had the do_timer() duty last. If this cpu is
		 * the one which had the do_timer() duty last, we
		 * limit the sleep time to the timekeeping
		 * max_deferment value which we retrieved
		 * above. Otherwise we can sleep as long as we want.
		 */
		if (cpu == tick_do_timer_cpu) {
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
			ts->do_timer_last = 1;
		} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
			time_delta = KTIME_MAX;
			ts->do_timer_last = 0;
		} else if (!ts->do_timer_last) {
			time_delta = KTIME_MAX;
		}

		/*
		 * calculate the expiry time for the next timer wheel
		 * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
		 * that there is no timer pending or at least extremely
		 * far into the future (12 days for HZ=1000). In this
		 * case we set the expiry to the end of time.
		 */
		if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
			/*
			 * Calculate the time delta for the next timer event.
			 * If the time delta exceeds the maximum time delta
			 * permitted by the current clocksource then adjust
			 * the time delta accordingly to ensure the
			 * clocksource does not wrap.
			 */
			time_delta = min_t(u64, time_delta,
					   tick_period.tv64 * delta_jiffies);
		}

		if (time_delta < KTIME_MAX)
			expires = ktime_add_ns(last_update, time_delta);
		else
			expires.tv64 = KTIME_MAX;

		/* Skip reprogram of event if its not changed */
		if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
			goto out;

		/*
		 * nohz_stop_sched_tick can be called several times before
		 * the nohz_restart_sched_tick is called. This happens when
		 * interrupts arrive which do not cause a reschedule. In the
		 * first call we save the current tick time, so we can restart
		 * the scheduler tick in nohz_restart_sched_tick.
		 */
		if (!ts->tick_stopped) {
			select_nohz_load_balancer(1);

			ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
			ts->tick_stopped = 1;
			ts->idle_jiffies = last_jiffies;
		}

		ts->idle_sleeps++;

		/* Mark expires */
		ts->idle_expires = expires;

		/*
		 * If the expiration time == KTIME_MAX, then
		 * in this case we simply stop the tick timer.
		 */
		 if (unlikely(expires.tv64 == KTIME_MAX)) {
			if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
				hrtimer_cancel(&ts->sched_timer);
			goto out;
		}

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer, expires,
				      HRTIMER_MODE_ABS_PINNED);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				goto out;
		} else if (!tick_program_event(expires, 0))
				goto out;
		/*
		 * We are past the event already. So we crossed a
		 * jiffie boundary. Update jiffies and raise the
		 * softirq.
		 */
		tick_do_update_jiffies64(ktime_get());
	}
	raise_softirq_irqoff(TIMER_SOFTIRQ);
out:
	ts->next_jiffies = next_jiffies;
	ts->last_jiffies = last_jiffies;
}
Example #26
0
/* note: already called with rcu_read_lock (preempt_disabled) */
int br_handle_frame_finish(struct sk_buff *skb)
{
	const unsigned char *dest = eth_hdr(skb)->h_dest;
	struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
	struct net_bridge *br;
	struct net_bridge_fdb_entry *dst;
	struct sk_buff *skb2;
#if defined(CONFIG_MIPS_BRCM)
	struct iphdr *pip = NULL;
	__u8 igmpTypeOffset = 0;
#endif

	if (!p || p->state == BR_STATE_DISABLED)
		goto drop;

#if defined(CONFIG_MIPS_BRCM)
	if ( vlan_eth_hdr(skb)->h_vlan_proto == ETH_P_IP )
	{
		pip = ip_hdr(skb);
		igmpTypeOffset = (pip->ihl << 2);
	}
	else if ( vlan_eth_hdr(skb)->h_vlan_proto == ETH_P_8021Q )
	{
		if ( vlan_eth_hdr(skb)->h_vlan_encapsulated_proto == ETH_P_IP )
		{
			pip = (struct iphdr *)(skb_network_header(skb) + sizeof(struct vlan_hdr));
			igmpTypeOffset = (pip->ihl << 2) + sizeof(struct vlan_hdr);
		}
	}

	if ((pip) && (pip->protocol == IPPROTO_IGMP))
	{
#if defined(CONFIG_BCM_GPON_MODULE)
		struct igmphdr *ih = (struct igmphdr *)&skb->data[igmpTypeOffset];

		/* drop IGMP v1 report packets */
		if (ih->type == IGMP_HOST_MEMBERSHIP_REPORT)
		{
			goto drop;
		}

		/* drop IGMP v1 query packets */
		if ((ih->type == IGMP_HOST_MEMBERSHIP_QUERY) &&
		    (ih->code == 0))
		{
			goto drop;
		}

		/* drop IGMP leave packets for group 0.0.0.0 */
		if ((ih->type == IGMP_HOST_LEAVE_MESSAGE) &&
		    (0 == ih->group))
		{
			goto drop;
		}
#endif
		/* rate limit IGMP */
		br = p->br;
		if ( br->igmp_rate_limit )
		{
			ktime_t      curTime;
			u64          diffUs;
			unsigned int usPerPacket;
			unsigned int temp32;
			unsigned int burstLimit;

			/* add tokens to the bucket - compute in microseconds */
			curTime     = ktime_get();
			usPerPacket = (1000000 / br->igmp_rate_limit);
			diffUs      = ktime_to_us(ktime_sub(curTime, br->igmp_rate_last_packet));
			diffUs     += br->igmp_rate_rem_time;

			/* allow 25% burst */
			burstLimit = br->igmp_rate_limit >> 2;
			if ( 0 == burstLimit)
			{
				burstLimit = 1;
			}

			if ( diffUs > 1000000 )
			{
				br->igmp_rate_bucket   = burstLimit;
				br->igmp_rate_rem_time = 0;
			}
			else
			{
				temp32 = (unsigned int)diffUs / usPerPacket;
				br->igmp_rate_bucket += temp32;
				if (temp32)
				{
					br->igmp_rate_rem_time = diffUs - (temp32 * usPerPacket);
				}
			}
			if (br->igmp_rate_bucket > burstLimit)
			{
				br->igmp_rate_bucket   = burstLimit;
				br->igmp_rate_rem_time = 0;
			}

			/* if bucket is empty drop the packet */
			if (0 == br->igmp_rate_bucket)
			{
				goto drop;
			}
			br->igmp_rate_bucket--;
			br->igmp_rate_last_packet.tv64 = curTime.tv64;
		}
	}
Example #27
0
/* vsync_isr_handler: Called from isr context*/
static void vsync_isr_handler(void)
{
  vsync_cntrl.vsync_time = ktime_get();
}
/*------------------------------------------------------------------------------
Function name   : hs_buttonisr
Description     : interrupt handler

Return type     : irqreturn_t
------------------------------------------------------------------------------*/
irqreturn_t hs_buttonisr(int irq, void *dev_id)
{
	ktime_t r, temp;
	unsigned int val = 0;
#ifdef REG_DEBUG
	unsigned long val_anacr2, val_cmc, val_auxen;
#endif

	printk ("%s: HS_BUTTONISR (<<<) : status=%d \n", __func__, mic.hsbst);

	if (mic.hsbst == DISABLE || mic.headset_state != HEADSET_4_POLE)
		return IRQ_HANDLED;

	/* Read the ANACR12 register value to check if the interrupt being
	* serviced by the ISR is spurious */
	val = readl(io_p2v(REG_ANACR12));
	temp = ktime_get();
	r = ktime_sub(temp,mic.hsbtime);
	if((r.tv.sec > 0) || (r.tv.nsec > REF_TIME))
	{
		mic.hsbtime = temp;
	}
	else
	{
		printk ("%s: HS_BUTTONISR appeared frequently (r.tv.sec=%d, r.tv.nsec=%d) status=%d \n", __func__, r.tv.sec, r.tv.nsec, mic.hsbst);
		// return IRQ_HANDLED;
	}
	/* If the value read from the ANACR12 register is greater than the
	* threshold, schedule the workqueue */
	printk("%s: REG_ANACR12=%x\n", __func__,val);

#ifdef REG_DEBUG
	val_anacr2 = readl(io_p2v(REG_ANACR2));
	val_cmc = readl(io_p2v(REG_AUXMIC_CMC));
	val_auxen = readl(io_p2v(REG_AUXMIC_AUXEN));
	printk("%s: REG_ANACR2=%x, REG_AUXMIC_CMC=%x, REG_AUXMIC_AUXEN=%x\n", __func__, val_anacr2, val_cmc, val_auxen);
#endif

	if (val >= KEY_PRESS_THRESHOLD)
	{
		board_sysconfig(SYSCFG_AUXMIC, SYSCFG_ENABLE);

		key_resolved = 0;
		key_count[0] = key_count[1] = key_count[2] = 0;

		sync_use_mic = ENABLE;
		schedule_delayed_work(&(mic.input_work), KEY_BEFORE_PRESS_REF_TIME);
		printk("%s: set_button => PRESS\n", __func__);
		set_button(PRESS); 
	}
	else
	{
		pr_info("Headset Button press detected for a illegal interrupt\n");
		printk("%s: set_button => RELEASE\n", __func__);
		set_button(RELEASE);
	}

	printk ("%s: HS_BUTTONISR (>>>) : status=%d \n", __func__, mic.hsbst);

	return IRQ_HANDLED;
}
Example #29
0
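/*
 * mmc_queue_thread: fetch block requests from the queue and hand them to
 * issue_fn(), sleeping when the queue is empty; with
 * CONFIG_MMC_PERF_PROFILING the per-direction byte counts and issue
 * times are accumulated in host->perf.
 */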
static int mmc_queue_thread(void *d)
{
    struct mmc_queue *mq = d;
    struct request_queue *q = mq->queue;
    struct request *req;

#ifdef CONFIG_MMC_PERF_PROFILING
    ktime_t start, diff;
    struct mmc_host *host = mq->card->host;
    unsigned long bytes_xfer;
#endif


    current->flags |= PF_MEMALLOC;

    down(&mq->thread_sem);
    do {
        req = NULL;	/* Must be set to NULL at each iteration */

        spin_lock_irq(q->queue_lock);
        set_current_state(TASK_INTERRUPTIBLE);
        req = blk_fetch_request(q);
        mq->req = req;
        spin_unlock_irq(q->queue_lock);

        if (!req) {
            if (kthread_should_stop()) {
                set_current_state(TASK_RUNNING);
                break;
            }
            up(&mq->thread_sem);
            schedule();
            down(&mq->thread_sem);
            continue;
        }
        set_current_state(TASK_RUNNING);

#ifdef CONFIG_MMC_PERF_PROFILING
        bytes_xfer = blk_rq_bytes(req);
        if (rq_data_dir(req) == READ) {
            start = ktime_get();
            mq->issue_fn(mq, req);
            diff = ktime_sub(ktime_get(), start);
            host->perf.rbytes_mmcq += bytes_xfer;
            host->perf.rtime_mmcq =
                ktime_add(host->perf.rtime_mmcq, diff);
        } else {
            start = ktime_get();
            mq->issue_fn(mq, req);
            diff = ktime_sub(ktime_get(), start);
            host->perf.wbytes_mmcq += bytes_xfer;
            host->perf.wtime_mmcq =
                ktime_add(host->perf.wtime_mmcq, diff);
        }
#else
        mq->issue_fn(mq, req);
#endif
    } while (1);
    up(&mq->thread_sem);

    return 0;
}
Example #30
0
File: osl.c Project: Aydan/linux
/*
 * Support ACPI 3.0 AML Timer operand
 * Returns 64-bit free-running, monotonically increasing timer
 * with 100ns granularity
 */
u64 acpi_os_get_timer(void)
{
	u64 time_ns = ktime_to_ns(ktime_get());
	do_div(time_ns, 100);
	return time_ns;
}