Example #1
/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 */
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	u64 orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, hrtimer_get_expires(timer));

	if (delta.tv64 < 0)
		return 0;

	if (interval.tv64 < timer->base->resolution.tv64)
		interval.tv64 = timer->base->resolution.tv64;

	if (unlikely(delta.tv64 >= interval.tv64)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		hrtimer_add_expires_ns(timer, incr * orun);
		if (hrtimer_get_expires_tv64(timer) > now.tv64)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	hrtimer_add_expires(timer, interval);

	return orun;
}
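
A usage note (not part of the kernel source above): a periodic callback usually re-arms itself with hrtimer_forward_now(), the standard wrapper that feeds the timer base's current time into hrtimer_forward(). A minimal sketch, assuming a hypothetical MY_PERIOD_NS constant and callback name:

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/printk.h>

#define MY_PERIOD_NS	(100 * NSEC_PER_MSEC)	/* hypothetical 100 ms period */

static enum hrtimer_restart my_periodic_fn(struct hrtimer *timer)
{
	/*
	 * Push the expiry past "now" by whole periods. The return value
	 * is the number of intervals advanced, so anything above 1 means
	 * ticks were missed.
	 */
	u64 overruns = hrtimer_forward_now(timer, ns_to_ktime(MY_PERIOD_NS));

	if (overruns > 1)
		pr_debug("missed %llu periods\n", overruns - 1);

	return HRTIMER_RESTART;
}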
Example #2
static int __kvm_timer_fn(struct kvm_vcpu *vcpu, struct kvm_timer *ktimer)
{
	int restart_timer = 0;
	wait_queue_head_t *q = &vcpu->wq;

	/*
	 * There is a race window between reading and incrementing, but we do
	 * not care about potentially losing timer events in the !reinject
	 * case anyway. Note: KVM_REQ_PENDING_TIMER is implicitly checked
	 * in vcpu_enter_guest.
	 */
	if (ktimer->reinject || !atomic_read(&ktimer->pending)) {
		atomic_inc(&ktimer->pending);
		/* FIXME: this code should not know anything about vcpus */
		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
	}

	if (waitqueue_active(q))
		wake_up_interruptible(q);

	if (ktimer->t_ops->is_periodic(ktimer)) {
		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
		restart_timer = 1;
	}

	return restart_timer;
}
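
Example #6 below is essentially this helper merged with its hrtimer entry point. For reference, a minimal sketch (following the field names used above, not verbatim kernel code) of a wrapper that maps the int result onto the hrtimer return convention:

static enum hrtimer_restart kvm_timer_fn(struct hrtimer *data)
{
	/* Recover the enclosing kvm_timer from its embedded hrtimer. */
	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);

	return __kvm_timer_fn(ktimer->vcpu, ktimer) ? HRTIMER_RESTART
						    : HRTIMER_NORESTART;
}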
Example #3
/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 *
 * Can be safely called from the callback function of @timer. If
 * called from other contexts @timer must neither be enqueued nor
 * running the callback and the caller needs to take care of
 * serialization.
 *
 * Note: This only updates the timer expiry value and does not requeue
 * the timer.
 */
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	u64 orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, hrtimer_get_expires(timer));

	if (delta < 0)
		return 0;

	if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
		return 0;

	if (interval < hrtimer_resolution)
		interval = hrtimer_resolution;

	if (unlikely(delta >= interval)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		hrtimer_add_expires_ns(timer, incr * orun);
		if (hrtimer_get_expires_tv64(timer) > now)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	hrtimer_add_expires(timer, interval);

	return orun;
}
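
Examples #1 and #3 are the same function from different kernel generations. The visible difference is the ktime_t representation: older kernels exposed ktime_t as a union with a 64-bit .tv64 member and kept a per-clock-base resolution field, while newer kernels (around v4.10, when ktime_t became a plain s64) compare the values directly and use the global hrtimer_resolution:

/* Older ktime_t: a union, compared through its .tv64 member. */
if (delta.tv64 < 0)
	return 0;

/* Newer ktime_t: a plain s64, compared directly. */
if (delta < 0)
	return 0;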
Example #4
static enum hrtimer_restart dm_pcm_irq(struct hrtimer *handle)
{
	int fifo, diff, per_size, buf_size;
	static int last_ptr;

	if (substream_loc->runtime && substream_loc->runtime->status &&
	    snd_pcm_running(substream_loc)) {
		/* FIFO status counts free slots; re-enable once drained. */
		fifo = ops->get_fifo_status();
		if (fifo >= (hw_fifo_size - 1))
			ops->enable();

		buf_size = substream_loc->runtime->buffer_size;
		per_size = substream_loc->runtime->period_size;

		/* Top the FIFO up from the ring buffer, wrapping the pointer. */
		for (; fifo < hw_fifo_size; fifo++) {
			ops->write(local_buffer[pointer_sub++]);
			pointer_sub %= buf_size;
			if (ops->wait_fifo_ready)
				ops->wait_fifo_ready();
		}

		/* Frames advanced since the last period notification. */
		if (last_ptr >= pointer_sub)
			diff = buf_size + pointer_sub - last_ptr;
		else
			diff = pointer_sub - last_ptr;
		if (diff >= per_size) {
			snd_pcm_period_elapsed(substream_loc);
			last_ptr += per_size;
			if (last_ptr >= buf_size)
				last_ptr -= buf_size;
		}
	} else {
		last_ptr = 0;
	}

	/* Re-arm for the next tick; the timer keeps running. */
	hrtimer_add_expires_ns(&hrtimer, ns_for_interrupt);
	return HRTIMER_RESTART;
}
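
A setup sketch to go with this example (pcm_timer and pcm_timer_setup are hypothetical names; hrtimer_init()/hrtimer_start() are the standard hrtimer API): a free-running timer like the one above is armed once, then stays alive through the callback's hrtimer_add_expires_ns() plus HRTIMER_RESTART:

static struct hrtimer pcm_timer;	/* stands in for the file-scope "hrtimer" above */

static void pcm_timer_setup(u64 interval_ns)
{
	hrtimer_init(&pcm_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	pcm_timer.function = dm_pcm_irq;
	hrtimer_start(&pcm_timer, ns_to_ktime(interval_ns), HRTIMER_MODE_REL);
}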
Example #5
static enum hrtimer_restart lp5560_Callback(struct hrtimer *timer)
{
	int delay, count, polarity;
	int i = array_num;

	if (i >= total_array || i > timeout)
		return HRTIMER_NORESTART;

	count = curr_data[i].count;
	delay = curr_data[i].delay;
	polarity = curr_data[i].polarity;

	if (current_count >= count) {
		/* This entry is done; move on to the next pattern entry. */
		current_count = 0;
		if (i >= total_array)
			return HRTIMER_NORESTART;
		array_num++;
		/*
		 * delay is in 100 us units; cast before multiplying to
		 * avoid 32-bit overflow for large delays.
		 */
		hrtimer_add_expires_ns(&g_timeOutTimer, (u64)delay * 100000);
		return HRTIMER_RESTART;
	}

	/* Drive the GPIO for this entry: hold low, hold high, or toggle. */
	if (polarity == LOW_KEEP)
		lp5560_gpio_set(0);
	else if (polarity == HIGH_KEEP)
		lp5560_gpio_set(1);
	else
		lp5560_gpio_set(IsGPIOHigh ? 0 : 1);

	current_count++;
	hrtimer_add_expires_ns(&g_timeOutTimer, (u64)delay * 100000);
	return HRTIMER_RESTART;
}
Example #6
enum hrtimer_restart kvm_timer_fn(struct hrtimer *data)
{
	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
	struct kvm_vcpu *vcpu = ktimer->vcpu;
	wait_queue_head_t *q = &vcpu->wq;

	if (ktimer->reinject || !atomic_read(&ktimer->pending)) {
		atomic_inc(&ktimer->pending);
		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
	}

	if (waitqueue_active(q))
		wake_up_interruptible(q);

	if (ktimer->t_ops->is_periodic(ktimer)) {
		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
		return HRTIMER_RESTART;
	}

	return HRTIMER_NORESTART;
}