Example #1
/**
 * schedule_hrtimeout_range - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @delta:	slack in expires timeout (ktime_t)
 * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * The @delta argument gives the kernel the freedom to schedule the
 * actual wakeup to a time that is both power and performance friendly.
 * The kernel gives the normal best-effort behavior for "@expires+@delta",
 * but may decide to fire the timer earlier, though never earlier than @expires.
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least the @expires time is guaranteed to
 * pass before the routine returns.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired, otherwise -EINTR.
 */
int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
                                     const enum hrtimer_mode mode)
{
    struct hrtimer_sleeper t;

    /*
     * Optimize when a zero timeout value is given. It does not
     * matter whether this is an absolute or a relative time.
     */
    if (expires && !expires->tv64) {
        __set_current_state(TASK_RUNNING);
        return 0;
    }

    /*
 * A NULL parameter means "infinite"
     */
    if (!expires) {
        schedule();
        __set_current_state(TASK_RUNNING);
        return -EINTR;
    }

    hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, mode);
    hrtimer_set_expires_range_ns(&t.timer, *expires, delta);

    hrtimer_init_sleeper(&t, current);

    hrtimer_start_expires(&t.timer, mode);
    if (!hrtimer_active(&t.timer))
        t.task = NULL;

    if (likely(t.task))
        schedule();

    hrtimer_cancel(&t.timer);
    destroy_hrtimer_on_stack(&t.timer);

    __set_current_state(TASK_RUNNING);

    return !t.task ? 0 : -EINTR;
}
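
The docstring spells out the calling convention: the task state must be set before the call. A minimal, hypothetical caller (the surrounding function is illustrative; only set_current_state() and schedule_hrtimeout_range() are the real APIs) might look like:

/* Illustrative sketch: sleep up to timeout_ns with 100us of slack. */
static int wait_for_event_ns(u64 timeout_ns)
{
    ktime_t expires = ktime_add_ns(ktime_get(), timeout_ns);

    set_current_state(TASK_INTERRUPTIBLE);  /* must precede the call */
    return schedule_hrtimeout_range(&expires, 100 * NSEC_PER_USEC,
                                    HRTIMER_MODE_ABS);
}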
Example #2
/*
 * The pm8058_nc_ir handler detects insertion/removal of the headset (for both
 * NO and NC types), as well as button press/release (for the NC type).
 * The current state of the headset is maintained in the othc_ir_state variable.
 * Due to a hardware bug, false switch interrupts are seen during headset
 * insert. This is handled in the software by rejecting the switch interrupts
 * for a small period of time after the headset has been inserted.
 */
static irqreturn_t pm8058_nc_ir(int irq, void *dev_id)
{
	unsigned long flags;
	struct pm8058_othc *dd = dev_id;
	struct othc_hsed_config *hsed_config = dd->othc_pdata->hsed_config;

	spin_lock_irqsave(&dd->lock, flags);
	/* Enable the switch reject flag */
	dd->switch_reject = true;
	spin_unlock_irqrestore(&dd->lock, flags);

	/* Start the HR timer if one is not active */
	if (hrtimer_active(&dd->timer))
		hrtimer_cancel(&dd->timer);

	hrtimer_start(&dd->timer,
		ktime_set((dd->switch_debounce_ms / 1000),
		(dd->switch_debounce_ms % 1000) * 1000000), HRTIMER_MODE_REL);

	if (hsed_config->othc_headset == OTHC_HEADSET_NC) {
		othc_process_nc(dd);
	} else {
		/* disable irq, this gets enabled in the workqueue */
		disable_irq_nosync(dd->othc_irq_ir);
		/* Processing for NO type headset */
		if (dd->othc_ir_state == false) {
			/*  headset jack inserted */
			dd->othc_ir_state = true;
			pm8058_headset_switch(dd->othc_ipd,
					SW_HEADPHONE_INSERT, 1);
		} else {
			/* headset jack removed */
			dd->othc_ir_state = false;
			pm8058_headset_switch(dd->othc_ipd,
					SW_HEADPHONE_INSERT, 0);
		}
	}
	input_sync(dd->othc_ipd);

	return IRQ_HANDLED;
}
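
The debounce window opened above has to be closed again when the timer fires; the callback presumably just clears switch_reject, along the lines of this sketch (the function name is an assumption):

/* Sketch (assumed name): expiry of the debounce window re-enables
 * switch-interrupt processing by clearing the reject flag. */
static enum hrtimer_restart pm8058_othc_timer(struct hrtimer *timer)
{
	unsigned long flags;
	struct pm8058_othc *dd = container_of(timer, struct pm8058_othc, timer);

	spin_lock_irqsave(&dd->lock, flags);
	dd->switch_reject = false;
	spin_unlock_irqrestore(&dd->lock, flags);

	return HRTIMER_NORESTART;
}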
Example #3
/*
 * Get the time remaining on a POSIX.1b interval timer.  This function
 * is ALWAYS called with spin_lock_irq on the timer, thus it must not
 * mess with irq.
 *
 * We have a couple of messes to clean up here.  First there is the case
 * of a timer that has a requeue pending.  These timers should appear to
 * be in the timer list with an expiry as if we were to requeue them
 * now.
 *
 * The second issue is the SIGEV_NONE timer which may be active but is
 * not really ever put in the timer list (to save system resources).
 * This timer may be expired, and if so, we will do it here.  Otherwise
 * it is the same as a requeue pending timer with respect to what we should
 * report.
 */
static void
common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
{
	ktime_t now, remaining, iv;
	struct hrtimer *timer = &timr->it.real.timer;

	memset(cur_setting, 0, sizeof(struct itimerspec));

	iv = timr->it.real.interval;

	/* interval timer ? */
	if (iv.tv64)
		cur_setting->it_interval = ktime_to_timespec(iv);
	else if (!hrtimer_active(timer) &&
		 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
		return;

	now = timer->base->get_time();

	/*
	 * When a requeue is pending or this is a SIGEV_NONE
	 * timer move the expiry time forward by intervals, so
	 * expiry is > now.
	 */
	if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING ||
	    (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
		timr->it_overrun += (unsigned int) hrtimer_forward(timer, now,
								   iv);

	remaining = ktime_sub(hrtimer_get_expires(timer), now);
	/* Return 0 only, when the timer is expired and not pending */
	if (remaining.tv64 <= 0) {
		/*
		 * A single shot SIGEV_NONE timer must return 0, when
		 * it is expired !
		 */
		if ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
			cur_setting->it_value.tv_nsec = 1;
	} else
		cur_setting->it_value = ktime_to_timespec(remaining);
}
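
From user space this path backs timer_gettime(); an illustrative, self-contained caller (assuming a timer already created with timer_create()) would be:

/* Illustrative only: prints what common_timer_get() reports. */
#include <stdio.h>
#include <time.h>

static void print_remaining(timer_t tid)
{
	struct itimerspec cur;

	if (timer_gettime(tid, &cur) == 0)
		printf("value: %ld.%09ld s, interval: %ld.%09ld s\n",
		       (long)cur.it_value.tv_sec, cur.it_value.tv_nsec,
		       (long)cur.it_interval.tv_sec, cur.it_interval.tv_nsec);
}

Example #4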
static void k3_vibrator_enable(struct timed_output_dev *dev, int value)
{
	struct k3_vibrator_data *pdata = container_of(dev, struct k3_vibrator_data, dev);
	/* DTS2012050403313 begin: modify by KF74453 for resolved vibrator freq call adc function at 2010-06-13*/
#ifdef CONFIG_ANDROID_K3_VIBRATOR_AUTO_CONTROL
	static int set_count;
#endif
	/* DTS2012050403313 end: modify by KF74453 for resolved vibrator freq call adc function at 2010-06-13*/
	printk("k3_vibrator_enable,value=%d\n",value);
	if (value < 0) {
		pr_err("error:vibrator_enable value:%d is negative\n", value);
		return;
	}
	/* cancel previous timer */
	if (hrtimer_active(&pdata->timer))
		hrtimer_cancel(&pdata->timer);

	if (value > 0) {
	/* DTS2012050403313 begin: modify by KF74453 for resolved vibrator freq call adc function at 2010-06-13*/
#ifdef CONFIG_ANDROID_K3_VIBRATOR_AUTO_CONTROL
		if (time_after(jiffies, g_pre_set_time+60*HZ)) {
			g_pre_set_time = jiffies;
			set_count = 0;
		}
		if (set_count == 0)
			pdata->battery_power = k3_vibrator_get_iset_value(0);

		set_count = (set_count+1)%50;
#endif
	/* DTS2012050403313 end: modify by KF74453 for resolved vibrator freq call adc function at 2010-06-13*/

		if (value < TIMEOUT_MIN)
			value = TIMEOUT_MIN;
		k3_vibrator_onoff(1);
		hrtimer_start(&pdata->timer,
			ktime_set(value / 1000, (value % 1000) * 1000000),
			HRTIMER_MODE_REL);
	} else {
		k3_vibrator_onoff(0);
	}
}
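
Example #5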
static int k3_vibrator_remove(struct platform_device *pdev)
{
	struct k3_vibrator_data *pdata = platform_get_drvdata(pdev);

	if (pdata == NULL) {
		dev_err(&pdev->dev, "%s:pdata is NULL\n", __func__);
		return -ENODEV;
	}

	if (hrtimer_active(&pdata->timer))
		hrtimer_cancel(&pdata->timer);

	timed_output_dev_unregister(&pdata->dev);

	iounmap(pdata->k3_vibrator_base);
	kfree(pdata);
	pdata = NULL;
	destroy_workqueue(done_queue);
	platform_set_drvdata(pdev, NULL);
	return 0;
}
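
The hrtimer armed in k3_vibrator_enable() presumably switches the motor back off when the timeout fires; a sketch of such a callback (the name is an assumption; the real driver may defer the work to a workqueue, since the handler runs in interrupt context):

static enum hrtimer_restart k3_vibrator_timer_func(struct hrtimer *timer)
{
	k3_vibrator_onoff(0);
	return HRTIMER_NORESTART;
}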
Example #6
static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
	hrtimer_init_sleeper(t, current);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_start_expires(&t->timer, mode);
		if (!hrtimer_active(&t->timer))
			t->task = NULL;

		if (likely(t->task))
			schedule();

		hrtimer_cancel(&t->timer);
		mode = HRTIMER_MODE_ABS;

	} while (t->task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);

	return t->task == NULL;
}
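
do_nanosleep() is driven by a wrapper that prepares the sleeper on the stack. Simplified (restart-block bookkeeping and the copy-out of the remaining time are omitted), the calling side looks roughly like this sketch:

long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
		       const enum hrtimer_mode mode, const clockid_t clockid)
{
	struct hrtimer_sleeper t;
	int ret;

	hrtimer_init_on_stack(&t.timer, clockid, mode);
	hrtimer_set_expires(&t.timer, timespec_to_ktime(*rqtp));

	ret = do_nanosleep(&t, mode);
	destroy_hrtimer_on_stack(&t.timer);

	/* nonzero means the full sleep completed; otherwise a signal
	 * interrupted it and the real code arranges for a restart. */
	return ret ? 0 : -ERESTART_RESTARTBLOCK;
}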
Example #7
enum hrtimer_restart hrtimer_T_callback(struct hrtimer *timer) {
  struct task_struct *task;

  write_lock(&tasklist_lock);
  task = container_of(timer, struct task_struct, T_timer);

  // printk("[hrtimer_T_callback] PID = %d C = %lld ms\n",
  //        timer->start_pid,
  //        ktime_to_ms(task->real_C_time));

  timer_callback_hook(task);

  // cancel C timer
  if (hrtimer_active(&(task->C_timer))) {
    hrtimer_cancel(&(task->C_timer));
  }

  // reset C time
  task->real_C_time = ktime_set(0, 0);

  // if the current task running is the task in question, reset its C timer
  if (task == current) {
    hrtimer_init(&(current->C_timer), CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
    current->C_timer.function = hrtimer_C_callback;
    hrtimer_start(&(current->C_timer), ktime_sub(current->C_time, current->real_C_time),
                  HRTIMER_MODE_REL_PINNED);
  }

  // make runnable any task suspended by enforcement
  if (task->put_to_sleep) {
    // printk("[hrtimer_T_callback] wake_up pid: %d\n", task->pid);
    task->put_to_sleep = 0;
    wake_up_process(task);
  }

  hrtimer_forward_now(&(task->T_timer), task->T_time);
  write_unlock(&tasklist_lock);
  return HRTIMER_RESTART;
}
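
The C timer re-armed above enforces the budget side of the reservation; a plausible counterpart, inferred from the fields this code touches (C_timer, put_to_sleep) rather than taken from the actual project, would suspend the task once its budget C is used up within the period T:

// Sketch (inferred, not the project's actual code): budget-exhaustion
// handler for the per-task C timer.
enum hrtimer_restart hrtimer_C_callback(struct hrtimer *timer) {
  struct task_struct *task = container_of(timer, struct task_struct, C_timer);

  // budget for this period is consumed: suspend the task until T_timer fires
  task->put_to_sleep = 1;
  set_tsk_need_resched(task);

  return HRTIMER_NORESTART;
}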
Example #8
static int get_time_for_vibetonz(struct timed_output_dev *dev)
{
	int remaining;

	if (hrtimer_active(&timer)) {
		ktime_t r = hrtimer_get_remaining(&timer);

		remaining = r.tv.sec * 1000 + r.tv.nsec / 1000000;
	} else {
		remaining = 0;
	}

	if (vibrator_value == -1)
		remaining = -1;

	return remaining;
}
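
The r.tv.sec / r.tv.nsec access only compiles where ktime_t is the two-field union (32-bit builds); a layout-independent variant of the same logic, as a sketch:

static int get_time_for_vibetonz(struct timed_output_dev *dev)
{
	int remaining = 0;

	if (hrtimer_active(&timer))
		remaining = (int)ktime_to_ms(hrtimer_get_remaining(&timer));

	if (vibrator_value == -1)
		remaining = -1;

	return remaining;
}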
Example #9
/*
 * Get the time remaining on a POSIX.1b interval timer.  This function
 * is ALWAYS called with spin_lock_irq on the timer, thus it must not
 * mess with irq.
 *
 * We have a couple of messes to clean up here.  First there is the case
 * of a timer that has a requeue pending.  These timers should appear to
 * be in the timer list with an expiry as if we were to requeue them
 * now.
 *
 * The second issue is the SIGEV_NONE timer which may be active but is
 * not really ever put in the timer list (to save system resources).
 * This timer may be expired, and if so, we will do it here.  Otherwise
 * it is the same as a requeue pending timer with respect to what we should
 * report.
 */
static void
common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
{
	ktime_t remaining;
	struct hrtimer *timer = &timr->it.real.timer;

	memset(cur_setting, 0, sizeof(struct itimerspec));
	remaining = hrtimer_get_remaining(timer);

	/* Time left ? or timer pending */
	if (remaining.tv64 > 0 || hrtimer_active(timer))
		goto calci;
	/* interval timer ? */
	if (timr->it.real.interval.tv64 == 0)
		return;
	/*
	 * When a requeue is pending or this is a SIGEV_NONE timer
	 * move the expiry time forward by intervals, so expiry is >
	 * now.
	 */
	if (timr->it_requeue_pending & REQUEUE_PENDING ||
	    (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		timr->it_overrun +=
			hrtimer_forward(timer, timr->it.real.interval);
		remaining = hrtimer_get_remaining(timer);
	}
 calci:
	/* interval timer ? */
	if (timr->it.real.interval.tv64 != 0)
		cur_setting->it_interval =
			ktime_to_timespec(timr->it.real.interval);
	/* Return 0 only, when the timer is expired and not pending */
	if (remaining.tv64 <= 0)
		cur_setting->it_value.tv_nsec = 1;
	else
		cur_setting->it_value = ktime_to_timespec(remaining);
}
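Example #10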
/*
 * The pm8058_nc_ir handler detects insertion/removal of the headset (for the
 * NO type). The current state of the headset is maintained in the
 * othc_ir_state variable.
 * Due to a hardware bug, false switch interrupts are seen during headset
 * insert. This is handled in the software by rejecting the switch interrupts
 * for a small period of time after the headset has been inserted.
 */
static irqreturn_t pm8058_nc_ir(int irq, void *dev_id)
{
#if 0
	unsigned long flags;
	int rc;
	struct pm8058_othc *dd = dev_id;

	spin_lock_irqsave(&dd->lock, flags);
	/* Enable the switch reject flag */
	dd->switch_reject = true;
	spin_unlock_irqrestore(&dd->lock, flags);

	/* Start the HR timer if one is not active */
	if (hrtimer_active(&dd->timer))
		hrtimer_cancel(&dd->timer);

	hrtimer_start(&dd->timer,
		ktime_set((dd->switch_debounce_ms / 1000),
		(dd->switch_debounce_ms % 1000) * 1000000), HRTIMER_MODE_REL);

	/* disable irq, this gets enabled in the workqueue */
	disable_irq_nosync(dd->othc_irq_ir);

	/* Check the MIC_BIAS status, to check if inserted or removed */
	rc = pm8058_irq_get_rt_status(dd->pm_chip, dd->othc_irq_ir);
	if (rc < 0) {
		pr_err("Unable to read IR status\n");
		goto fail_ir;
	}

	dd->othc_ir_state = rc;
	schedule_delayed_work(&dd->detect_work,
				msecs_to_jiffies(dd->detection_delay_ms));

fail_ir:
#endif
	return IRQ_HANDLED;
}
Example #11
int __sched
schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta,
                               const enum hrtimer_mode mode, int clock)
{
    struct hrtimer_sleeper t;

    if (expires && !expires->tv64) {
        __set_current_state(TASK_RUNNING);
        return 0;
    }

    if (!expires) {
        schedule();
        __set_current_state(TASK_RUNNING);
        return -EINTR;
    }

    hrtimer_init_on_stack(&t.timer, clock, mode);
    hrtimer_set_expires_range_ns(&t.timer, *expires, delta);

    hrtimer_init_sleeper(&t, current);

    hrtimer_start_expires(&t.timer, mode);
    if (!hrtimer_active(&t.timer))
        t.task = NULL;

    if (likely(t.task))
        schedule();

    hrtimer_cancel(&t.timer);
    destroy_hrtimer_on_stack(&t.timer);

    __set_current_state(TASK_RUNNING);

    return !t.task ? 0 : -EINTR;
}
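
For reference, the mainline schedule_hrtimeout_range() from Example #1 is, in later kernels, a thin wrapper over this clock-parameterized variant:

int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
                                     const enum hrtimer_mode mode)
{
    return schedule_hrtimeout_range_clock(expires, delta, mode,
                                          CLOCK_MONOTONIC);
}

Example #12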
void cdc_ncm_unbind(struct if_usb_devdata *pipe_data,
		struct usb_interface *intf)
{
	struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)pipe_data->sedata;
	struct usb_driver *usbdrv = driver_of(intf);

	if (ctx == NULL)
		return;		/* no setup */

	netif_carrier_off(pipe_data->iod->ndev);
	atomic_set(&ctx->stop, 1);

	if (hrtimer_active(&ctx->tx_timer))
		hrtimer_cancel(&ctx->tx_timer);

	tasklet_kill(&ctx->bh);

	/* disconnect master --> disconnect slave */
	if (intf == ctx->control && ctx->data) {
		usb_set_intfdata(ctx->data, NULL);
		usb_driver_release_interface(usbdrv, ctx->data);
		ctx->data = NULL;
	} else if (intf == ctx->data && ctx->control) {
		usb_set_intfdata(ctx->control, NULL);
		usb_driver_release_interface(usbdrv, ctx->control);
		ctx->control = NULL;
	}
	pipe_data->usbdev = NULL;
	pipe_data->disconnected = 1;
	pipe_data->state = STATE_SUSPENDED;

	usb_set_intfdata(ctx->intf, NULL);
	cdc_ncm_free(ctx);

	pipe_data->sedata = NULL;
}
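
Example #13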
static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
{
	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
	unsigned long rcu_delta_jiffies;
	ktime_t last_update, expires, now;
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
	u64 time_delta;
	int cpu;

	cpu = smp_processor_id();
	ts = &per_cpu(tick_cpu_sched, cpu);

	now = tick_nohz_start_idle(cpu, ts);

	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		return;

	if (need_resched())
		return;

	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
		static int ratelimit;

		if (ratelimit < 10) {
			printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
			       (unsigned int) local_softirq_pending());
			ratelimit++;
		}
		return;
	}

	ts->idle_calls++;
	
	do {
		seq = read_seqbegin(&xtime_lock);
		last_update = last_jiffies_update;
		last_jiffies = jiffies;
		time_delta = timekeeping_max_deferment();
	} while (read_seqretry(&xtime_lock, seq));

	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) ||
	    arch_needs_cpu(cpu)) {
		next_jiffies = last_jiffies + 1;
		delta_jiffies = 1;
	} else {
		
		next_jiffies = get_next_timer_interrupt(last_jiffies);
		delta_jiffies = next_jiffies - last_jiffies;
		if (rcu_delta_jiffies < delta_jiffies) {
			next_jiffies = last_jiffies + rcu_delta_jiffies;
			delta_jiffies = rcu_delta_jiffies;
		}
	}
	if (!ts->tick_stopped && delta_jiffies == 1)
		goto out;

	
	if ((long)delta_jiffies >= 1) {

		if (cpu == tick_do_timer_cpu) {
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
			ts->do_timer_last = 1;
		} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
			time_delta = KTIME_MAX;
			ts->do_timer_last = 0;
		} else if (!ts->do_timer_last) {
			time_delta = KTIME_MAX;
		}

		if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
			time_delta = min_t(u64, time_delta,
					   tick_period.tv64 * delta_jiffies);
		}

		if (time_delta < KTIME_MAX)
			expires = ktime_add_ns(last_update, time_delta);
		else
			expires.tv64 = KTIME_MAX;

		
		if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
			goto out;

		if (!ts->tick_stopped) {
			select_nohz_load_balancer(1);

			ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
			ts->tick_stopped = 1;
			ts->idle_jiffies = last_jiffies;
		}

		ts->idle_sleeps++;

		
		ts->idle_expires = expires;

		if (unlikely(expires.tv64 == KTIME_MAX)) {
			if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
				hrtimer_cancel(&ts->sched_timer);
			goto out;
		}

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer, expires,
				      HRTIMER_MODE_ABS_PINNED);
			
			if (hrtimer_active(&ts->sched_timer))
				goto out;
		} else if (!tick_program_event(expires, 0))
				goto out;
		tick_do_update_jiffies64(ktime_get());
	}
	raise_softirq_irqoff(TIMER_SOFTIRQ);
out:
	ts->next_jiffies = next_jiffies;
	ts->last_jiffies = last_jiffies;
}
Example #14
/**
 * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle
 */
void tick_nohz_restart_sched_tick(void)
{
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	unsigned long ticks;
	ktime_t now, delta;

	if (!ts->tick_stopped)
		return;

	/* Update jiffies first */
	now = ktime_get();

	local_irq_disable();
	tick_do_update_jiffies64(now);
	cpu_clear(cpu, nohz_cpu_mask);

	/* Account the idle time */
	delta = ktime_sub(now, ts->idle_entrytime);
	ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);

	/*
	 * We stopped the tick in idle. update_process_times() would miss the
	 * time we slept, as it only does one-tick accounting. Enforce that
	 * this is accounted to idle !
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be one off. Do not randomly account a huge number of ticks!
	 */
	if (ticks && ticks < LONG_MAX) {
		add_preempt_count(HARDIRQ_OFFSET);
		account_system_time(current, HARDIRQ_OFFSET,
				    jiffies_to_cputime(ticks));
		sub_preempt_count(HARDIRQ_OFFSET);
	}

	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped  = 0;
	hrtimer_cancel(&ts->sched_timer);
	ts->sched_timer.expires = ts->idle_tick;

	while (1) {
		/* Forward the time to expire in the future */
		hrtimer_forward(&ts->sched_timer, now, tick_period);

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer,
				      ts->sched_timer.expires,
				      HRTIMER_MODE_ABS);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				break;
		} else {
			if (!tick_program_event(ts->sched_timer.expires, 0))
				break;
		}
		/* Update jiffies and reread time */
		tick_do_update_jiffies64(now);
		now = ktime_get();
	}
	local_irq_enable();
}
Example #15
/**
 * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick
 * Called either from the idle loop or from irq_exit() when an idle period was
 * just interrupted by an interrupt which did not cause a reschedule.
 */
void tick_nohz_stop_sched_tick(void)
{
	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
	struct tick_sched *ts;
	ktime_t last_update, expires, now, delta;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();
	ts = &per_cpu(tick_cpu_sched, cpu);

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		goto end;

	if (need_resched())
		goto end;

	cpu = smp_processor_id();
	if (unlikely(local_softirq_pending()))
		printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
		       local_softirq_pending());

	now = ktime_get();
	/*
	 * When called from irq_exit we need to account the idle sleep time
	 * correctly.
	 */
	if (ts->tick_stopped) {
		delta = ktime_sub(now, ts->idle_entrytime);
		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
	}

	ts->idle_entrytime = now;
	ts->idle_calls++;

	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqbegin(&xtime_lock);
		last_update = last_jiffies_update;
		last_jiffies = jiffies;
	} while (read_seqretry(&xtime_lock, seq));

	/* Get the next timer wheel timer */
	next_jiffies = get_next_timer_interrupt(last_jiffies);
	delta_jiffies = next_jiffies - last_jiffies;

	if (rcu_needs_cpu(cpu))
		delta_jiffies = 1;
	/*
	 * Do not stop the tick, if we are only one off
	 * or if the cpu is required for rcu
	 */
	if (!ts->tick_stopped && delta_jiffies == 1)
		goto out;

	/* Schedule the tick, if we are at least one jiffie off */
	if ((long)delta_jiffies >= 1) {

		if (delta_jiffies > 1)
			cpu_set(cpu, nohz_cpu_mask);
		/*
		 * nohz_stop_sched_tick can be called several times before
		 * the nohz_restart_sched_tick is called. This happens when
		 * interrupts arrive which do not cause a reschedule. In the
		 * first call we save the current tick time, so we can restart
		 * the scheduler tick in nohz_restart_sched_tick.
		 */
		if (!ts->tick_stopped) {
			ts->idle_tick = ts->sched_timer.expires;
			ts->tick_stopped = 1;
			ts->idle_jiffies = last_jiffies;
		}

		/*
		 * If this cpu is the one which updates jiffies, then
		 * give up the assignment and let it be taken by the
		 * cpu which runs the tick timer next, which might be
		 * this cpu as well. If we don't drop this here the
		 * jiffies might be stale and do_timer() never
		 * invoked.
		 */
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = -1;

		/*
		 * calculate the expiry time for the next timer wheel
		 * timer
		 */
		expires = ktime_add_ns(last_update, tick_period.tv64 *
				       delta_jiffies);
		ts->idle_expires = expires;
		ts->idle_sleeps++;

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer, expires,
				      HRTIMER_MODE_ABS);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				goto out;
		} else if (!tick_program_event(expires, 0))
				goto out;
		/*
		 * We are past the event already. So we crossed a
		 * jiffie boundary. Update jiffies and raise the
		 * softirq.
		 */
		tick_do_update_jiffies64(ktime_get());
		cpu_clear(cpu, nohz_cpu_mask);
	}
	raise_softirq_irqoff(TIMER_SOFTIRQ);
out:
	ts->next_jiffies = next_jiffies;
	ts->last_jiffies = last_jiffies;
end:
	local_irq_restore(flags);
}
Example #16
/*
 * row_dispatch_requests() - selects the next request to dispatch
 * @q:		requests queue
 * @force:		flag indicating if forced dispatch
 *
 * Return 0 if no requests were moved to the dispatch queue.
 *	  1 otherwise
 *
 */
static int row_dispatch_requests(struct request_queue *q, int force)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	int ret = 0, currq, ioprio_class_to_serve, start_idx, end_idx;
	int expire_index = -1;

	if (force && hrtimer_active(&rd->rd_idle_data.hr_timer)) {
		if (hrtimer_try_to_cancel(&rd->rd_idle_data.hr_timer) >= 0) {
			row_log(rd->dispatch_queue,
				"Canceled delayed work on %d - forced dispatch",
				rd->rd_idle_data.idling_queue_idx);
			rd->rd_idle_data.idling_queue_idx = ROWQ_MAX_PRIO;
		}
	}

	if (rd->pending_urgent_rq) {
		row_log(rd->dispatch_queue, "dispatching urgent request");
		row_dispatch_insert(rd, rd->pending_urgent_rq);
		ret = 1;
		goto done;
	}

	ioprio_class_to_serve = row_get_ioprio_class_to_serve(rd, force);
	row_log(rd->dispatch_queue, "Dispatching from %d priority class",
		ioprio_class_to_serve);

	if (ioprio_class_to_serve == IOPRIO_CLASS_RT) {
		expire_index = row_be_expire_adjust(rd);
		if (expire_index >= ROWQ_REG_PRIO_IDX)
			ioprio_class_to_serve = IOPRIO_CLASS_BE;
	}

	switch (ioprio_class_to_serve) {
	case IOPRIO_CLASS_NONE:
		rd->last_served_ioprio_class = IOPRIO_CLASS_NONE;
		goto done;
	case IOPRIO_CLASS_RT:
		if (expire_index >= 0) {
			start_idx = expire_index;
			end_idx = expire_index + 1;
			expire_index = -1;
		} else {
			start_idx = ROWQ_HIGH_PRIO_IDX;
			end_idx = ROWQ_REG_PRIO_IDX;
		}
		break;
	case IOPRIO_CLASS_BE:
		if (expire_index > 0) {
			start_idx = expire_index;
			end_idx = expire_index + 1;
			expire_index = -1;
		} else {
			start_idx = ROWQ_REG_PRIO_IDX;
			end_idx = ROWQ_LOW_PRIO_IDX;
		}
		break;
	case IOPRIO_CLASS_IDLE:
		start_idx = ROWQ_LOW_PRIO_IDX;
		end_idx = ROWQ_MAX_PRIO;
		break;
	default:
		pr_err("%s(): Invalid I/O priority class", __func__);
		goto done;
	}

	currq = row_get_next_queue(q, rd, start_idx, end_idx);

	/* Dispatch */
	if (currq >= 0) {
		row_dispatch_insert(rd,
			rq_entry_fifo(rd->row_queues[currq].fifo.next));
		ret = 1;
	}
done:
	return ret;
}
Example #17
static void msm_otg_sm_work(struct work_struct *w)
{
	struct msm_otg *dev = container_of(w, struct msm_otg, sm_work);
	int ret;
	int work = 0;
	enum usb_otg_state state;

	if (atomic_read(&dev->in_lpm))
		msm_otg_set_suspend(&dev->otg, 0);

	spin_lock_irq(&dev->lock);
	state = dev->otg.state;
	spin_unlock_irq(&dev->lock);

	pr_debug("state: %s\n", state_string(state));

	switch (state) {
	case OTG_STATE_UNDEFINED:
		if (!dev->otg.host || !is_host())
			set_bit(ID, &dev->inputs);

		if (dev->otg.gadget && is_b_sess_vld())
			set_bit(B_SESS_VLD, &dev->inputs);

		spin_lock_irq(&dev->lock);
		if (test_bit(ID, &dev->inputs)) {
			dev->otg.state = OTG_STATE_B_IDLE;
		} else {
			set_bit(A_BUS_REQ, &dev->inputs);
			dev->otg.state = OTG_STATE_A_IDLE;
		}
		spin_unlock_irq(&dev->lock);

		work = 1;
		break;
	case OTG_STATE_B_IDLE:
		dev->otg.default_a = 0;
		if (!test_bit(ID, &dev->inputs)) {
			pr_debug("!id\n");
			clear_bit(B_BUS_REQ, &dev->inputs);
			otg_reset(dev, 0);

			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_IDLE;
			spin_unlock_irq(&dev->lock);
			work = 1;
		} else if (test_bit(B_SESS_VLD, &dev->inputs)) {
			pr_debug("b_sess_vld\n");
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_PERIPHERAL;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_peripheral(&dev->otg, 1);
		} else if (test_bit(B_BUS_REQ, &dev->inputs)) {
			pr_debug("b_sess_end && b_bus_req\n");
			ret = msm_otg_start_srp(&dev->otg);
			if (ret < 0) {
				/* notify user space */
				clear_bit(B_BUS_REQ, &dev->inputs);
				work = 1;
				break;
			}
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_SRP_INIT;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_timer(dev, TB_SRP_FAIL, B_SRP_FAIL);
			break;
		} else {
			pr_debug("entering into lpm\n");
			msm_otg_suspend(dev);

		}
		break;
	case OTG_STATE_B_SRP_INIT:
		if (!test_bit(ID, &dev->inputs) ||
				test_bit(B_SESS_VLD, &dev->inputs)) {
			pr_debug("!id || b_sess_vld\n");
			msm_otg_del_timer(dev);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_IDLE;
			spin_unlock_irq(&dev->lock);
			work = 1;
		} else if (test_bit(B_SRP_FAIL, &dev->tmouts)) {
			pr_debug("b_srp_fail\n");
			/* notify user space */
			clear_bit(B_BUS_REQ, &dev->inputs);
			clear_bit(B_SRP_FAIL, &dev->tmouts);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_IDLE;
			spin_unlock_irq(&dev->lock);
			dev->b_last_se0_sess = jiffies;
			work = 1;
		}
		break;
	case OTG_STATE_B_PERIPHERAL:
		if (!test_bit(ID, &dev->inputs) ||
				!test_bit(B_SESS_VLD, &dev->inputs)) {
			pr_debug("!id || !b_sess_vld\n");
			clear_bit(B_BUS_REQ, &dev->inputs);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_IDLE;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_peripheral(&dev->otg, 0);
			dev->b_last_se0_sess = jiffies;

			/* Workaround: Reset phy after session */
			otg_reset(dev, 1);

			/* come back later to put hardware in
			 * lpm. This removes addition checks in
			 * suspend routine for missing BSV
			 */
			work = 1;
		} else if (test_bit(B_BUS_REQ, &dev->inputs) &&
				dev->otg.gadget->b_hnp_enable &&
				test_bit(A_BUS_SUSPEND, &dev->inputs)) {
			pr_debug("b_bus_req && b_hnp_en && a_bus_suspend\n");
			msm_otg_start_timer(dev, TB_ASE0_BRST, B_ASE0_BRST);
			msm_otg_start_peripheral(&dev->otg, 0);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_WAIT_ACON;
			spin_unlock_irq(&dev->lock);
			/* start HCD even before A-device enable
			 * pull-up to meet HNP timings.
			 */
			dev->otg.host->is_b_host = 1;
			msm_otg_start_host(&dev->otg, REQUEST_START);

		}
		break;
	case OTG_STATE_B_WAIT_ACON:
		if (!test_bit(ID, &dev->inputs) ||
				!test_bit(B_SESS_VLD, &dev->inputs)) {
			pr_debug("!id || !b_sess_vld\n");
			msm_otg_del_timer(dev);
			/* A-device is physically disconnected during
			 * HNP. Remove HCD.
			 */
			msm_otg_start_host(&dev->otg, REQUEST_STOP);
			dev->otg.host->is_b_host = 0;

			clear_bit(B_BUS_REQ, &dev->inputs);
			clear_bit(A_BUS_SUSPEND, &dev->inputs);
			dev->b_last_se0_sess = jiffies;
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_IDLE;
			spin_unlock_irq(&dev->lock);

			/* Workaround: Reset phy after session */
			otg_reset(dev, 1);
			work = 1;
		} else if (test_bit(A_CONN, &dev->inputs)) {
			pr_debug("a_conn\n");
			clear_bit(A_BUS_SUSPEND, &dev->inputs);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_HOST;
			spin_unlock_irq(&dev->lock);
		} else if (test_bit(B_ASE0_BRST, &dev->tmouts)) {
			/* TODO: A-device may send reset after
			 * enabling HNP; a_bus_resume case is
			 * not handled for now.
			 */
			pr_debug("b_ase0_brst_tmout\n");
			msm_otg_start_host(&dev->otg, REQUEST_STOP);
			dev->otg.host->is_b_host = 0;
			clear_bit(B_ASE0_BRST, &dev->tmouts);
			clear_bit(A_BUS_SUSPEND, &dev->inputs);
			clear_bit(B_BUS_REQ, &dev->inputs);

			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_PERIPHERAL;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_host(&dev->otg, REQUEST_STOP);
		}
		break;
	case OTG_STATE_B_HOST:
		/* B_BUS_REQ is not exposed to user space. So
		 * it must be A_CONN for now.
		 */
		if (!test_bit(B_BUS_REQ, &dev->inputs) ||
				!test_bit(A_CONN, &dev->inputs)) {
			pr_debug("!b_bus_req || !a_conn\n");
			clear_bit(A_CONN, &dev->inputs);
			clear_bit(B_BUS_REQ, &dev->inputs);

			msm_otg_start_host(&dev->otg, 0);
			dev->otg.host->is_b_host = 0;

			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_IDLE;
			spin_unlock_irq(&dev->lock);
			/* Workaround: Reset phy after session */
			otg_reset(dev, 1);
			work = 1;
		}
		break;
	case OTG_STATE_A_IDLE:
		dev->otg.default_a = 1;
		if (test_bit(ID, &dev->inputs)) {
			pr_debug("id\n");
			dev->otg.default_a = 0;
			otg_reset(dev, 0);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_IDLE;
			spin_unlock_irq(&dev->lock);
			work = 1;
		} else if (!test_bit(A_BUS_DROP, &dev->inputs) &&
				(test_bit(A_SRP_DET, &dev->inputs) ||
				 test_bit(A_BUS_REQ, &dev->inputs))) {
			pr_debug("!a_bus_drop && (a_srp_det || a_bus_req)\n");

			clear_bit(A_SRP_DET, &dev->inputs);
			/* Disable SRP detection */
			writel((readl(USB_OTGSC) & ~OTGSC_INTR_STS_MASK) &
					~OTGSC_DPIE, USB_OTGSC);

			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_VRISE;
			spin_unlock_irq(&dev->lock);
			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 1);
			msm_otg_start_timer(dev, TA_WAIT_VRISE, A_WAIT_VRISE);
			/* no need to schedule work now */
		} else {
			pr_debug("No session requested\n");

			/* A-device is not providing power on VBUS.
			 * Enable SRP detection.
			 */
			writel((readl(USB_OTGSC) & ~OTGSC_INTR_STS_MASK) |
					OTGSC_DPIE, USB_OTGSC);
			msm_otg_suspend(dev);

		}
		break;
	case OTG_STATE_A_WAIT_VRISE:
		if (test_bit(ID, &dev->inputs) ||
				test_bit(A_BUS_DROP, &dev->inputs) ||
				test_bit(A_WAIT_VRISE, &dev->tmouts)) {
			pr_debug("id || a_bus_drop || a_wait_vrise_tmout\n");
			clear_bit(A_BUS_REQ, &dev->inputs);
			msm_otg_del_timer(dev);
			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_VFALL;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_timer(dev, TA_WAIT_VFALL, A_WAIT_VFALL);
		} else if (test_bit(A_VBUS_VLD, &dev->inputs)) {
			pr_debug("a_vbus_vld\n");
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_BCON;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_timer(dev, TA_WAIT_BCON, A_WAIT_BCON);
			/* Start HCD to detect peripherals. */
			msm_otg_start_host(&dev->otg, REQUEST_START);
		}
		break;
	case OTG_STATE_A_WAIT_BCON:
		if (test_bit(ID, &dev->inputs) ||
				test_bit(A_BUS_DROP, &dev->inputs) ||
				test_bit(A_WAIT_BCON, &dev->tmouts)) {
			pr_debug("id || a_bus_drop || a_wait_bcon_tmout\n");
			msm_otg_del_timer(dev);
			clear_bit(A_BUS_REQ, &dev->inputs);
			msm_otg_start_host(&dev->otg, REQUEST_STOP);
			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_VFALL;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_timer(dev, TA_WAIT_VFALL, A_WAIT_VFALL);
		} else if (test_bit(B_CONN, &dev->inputs)) {
			pr_debug("b_conn\n");
			msm_otg_del_timer(dev);
			/* HCD is added already. just move to
			 * A_HOST state.
			 */
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_HOST;
			spin_unlock_irq(&dev->lock);
		} else if (!test_bit(A_VBUS_VLD, &dev->inputs)) {
			pr_debug("!a_vbus_vld\n");
			msm_otg_del_timer(dev);
			msm_otg_start_host(&dev->otg, REQUEST_STOP);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_VBUS_ERR;
			spin_unlock_irq(&dev->lock);
		}
		break;
	case OTG_STATE_A_HOST:
		if (test_bit(ID, &dev->inputs) ||
				test_bit(A_BUS_DROP, &dev->inputs)) {
			pr_debug("id || a_bus_drop\n");
			clear_bit(B_CONN, &dev->inputs);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_VFALL;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_host(&dev->otg, REQUEST_STOP);
			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
			msm_otg_start_timer(dev, TA_WAIT_VFALL, A_WAIT_VFALL);
		} else if (!test_bit(A_VBUS_VLD, &dev->inputs)) {
			pr_debug("!a_vbus_vld\n");
			clear_bit(B_CONN, &dev->inputs);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_VBUS_ERR;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_host(&dev->otg, REQUEST_STOP);
			/* no work */
		} else if (!test_bit(A_BUS_REQ, &dev->inputs)) {
			/* a_bus_req is de-asserted when root hub is
			 * suspended or HNP is in progress.
			 */
			pr_debug("!a_bus_req\n");
			
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_SUSPEND;
			spin_unlock_irq(&dev->lock);
			if (dev->otg.host->b_hnp_enable) {
				msm_otg_start_timer(dev, TA_AIDL_BDIS,
						A_AIDL_BDIS);
			} else {
				/* No HNP. Root hub suspended */
				msm_otg_suspend(dev);
			}
		} else if (!test_bit(B_CONN, &dev->inputs)) {
			pr_debug("!b_conn\n");
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_BCON;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_timer(dev, TA_WAIT_BCON, A_WAIT_BCON);
		}
		break;
	case OTG_STATE_A_SUSPEND:
		if (test_bit(ID, &dev->inputs) ||
				test_bit(A_BUS_DROP, &dev->inputs) ||
				test_bit(A_AIDL_BDIS, &dev->tmouts)) {
			pr_debug("id || a_bus_drop || a_aidl_bdis_tmout\n");
			msm_otg_del_timer(dev);
			clear_bit(B_CONN, &dev->inputs);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_VFALL;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_host(&dev->otg, REQUEST_STOP);
			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
			msm_otg_start_timer(dev, TA_WAIT_VFALL, A_WAIT_VFALL);
		} else if (!test_bit(A_VBUS_VLD, &dev->inputs)) {
			pr_debug("!a_vbus_vld\n");
			msm_otg_del_timer(dev);
			clear_bit(B_CONN, &dev->inputs);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_VBUS_ERR;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_host(&dev->otg, REQUEST_STOP);
		} else if (!test_bit(B_CONN, &dev->inputs) &&
				dev->otg.host->b_hnp_enable) {
			pr_debug("!b_conn && b_hnp_enable");
			/* Clear AIDL_BDIS timer */
			msm_otg_del_timer(dev);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_PERIPHERAL;
			spin_unlock_irq(&dev->lock);

			msm_otg_start_host(&dev->otg, REQUEST_HNP_SUSPEND);

			/* We may come here even when B-dev is physically
			 * disconnected during HNP. We go back to host
			 * role if bus is idle for BIDL_ADIS time.
			 */
			dev->otg.gadget->is_a_peripheral = 1;
			msm_otg_start_peripheral(&dev->otg, 1);
		} else if (!test_bit(B_CONN, &dev->inputs) &&
				!dev->otg.host->b_hnp_enable) {
			pr_debug("!b_conn && !b_hnp_enable");
			/* bus request is dropped during suspend.
			 * acquire again for next device.
			 */
			set_bit(A_BUS_REQ, &dev->inputs);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_BCON;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_timer(dev, TA_WAIT_BCON, A_WAIT_BCON);
		}
		break;
	case OTG_STATE_A_PERIPHERAL:
		if (test_bit(ID, &dev->inputs) ||
				test_bit(A_BUS_DROP, &dev->inputs)) {
			pr_debug("id || a_bus_drop\n");
			/* Clear BIDL_ADIS timer */
			msm_otg_del_timer(dev);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_VFALL;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_peripheral(&dev->otg, 0);
			dev->otg.gadget->is_a_peripheral = 0;
			/* HCD was suspended before. Stop it now */
			msm_otg_start_host(&dev->otg, REQUEST_STOP);

			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
			msm_otg_start_timer(dev, TA_WAIT_VFALL, A_WAIT_VFALL);
		} else if (!test_bit(A_VBUS_VLD, &dev->inputs)) {
			pr_debug("!a_vbus_vld\n");
			/* Clear BIDL_ADIS timer */
			msm_otg_del_timer(dev);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_VBUS_ERR;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_peripheral(&dev->otg, 0);
			dev->otg.gadget->is_a_peripheral = 0;
			/* HCD was suspended before. Stop it now */
			msm_otg_start_host(&dev->otg, REQUEST_STOP);
		} else if (test_bit(A_BIDL_ADIS, &dev->tmouts)) {
			pr_debug("a_bidl_adis_tmout\n");
			msm_otg_start_peripheral(&dev->otg, 0);
			dev->otg.gadget->is_a_peripheral = 0;

			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_BCON;
			spin_unlock_irq(&dev->lock);
			set_bit(A_BUS_REQ, &dev->inputs);
			msm_otg_start_host(&dev->otg, REQUEST_HNP_RESUME);
			msm_otg_start_timer(dev, TA_WAIT_BCON, A_WAIT_BCON);
		}
		break;
	case OTG_STATE_A_WAIT_VFALL:
		if (test_bit(A_WAIT_VFALL, &dev->tmouts)) {
			clear_bit(A_VBUS_VLD, &dev->inputs);
			/* Reset both phy and link */
			otg_reset(dev, 1);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_IDLE;
			spin_unlock_irq(&dev->lock);
			work = 1;
		}
		break;
	case OTG_STATE_A_VBUS_ERR:
		if (test_bit(ID, &dev->inputs) ||
				test_bit(A_BUS_DROP, &dev->inputs) ||
				test_bit(A_CLR_ERR, &dev->inputs)) {
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_VFALL;
			spin_unlock_irq(&dev->lock);
			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
			msm_otg_start_timer(dev, TA_WAIT_VFALL, A_WAIT_VFALL);
		}
		break;
	default:
		pr_err("invalid OTG state\n");
	}

	if (work)
		queue_work(dev->wq, &dev->sm_work);

	/* IRQ/sysfs may queue work. Check work_pending; otherwise we
	 * might end up releasing the wakelock after it is acquired
	 * in IRQ/sysfs.
	 */
	if (!work_pending(&dev->sm_work) && !hrtimer_active(&dev->timer))
		wake_unlock(&dev->wlock);
}
Example #18
int aat1271_flashlight_control(int mode)
{
	int ret = 0;
	uint32_t flash_ns = ktime_to_ns(ktime_get());

#if 0 /* disable flash_adj_value check now */
	if (this_fl_str->flash_adj_value == 2) {
		printk(KERN_WARNING "%s: force disable function!\n", __func__);
		return -EIO;
	}
#endif
	if (this_fl_str->mode_status == mode) {
		FLT_INFO_LOG("%s: mode is same: %d\n",
							FLASHLIGHT_NAME, mode);

		if (!hrtimer_active(&this_fl_str->timer) &&
			this_fl_str->mode_status == FL_MODE_OFF) {
			FLT_INFO_LOG("flashlight hasn't been enable or" \
				" has already reset to 0 due to timeout\n");
			return ret;
		} else
			return -EINVAL;
	}

	spin_lock_irqsave(&this_fl_str->spin_lock,
						this_fl_str->spinlock_flags);
	if (this_fl_str->mode_status == FL_MODE_FLASH) {
		hrtimer_cancel(&this_fl_str->timer);
		flashlight_turn_off();
	}
	switch (mode) {
	case FL_MODE_OFF:
		flashlight_turn_off();
	break;
	case FL_MODE_TORCH:
		if (this_fl_str->led_count)
			flashlight_hw_command(3, 4);
		else
			flashlight_hw_command(3, 3);
		flashlight_hw_command(0, 6);
		flashlight_hw_command(2, 4);
		this_fl_str->mode_status = FL_MODE_TORCH;
		this_fl_str->fl_lcdev.brightness = LED_HALF;
	break;
	case FL_MODE_TORCH_LED_A:
		flashlight_hw_command(3, 1);
		flashlight_hw_command(0, 15);
		flashlight_hw_command(2, 3);
		this_fl_str->mode_status = FL_MODE_TORCH_LED_A;
		this_fl_str->fl_lcdev.brightness = 1;
	break;
	case FL_MODE_TORCH_LED_B:
		flashlight_hw_command(3, 1);
		flashlight_hw_command(0, 15);
		flashlight_hw_command(2, 2);
		this_fl_str->mode_status = FL_MODE_TORCH_LED_B;
		this_fl_str->fl_lcdev.brightness = 2;
	break;
	case FL_MODE_FLASH:
		flashlight_hw_command(2, 4);
		gpio_direction_output(this_fl_str->gpio_flash, 1);
		this_fl_str->mode_status = FL_MODE_FLASH;
		this_fl_str->fl_lcdev.brightness = LED_FULL;
		hrtimer_start(&this_fl_str->timer,
			ktime_set(this_fl_str->flash_sw_timeout_ms / 1000,
				(this_fl_str->flash_sw_timeout_ms % 1000) *
					NSEC_PER_MSEC), HRTIMER_MODE_REL);
	break;
	case FL_MODE_PRE_FLASH:
		flashlight_hw_command(3, 3);
		flashlight_hw_command(0, 6);
		flashlight_hw_command(2, 4);
		this_fl_str->mode_status = FL_MODE_PRE_FLASH;
		this_fl_str->fl_lcdev.brightness = LED_HALF + 1;
	break;
	case FL_MODE_TORCH_LEVEL_1:
		if (this_fl_str->led_count)
			flashlight_hw_command(3, 4);
		else
			flashlight_hw_command(3, 3);
		flashlight_hw_command(0, 15);
		flashlight_hw_command(2, 4);
		this_fl_str->mode_status = FL_MODE_TORCH_LEVEL_1;
		this_fl_str->fl_lcdev.brightness = LED_HALF - 2;
	break;
	case FL_MODE_TORCH_LEVEL_2:
		if (this_fl_str->led_count)
			flashlight_hw_command(3, 4);
		else
			flashlight_hw_command(3, 3);
		flashlight_hw_command(0, 10);
		flashlight_hw_command(2, 4);
		this_fl_str->mode_status = FL_MODE_TORCH_LEVEL_2;
		this_fl_str->fl_lcdev.brightness = LED_HALF - 1;
	break;

	default:
		FLT_ERR_LOG("%s: unknown flash_light flags: %d\n",
							__func__, mode);
		ret = -EINVAL;
	break;
	}

	FLT_INFO_LOG("%s: mode: %d, %u\n", FLASHLIGHT_NAME, mode,
		flash_ns/(1000*1000));

	spin_unlock_irqrestore(&this_fl_str->spin_lock,
						this_fl_str->spinlock_flags);
	return ret;
}
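
The software timeout armed for FL_MODE_FLASH presumably forces the flash back off when it fires; a sketch of such a callback (the function and struct names are assumptions):

/* Sketch (assumed names): the flash software timeout turns the LED off. */
static enum hrtimer_restart flashlight_hrtimer_func(struct hrtimer *timer)
{
	struct flashlight_struct *fl_str =
		container_of(timer, struct flashlight_struct, timer);

	spin_lock_irqsave(&fl_str->spin_lock, fl_str->spinlock_flags);
	flashlight_turn_off();
	spin_unlock_irqrestore(&fl_str->spin_lock, fl_str->spinlock_flags);

	return HRTIMER_NORESTART;
}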
Example #19
static void msmfb_pan_update(struct fb_info *info, uint32_t left, uint32_t top,
			     uint32_t eright, uint32_t ebottom,
			     uint32_t yoffset, int pan_display)
{
	struct msmfb_info *msmfb = info->par;
	struct msm_panel_data *panel = msmfb->panel;
	unsigned long irq_flags;
	int sleeping;
	int retry = 1;

	DLOG(SHOW_UPDATES, "update %d %d %d %d %d %d\n",
		left, top, eright, ebottom, yoffset, pan_display);
restart:
	spin_lock_irqsave(&msmfb->update_lock, irq_flags);

	/* if we are sleeping: on a pan_display, wait 10ms (to throttle back
	 * drawing); otherwise return */
	if (msmfb->sleeping == SLEEPING) {
		DLOG(SUSPEND_RESUME, "drawing while asleep\n");
		spin_unlock_irqrestore(&msmfb->update_lock, irq_flags);
		if (pan_display)
			wait_event_interruptible_timeout(msmfb->frame_wq,
				msmfb->sleeping != SLEEPING, HZ/10);
		return;
	}

	sleeping = msmfb->sleeping;
	/* on a full update, if the last frame has not completed, wait for it */
	if (pan_display && (msmfb->frame_requested != msmfb->frame_done ||
			    sleeping == UPDATING)) {
		int ret;
		spin_unlock_irqrestore(&msmfb->update_lock, irq_flags);
		ret = wait_event_interruptible_timeout(msmfb->frame_wq,
			msmfb->frame_done == msmfb->frame_requested &&
			msmfb->sleeping != UPDATING, 5 * HZ);
		if (ret <= 0 && (msmfb->frame_requested != msmfb->frame_done ||
				 msmfb->sleeping == UPDATING)) {
			if (retry && panel->request_vsync &&
			    (sleeping == AWAKE)) {
				panel->request_vsync(panel,
					&msmfb->vsync_callback);
				retry = 0;
				printk(KERN_WARNING "msmfb_pan_display timeout "
					"rerequest vsync\n");
			} else {
				printk(KERN_WARNING "msmfb_pan_display timeout "
					"waiting for frame start, %d %d\n",
					msmfb->frame_requested,
					msmfb->frame_done);
				return;
			}
		}
		goto restart;
	}


	msmfb->frame_requested++;
	/* if necessary, update the y offset; if this is the
	 * first full update on resume, set the sleeping state */
	if (pan_display) {
		msmfb->yoffset = yoffset;
		if (left == 0 && top == 0 && eright == info->var.xres &&
		    ebottom == info->var.yres) {
			if (sleeping == WAKING) {
				msmfb->update_frame = msmfb->frame_requested;
				DLOG(SUSPEND_RESUME, "full update starting\n");
				msmfb->sleeping = UPDATING;
			}
		}
	}

	/* set the update request */
	if (left < msmfb->update_info.left)
		msmfb->update_info.left = left;
	if (top < msmfb->update_info.top)
		msmfb->update_info.top = top;
	if (eright > msmfb->update_info.eright)
		msmfb->update_info.eright = eright;
	if (ebottom > msmfb->update_info.ebottom)
		msmfb->update_info.ebottom = ebottom;
	DLOG(SHOW_UPDATES, "update queued %d %d %d %d %d\n",
		msmfb->update_info.left, msmfb->update_info.top,
		msmfb->update_info.eright, msmfb->update_info.ebottom,
		msmfb->yoffset);
	spin_unlock_irqrestore(&msmfb->update_lock, irq_flags);

	/* if the panel is all the way on, wait for vsync; otherwise sleep
	 * for 16 ms (long enough for the dma to reach the panel), then begin dma */
	msmfb->vsync_request_time = ktime_get();
	if (panel->request_vsync && (sleeping == AWAKE)) {
		panel->request_vsync(panel, &msmfb->vsync_callback);
	} else {
		if (!hrtimer_active(&msmfb->fake_vsync)) {
			hrtimer_start(&msmfb->fake_vsync,
				      ktime_set(0, NSEC_PER_SEC/60),
				      HRTIMER_MODE_REL);
		}
	}
}
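
When there is no hardware vsync (panel not fully awake), the fake_vsync hrtimer above stands in for it at roughly 60 Hz; its callback presumably just kicks the same frame-start path, e.g. (a sketch; the DMA entry point is an assumed name):

static enum hrtimer_restart msmfb_fake_vsync(struct hrtimer *timer)
{
	struct msmfb_info *msmfb =
		container_of(timer, struct msmfb_info, fake_vsync);

	msmfb_start_dma(msmfb);	/* assumed frame-start entry point */
	return HRTIMER_NORESTART;
}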
Example #20
/**
 * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick
 * Called either from the idle loop or from irq_exit() when an idle period was
 * just interrupted by an interrupt which did not cause a reschedule.
 */
void tick_nohz_stop_sched_tick(int inidle)
{
	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
	struct tick_sched *ts;
	ktime_t last_update, expires, now;
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
	u64 time_delta;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();
	ts = &per_cpu(tick_cpu_sched, cpu);

	/*
	 * Call to tick_nohz_start_idle stops the last_update_time from being
	 * updated. Thus, it must not be called in the event we are called from
	 * irq_exit() with the prior state different than idle.
	 */
	if (!inidle && !ts->inidle)
		goto end;

	/*
	 * Set ts->inidle unconditionally. Even if the system did not
	 * switch to NOHZ mode, the cpu frequency governors rely on the
	 * update of the idle time accounting in tick_nohz_start_idle().
	 */
	ts->inidle = 1;

	now = tick_nohz_start_idle(ts);

	/*
	 * If this cpu is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the cpu which runs the tick timer next. If we don't drop
	 * this here the jiffies might be stale and do_timer() never
	 * invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		goto end;

	if (need_resched())
		goto end;

	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
		static int ratelimit;

		if (ratelimit < 10) {
			printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
			       local_softirq_pending());
			ratelimit++;
		}
		goto end;
	}

	ts->idle_calls++;
	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqbegin(&xtime_lock);
		last_update = last_jiffies_update;
		last_jiffies = jiffies;

		/*
		 * On SMP we really should only care for the CPU which
		 * has the do_timer duty assigned. All other CPUs can
		 * sleep as long as they want.
		 */
		if (cpu == tick_do_timer_cpu ||
		    tick_do_timer_cpu == TICK_DO_TIMER_NONE)
			time_delta = timekeeping_max_deferment();
		else
			time_delta = KTIME_MAX;
	} while (read_seqretry(&xtime_lock, seq));

	if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
	    arch_needs_cpu(cpu)) {
		next_jiffies = last_jiffies + 1;
		delta_jiffies = 1;
	} else {
		/* Get the next timer wheel timer */
		next_jiffies = get_next_timer_interrupt(last_jiffies);
		delta_jiffies = next_jiffies - last_jiffies;
	}
	/*
	 * Do not stop the tick, if we are only one off
	 * or if the cpu is required for rcu
	 */
	if (!ts->tick_stopped && delta_jiffies == 1)
		goto out;

	/* Schedule the tick, if we are at least one jiffie off */
	if ((long)delta_jiffies >= 1) {

		/*
		 * calculate the expiry time for the next timer wheel
		 * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
		 * that there is no timer pending or at least extremely
		 * far into the future (12 days for HZ=1000). In this
		 * case we set the expiry to the end of time.
		 */
		if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
			/*
			 * Calculate the time delta for the next timer event.
			 * If the time delta exceeds the maximum time delta
			 * permitted by the current clocksource then adjust
			 * the time delta accordingly to ensure the
			 * clocksource does not wrap.
			 */
			time_delta = min_t(u64, time_delta,
					   tick_period.tv64 * delta_jiffies);
			expires = ktime_add_ns(last_update, time_delta);
		} else {
			expires.tv64 = KTIME_MAX;
		}

		/*
		 * If this cpu is the one which updates jiffies, then
		 * give up the assignment and let it be taken by the
		 * cpu which runs the tick timer next, which might be
		 * this cpu as well. If we don't drop this here the
		 * jiffies might be stale and do_timer() never
		 * invoked.
		 */
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;

		if (delta_jiffies > 1)
			cpumask_set_cpu(cpu, nohz_cpu_mask);

		/* Skip reprogramming of the event if it's not changed */
		if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
			goto out;

		/*
		 * nohz_stop_sched_tick can be called several times before
		 * the nohz_restart_sched_tick is called. This happens when
		 * interrupts arrive which do not cause a reschedule. In the
		 * first call we save the current tick time, so we can restart
		 * the scheduler tick in nohz_restart_sched_tick.
		 */
		if (!ts->tick_stopped) {
			if (select_nohz_load_balancer(1)) {
				/*
				 * sched tick not stopped!
				 */
				cpumask_clear_cpu(cpu, nohz_cpu_mask);
				goto out;
			}

			ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
			ts->tick_stopped = 1;
			ts->idle_jiffies = last_jiffies;
			rcu_enter_nohz();
		}

		ts->idle_sleeps++;

		/* Mark expires */
		ts->idle_expires = expires;

		/*
		 * If the expiration time == KTIME_MAX, we simply
		 * stop the tick timer.
		 */
		if (unlikely(expires.tv64 == KTIME_MAX)) {
			if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
				hrtimer_cancel(&ts->sched_timer);
			goto out;
		}

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer, expires,
				      HRTIMER_MODE_ABS_PINNED);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				goto out;
		} else if (!tick_program_event(expires, 0))
				goto out;
		/*
		 * We are past the event already. So we crossed a
		 * jiffie boundary. Update jiffies and raise the
		 * softirq.
		 */
		tick_do_update_jiffies64(ktime_get());
		cpumask_clear_cpu(cpu, nohz_cpu_mask);
	}
	raise_softirq_irqoff(TIMER_SOFTIRQ);
out:
	ts->next_jiffies = next_jiffies;
	ts->last_jiffies = last_jiffies;
	ts->sleep_length = ktime_sub(dev->next_event, now);
end:
	local_irq_restore(flags);
}
Example #21
/*
 * row_add_request() - Add request to the scheduler
 * @q:	requests queue
 * @rq:	request to add
 *
 */
static void row_add_request(struct request_queue *q,
			    struct request *rq)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	struct row_queue *rqueue = RQ_ROWQ(rq);
	s64 diff_ms;
	bool queue_was_empty = list_empty(&rqueue->fifo);

	list_add_tail(&rq->queuelist, &rqueue->fifo);
	rd->nr_reqs[rq_data_dir(rq)]++;
	rqueue->nr_req++;
	rq_set_fifo_time(rq, jiffies); /* for statistics*/

	if (rq->cmd_flags & REQ_URGENT) {
		WARN_ON(1);
		blk_dump_rq_flags(rq, "");
		rq->cmd_flags &= ~REQ_URGENT;
	}

	if (row_queues_def[rqueue->prio].idling_enabled) {
		if (rd->rd_idle_data.idling_queue_idx == rqueue->prio &&
		    hrtimer_active(&rd->rd_idle_data.hr_timer)) {
			if (hrtimer_try_to_cancel(
				&rd->rd_idle_data.hr_timer) >= 0) {
				row_log_rowq(rd, rqueue->prio,
				    "Canceled delayed work on %d",
				    rd->rd_idle_data.idling_queue_idx);
				rd->rd_idle_data.idling_queue_idx =
					ROWQ_MAX_PRIO;
			}
		}
		diff_ms = ktime_to_ms(ktime_sub(ktime_get(),
				rqueue->idle_data.last_insert_time));
		if (unlikely(diff_ms < 0)) {
			pr_err("%s(): time delta error: diff_ms < 0",
				__func__);
			rqueue->idle_data.begin_idling = false;
			return;
		}
		if (diff_ms < rd->rd_idle_data.freq_ms) {
			rqueue->idle_data.begin_idling = true;
			row_log_rowq(rd, rqueue->prio, "Enable idling");
		} else {
			rqueue->idle_data.begin_idling = false;
			row_log_rowq(rd, rqueue->prio, "Disable idling (%ldms)",
				(long)diff_ms);
		}

		rqueue->idle_data.last_insert_time = ktime_get();
	}
	if (row_queues_def[rqueue->prio].is_urgent &&
	    !rd->pending_urgent_rq && !rd->urgent_in_flight) {
		/* Handle High Priority queues */
		if (rqueue->prio < ROWQ_REG_PRIO_IDX &&
		    rd->last_served_ioprio_class != IOPRIO_CLASS_RT &&
		    queue_was_empty) {
			row_log_rowq(rd, rqueue->prio,
				"added (high prio) urgent request");
			rq->cmd_flags |= REQ_URGENT;
			rd->pending_urgent_rq = rq;
		} else  if (row_rowq_unserved(rd, rqueue->prio)) {
			/* Handle Regular priority queues */
			row_log_rowq(rd, rqueue->prio,
				"added urgent request (total on queue=%d)",
				rqueue->nr_req);
			rq->cmd_flags |= REQ_URGENT;
			rd->pending_urgent_rq = rq;
		}
	} else
		row_log_rowq(rd, rqueue->prio,
			"added request (total on queue=%d)", rqueue->nr_req);
}
Example #22
int netmap_mitigation_active(struct nm_generic_mit *mit)
{
    return hrtimer_active(&mit->mit_timer);
}
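
The predicate above pairs with helpers that arm and tear down the mitigation timer; something along these lines (a sketch only; the explicit period parameter is an illustrative assumption):

void netmap_mitigation_start(struct nm_generic_mit *mit, unsigned long period_ns)
{
    hrtimer_start(&mit->mit_timer, ktime_set(0, period_ns),
                  HRTIMER_MODE_REL);
}

void netmap_mitigation_cleanup(struct nm_generic_mit *mit)
{
    hrtimer_cancel(&mit->mit_timer);
}

Example #23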
static void k3_vibrator_enable(struct timed_output_dev *dev, int value)
{
	struct k3_vibrator_data *pdata = container_of(dev, struct k3_vibrator_data, dev);

	if (value < 0 || !pdata) {
		pr_err("error: vibrator_enable value %d is negative or pdata is NULL\n",
			value);
		return;
	}
	/* cancel previous timer */
	if (hrtimer_active(&pdata->timer))
		hrtimer_cancel(&pdata->timer);
	#if 0
	if (value > 0 && value <= DR2_OT_SEL_1000) {
		u32 set_value = 0;
		u32 k3_vibrator_dr2_ctrl = 0;

		mutex_lock(&pdata->lock);
		K3_VIB_REG_W(pdata->reg_dr2_vset, pdata->k3_vibrator_base, DR2_ISET);
		k3_vibrator_dr2_ctrl = K3_VIB_REG_R(pdata->k3_vibrator_base, DR2_CTRL);
		if (pdata->work_mode)
			K3_VIB_REG_W((k3_vibrator_dr2_ctrl | LM_FIX_MODE | DRIVE_MODE | \
					pdata->lm_dr2) & DR2_DISABLE, pdata->k3_vibrator_base, DR2_CTRL);
		else
			K3_VIB_REG_W((k3_vibrator_dr2_ctrl | LM_FIX_MODE | pdata->lm_dr2) & DR2_DISABLE, \
					pdata->k3_vibrator_base, DR2_CTRL);

		set_value = k3_vibrator_get_value(value);
		K3_VIB_REG_W(set_value | DR2_OT_EN, pdata->k3_vibrator_base, DR2_CTRL1);
		mutex_unlock(&pdata->lock);
	} else if (value > DR2_OT_SEL_1000) {
	#endif
	if (value > 0) {
		k3_vibrator_on(pdata);
		hrtimer_start(&pdata->timer,
			ktime_set(value / 1000, (value % 1000) * 1000000),
			HRTIMER_MODE_REL);
	} else {
		k3_vibrator_off(pdata);
	}
}

// Begin Immersion changes
void imm_vibrator_en(bool en)
{
	if (!p_data ) {
		pr_err("error:p_data is null\n");
		return;
	}

	if (en)
		k3_vibrator_on(p_data);
	else
		k3_vibrator_off(p_data);
}
EXPORT_SYMBOL(imm_vibrator_en);

void imm_vibrator_pwm(int force)
{
	if (force > 0)
		p_data->reg_dr2_vset = (u8)(32 * force / 128 + 31);
	else if (force == 0)
		p_data->reg_dr2_vset = 0x20;
	else
		p_data->reg_dr2_vset = 0x0;

	/* reg_dr2_vset is a u8, so only the upper bound needs checking */
	if (p_data->reg_dr2_vset <= 0x3F)
		K3_VIB_REG_W(p_data->reg_dr2_vset, p_data->k3_vibrator_base, DR2_ISET);
}
EXPORT_SYMBOL(imm_vibrator_pwm);
// End Immersion changes

#ifdef CONFIG_OF
static const struct of_device_id hsk3_vibrator_match[] = {
	{ .compatible = "hisilicon,hi6421-vibrator",},
	{},
};
Example #24
long sys_cancel_reserve(pid_t pid) {
  struct cpumask set;
  struct pid *pid_struct;
  struct task_struct *task;
  struct task_struct *tmp;
  int i;
  int cpu_task_count[NUM_CPUS] = {0};

  printk(KERN_ALERT "[sys_cancel_reserve] PID %u\n", pid);

  // locate the task_struct for the task required
  if (pid == 0) {
    task = current;
  } else {
    rcu_read_lock();
    pid_struct = find_get_pid(pid);
    if (!pid_struct) {
      rcu_read_unlock();
      return -ENODEV;
    }
    task = pid_task(pid_struct, PIDTYPE_PID);
    if (!task) {
      rcu_read_unlock();
      return -ENODEV;
    }
    rcu_read_unlock();
  }

  // make sure the task has a reservation
  if (task->has_reservation == 0) {
    return -EINVAL;
  }

  if (task->has_reservation || task->energymon_node) {
    cancel_reserve_hook(task);  // execute cancel reserve hook
  }

  // cancel timers if they are active
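  // (hrtimer_cancel() is already safe on an inactive timer and simply
  // returns 0, so the hrtimer_active() guards below avoid the call but
  // are not required for correctness)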
  if (hrtimer_active(&(task->T_timer))) {
    hrtimer_cancel(&(task->T_timer));
  }
  if (hrtimer_active(&(task->C_timer))) {
    hrtimer_cancel(&(task->C_timer));
  }

  // make runnable any task suspended by enforcement
  if (task->put_to_sleep) {
    task->put_to_sleep = 0;
    wake_up_process(task);
  }

  // mark as not having a reservation
  task->has_reservation = 0;

  // set process CPU to 0 because it is never offline
  cpumask_clear(&set);
  cpumask_set_cpu(0, &set);
  if (sched_setaffinity(task->pid, &set)) {
    printk(KERN_INFO "[sys_cancel_reserve] failed to set CPU affinity\n");
    return -EINVAL;
  }

  // find what cpus have tasks with reservations
  rcu_read_lock();
  for_each_process(tmp) {
    if (tmp->has_reservation) {
      cpu_task_count[task_cpu(tmp)] = 1;
    }
  }
  rcu_read_unlock();

  // Bring offline all cpus with no tasks
  for (i = 0; i < NUM_CPUS; i++) {
    if (cpu_task_count[i] == 0) {
      if (power_cpu(i, 0) != 0) {
        printk(KERN_INFO "[sys_cancel_reserve] failed to turn off cpu %d", i);
        return -EINVAL;
      }
    } else {
      if (power_cpu(i, 1) != 0) {
        printk(KERN_INFO "[sys_cancel_reserve] failed to turn on cpu %d", i);
        return -EINVAL;
      }
    }
  }

  // set the frequency based on sysclock algorithm
  sysclock_set();

  return 0;
}
Ejemplo n.º 25
0
long sys_set_reserve(pid_t pid, struct timespec __user *user_C,
                     struct timespec __user *user_T, int cid) {

  struct cpumask set;
  struct timespec T, C;
  struct pid *pid_struct;
  struct task_struct *task;
  struct task_struct *tmp;
  int i;
  int cpu_task_count[NUM_CPUS] = {0};

  set_normalized_timespec(&empty, 0, 0);

  // locate the task_struct for the task required
  if (pid == 0) {
    task = current;
  } else {
    rcu_read_lock();
    pid_struct = find_get_pid(pid);
    if (!pid_struct) {
      rcu_read_unlock();
      return -ENODEV;
    }
    task = pid_task(pid_struct, PIDTYPE_PID);
    if (!task) {
      rcu_read_unlock();
      return -ENODEV;
    }
    rcu_read_unlock();
  }

  // get timespec struct info
  if (copy_from_user(&C, user_C, sizeof(struct timespec))) {
    printk(KERN_ALERT "[sys_set_reserve] failed to copy C from user\n");
    return -EFAULT;
  }

  if (copy_from_user(&T, user_T, sizeof(struct timespec))) {
    printk(KERN_ALERT "[sys_set_reserve] failed to copy T from user\n");
    return -EFAULT;
  }

  // check for timespec validity
  if ((timespec_compare(&T, &C) < 0) || !timespec_valid(&T) || !timespec_valid(&C) ||
      (cid >= NUM_CPUS)) {
    printk(KERN_ALERT "[sys_set_reserve] invalid T and C\n");
    return -EINVAL;
  }

  // do a reservation admission check
  cid = admission_check(task, C, T, cid);
  if (cid < 0) {
    return -EBUSY;
  }

  if (set_reserve_hook(task) != 0) {
    return -EFAULT;
  }

  // cancel any old timers for an updated reservation
  if (hrtimer_active(&(task->C_timer))) {
    hrtimer_cancel(&(task->C_timer));
  }
  if (hrtimer_active(&(task->T_timer))) {
    hrtimer_cancel(&(task->T_timer));
  }

  // make runnable any task suspended by enforcement
  if (task->put_to_sleep) {
    task->put_to_sleep = 0;
    wake_up_process(task);
  }

  // copy into task struct ktime values
  task->real_C_time = ktime_set(0, 0);
  task->C_time = ktime_set(C.tv_sec, C.tv_nsec);
  task->T_time = ktime_set(T.tv_sec, T.tv_nsec);

  // find what cpus have tasks on them
  rcu_read_lock();
  for_each_process(tmp) {
    if (tmp->has_reservation) {
      cpu_task_count[task_cpu(tmp)] = 1;
    }
  }
  rcu_read_unlock();

  cpu_task_count[cid] = 1;
  task->reserve_cpu = cid;
  // Bring offline all cpus with no tasks
  for (i = 0; i < NUM_CPUS; i++) {
    if (cpu_task_count[i] == 0) {
      if (power_cpu(i, 0) != 0) {
        printk(KERN_ALERT"[sys_set_reserve] failed to turn off cpu %d\n", i);
        goto fail;
      }
      printk(KERN_ALERT"[sys_set_reserve] turned OFF CPU %d\n", i);
    } else {
      if (power_cpu(i, 1) != 0) {
        printk(KERN_ALERT"[sys_set_reserve] failed to turn on cpu %d\n", i);
        goto fail;
      }
      printk(KERN_ALERT"[sys_set_reserve] turned ON CPU %d\n", i);
    }
  }

  // set process CPU
  cpumask_clear(&set);
  cpumask_set_cpu(cid, &set);
  if (sched_setaffinity(pid, &set)) {
    printk(KERN_ALERT"[sys_set_reserve] failed to set CPU affinity\n");
    goto fail;
  }

  printk(KERN_ALERT "[sys_set_reserve] PID %d (C = %lld ms / T = %lld ms) CPU %u\n",
         pid, ktime_to_ms(task->C_time), ktime_to_ms(task->T_time), cid);

  // mark as having a reservation
  task->has_reservation = 1;

  // set the frequency based on sysclock algorithm
  sysclock_set();

  return 0;

  fail:
    if (task->has_reservation || task->energymon_node) {
      cancel_reserve_hook(task);
    }
    return -EINVAL;
}
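A userspace usage sketch for the pair of syscalls above. It assumes they are wired up as __NR_set_reserve and __NR_cancel_reserve in this experimental kernel; neither number exists in mainline Linux, so both are build-specific assumptions:

#include <stdio.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
  /* reserve C = 10 ms of budget every T = 100 ms on CPU 1 */
  struct timespec C = { .tv_sec = 0, .tv_nsec = 10 * 1000 * 1000 };
  struct timespec T = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };

  if (syscall(__NR_set_reserve, 0, &C, &T, 1) != 0) {
    perror("set_reserve");
    return 1;
  }

  /* ... run the periodic workload ... */

  syscall(__NR_cancel_reserve, 0);
  return 0;
}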
Ejemplo n.º 26
0
static int CVE_2014_0205_linux2_6_30_2_futex_wait(u32 __user *uaddr, int fshared,
		      u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
{
	struct task_struct *curr = current;
	struct restart_block *restart;
	DECLARE_WAITQUEUE(wait, curr);
	struct futex_hash_bucket *hb;
	struct futex_q q;
	u32 uval;
	int ret;
	struct hrtimer_sleeper t;
	int rem = 0;

	if (!bitset)
		return -EINVAL;

	q.pi_state = NULL;
	q.bitset = bitset;
retry:
	q.key = FUTEX_KEY_INIT;
	ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;

retry_private:
	hb = queue_lock(&q);

	/*
	 * Access the page AFTER the hash-bucket is locked.
	 * Order is important:
	 *
	 *   Userspace waiter: val = var; if (cond(val)) CVE_2014_0205_linux2_6_30_2_futex_wait(&var, val);
	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
	 *
	 * The basic logical guarantee of a futex is that it blocks ONLY
	 * if cond(var) is known to be true at the time of blocking, for
	 * any cond.  If we queued after testing *uaddr, that would open
	 * a race condition where we could block indefinitely with
	 * cond(var) false, which would violate the guarantee.
	 *
	 * A consequence is that CVE_2014_0205_linux2_6_30_2_futex_wait() can return zero and absorb
	 * a wakeup when *uaddr != val on entry to the syscall.  This is
	 * rare, but normal.
	 *
	 * For shared futexes, we hold the mmap semaphore, so the mapping
	 * cannot have changed since we looked it up in get_futex_key.
	 */
	ret = get_futex_value_locked(&uval, uaddr);

	if (unlikely(ret)) {
		queue_unlock(&q, hb);

		ret = get_user(uval, uaddr);
		if (ret)
			goto out_put_key;

		if (!fshared)
			goto retry_private;

		put_futex_key(fshared, &q.key);
		goto retry;
	}
	ret = -EWOULDBLOCK;
	if (unlikely(uval != val)) {
		queue_unlock(&q, hb);
		goto out_put_key;
	}

	/* Only actually queue if *uaddr contained val.  */
	queue_me(&q, hb);

	/*
	 * There might have been scheduling since the queue_me(), as we
	 * cannot hold a spinlock across the get_user() in case it
	 * faults, and we cannot just set TASK_INTERRUPTIBLE state when
	 * queueing ourselves into the futex hash.  This code thus has to
	 * rely on the futex_wake() code removing us from hash when it
	 * wakes us up.
	 */

	/* add_wait_queue is the barrier after __set_current_state. */
	__set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&q.waiter, &wait);
	/*
	 * !plist_node_empty() is safe here without any lock.
	 * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
	 */
	if (likely(!plist_node_empty(&q.list))) {
		if (!abs_time)
			schedule();
		else {
			hrtimer_init_on_stack(&t.timer,
					      clockrt ? CLOCK_REALTIME :
					      CLOCK_MONOTONIC,
					      HRTIMER_MODE_ABS);
			hrtimer_init_sleeper(&t, current);
			hrtimer_set_expires_range_ns(&t.timer, *abs_time,
						     current->timer_slack_ns);

			hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
			if (!hrtimer_active(&t.timer))
				t.task = NULL;

			/*
			 * the timer could have already expired, in which
			 * case current would be flagged for rescheduling.
			 * Don't bother calling schedule.
			 */
			if (likely(t.task))
				schedule();

			hrtimer_cancel(&t.timer);

			/* Flag if a timeout occurred */
			rem = (t.task == NULL);

			destroy_hrtimer_on_stack(&t.timer);
		}
	}
	__set_current_state(TASK_RUNNING);

	/*
	 * NOTE: we don't remove ourselves from the waitqueue because
	 * we are the only user of it.
	 */

	/* If we were woken (and unqueued), we succeeded, whatever. */
	ret = 0;
	if (!unqueue_me(&q))
		goto out_put_key;
	ret = -ETIMEDOUT;
	if (rem)
		goto out_put_key;

	/*
	 * We expect signal_pending(current), but another thread may
	 * have handled it for us already.
	 */
	ret = -ERESTARTSYS;
	if (!abs_time)
		goto out_put_key;

	restart = &current_thread_info()->restart_block;
	restart->fn = CVE_2014_0205_linux2_6_30_2_futex_wait_restart;
	restart->futex.uaddr = (u32 *)uaddr;
	restart->futex.val = val;
	restart->futex.time = abs_time->tv64;
	restart->futex.bitset = bitset;
	restart->futex.flags = 0;

	if (fshared)
		restart->futex.flags |= FLAGS_SHARED;
	if (clockrt)
		restart->futex.flags |= FLAGS_CLOCKRT;

	ret = -ERESTART_RESTARTBLOCK;

out_put_key:
	put_futex_key(fshared, &q.key);
out:
	return ret;
}
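The bitset and absolute-timeout arguments above are what userspace supplies through FUTEX_WAIT_BITSET; a zero bitset is rejected with -EINVAL exactly as at the top of the function. A small wrapper over the standard futex(2) interface:

#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

static int futex_wait_bitset(uint32_t *uaddr, uint32_t val,
			     const struct timespec *abs_timeout,
			     uint32_t bitset)
{
	/* the timeout is absolute CLOCK_MONOTONIC time unless
	 * FUTEX_CLOCK_REALTIME is or'ed into the op (the `clockrt`
	 * flag in the kernel code above) */
	return syscall(SYS_futex, uaddr, FUTEX_WAIT_BITSET, val,
		       abs_timeout, NULL, bitset);
}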
Ejemplo n.º 27
0
/*
 * row_get_ioprio_class_to_serve() - Return the next I/O priority
 *				      class to dispatch requests from
 * @rd:	pointer to struct row_data
 * @force:	flag indicating if forced dispatch
 *
 * This function returns the next I/O priority class to serve
 * {IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE}.
 * If there are no more requests in the scheduler, or if we are idling on
 * some queue, IOPRIO_CLASS_NONE will be returned.
 * If idling is scheduled on a lower priority queue than the one that needs
 * to be served, it will be canceled.
 *
 */
static int row_get_ioprio_class_to_serve(struct row_data *rd, int force)
{
	int i;
	int ret = IOPRIO_CLASS_NONE;

	if (!rd->nr_reqs[READ] && !rd->nr_reqs[WRITE]) {
		row_log(rd->dispatch_queue, "No more requests in scheduler");
		goto check_idling;
	}

	/* First, go over the high priority queues */
	for (i = 0; i < ROWQ_REG_PRIO_IDX; i++) {
		if (!list_empty(&rd->row_queues[i].fifo)) {
			if (hrtimer_active(&rd->rd_idle_data.hr_timer)) {
				if (hrtimer_try_to_cancel(
					&rd->rd_idle_data.hr_timer) >= 0) {
					row_log(rd->dispatch_queue,
					"Canceling delayed work on %d. RT pending",
					     rd->rd_idle_data.idling_queue_idx);
					rd->rd_idle_data.idling_queue_idx =
						ROWQ_MAX_PRIO;
				}
			}

			if (row_regular_req_pending(rd) &&
			    (rd->reg_prio_starvation.starvation_counter >=
			     rd->reg_prio_starvation.starvation_limit))
				ret = IOPRIO_CLASS_BE;
			else if (row_low_req_pending(rd) &&
			    (rd->low_prio_starvation.starvation_counter >=
			     rd->low_prio_starvation.starvation_limit))
				ret = IOPRIO_CLASS_IDLE;
			else
				ret = IOPRIO_CLASS_RT;

			goto done;
		}
	}

	/*
	 * At the moment idling is implemented only for READ queues.
	 * If enabled on WRITE, this needs updating
	 */
	if (hrtimer_active(&rd->rd_idle_data.hr_timer)) {
		row_log(rd->dispatch_queue, "Delayed work pending. Exiting");
		goto done;
	}
check_idling:
	/* Check for (high priority) idling and enable if needed */
	for (i = 0; i < ROWQ_REG_PRIO_IDX && !force; i++) {
		if (rd->row_queues[i].idle_data.begin_idling &&
		    row_queues_def[i].idling_enabled)
			goto initiate_idling;
	}

	/* Regular priority queues */
	for (i = ROWQ_REG_PRIO_IDX; i < ROWQ_LOW_PRIO_IDX; i++) {
		if (list_empty(&rd->row_queues[i].fifo)) {
			/* We can idle only if this is not a forced dispatch */
			if (rd->row_queues[i].idle_data.begin_idling &&
			    !force && row_queues_def[i].idling_enabled)
				goto initiate_idling;
		} else {
			if (row_low_req_pending(rd) &&
			    (rd->low_prio_starvation.starvation_counter >=
			     rd->low_prio_starvation.starvation_limit))
				ret = IOPRIO_CLASS_IDLE;
			else
				ret = IOPRIO_CLASS_BE;
			goto done;
		}
	}

	if (rd->nr_reqs[READ] || rd->nr_reqs[WRITE])
		ret = IOPRIO_CLASS_IDLE;
	goto done;

initiate_idling:
	hrtimer_start(&rd->rd_idle_data.hr_timer,
		ktime_set(0, rd->rd_idle_data.idle_time_ms * NSEC_PER_MSEC),
		HRTIMER_MODE_REL);

	rd->rd_idle_data.idling_queue_idx = i;
	row_log_rowq(rd, i, "Scheduled delayed work on %d. exiting", i);

done:
	return ret;
}
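The arming in initiate_idling converts milliseconds by hand with ktime_set(0, ms * NSEC_PER_MSEC). An equivalent helper, shown only to make the units explicit (newer kernels provide ms_to_ktime() for the same purpose):

static inline ktime_t idle_ms_to_ktime(unsigned int ms)
{
	return ktime_set(ms / MSEC_PER_SEC,
			 (ms % MSEC_PER_SEC) * NSEC_PER_MSEC);
}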
Ejemplo n.º 28
0
static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
{
	spin_lock_irq(&cpu_base->lock);

	while (!list_empty(&cpu_base->cb_pending)) {
		enum hrtimer_restart (*fn)(struct hrtimer *);
		struct hrtimer *timer;
		int restart;
		int emulate_hardirq_ctx = 0;

		timer = list_entry(cpu_base->cb_pending.next,
				   struct hrtimer, cb_entry);

		debug_hrtimer_deactivate(timer);
		timer_stats_account_hrtimer(timer);

		fn = timer->function;
		/*
		 * A timer might have been added to the cb_pending list
		 * when it was migrated during a cpu-offline operation.
		 * Emulate hardirq context for such timers.
		 */
		if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
		    timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED)
			emulate_hardirq_ctx = 1;

		__remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0);
		spin_unlock_irq(&cpu_base->lock);

		if (unlikely(emulate_hardirq_ctx)) {
			local_irq_disable();
			restart = fn(timer);
			local_irq_enable();
		} else
			restart = fn(timer);

		spin_lock_irq(&cpu_base->lock);

		timer->state &= ~HRTIMER_STATE_CALLBACK;
		if (restart == HRTIMER_RESTART) {
			BUG_ON(hrtimer_active(timer));
			/*
			 * Enqueue the timer, allow reprogramming of the event
			 * device
			 */
			enqueue_hrtimer(timer, timer->base, 1);
		} else if (hrtimer_active(timer)) {
			/*
			 * If the timer was rearmed on another CPU, reprogram
			 * the event device.
			 */
			struct hrtimer_clock_base *base = timer->base;

			if (base->first == &timer->node &&
			    hrtimer_reprogram(timer, base)) {
				/*
				 * Timer is expired. Thus move it from tree to
				 * pending list again.
				 */
				__remove_hrtimer(timer, base,
						 HRTIMER_STATE_PENDING, 0);
				list_add_tail(&timer->cb_entry,
					      &base->cpu_base->cb_pending);
			}
		}
	}
	spin_unlock_irq(&cpu_base->lock);
}
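For contrast with the re-enqueue logic above, this is the shape of a self-rearming callback whose HRTIMER_RESTART return value triggers that path (generic hrtimer usage on kernels that provide hrtimer_forward_now(), not code from this file):

static enum hrtimer_restart periodic_tick(struct hrtimer *timer)
{
	/* push the expiry forward by one 100 ms period relative to now */
	hrtimer_forward_now(timer, ktime_set(0, 100 * NSEC_PER_MSEC));
	return HRTIMER_RESTART;
}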
Ejemplo n.º 29
0
static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
{
	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
	ktime_t last_update, expires, now;
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
	u64 time_delta;
	int cpu;

	cpu = smp_processor_id();
	ts = &per_cpu(tick_cpu_sched, cpu);

	now = tick_nohz_start_idle(cpu, ts);

	/*
	 * If this cpu is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the cpu which runs the tick timer next. If we don't drop
	 * this here the jiffies might be stale and do_timer() never
	 * invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		return;

	if (need_resched())
		return;

	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
		static int ratelimit;

		if (ratelimit < 10) {
			printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
			       (unsigned int) local_softirq_pending());
			ratelimit++;
		}
		return;
	}

	ts->idle_calls++;
	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqbegin(&xtime_lock);
		last_update = last_jiffies_update;
		last_jiffies = jiffies;
		time_delta = timekeeping_max_deferment();
	} while (read_seqretry(&xtime_lock, seq));

	if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
	    arch_needs_cpu(cpu)) {
		next_jiffies = last_jiffies + 1;
		delta_jiffies = 1;
	} else {
		/* Get the next timer wheel timer */
		next_jiffies = get_next_timer_interrupt(last_jiffies);
		delta_jiffies = next_jiffies - last_jiffies;
	}
	/*
	 * Do not stop the tick if we are only one jiffy off,
	 * or if the cpu is required for RCU.
	 */
	if (!ts->tick_stopped && delta_jiffies == 1)
		goto out;

	/* Schedule the tick, if we are at least one jiffie off */
	if ((long)delta_jiffies >= 1) {

		/*
		 * If this cpu is the one which updates jiffies, then
		 * give up the assignment and let it be taken by the
		 * cpu which runs the tick timer next, which might be
		 * this cpu as well. If we don't drop this here the
		 * jiffies might be stale and do_timer() never
		 * invoked. Keep track of the fact that it was the one
		 * which had the do_timer() duty last. If this cpu is
		 * the one which had the do_timer() duty last, we
		 * limit the sleep time to the timekeeping
		 * max_deferment value which we retrieved
		 * above. Otherwise we can sleep as long as we want.
		 */
		if (cpu == tick_do_timer_cpu) {
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
			ts->do_timer_last = 1;
		} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
			time_delta = KTIME_MAX;
			ts->do_timer_last = 0;
		} else if (!ts->do_timer_last) {
			time_delta = KTIME_MAX;
		}

		/*
		 * calculate the expiry time for the next timer wheel
		 * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
		 * that there is no timer pending or at least extremely
		 * far into the future (12 days for HZ=1000). In this
		 * case we set the expiry to the end of time.
		 */
		if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
			/*
			 * Calculate the time delta for the next timer event.
			 * If the time delta exceeds the maximum time delta
			 * permitted by the current clocksource then adjust
			 * the time delta accordingly to ensure the
			 * clocksource does not wrap.
			 */
			time_delta = min_t(u64, time_delta,
					   tick_period.tv64 * delta_jiffies);
		}

		if (time_delta < KTIME_MAX)
			expires = ktime_add_ns(last_update, time_delta);
		else
			expires.tv64 = KTIME_MAX;

		/* Skip reprogram of event if its not changed */
		if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
			goto out;

		/*
		 * nohz_stop_sched_tick can be called several times before
		 * the nohz_restart_sched_tick is called. This happens when
		 * interrupts arrive which do not cause a reschedule. In the
		 * first call we save the current tick time, so we can restart
		 * the scheduler tick in nohz_restart_sched_tick.
		 */
		if (!ts->tick_stopped) {
			select_nohz_load_balancer(1);
			calc_load_enter_idle();

			ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
			ts->tick_stopped = 1;
			ts->idle_jiffies = last_jiffies;
		}

		ts->idle_sleeps++;

		/* Mark expires */
		ts->idle_expires = expires;

		/*
		 * If the expiration time == KTIME_MAX, then
		 * in this case we simply stop the tick timer.
		 */
		if (unlikely(expires.tv64 == KTIME_MAX)) {
			if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
				hrtimer_cancel(&ts->sched_timer);
			goto out;
		}

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer, expires,
				      HRTIMER_MODE_ABS_PINNED);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				goto out;
		} else if (!tick_program_event(expires, 0))
			goto out;
		/*
		 * We are past the event already. So we crossed a
		 * jiffie boundary. Update jiffies and raise the
		 * softirq.
		 */
		tick_do_update_jiffies64(ktime_get());
	}
	raise_softirq_irqoff(TIMER_SOFTIRQ);
out:
	ts->next_jiffies = next_jiffies;
	ts->last_jiffies = last_jiffies;
}
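The jiffies snapshot near the top of the function uses the classic seqlock read idiom: retry until no writer interleaved with the reads. Factored out as an illustrative helper over the same variables:

static void snapshot_jiffies(unsigned long *last, ktime_t *update)
{
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		*update = last_jiffies_update;
		*last = jiffies;
	} while (read_seqretry(&xtime_lock, seq));
}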
Ejemplo n.º 30
0
int aat1277_flashlight_control(int mode)
{
	int ret = 0;
	u64 flash_ns = ktime_to_ns(ktime_get());

#if 0 /* disable flash_adj_value check now */
	if (this_fl_str->flash_adj_value == 2) {
		printk(KERN_WARNING "%s: force disable function!\n", __func__);
		return -EIO;
	}
#endif
#ifndef CONFIG_ARCH_MSM_FLASHLIGHT_DEATH_RAY
	if (this_fl_str->mode_status == mode) {
		FLT_INFO_LOG("%s: mode is same: %d\n",
							FLASHLIGHT_NAME, mode);

		if (!hrtimer_active(&this_fl_str->timer) &&
			this_fl_str->mode_status == FL_MODE_OFF) {
			FLT_INFO_LOG("flashlight hasn't been enable or" \
				" has already reset to 0 due to timeout\n");
			return ret;
		} else
			return -EINVAL;
	}
#endif
	spin_lock_irqsave(&this_fl_str->spin_lock,
						this_fl_str->spinlock_flags);
	if (this_fl_str->mode_status == FL_MODE_FLASH) {
		hrtimer_cancel(&this_fl_str->timer);
		flashlight_turn_off();
	}

	switch (mode) {
	case FL_MODE_OFF:
		flashlight_turn_off();

	break;
	case FL_MODE_TORCH:
		gpio_direction_output(this_fl_str->gpio_torch, 0);
		gpio_set_value(this_fl_str->torch_set1, 1);
		gpio_set_value(this_fl_str->torch_set2, 1);
		gpio_direction_output(this_fl_str->gpio_torch, 1);
		this_fl_str->mode_status = FL_MODE_TORCH;
		this_fl_str->fl_lcdev.brightness = LED_HALF;
	break;

	case FL_MODE_FLASH:
		gpio_direction_output(this_fl_str->gpio_flash, 1);
		this_fl_str->mode_status = FL_MODE_FLASH;
		this_fl_str->fl_lcdev.brightness = LED_FULL;

		hrtimer_start(&this_fl_str->timer,
			ktime_set(this_fl_str->flash_sw_timeout_ms / 1000,
				(this_fl_str->flash_sw_timeout_ms % 1000) *
					NSEC_PER_MSEC), HRTIMER_MODE_REL);
	break;
	case FL_MODE_PRE_FLASH:
		gpio_direction_output(this_fl_str->gpio_torch, 0);
		gpio_set_value(this_fl_str->torch_set1, 1);
		gpio_set_value(this_fl_str->torch_set2, 1);
		gpio_direction_output(this_fl_str->gpio_torch, 1);
		this_fl_str->mode_status = FL_MODE_PRE_FLASH;
		this_fl_str->fl_lcdev.brightness = LED_HALF + 1;
	break;
	case FL_MODE_TORCH_LEVEL_1:
		gpio_direction_output(this_fl_str->gpio_torch, 0);
		gpio_set_value(this_fl_str->torch_set1, 0);
		gpio_set_value(this_fl_str->torch_set2, 0);
		gpio_direction_output(this_fl_str->gpio_torch, 1);
		this_fl_str->mode_status = FL_MODE_TORCH_LEVEL_1;
		this_fl_str->fl_lcdev.brightness = LED_HALF - 2;
	break;
	case FL_MODE_TORCH_LEVEL_2:
		gpio_direction_output(this_fl_str->gpio_torch, 0);
		gpio_set_value(this_fl_str->torch_set1, 0);
		gpio_set_value(this_fl_str->torch_set2, 1);
		gpio_direction_output(this_fl_str->gpio_torch, 1);
		this_fl_str->mode_status = FL_MODE_TORCH_LEVEL_2;
		this_fl_str->fl_lcdev.brightness = LED_HALF - 1;
	break;
#ifdef CONFIG_ARCH_MSM_FLASHLIGHT_DEATH_RAY
	case FL_MODE_DEATH_RAY:
		pr_info("%s: death ray\n", __func__);
		hrtimer_cancel(&this_fl_str->timer);
		gpio_direction_output(this_fl_str->gpio_flash, 0);
		udelay(40);
		gpio_direction_output(this_fl_str->gpio_flash, 1);
		this_fl_str->mode_status = 0;
		this_fl_str->fl_lcdev.brightness = 3;
	break;
#endif
	default:
		FLT_ERR_LOG("%s: unknown flash_light flags: %d\n",
							__func__, mode);
		ret = -EINVAL;
	break;
	}

	FLT_INFO_LOG("%s: mode: %d, %u\n", FLASHLIGHT_NAME, mode,
		flash_ns/(1000*1000));

	spin_unlock_irqrestore(&this_fl_str->spin_lock,
						this_fl_str->spinlock_flags);
	return ret;
}
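The software timeout armed in the FL_MODE_FLASH case needs an expiry handler that is not part of this excerpt. A plausible sketch (the callback name is assumed): when flash_sw_timeout_ms elapses, the flash is forced off so the LED cannot be left at full current.

static enum hrtimer_restart flashlight_sw_timeout(struct hrtimer *timer)
{
	flashlight_turn_off();
	return HRTIMER_NORESTART;
}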