static int devalarm_try_to_cancel(struct devalarm *alrm)
{
	if (is_wakeup(alrm->type))
		return alarm_try_to_cancel(&alrm->u.alrm);
	return hrtimer_try_to_cancel(&alrm->u.hrt);
}
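All of the examples collected here center on hrtimer_try_to_cancel(), whose return value encodes three outcomes: 0 when the timer was not queued, 1 when it was queued and has now been cancelled, and -1 when its callback is currently executing and cannot be stopped. A minimal sketch of that contract (the function name is hypothetical):

#include <linux/hrtimer.h>

/* Sketch of the hrtimer_try_to_cancel() return contract. */
static void demo_try_cancel(struct hrtimer *timer)
{
	switch (hrtimer_try_to_cancel(timer)) {
	case 0:		/* timer was not queued; nothing to cancel */
		break;
	case 1:		/* timer was queued and has been removed */
		break;
	case -1:	/* callback is running and cannot be stopped;
			 * callers retry or fall back to hrtimer_cancel() */
		break;
	}
}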
Example #2
static irqreturn_t _DPI_InterruptHandler(int irq, void *dev_id)
{
	static int counter = 0;
	DPI_REG_INTERRUPT status = DPI_REG->INT_STATUS;

	/* if (status.FIFO_EMPTY) ++counter; */

	if (status.VSYNC) {
		if (dpiIntCallback)
			dpiIntCallback(DISP_DPI_VSYNC_INT);
#ifndef BUILD_UBOOT
		if (atomic_read(&wait_dpi_vsync)) {
			if (-1 != hrtimer_try_to_cancel(&hrtimer_vsync_dpi)) {
				atomic_set(&wait_dpi_vsync, 0);
				atomic_set(&dpi_vsync, 1);
				wake_up_interruptible(&_vsync_wait_queue_dpi);
				hrtimer_start(&hrtimer_vsync_dpi,
					      ktime_set(0, VSYNC_US_TO_NS(vsync_timer_dpi)),
					      HRTIMER_MODE_REL);
			}
		}
#endif
	}

	if (status.VSYNC && counter) {
		DISP_LOG_PRINT(ANDROID_LOG_ERROR, "DPI",
			       "[Error] DPI FIFO is empty, received %d times interrupt !!!\n",
			       counter);
		counter = 0;
	}

	if (status.FIFO_EMPTY) {
		int need_reset = 0;
		unsigned long long temp = sched_clock();
		unsigned int debug_while_loop_cnt = 0;
		volatile unsigned int dsi_state = INREG32(DSI_BASE + 0x154);

		if ((dsi_state & 0x1ff) == 0x80) {
			auto_sync_reset_count++;
			if (auto_sync_reset_count == 10) {
				auto_sync_reset_count = 0;
				need_reset = 2;
			}
		} else {
			auto_sync_reset_count = 0;
		}

		/* printk("gmce,0x%08x, %d\n", (INREG32(DSI_BASE + 0x154)) & 0x1ff,
		 *        (unsigned int)(temp - last_fifo_empty_stamp)); */
		if (_fifo_empty_monitor_insert((unsigned int)(temp - last_fifo_empty_stamp)))
			need_reset = 1;

		last_fifo_empty_stamp = temp;
		if (need_reset) {
			unsigned int mode, suspend;

			mode = DSI_GetMode();
			suspend = DISP_GetSuspendMode();
			if ((mode != CMD_MODE) && !suspend) {
				DPI_DisableClk();
#if 0
				while (1) {
					debug_while_loop_cnt++;
					dsi_state = INREG32(DSI_BASE + 0x154);
					if ((dsi_state & 0x1ff) == 0x100)
						break;
					if (debug_while_loop_cnt > 0x1000000)
						printk("FATAL Error!! dsi in vact when dpi fifo empty, and can't enter vfp within 0x1000000 loops!!\n");
				}
#endif
				DSI_clk_HS_mode(0);
				DSI_SetMode(CMD_MODE);
				DSI_Reset();
				DSI_SetMode(SYNC_PULSE_VDO_MODE);
				DSI_clk_HS_mode(1);
				DPI_EnableClk();
				DSI_EnableClk();
			}
			printk("[DSI/DPI]reset[%d] mode[%d], suspend[%d]\n",
			       need_reset, mode, suspend);
			need_reset = 0;
		}
	}

	_DPI_LogRefreshRate(status);
	OUTREG32(&DPI_REG->INT_STATUS, 0);
	return IRQ_HANDLED;
}
/*
 * row_get_ioprio_class_to_serve() - Return the next I/O priority
 *				      class to dispatch requests from
 * @rd:	pointer to struct row_data
 * @force:	flag indicating if this is a forced dispatch
 *
 * This function returns the next I/O priority class to serve
 * {IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE}.
 * If there are no more requests in the scheduler, or if we are idling on
 * some queue, IOPRIO_CLASS_NONE will be returned.
 * If idling is scheduled on a lower priority queue than the one that needs
 * to be served, it will be canceled.
 *
 */
static int row_get_ioprio_class_to_serve(struct row_data *rd, int force)
{
	int i;
	int ret = IOPRIO_CLASS_NONE;

	if (!rd->nr_reqs[READ] && !rd->nr_reqs[WRITE]) {
		row_log(rd->dispatch_queue, "No more requests in scheduler");
		goto check_idling;
	}

	/* First, go over the high priority queues */
	for (i = 0; i < ROWQ_REG_PRIO_IDX; i++) {
		if (!list_empty(&rd->row_queues[i].fifo)) {
			if (hrtimer_active(&rd->rd_idle_data.hr_timer)) {
				if (hrtimer_try_to_cancel(
					&rd->rd_idle_data.hr_timer) >= 0) {
					row_log(rd->dispatch_queue,
					"Canceling delayed work on %d. RT pending",
					     rd->rd_idle_data.idling_queue_idx);
					rd->rd_idle_data.idling_queue_idx =
						ROWQ_MAX_PRIO;
				}
			}

			if (row_regular_req_pending(rd) &&
			    (rd->reg_prio_starvation.starvation_counter >=
			     rd->reg_prio_starvation.starvation_limit))
				ret = IOPRIO_CLASS_BE;
			else if (row_low_req_pending(rd) &&
			    (rd->low_prio_starvation.starvation_counter >=
			     rd->low_prio_starvation.starvation_limit))
				ret = IOPRIO_CLASS_IDLE;
			else
				ret = IOPRIO_CLASS_RT;

			goto done;
		}
	}

	/*
	 * At the moment idling is implemented only for READ queues.
	 * If enabled on WRITE, this needs updating
	 */
	if (hrtimer_active(&rd->rd_idle_data.hr_timer)) {
		row_log(rd->dispatch_queue, "Delayed work pending. Exiting");
		goto done;
	}
check_idling:
	/* Check for (high priority) idling and enable if needed */
	for (i = 0; i < ROWQ_REG_PRIO_IDX && !force; i++) {
		if (rd->row_queues[i].idle_data.begin_idling &&
		    row_queues_def[i].idling_enabled)
			goto initiate_idling;
	}

	/* Regular priority queues */
	for (i = ROWQ_REG_PRIO_IDX; i < ROWQ_LOW_PRIO_IDX; i++) {
		if (list_empty(&rd->row_queues[i].fifo)) {
			/* We can idle only if this is not a forced dispatch */
			if (rd->row_queues[i].idle_data.begin_idling &&
			    !force && row_queues_def[i].idling_enabled)
				goto initiate_idling;
		} else {
			if (row_low_req_pending(rd) &&
			    (rd->low_prio_starvation.starvation_counter >=
			     rd->low_prio_starvation.starvation_limit))
				ret = IOPRIO_CLASS_IDLE;
			else
				ret = IOPRIO_CLASS_BE;
			goto done;
		}
	}

	if (rd->nr_reqs[READ] || rd->nr_reqs[WRITE])
		ret = IOPRIO_CLASS_IDLE;
	goto done;

initiate_idling:
	hrtimer_start(&rd->rd_idle_data.hr_timer,
		ktime_set(0, rd->rd_idle_data.idle_time_ms * NSEC_PER_MSEC),
		HRTIMER_MODE_REL);

	rd->rd_idle_data.idling_queue_idx = i;
	row_log_rowq(rd, i, "Scheduled delayed work on %d. exiting", i);

done:
	return ret;
}
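Note the ">= 0" test in the loop above: it treats "not active" (0) and "cancelled" (1) the same, clearing the idling bookkeeping in both cases, and skips it only when the callback is already mid-flight (-1). A minimal sketch of the idiom, with hypothetical names:

#include <linux/hrtimer.h>
#include <linux/types.h>

/* Clear the idle state whenever the timer is guaranteed not to fire
 * later, i.e. on return values 0 and 1; -1 leaves it untouched. */
static void demo_stop_idling(struct hrtimer *idle_timer, bool *idling)
{
	if (hrtimer_try_to_cancel(idle_timer) >= 0)
		*idling = false;
}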
Example #4
MHI_STATUS mhi_process_link_down(mhi_device_ctxt *mhi_dev_ctxt)
{
	unsigned long flags;
	int r;
	mhi_log(MHI_MSG_INFO, "Entered.\n");
	if (!mhi_dev_ctxt)
		return MHI_STATUS_ERROR;

	mhi_notify_clients(mhi_dev_ctxt, MHI_CB_MHI_DISABLED);
	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	mhi_dev_ctxt->flags.mhi_initialized = 0;
	mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
	mhi_deassert_device_wake(mhi_dev_ctxt);
	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);

	r = cancel_delayed_work_sync(&mhi_dev_ctxt->m3_work);
	if (r) {
		atomic_set(&mhi_dev_ctxt->flags.m3_work_enabled, 0);
		mhi_log(MHI_MSG_INFO, "M3 work cancelled\n");
	}

	r = cancel_work_sync(&mhi_dev_ctxt->m0_work);
	if (r) {
		atomic_set(&mhi_dev_ctxt->flags.m0_work_enabled, 0);
		mhi_log(MHI_MSG_INFO, "M0 work cancelled\n");
	}
	mhi_dev_ctxt->flags.stop_threads = 1;

	while (!mhi_dev_ctxt->ev_thread_stopped) {
		wake_up_interruptible(mhi_dev_ctxt->event_handle);
		mhi_log(MHI_MSG_INFO,
			"Waiting for threads to SUSPEND EVT: %d, STT: %d\n",
			mhi_dev_ctxt->st_thread_stopped,
			mhi_dev_ctxt->ev_thread_stopped);
		msleep(20);
	}

	switch (hrtimer_try_to_cancel(&mhi_dev_ctxt->m1_timer)) {
	case 0:
		mhi_log(MHI_MSG_CRITICAL | MHI_DBG_POWER,
			"Timer was not active\n");
		break;
	case 1:
		mhi_log(MHI_MSG_CRITICAL | MHI_DBG_POWER,
			"Timer was active\n");
		break;
	case -1:
		mhi_log(MHI_MSG_CRITICAL | MHI_DBG_POWER,
			"Timer executing and can't stop\n");
		break;
	}
	r = mhi_set_bus_request(mhi_dev_ctxt, 0);
	if (r)
		mhi_log(MHI_MSG_INFO,
				"Failed to scale bus request to sleep set.\n");
	mhi_turn_off_pcie_link(mhi_dev_ctxt);
	mhi_dev_ctxt->dev_info->link_down_cntr++;
	atomic_set(&mhi_dev_ctxt->flags.data_pending, 0);
	mhi_log(MHI_MSG_INFO, "Exited.\n");

	return MHI_STATUS_SUCCESS;
}
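One caveat about the switch above: when hrtimer_try_to_cancel() returns -1, this teardown path only logs the fact and carries on, so the m1 timer callback may still be running. Teardown code that must not race with a callback typically uses hrtimer_cancel() instead, which waits for a running callback to finish. A minimal sketch, assuming no lock the callback needs is held:

#include <linux/hrtimer.h>

/* hrtimer_cancel() loops internally until any running callback has
 * finished, so the timer is guaranteed inactive on return. It must not
 * be called while holding a lock the callback itself takes. */
static void demo_sync_shutdown(struct hrtimer *timer)
{
	hrtimer_cancel(timer);
}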
/*
 * row_add_request() - Add request to the scheduler
 * @q:	requests queue
 * @rq:	request to add
 *
 */
static void row_add_request(struct request_queue *q,
			    struct request *rq)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	struct row_queue *rqueue = RQ_ROWQ(rq);
	s64 diff_ms;
	bool queue_was_empty = list_empty(&rqueue->fifo);
	unsigned long bv_page_flags = 0;

	if (rq->bio && rq->bio->bi_io_vec && rq->bio->bi_io_vec->bv_page)
		bv_page_flags = rq->bio->bi_io_vec->bv_page->flags;

	list_add_tail(&rq->queuelist, &rqueue->fifo);
	rd->nr_reqs[rq_data_dir(rq)]++;
	rqueue->nr_req++;
	rq_set_fifo_time(rq, jiffies); /* for statistics */

	if (rq->cmd_flags & REQ_URGENT) {
		WARN_ON(1);
		blk_dump_rq_flags(rq, "");
		rq->cmd_flags &= ~REQ_URGENT;
	}

	if (row_queues_def[rqueue->prio].idling_enabled) {
		if (rd->rd_idle_data.idling_queue_idx == rqueue->prio &&
		    hrtimer_active(&rd->rd_idle_data.hr_timer)) {
			if (hrtimer_try_to_cancel(
				&rd->rd_idle_data.hr_timer) >= 0) {
				row_log_rowq(rd, rqueue->prio,
				    "Canceled delayed work on %d",
				    rd->rd_idle_data.idling_queue_idx);
				rd->rd_idle_data.idling_queue_idx =
					ROWQ_MAX_PRIO;
			}
		}
		diff_ms = ktime_to_ms(ktime_sub(ktime_get(),
				rqueue->idle_data.last_insert_time));
		if (unlikely(diff_ms < 0)) {
			pr_err("%s(): time delta error: diff_ms < 0",
				__func__);
			rqueue->idle_data.begin_idling = false;
			return;
		}

		if ((bv_page_flags & (1L << PG_readahead)) ||
		    (diff_ms < rd->rd_idle_data.freq_ms)) {
			rqueue->idle_data.begin_idling = true;
			row_log_rowq(rd, rqueue->prio, "Enable idling");
		} else {
			rqueue->idle_data.begin_idling = false;
			row_log_rowq(rd, rqueue->prio, "Disable idling (%ldms)",
				(long)diff_ms);
		}

		rqueue->idle_data.last_insert_time = ktime_get();
	}
	if (row_queues_def[rqueue->prio].is_urgent &&
	    !rd->pending_urgent_rq && !rd->urgent_in_flight) {
		/* Handle High Priority queues */
		if (rqueue->prio < ROWQ_REG_PRIO_IDX &&
		    rd->last_served_ioprio_class != IOPRIO_CLASS_RT &&
		    queue_was_empty) {
			row_log_rowq(rd, rqueue->prio,
				"added (high prio) urgent request");
			rq->cmd_flags |= REQ_URGENT;
			rd->pending_urgent_rq = rq;
		} else if (row_rowq_unserved(rd, rqueue->prio)) {
			/* Handle regular priority queues */
			row_log_rowq(rd, rqueue->prio,
				"added urgent request (total on queue=%d)",
				rqueue->nr_req);
			rq->cmd_flags |= REQ_URGENT;
			rd->pending_urgent_rq = rq;
		}
	} else
		row_log_rowq(rd, rqueue->prio,
			"added request (total on queue=%d)", rqueue->nr_req);
}
Example #6
int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
{
    struct task_struct *tsk = current;
    struct hrtimer *timer;
    ktime_t expires;
    cputime_t cval, cinterval, nval, ninterval;

    /*
     * Validate the timevals in value.
     */
    if (!timeval_valid(&value->it_value) ||
            !timeval_valid(&value->it_interval))
        return -EINVAL;

    switch (which) {
    case ITIMER_REAL:
again:
        spin_lock_irq(&tsk->sighand->siglock);
        timer = &tsk->signal->real_timer;
        if (ovalue) {
            ovalue->it_value = itimer_get_remtime(timer);
            ovalue->it_interval
                = ktime_to_timeval(tsk->signal->it_real_incr);
        }
        /* We are sharing ->siglock with it_real_fn() */
        if (hrtimer_try_to_cancel(timer) < 0) {
            spin_unlock_irq(&tsk->sighand->siglock);
            hrtimer_wait_for_timer(&tsk->signal->real_timer);
            goto again;
        }
        expires = timeval_to_ktime(value->it_value);
        if (expires.tv64 != 0) {
            tsk->signal->it_real_incr =
                timeval_to_ktime(value->it_interval);
            hrtimer_start(timer, expires, HRTIMER_MODE_REL);
        } else
            tsk->signal->it_real_incr.tv64 = 0;

        spin_unlock_irq(&tsk->sighand->siglock);
        break;
    case ITIMER_VIRTUAL:
        nval = timeval_to_cputime(&value->it_value);
        ninterval = timeval_to_cputime(&value->it_interval);
        spin_lock_irq(&tsk->sighand->siglock);
        cval = tsk->signal->it_virt_expires;
        cinterval = tsk->signal->it_virt_incr;
        if (!cputime_eq(cval, cputime_zero) ||
                !cputime_eq(nval, cputime_zero)) {
            if (cputime_gt(nval, cputime_zero))
                nval = cputime_add(nval,
                                   jiffies_to_cputime(1));
            set_process_cpu_timer(tsk, CPUCLOCK_VIRT,
                                  &nval, &cval);
        }
        tsk->signal->it_virt_expires = nval;
        tsk->signal->it_virt_incr = ninterval;
        spin_unlock_irq(&tsk->sighand->siglock);
        if (ovalue) {
            cputime_to_timeval(cval, &ovalue->it_value);
            cputime_to_timeval(cinterval, &ovalue->it_interval);
        }
        break;
    case ITIMER_PROF:
        nval = timeval_to_cputime(&value->it_value);
        ninterval = timeval_to_cputime(&value->it_interval);
        spin_lock_irq(&tsk->sighand->siglock);
        cval = tsk->signal->it_prof_expires;
        cinterval = tsk->signal->it_prof_incr;
        if (!cputime_eq(cval, cputime_zero) ||
                !cputime_eq(nval, cputime_zero)) {
            if (cputime_gt(nval, cputime_zero))
                nval = cputime_add(nval,
                                   jiffies_to_cputime(1));
            set_process_cpu_timer(tsk, CPUCLOCK_PROF,
                                  &nval, &cval);
        }
        tsk->signal->it_prof_expires = nval;
        tsk->signal->it_prof_incr = ninterval;
        spin_unlock_irq(&tsk->sighand->siglock);
        if (ovalue) {
            cputime_to_timeval(cval, &ovalue->it_value);
            cputime_to_timeval(cinterval, &ovalue->it_interval);
        }
        break;
    default:
        return -EINVAL;
    }
    return 0;
}
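The ITIMER_REAL branch above shows the standard pattern for cancelling a timer under a lock that the timer callback also takes: if hrtimer_try_to_cancel() reports a running callback (-1), the lock is dropped before retrying, because spinning with it held could deadlock against it_real_fn(). A generic sketch of the same pattern (all names hypothetical):

#include <linux/hrtimer.h>
#include <linux/spinlock.h>

struct demo_dev {		/* hypothetical device state */
	spinlock_t lock;
	struct hrtimer timer;
};

/* Unlock-and-retry: the callback may be spinning on dev->lock, so we
 * must not busy-wait for it while holding the lock ourselves. */
static void demo_restart_timer(struct demo_dev *dev, ktime_t expires)
{
	unsigned long flags;

retry:
	spin_lock_irqsave(&dev->lock, flags);
	if (hrtimer_try_to_cancel(&dev->timer) < 0) {
		spin_unlock_irqrestore(&dev->lock, flags);
		cpu_relax();	/* give the callback a chance to finish */
		goto retry;
	}
	/* timer is now guaranteed inactive; safe to reprogram */
	hrtimer_start(&dev->timer, expires, HRTIMER_MODE_REL);
	spin_unlock_irqrestore(&dev->lock, flags);
}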
Example #8
int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
{
	struct task_struct *tsk = current;
	struct hrtimer *timer;
	ktime_t expires;
	cputime_t cval, cinterval, nval, ninterval;

	/*
	 * Validate the timevals in value.
	 *
	 * Note: Although the spec requires that invalid values shall
	 * return -EINVAL, we just fixup the value and print a limited
	 * number of warnings in order not to break users of this
	 * historical misfeature.
	 *
	 * Scheduled for replacement in March 2007
	 */
	check_itimerval(value);

	switch (which) {
	case ITIMER_REAL:
again:
		spin_lock_irq(&tsk->sighand->siglock);
		timer = &tsk->signal->real_timer;
		if (ovalue) {
			ovalue->it_value = itimer_get_remtime(timer);
			ovalue->it_interval
				= ktime_to_timeval(tsk->signal->it_real_incr);
		}
		/* We are sharing ->siglock with it_real_fn() */
		if (hrtimer_try_to_cancel(timer) < 0) {
			spin_unlock_irq(&tsk->sighand->siglock);
			goto again;
		}
		tsk->signal->it_real_incr =
			timeval_to_ktime(value->it_interval);
		expires = timeval_to_ktime(value->it_value);
		if (expires.tv64 != 0)
			hrtimer_start(timer, expires, HRTIMER_REL);
		spin_unlock_irq(&tsk->sighand->siglock);
		break;
	case ITIMER_VIRTUAL:
		nval = timeval_to_cputime(&value->it_value);
		ninterval = timeval_to_cputime(&value->it_interval);
		read_lock(&tasklist_lock);
		spin_lock_irq(&tsk->sighand->siglock);
		cval = tsk->signal->it_virt_expires;
		cinterval = tsk->signal->it_virt_incr;
		if (!cputime_eq(cval, cputime_zero) ||
		    !cputime_eq(nval, cputime_zero)) {
			if (cputime_gt(nval, cputime_zero))
				nval = cputime_add(nval,
						   jiffies_to_cputime(1));
			set_process_cpu_timer(tsk, CPUCLOCK_VIRT,
					      &nval, &cval);
		}
		tsk->signal->it_virt_expires = nval;
		tsk->signal->it_virt_incr = ninterval;
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);
		if (ovalue) {
			cputime_to_timeval(cval, &ovalue->it_value);
			cputime_to_timeval(cinterval, &ovalue->it_interval);
		}
		break;
	case ITIMER_PROF:
		nval = timeval_to_cputime(&value->it_value);
		ninterval = timeval_to_cputime(&value->it_interval);
		read_lock(&tasklist_lock);
		spin_lock_irq(&tsk->sighand->siglock);
		cval = tsk->signal->it_prof_expires;
		cinterval = tsk->signal->it_prof_incr;
		if (!cputime_eq(cval, cputime_zero) ||
		    !cputime_eq(nval, cputime_zero)) {
			if (cputime_gt(nval, cputime_zero))
				nval = cputime_add(nval,
						   jiffies_to_cputime(1));
			set_process_cpu_timer(tsk, CPUCLOCK_PROF,
					      &nval, &cval);
		}
		tsk->signal->it_prof_expires = nval;
		tsk->signal->it_prof_incr = ninterval;
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);
		if (ovalue) {
			cputime_to_timeval(cval, &ovalue->it_value);
			cputime_to_timeval(cinterval, &ovalue->it_interval);
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static void pm8xxx_vib_enable(struct timed_output_dev *dev, int value)
{
	struct pm8xxx_vib *vib = container_of(dev, struct pm8xxx_vib,
					 timed_dev);
	unsigned long flags;

/* LGE_CHANGE */
	int origin_value;
#ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_REST_POWER
	struct timeval current_tv;
	struct timeval interval_tv;
#endif

#ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_OVERDRIVE
	int over_ms = vib->overdrive_ms;
#endif

#ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_MIN_TIMEOUT
	spin_lock_irqsave(&vib->lock, flags);
	if (value == 0 && vib->pre_value <= vib->min_timeout_ms) {
		spin_unlock_irqrestore(&vib->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&vib->lock, flags);
#endif

/* LGE_CHANGE */
	if (unlikely(debug_mask))
		printk(KERN_INFO "pm8xxx_vib_enable value:%d\n", value);

retry:
	spin_lock_irqsave(&vib->lock, flags);
	if (hrtimer_try_to_cancel(&vib->vib_timer) < 0) {
		spin_unlock_irqrestore(&vib->lock, flags);
		cpu_relax();
		goto retry;
	}

#ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_OVERDRIVE
	if (hrtimer_try_to_cancel(&vib->vib_overdrive_timer) < 0) {
		spin_unlock_irqrestore(&vib->lock, flags);
		cpu_relax();
		goto retry;
	}
#endif

/* LGE_CHANGE */
	origin_value = value;

	if (value == 0)
		vib->state = 0;
	else {
		/* Set Min Timeout for normal fuction */
#ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_MIN_TIMEOUT
		value = (value < vib->min_timeout_ms ?
			vib->min_timeout_ms : value);
#endif

		value = (value > vib->pdata->max_timeout_ms ?
				 vib->pdata->max_timeout_ms : value);
		vib->state = 1;

#ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_MIN_TIMEOUT
		vib->pre_value = value;
#endif

#ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_OVERDRIVE
		if (vib->overdrive_ms > 0 && value <= vib->overdrive_range_ms) {
			vib->remain_vib_ms = value - over_ms;
			vib->level = vib->max_level_mv / 100;
			vib->active_level = vib->request_level;

			if (unlikely(debug_mask))
				printk(KERN_INFO "start overdrive over_level:%d over_ms:%d\n",
				       vib->level, over_ms);

#ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_REST_POWER
			do_gettimeofday(&current_tv);
			if (vib->vib_state) { /* vibrator is working now */
				struct timeval min_timeout_tv;

				min_timeout_tv.tv_sec = vib->min_timeout_ms / 1000;
				min_timeout_tv.tv_usec = (vib->min_timeout_ms % 1000) * 1000;

				get_timeval_interval(&current_tv, &(vib->start_tv), &interval_tv);
				if (unlikely(debug_mask)) {
					printk(KERN_INFO "vib_state is true, cur:%ld.%06ld, sta:%ld.%06ld, itv:%ld.%06ld\n",
					       current_tv.tv_sec, current_tv.tv_usec,
					       vib->start_tv.tv_sec, vib->start_tv.tv_usec,
					       interval_tv.tv_sec, interval_tv.tv_usec);
				}

				/* if the interval exceeds min_timeout, neither overdrive
				 * nor the minimum time is needed */
				if (compare_timeval_interval(&interval_tv, &min_timeout_tv) == 1) {
					value = origin_value;
					if (unlikely(debug_mask))
						printk(KERN_INFO "interval greater than min_timeout, start normal vib %dms\n", value);
					goto NORMAL_VIB_START;
				} else {
					/* less than min_timeout: a corrected value is needed */
					int interval_ms;

					/* tv_usec is in microseconds, so divide by 1000 for ms */
					interval_ms = (interval_tv.tv_sec * 1000) + (interval_tv.tv_usec / 1000);
					if (over_ms > interval_ms) {
						over_ms = over_ms - interval_ms;
						vib->remain_vib_ms = origin_value;
						if (unlikely(debug_mask))
							printk(KERN_INFO "interval less than min_timeout, start overdrive %dms, remain %dms\n",
							       over_ms, vib->remain_vib_ms);
						goto OVERDRIVE_VIB_START;
					} else {
						value = value - interval_ms;
						if (unlikely(debug_mask))
							printk(KERN_INFO "interval less than min_timeout, start normal vib %dms\n", value);
						goto NORMAL_VIB_START;
					}
				}
			} else { /* vibrator is not working now */
				struct timeval min_stop_tv;

				min_stop_tv.tv_sec = vib->min_stop_ms / 1000;
				min_stop_tv.tv_usec = (vib->min_stop_ms % 1000) * 1000;

				get_timeval_interval(&current_tv, &(vib->stop_tv), &interval_tv);

				if (unlikely(debug_mask)) {
					printk(KERN_INFO "vib_state is false, cur:%ld.%06ld, sto:%ld.%06ld, itv:%ld.%06ld\n",
					       current_tv.tv_sec, current_tv.tv_usec,
					       vib->stop_tv.tv_sec, vib->stop_tv.tv_usec,
					       interval_tv.tv_sec, interval_tv.tv_usec);
				}

				/* if the interval exceeds min_stop_tv, start overdrive
				 * followed by the requested value */
				if (compare_timeval_interval(&interval_tv, &min_stop_tv) == 1) {
					if (unlikely(debug_mask))
						printk(KERN_INFO "greater than min_stop_timeout, start overdrive %dms, remain %dms\n",
						       over_ms, vib->remain_vib_ms);
					goto OVERDRIVE_VIB_START;
				} else {
					/* less than min_stop_tv: reduce the overdrive time */
					int interval_ms;

					interval_ms = (interval_tv.tv_sec * 1000) + (interval_tv.tv_usec / 1000);
					over_ms = interval_ms / (vib->min_stop_ms / vib->overdrive_ms) / 2;
					vib->remain_vib_ms = (value - over_ms) / 2;

					if (unlikely(debug_mask))
						printk(KERN_INFO "less than min_stop_timeout, start overdrive %dms, remain %dms\n",
						       over_ms, vib->remain_vib_ms);
					goto OVERDRIVE_VIB_START;
				}
			}
#else
			goto OVERDRIVE_VIB_START;
#endif
		} else
#endif
		{
			goto NORMAL_VIB_START;
		}
	}

NORMAL_VIB_START:
	/* Note: value == 0 also falls through to this label; the zero-length
	 * timer expires immediately and, since vib->state is 0, the work
	 * scheduled below switches the vibrator off. */
#ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_VOL
	vib->level = vib->request_level;
#else
	vib->level = vib->default_level;
#endif
	hrtimer_start(&vib->vib_timer,
		ktime_set(value / 1000, (value % 1000) * 1000000),
		HRTIMER_MODE_REL);

	goto FINISH_VIB_ENABLE;

#ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_OVERDRIVE
OVERDRIVE_VIB_START:
	hrtimer_start(&vib->vib_overdrive_timer,
		ktime_set(over_ms / 1000, (over_ms % 1000) * 1000000),
		HRTIMER_MODE_REL);
#endif

FINISH_VIB_ENABLE:
	spin_unlock_irqrestore(&vib->lock, flags);
	schedule_work(&vib->work);
}
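A final note on the ktime_set(value / 1000, (value % 1000) * 1000000) expressions used throughout this last example: they split a millisecond count into the seconds and nanoseconds that ktime_set() expects. Newer kernels provide ms_to_ktime() for the same conversion; a hedged equivalent sketch:

#include <linux/ktime.h>

/* Builds the same ktime_t the driver above constructs by hand;
 * NSEC_PER_MSEC is 1000000UL. Newer kernels can simply call
 * ms_to_ktime(ms). */
static ktime_t demo_ms_to_ktime(unsigned long ms)
{
	return ktime_set(ms / 1000, (ms % 1000) * NSEC_PER_MSEC);
}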