static ssize_t k3g_set_enable(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	int err = 0;
	struct k3g_data *k3g_data = dev_get_drvdata(dev);
	bool new_enable;

	gyro_debug("GYRO sensor enter function  %s\n",__FUNCTION__);
	if (sysfs_streq(buf, "1"))
		new_enable = true;
	else if (sysfs_streq(buf, "0"))
		new_enable = false;
	else {
		pr_debug("%s: invalid value %d\n", __func__, *buf);
		return -EINVAL;
	}

	if (new_enable == k3g_data->enable)
		return size;

	mutex_lock(&k3g_data->lock);
	if (new_enable) {
		/* turning on */
		err = i2c_smbus_write_i2c_block_data(k3g_data->client,
			CTRL_REG1 | AC, sizeof(k3g_data->ctrl_regs),
						k3g_data->ctrl_regs);
		if (err < 0) {
			err = -EIO;
			goto unlock;
		}

		/* reset fifo entries */
		err = k3g_restart_fifo(k3g_data);
		if (err < 0) {
			err = -EIO;
			goto turn_off;
		}

		k3g_data->timer_enable = 1;
		k3g_data->work_enable = 1;

		if (k3g_data->interruptible)
			enable_irq(k3g_data->client->irq);
		else {
			set_polling_delay(k3g_data, 0);
			hrtimer_start(&k3g_data->timer,
				k3g_data->polling_delay, HRTIMER_MODE_REL);
		}
	} else {
		k3g_data->timer_enable = 0;
		k3g_data->work_enable = 0;
		if (k3g_data->interruptible)
			disable_irq(k3g_data->client->irq);
		else {
			hrtimer_cancel(&k3g_data->timer);
			cancel_work_sync(&k3g_data->work);
		}
		/* turning off */
		err = i2c_smbus_write_byte_data(k3g_data->client,
						CTRL_REG1, 0x00);
		if (err < 0)
			goto unlock;
	}
	k3g_data->enable = new_enable;

turn_off:
	if (err < 0)
		i2c_smbus_write_byte_data(k3g_data->client,
						CTRL_REG1, 0x00);
unlock:
	mutex_unlock(&k3g_data->lock);

	return err ? err : size;
}
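The enable path above assumes an hrtimer/work pair was set up at probe time. A minimal, hypothetical sketch of that polling pattern (names such as my_data, MY_POLL_NS and the work handler are invented; only the hrtimer and workqueue calls are real kernel APIs):

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/workqueue.h>

#define MY_POLL_NS	(10 * NSEC_PER_MSEC)	/* hypothetical polling period */

struct my_data {
	struct hrtimer timer;
	struct work_struct work;	/* assumed to be set up with INIT_WORK() elsewhere */
	ktime_t polling_delay;
};

/* hrtimer callback: runs in interrupt context, so defer the bus I/O to a work item */
static enum hrtimer_restart my_timer_func(struct hrtimer *timer)
{
	struct my_data *d = container_of(timer, struct my_data, timer);

	schedule_work(&d->work);	/* the work handler re-arms the timer when done */
	return HRTIMER_NORESTART;
}

static void my_enable_polling(struct my_data *d)
{
	d->polling_delay = ns_to_ktime(MY_POLL_NS);
	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = my_timer_func;
	hrtimer_start(&d->timer, d->polling_delay, HRTIMER_MODE_REL);
}

static void my_disable_polling(struct my_data *d)
{
	hrtimer_cancel(&d->timer);
	cancel_work_sync(&d->work);
}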
Example #2
/*
 * row_get_ioprio_class_to_serve() - Return the next I/O priority
 *				      class to dispatch requests from
 * @rd:	pointer to struct row_data
 * @force:	flag indicating if forced dispatch
 *
 * This function returns the next I/O priority class to serve
 * {IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE}.
 * If there are no more requests in the scheduler, or if we're idling on some
 * queue, IOPRIO_CLASS_NONE will be returned.
 * If idling is scheduled on a lower priority queue than the one that needs
 * to be served, it will be canceled.
 *
 */
static int row_get_ioprio_class_to_serve(struct row_data *rd, int force)
{
	int i;
	int ret = IOPRIO_CLASS_NONE;

	if (!rd->nr_reqs[READ] && !rd->nr_reqs[WRITE]) {
		row_log(rd->dispatch_queue, "No more requests in scheduler");
		goto check_idling;
	}

	/* First, go over the high priority queues */
	for (i = 0; i < ROWQ_REG_PRIO_IDX; i++) {
		if (!list_empty(&rd->row_queues[i].fifo)) {
			if (hrtimer_active(&rd->rd_idle_data.hr_timer)) {
				if (hrtimer_try_to_cancel(
					&rd->rd_idle_data.hr_timer) >= 0) {
					row_log(rd->dispatch_queue,
					"Canceling delayed work on %d. RT pending",
					     rd->rd_idle_data.idling_queue_idx);
					rd->rd_idle_data.idling_queue_idx =
						ROWQ_MAX_PRIO;
				}
			}

			if (row_regular_req_pending(rd) &&
			    (rd->reg_prio_starvation.starvation_counter >=
			     rd->reg_prio_starvation.starvation_limit))
				ret = IOPRIO_CLASS_BE;
			else if (row_low_req_pending(rd) &&
			    (rd->low_prio_starvation.starvation_counter >=
			     rd->low_prio_starvation.starvation_limit))
				ret = IOPRIO_CLASS_IDLE;
			else
				ret = IOPRIO_CLASS_RT;

			goto done;
		}
	}

	/*
	 * At the moment idling is implemented only for READ queues.
	 * If enabled on WRITE, this needs updating
	 */
	if (hrtimer_active(&rd->rd_idle_data.hr_timer)) {
		row_log(rd->dispatch_queue, "Delayed work pending. Exiting");
		goto done;
	}
check_idling:
	/* Check for (high priority) idling and enable if needed */
	for (i = 0; i < ROWQ_REG_PRIO_IDX && !force; i++) {
		if (rd->row_queues[i].idle_data.begin_idling &&
		    row_queues_def[i].idling_enabled)
			goto initiate_idling;
	}

	/* Regular priority queues */
	for (i = ROWQ_REG_PRIO_IDX; i < ROWQ_LOW_PRIO_IDX; i++) {
		if (list_empty(&rd->row_queues[i].fifo)) {
			/* We can idle only if this is not a forced dispatch */
			if (rd->row_queues[i].idle_data.begin_idling &&
			    !force && row_queues_def[i].idling_enabled)
				goto initiate_idling;
		} else {
			if (row_low_req_pending(rd) &&
			    (rd->low_prio_starvation.starvation_counter >=
			     rd->low_prio_starvation.starvation_limit))
				ret = IOPRIO_CLASS_IDLE;
			else
				ret = IOPRIO_CLASS_BE;
			goto done;
		}
	}

	if (rd->nr_reqs[READ] || rd->nr_reqs[WRITE])
		ret = IOPRIO_CLASS_IDLE;
	goto done;

initiate_idling:
	hrtimer_start(&rd->rd_idle_data.hr_timer,
		ktime_set(0, rd->rd_idle_data.idle_time_ms * NSEC_PER_MSEC),
		HRTIMER_MODE_REL);

	rd->rd_idle_data.idling_queue_idx = i;
	row_log_rowq(rd, i, "Scheduled delayed work on %d. exiting", i);

done:
	return ret;
}
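A short, hypothetical sketch of the hrtimer_try_to_cancel() contract relied on above: it returns 0 when the timer was not queued, 1 when it was removed before firing, and -1 when its callback is currently running and cannot be stopped.

static void my_cancel_idle_timer(struct hrtimer *idle_timer)
{
	int ret = hrtimer_try_to_cancel(idle_timer);

	if (ret >= 0) {
		/* 0: not queued, 1: dequeued - either way the timer will not fire */
	} else {
		/* -1: the callback is executing right now; hrtimer_cancel() would
		 * wait for it, which may not be safe if the caller holds locks
		 * the callback also needs */
	}
}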
Example #3
static enum hrtimer_restart gpio_keypad_timer_func(struct hrtimer *timer)
{
	int out, in;
	int key_index;
	int gpio;
	struct gpio_kp *kp = container_of(timer, struct gpio_kp, timer);
	struct gpio_event_matrix_info *mi = kp->keypad_info;
	unsigned gpio_keypad_flags = mi->flags;
	unsigned polarity = !!(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH);

	out = kp->current_output;
	if (out == mi->noutputs) {
		out = 0;
		kp->last_key_state_changed = kp->key_state_changed;
		kp->key_state_changed = 0;
		kp->some_keys_pressed = 0;
	} else {
		key_index = out * mi->ninputs;
		for (in = 0; in < mi->ninputs; in++, key_index++) {
			gpio = mi->input_gpios[in];
			if (gpio_get_value(gpio) ^ !polarity) {
				if (kp->some_keys_pressed < 3)
					kp->some_keys_pressed++;
				kp->key_state_changed |= !__test_and_set_bit(
						key_index, kp->keys_pressed);
			} else
				kp->key_state_changed |= __test_and_clear_bit(
						key_index, kp->keys_pressed);
		}
		gpio = mi->output_gpios[out];
		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
			gpio_set_value(gpio, !polarity);
		else
			gpio_direction_input(gpio);
		out++;
	}
	kp->current_output = out;
	if (out < mi->noutputs) {
		gpio = mi->output_gpios[out];
		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
			gpio_set_value(gpio, polarity);
		else
			gpio_direction_output(gpio, polarity);
		hrtimer_start(timer, mi->settle_time, HRTIMER_MODE_REL);
		return HRTIMER_NORESTART;
	}
	if (gpio_keypad_flags & GPIOKPF_DEBOUNCE) {
		if (kp->key_state_changed) {
			hrtimer_start(&kp->timer, mi->debounce_delay,
				      HRTIMER_MODE_REL);
			return HRTIMER_NORESTART;
		}
		kp->key_state_changed = kp->last_key_state_changed;
	}
	if (kp->key_state_changed) {
		if (gpio_keypad_flags & GPIOKPF_REMOVE_SOME_PHANTOM_KEYS)
			remove_phantom_keys(kp);
		key_index = 0;
		for (out = 0; out < mi->noutputs; out++)
			for (in = 0; in < mi->ninputs; in++, key_index++)
				report_key(kp, key_index, out, in);
	}
	if (!kp->use_irq || kp->some_keys_pressed) {
		hrtimer_start(timer, mi->poll_time, HRTIMER_MODE_REL);
		return HRTIMER_NORESTART;
	}

	/* No keys are pressed, reenable interrupt */
	for (out = 0; out < mi->noutputs; out++) {
		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
			gpio_set_value(mi->output_gpios[out], polarity);
		else
			gpio_direction_output(mi->output_gpios[out], polarity);
	}
	for (in = 0; in < mi->ninputs; in++)
		enable_irq(gpio_to_irq(mi->input_gpios[in]));
	wake_unlock(&kp->wake_lock);
	return HRTIMER_NORESTART;
}
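Two equivalent ways to keep a periodic hrtimer running from its own callback, as the keypad scan above does; my_period is a hypothetical interval.

static ktime_t my_period;	/* hypothetical scan interval */

/* variant used above: start the timer again by hand, tell the core not to restart it */
static enum hrtimer_restart my_cb_manual_restart(struct hrtimer *timer)
{
	hrtimer_start(timer, my_period, HRTIMER_MODE_REL);
	return HRTIMER_NORESTART;
}

/* alternative: push the expiry forward and let the hrtimer core re-queue the timer */
static enum hrtimer_restart my_cb_forward(struct hrtimer *timer)
{
	hrtimer_forward_now(timer, my_period);
	return HRTIMER_RESTART;
}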
Example #4
static enum hrtimer_restart sbd_tx_timer_func(struct hrtimer *timer)
{
	struct mem_link_device *mld;
	struct link_device *ld;
	struct modem_ctl *mc;
	struct sbd_link_device *sl;
	int i;
	bool need_schedule;
	u16 mask;
	unsigned long flags = 0;

	mld = container_of(timer, struct mem_link_device, sbd_tx_timer);
	ld = &mld->link_dev;
	mc = ld->mc;
	sl = &mld->sbd_link_dev;

	need_schedule = false;
	mask = 0;

	spin_lock_irqsave(&mc->lock, flags);
	if (unlikely(!ipc_active(mld))) {
		spin_unlock_irqrestore(&mc->lock, flags);
		goto exit;
	}
	spin_unlock_irqrestore(&mc->lock, flags);

	if (mld->link_active) {
		if (!mld->link_active(mld)) {
			need_schedule = true;
			goto exit;
		}
	}

	for (i = 0; i < sl->num_channels; i++) {
		struct sbd_ring_buffer *rb = sbd_id2rb(sl, i, TX);
		int ret;

		ret = tx_frames_to_rb(rb);

		if (unlikely(ret < 0)) {
			if (ret == -EBUSY || ret == -ENOSPC) {
				need_schedule = true;
				mask = MASK_SEND_DATA;
				continue;
			} else {
				modemctl_notify_event(MDM_CRASH_INVALID_RB);
				need_schedule = false;
				goto exit;
			}
		}

		if (ret > 0)
			mask = MASK_SEND_DATA;

		if (!skb_queue_empty(&rb->skb_q))
			need_schedule = true;
	}

	if (!need_schedule) {
		for (i = 0; i < sl->num_channels; i++) {
			struct sbd_ring_buffer *rb;

			rb = sbd_id2rb(sl, i, TX);
			if (!rb_empty(rb)) {
				need_schedule = true;
				break;
			}
		}
	}

	if (mask) {
		spin_lock_irqsave(&mc->lock, flags);
		if (unlikely(!ipc_active(mld))) {
			spin_unlock_irqrestore(&mc->lock, flags);
			need_schedule = false;
			goto exit;
		}
		send_ipc_irq(mld, mask2int(mask));
		spin_unlock_irqrestore(&mc->lock, flags);
	}

exit:
	if (need_schedule) {
		ktime_t ktime = ktime_set(0, ms2ns(TX_PERIOD_MS));
		hrtimer_start(timer, ktime, HRTIMER_MODE_REL);
	}

	return HRTIMER_NORESTART;
}
void shm_ac_read_notif_0_tasklet(unsigned long tasklet_data)
{
	struct shrm_dev *shrm = (struct shrm_dev *)tasklet_data;
	u32 writer_local_rptr;
	u32 writer_local_wptr;
	u32 shared_wptr;
	unsigned long flags;

	dev_dbg(shrm->dev, "%s IN\n", __func__);

	/* Update writer_local_rptr with shared_rptr */
	update_ac_common_local_rptr(shrm);
	get_writer_pointers(COMMON_CHANNEL, &writer_local_rptr,
				&writer_local_wptr, &shared_wptr);

	if (check_modem_in_reset()) {
		dev_err(shrm->dev, "%s:Modem state reset or unknown\n",
				__func__);
		return;
	}

	if (boot_state == BOOT_INFO_SYNC) {
		/* BOOT_RESP sent by APE has been received by CMT */
		spin_lock_irqsave(&boot_lock, flags);
		boot_state = BOOT_DONE;
		spin_unlock_irqrestore(&boot_lock, flags);
		dev_info(shrm->dev, "IPC_ISA BOOT_DONE\n");

		if (shrm->msr_flag) {
#ifdef CONFIG_U8500_SHRM_DEFAULT_NET
			shrm_start_netdev(shrm->ndev);
#endif

			/* Notification of Modem reinit to SIPC layer */
			if (shrm->msr_reinit_cb)
				shrm->msr_reinit_cb(shrm->msr_cookie);

			shrm->msr_flag = 0;
			/* multicast that modem is online */
			nl_send_multicast_message(SHRM_NL_STATUS_MOD_ONLINE, GFP_ATOMIC);
		}

	} else if (boot_state == BOOT_DONE) {
		if (writer_local_rptr != writer_local_wptr) {
			shrm_common_tx_state = SHRM_PTR_FREE;
			queue_work(shrm->shm_common_ch_wr_wq,
					&shrm->send_ac_msg_pend_notify_0);
		} else {
			shrm_common_tx_state = SHRM_IDLE;
#ifdef CONFIG_U8500_SHRM_DEFAULT_NET
			shrm_restart_netdev(shrm->ndev);
#endif
		}
	} else {
		dev_err(shrm->dev, "Invalid boot state\n");
	}
	/* start timer here */
	hrtimer_start(&timer, ktime_set(0, 10*NSEC_PER_MSEC),
			HRTIMER_MODE_REL);
	atomic_dec(&ac_sleep_disable_count);

	dev_dbg(shrm->dev, "%s OUT\n", __func__);
}
Example #6
static irqreturn_t gpio_event_input_irq_handler(int irq, void *dev_id)
{
	struct gpio_key_state *ks = dev_id;
	struct gpio_input_state *ds = ks->ds;
	int keymap_index = ks - ds->key_state;
	const struct gpio_event_direct_entry *key_entry;
	unsigned long irqflags;
#ifndef CONFIG_MFD_MAX8957
	int pressed;
#endif
	KEY_LOGD("%s, irq=%d, use_irq=%d\n", __func__, irq, ds->use_irq);

	if (!ds->use_irq)
		return IRQ_HANDLED;

	key_entry = &ds->info->keymap[keymap_index];

	if (key_entry->code == KEY_POWER && power_key_intr_flag == 0) {
		irq_set_irq_type(irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING);
		power_key_intr_flag = 1;
		KEY_LOGD("%s, keycode = %d, first intr", __func__, key_entry->code);
	}
	if (ds->info->debounce_time.tv64) {
		spin_lock_irqsave(&ds->irq_lock, irqflags);
		if (ks->debounce & DEBOUNCE_WAIT_IRQ) {
			ks->debounce = DEBOUNCE_UNKNOWN;
			if (ds->debounce_count++ == 0) {
				wake_lock(&ds->wake_lock);
#ifndef CONFIG_MFD_MAX8957
				hrtimer_start(
					&ds->timer, ds->info->debounce_time,
					HRTIMER_MODE_REL);
#endif
			}
			if (ds->info->flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
				KEY_LOGD("gpio_event_input_irq_handler: "
					"key %x-%x, %d (%d) start debounce\n",
					ds->info->type, key_entry->code,
					keymap_index, key_entry->gpio);
		} else {
			disable_irq_nosync(irq);
			ks->debounce = DEBOUNCE_UNSTABLE;
		}
		spin_unlock_irqrestore(&ds->irq_lock, irqflags);
	} else {
#ifdef CONFIG_MFD_MAX8957
		queue_work(ki_queue, &ks->work);
#else
		pressed = gpio_get_value(key_entry->gpio) ^
			!(ds->info->flags & GPIOEDF_ACTIVE_HIGH);
		if (ds->info->flags & GPIOEDF_PRINT_KEYS)
			KEY_LOGD("gpio_event_input_irq_handler: key %x-%x, %d "
				"(%d) changed to %d\n",
				ds->info->type, key_entry->code, keymap_index,
				key_entry->gpio, pressed);
		input_event(ds->input_devs->dev[key_entry->dev], ds->info->type,
			    key_entry->code, pressed);
		input_sync(ds->input_devs->dev[key_entry->dev]);
#endif
	}
	return IRQ_HANDLED;
}
static void cc2520_sack_start_timer(void)
{
	ktime_t kt;

	kt = ktime_set(0, 1000 * ack_timeout);
	hrtimer_start(&timeout_timer, kt, HRTIMER_MODE_REL);
}
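Assuming ack_timeout is in microseconds, the ktime_set(0, 1000 * ack_timeout) expression above builds a relative expiry in nanoseconds; a hypothetical helper showing equivalent conversions:

static void my_start_ack_timer(struct hrtimer *t, unsigned long ack_timeout_us)
{
	/* seconds/nanoseconds pair, as in the example above */
	ktime_t kt = ktime_set(0, ack_timeout_us * 1000);

	/* equivalent: kt = ns_to_ktime((u64)ack_timeout_us * NSEC_PER_USEC); */
	/* equivalent: kt = us_to_ktime(ack_timeout_us);                      */
	hrtimer_start(t, kt, HRTIMER_MODE_REL);
}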
static void pm8xxx_vib_enable(struct timed_output_dev *dev, int value)
{
	struct pm8xxx_vib *vib = container_of(dev, struct pm8xxx_vib,
					 timed_dev);
	unsigned long flags;

/*            */
	int origin_value;
#ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_REST_POWER
	struct timeval current_tv;
	struct timeval interval_tv;
#endif

#ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_OVERDRIVE
	int over_ms = vib->overdrive_ms;
#endif

#ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_MIN_TIMEOUT
	spin_lock_irqsave(&vib->lock, flags);
	if (value == 0 && vib->pre_value <= vib->min_timeout_ms) {
		spin_unlock_irqrestore(&vib->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&vib->lock, flags);
#endif

/*            */
    if(unlikely(debug_mask))
        printk(KERN_INFO "pm8xxx_vib_enable value:%d\n",value);

retry:
	spin_lock_irqsave(&vib->lock, flags);
	if (hrtimer_try_to_cancel(&vib->vib_timer) < 0) {
		spin_unlock_irqrestore(&vib->lock, flags);
		cpu_relax();
		goto retry;
	}

#ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_OVERDRIVE
	if (hrtimer_try_to_cancel(&vib->vib_overdrive_timer) < 0) {
		spin_unlock_irqrestore(&vib->lock, flags);
		cpu_relax();
		goto retry;
	}
#endif

/*            */
	origin_value = value;

	if (value == 0)
		vib->state = 0;
	else {
		/* Set Min Timeout for normal function */
#ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_MIN_TIMEOUT
		value = (value < vib->min_timeout_ms ?
			vib->min_timeout_ms : value);
#endif

		value = (value > vib->pdata->max_timeout_ms ?
				 vib->pdata->max_timeout_ms : value);
		vib->state = 1;

#ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_MIN_TIMEOUT
		vib->pre_value = value;
#endif

#ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_OVERDRIVE
		if(vib->overdrive_ms > 0 && value <= vib->overdrive_range_ms) {

			vib->remain_vib_ms = value - over_ms;
			vib->level = vib->max_level_mv / 100;
			vib->active_level = vib->request_level;

            if(unlikely(debug_mask))
                printk(KERN_INFO "start overdrive over_level:%d over_ms:%d \n",vib->level,over_ms);

#ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_REST_POWER
			do_gettimeofday(&current_tv);
			if(vib->vib_state) { // vibrator is working now.
				struct timeval min_timeout_tv;
				min_timeout_tv.tv_sec = vib->min_timeout_ms / 1000;
				min_timeout_tv.tv_usec = (vib->min_timeout_ms % 1000) * 1000;

				get_timeval_interval(&current_tv, &(vib->start_tv), &interval_tv);
                if(unlikely(debug_mask)) {
                    printk(KERN_INFO "vib_state is true, cur:%ld.%06ld, sta:%ld.%06ld, itv:%ld.%06ld\n",
                            current_tv.tv_sec, current_tv.tv_usec,
                            vib->start_tv.tv_sec, vib->start_tv.tv_usec,
                            interval_tv.tv_sec, interval_tv.tv_usec );
                }

				// if greater than min_timeout, no need over drive and min time.
				if(compare_timeval_interval(&interval_tv, &min_timeout_tv)==1) {
					value = origin_value;
                    if(unlikely(debug_mask))
                        printk(KERN_INFO "interval greater than min_timeout, start normal vib %dms\n",value);
					goto NORMAL_VIB_START;
				}
				// if less than min_timeout, need corrected value
				else {
					int interval_ms;
					interval_ms = (interval_tv.tv_sec * 1000) + (interval_tv.tv_usec / 1000);
					if(over_ms > interval_ms) {
						over_ms = over_ms - interval_ms;
						vib->remain_vib_ms = origin_value;
                        if(unlikely(debug_mask))
                            printk(KERN_INFO "interval less than min_timeout, start overdrive %dms, remain %dms\n", over_ms, vib->remain_vib_ms);
						goto OVERDRIVE_VIB_START;
					}
					else
					{
						value = value - interval_ms;
                        if(unlikely(debug_mask))
                            printk(KERN_INFO "interval less than min_timeout, start normal vib %dms\n",value);
						goto NORMAL_VIB_START;
					}
				}
			}
			else { // vibrator is not working now.
				struct timeval min_stop_tv;
				min_stop_tv.tv_sec = vib->min_stop_ms / 1000;
				min_stop_tv.tv_usec = (vib->min_stop_ms % 1000) * 1000;

				get_timeval_interval(&current_tv, &(vib->stop_tv), &interval_tv);

                if(unlikely(debug_mask)) {
                    printk(KERN_INFO "vib_state is false, cur:%ld.%06ld, sto:%ld.%06ld, itv:%ld.%06ld\n",
                            current_tv.tv_sec, current_tv.tv_usec,
                            vib->stop_tv.tv_sec, vib->stop_tv.tv_usec,
                            interval_tv.tv_sec, interval_tv.tv_usec );
                }

				// if greater than min_stop_tv, start vibration over drive and value.
				if(compare_timeval_interval(&interval_tv, &min_stop_tv)==1) {
                    if(unlikely(debug_mask))
                        printk(KERN_INFO "greater than min_stop_timeout, start overdrive %dms, remain %dms\n", over_ms, vib->remain_vib_ms);
					goto OVERDRIVE_VIB_START;
				}
				// if less than min_stop_tv, reduce over drive time.
				else {
					int interval_ms;
					interval_ms = (interval_tv.tv_sec * 1000) + (interval_tv.tv_usec / 1000);
					over_ms = interval_ms / (vib->min_stop_ms / vib->overdrive_ms) / 2;
					vib->remain_vib_ms = (value - over_ms) / 2;

                    if(unlikely(debug_mask))
                        printk(KERN_INFO "less than min_stop_timeout, start overdrive %dms, remain %dms\n", over_ms, vib->remain_vib_ms);
					goto OVERDRIVE_VIB_START;
				}
			}
#else
			goto OVERDRIVE_VIB_START;
#endif
		}
		else
#endif
		{
			goto NORMAL_VIB_START;
		}
	}

NORMAL_VIB_START:
#ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_VOL
	vib->level = vib->request_level;
#else
	vib->level = vib->default_level;
#endif
	hrtimer_start(&vib->vib_timer,
		ktime_set(value / 1000, (value % 1000) * 1000000),
		HRTIMER_MODE_REL);

	goto FINISH_VIB_ENABLE;

#ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_OVERDRIVE
OVERDRIVE_VIB_START:
	hrtimer_start(&vib->vib_overdrive_timer,
		ktime_set(over_ms / 1000, (over_ms % 1000) * 1000000),
		HRTIMER_MODE_REL);
#endif

FINISH_VIB_ENABLE:
	spin_unlock_irqrestore(&vib->lock, flags);
	schedule_work(&vib->work);
}
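A condensed, hypothetical sketch of the cancel-and-retry idiom at the top of the function above: hrtimer_cancel() cannot be called while holding a lock the timer callback may also take, so the driver drops the lock, relaxes, and retries until hrtimer_try_to_cancel() no longer reports a running callback.

struct my_vib {
	spinlock_t lock;
	struct hrtimer timer;
};

static void my_cancel_then_rearm(struct my_vib *v, ktime_t expiry)
{
	unsigned long flags;

retry:
	spin_lock_irqsave(&v->lock, flags);
	if (hrtimer_try_to_cancel(&v->timer) < 0) {
		/* callback is running and may be waiting on v->lock: back off */
		spin_unlock_irqrestore(&v->lock, flags);
		cpu_relax();
		goto retry;
	}
	hrtimer_start(&v->timer, expiry, HRTIMER_MODE_REL);
	spin_unlock_irqrestore(&v->lock, flags);
}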
Example #9
int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
{
	struct task_struct *tsk = current;
	struct hrtimer *timer;
	ktime_t expires;
	cputime_t cval, cinterval, nval, ninterval;

	/*
	 * Validate the timevals in value.
	 *
	 * Note: Although the spec requires that invalid values shall
	 * return -EINVAL, we just fixup the value and print a limited
	 * number of warnings in order not to break users of this
	 * historical misfeature.
	 *
	 * Scheduled for replacement in March 2007
	 */
	check_itimerval(value);

	MARK(kernel_timer_set_itimer, "%d %ld %ld %ld %ld",
			which,
			value->it_interval.tv_sec,
			value->it_interval.tv_usec,
			value->it_value.tv_sec,
			value->it_value.tv_usec);

	switch (which) {
	case ITIMER_REAL:
again:
		spin_lock_irq(&tsk->sighand->siglock);
		timer = &tsk->signal->real_timer;
		if (ovalue) {
			ovalue->it_value = itimer_get_remtime(timer);
			ovalue->it_interval
				= ktime_to_timeval(tsk->signal->it_real_incr);
		}
		/* We are sharing ->siglock with it_real_fn() */
		if (hrtimer_try_to_cancel(timer) < 0) {
			spin_unlock_irq(&tsk->sighand->siglock);
			goto again;
		}
		tsk->signal->it_real_incr =
			timeval_to_ktime(value->it_interval);
		expires = timeval_to_ktime(value->it_value);
		if (expires.tv64 != 0)
			hrtimer_start(timer, expires, HRTIMER_REL);
		spin_unlock_irq(&tsk->sighand->siglock);
		break;
	case ITIMER_VIRTUAL:
		nval = timeval_to_cputime(&value->it_value);
		ninterval = timeval_to_cputime(&value->it_interval);
		read_lock(&tasklist_lock);
		spin_lock_irq(&tsk->sighand->siglock);
		cval = tsk->signal->it_virt_expires;
		cinterval = tsk->signal->it_virt_incr;
		if (!cputime_eq(cval, cputime_zero) ||
		    !cputime_eq(nval, cputime_zero)) {
			if (cputime_gt(nval, cputime_zero))
				nval = cputime_add(nval,
						   jiffies_to_cputime(1));
			set_process_cpu_timer(tsk, CPUCLOCK_VIRT,
					      &nval, &cval);
		}
		tsk->signal->it_virt_expires = nval;
		tsk->signal->it_virt_incr = ninterval;
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);
		if (ovalue) {
			cputime_to_timeval(cval, &ovalue->it_value);
			cputime_to_timeval(cinterval, &ovalue->it_interval);
		}
		break;
	case ITIMER_PROF:
		nval = timeval_to_cputime(&value->it_value);
		ninterval = timeval_to_cputime(&value->it_interval);
		read_lock(&tasklist_lock);
		spin_lock_irq(&tsk->sighand->siglock);
		cval = tsk->signal->it_prof_expires;
		cinterval = tsk->signal->it_prof_incr;
		if (!cputime_eq(cval, cputime_zero) ||
		    !cputime_eq(nval, cputime_zero)) {
			if (cputime_gt(nval, cputime_zero))
				nval = cputime_add(nval,
						   jiffies_to_cputime(1));
			set_process_cpu_timer(tsk, CPUCLOCK_PROF,
					      &nval, &cval);
		}
		tsk->signal->it_prof_expires = nval;
		tsk->signal->it_prof_incr = ninterval;
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);
		if (ovalue) {
			cputime_to_timeval(cval, &ovalue->it_value);
			cputime_to_timeval(cinterval, &ovalue->it_interval);
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
Example #10
void netmap_mitigation_start(struct nm_generic_mit *mit)
{
    hrtimer_start(&mit->mit_timer, ktime_set(0, netmap_generic_mit), HRTIMER_MODE_REL);
}
Example #11
static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
{
	struct watchdog_core_data *wd_data;
	int err;

	wd_data = kzalloc(sizeof(struct watchdog_core_data), GFP_KERNEL);
	if (!wd_data)
		return -ENOMEM;
	kref_init(&wd_data->kref);
	mutex_init(&wd_data->lock);

	wd_data->wdd = wdd;
	wdd->wd_data = wd_data;

	if (IS_ERR_OR_NULL(watchdog_kworker))
		return -ENODEV;

	kthread_init_work(&wd_data->work, watchdog_ping_work);
	hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	wd_data->timer.function = watchdog_timer_expired;

	if (wdd->id == 0) {
		old_wd_data = wd_data;
		watchdog_miscdev.parent = wdd->parent;
		err = misc_register(&watchdog_miscdev);
		if (err != 0) {
			pr_err("%s: cannot register miscdev on minor=%d (err=%d).\n",
				wdd->info->identity, WATCHDOG_MINOR, err);
			if (err == -EBUSY)
				pr_err("%s: a legacy watchdog module is probably present.\n",
					wdd->info->identity);
			old_wd_data = NULL;
			kfree(wd_data);
			return err;
		}
	}

	/* Fill in the data structures */
	cdev_init(&wd_data->cdev, &watchdog_fops);
	wd_data->cdev.owner = wdd->ops->owner;

	/* Add the device */
	err = cdev_add(&wd_data->cdev, devno, 1);
	if (err) {
		pr_err("watchdog%d unable to add device %d:%d\n",
			wdd->id,  MAJOR(watchdog_devt), wdd->id);
		if (wdd->id == 0) {
			misc_deregister(&watchdog_miscdev);
			old_wd_data = NULL;
			kref_put(&wd_data->kref, watchdog_core_data_release);
		}
		return err;
	}

	/* Record time of most recent heartbeat as 'just before now'. */
	wd_data->last_hw_keepalive = ktime_sub(ktime_get(), 1);

	/*
	 * If the watchdog is running, prevent its driver from being unloaded,
	 * and schedule an immediate ping.
	 */
	if (watchdog_hw_running(wdd)) {
		__module_get(wdd->ops->owner);
		kref_get(&wd_data->kref);
		if (handle_boot_enabled)
			hrtimer_start(&wd_data->timer, 0, HRTIMER_MODE_REL);
		else
			pr_info("watchdog%d running and kernel based pre-userspace handler disabled\n",
				wdd->id);
	}

	return 0;
}
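A minimal sketch of the init-then-kick pattern above (names hypothetical): set up the timer once, then start it with a zero relative expiry so the first ping fires as soon as possible.

static enum hrtimer_restart my_ping_expired(struct hrtimer *timer)
{
	/* queue the real work here; hrtimer callbacks must not sleep */
	return HRTIMER_NORESTART;
}

static void my_setup_and_kick(struct hrtimer *timer)
{
	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer->function = my_ping_expired;
	hrtimer_start(timer, 0, HRTIMER_MODE_REL);	/* expire immediately (plain 0 as ktime_t, as above) */
}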
tEplKernel EplTimerHighReskModifyTimerNs(tEplTimerHdl *pTimerHdl_p,
					 unsigned long long ullTimeNs_p,
					 tEplTimerkCallback pfnCallback_p,
					 unsigned long ulArgument_p,
					 BOOL fContinuously_p)
{
	tEplKernel Ret;
	unsigned int uiIndex;
	tEplTimerHighReskTimerInfo *pTimerInfo;
	ktime_t RelTime;

	Ret = kEplSuccessful;

	// check pointer to handle
	if (pTimerHdl_p == NULL) {
		Ret = kEplTimerInvalidHandle;
		goto Exit;
	}

	if (*pTimerHdl_p == 0) {	// no timer created yet

		// search free timer info structure
		pTimerInfo = &EplTimerHighReskInstance_l.m_aTimerInfo[0];
		for (uiIndex = 0; uiIndex < TIMER_COUNT;
		     uiIndex++, pTimerInfo++) {
			if (pTimerInfo->m_EventArg.m_TimerHdl == 0) {	// free structure found
				break;
			}
		}
		if (uiIndex >= TIMER_COUNT) {	// no free structure found
			Ret = kEplTimerNoTimerCreated;
			goto Exit;
		}

		pTimerInfo->m_EventArg.m_TimerHdl = HDL_INIT(uiIndex);
	} else {
		uiIndex = HDL_TO_IDX(*pTimerHdl_p);
		if (uiIndex >= TIMER_COUNT) {	// invalid handle
			Ret = kEplTimerInvalidHandle;
			goto Exit;
		}

		pTimerInfo = &EplTimerHighReskInstance_l.m_aTimerInfo[uiIndex];
	}

	/*
	 * increment timer handle
	 * (if timer expires right after this statement, the user
	 * would detect an unknown timer handle and discard it)
	 */
	pTimerInfo->m_EventArg.m_TimerHdl =
	    HDL_INC(pTimerInfo->m_EventArg.m_TimerHdl);
	*pTimerHdl_p = pTimerInfo->m_EventArg.m_TimerHdl;

	// reject too small time values
	if ((fContinuously_p && (ullTimeNs_p < TIMER_MIN_VAL_CYCLE))
	    || (!fContinuously_p && (ullTimeNs_p < TIMER_MIN_VAL_SINGLE))) {
		Ret = kEplTimerNoTimerCreated;
		goto Exit;
	}

	pTimerInfo->m_EventArg.m_ulArg = ulArgument_p;
	pTimerInfo->m_pfnCallback = pfnCallback_p;
	pTimerInfo->m_fContinuously = fContinuously_p;
	pTimerInfo->m_ullPeriod = ullTimeNs_p;

	/*
	 * HRTIMER_MODE_REL does not influence general handling of this timer.
	 * It only sets relative mode for this start operation.
	 * -> Expire time is calculated by: Now + RelTime
	 * hrtimer_start also skips pending timer events.
	 * The state HRTIMER_STATE_CALLBACK is ignored.
	 * We have to cope with that in our callback function.
	 */
	RelTime = ktime_add_ns(ktime_set(0, 0), ullTimeNs_p);
	hrtimer_start(&pTimerInfo->m_Timer, RelTime, HRTIMER_MODE_REL);

      Exit:
	return Ret;

}
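To illustrate the comment above about relative mode, a hypothetical helper contrasting HRTIMER_MODE_REL (expire at now + delay) with HRTIMER_MODE_ABS (expire at an explicit instant on the timer's clock); each hrtimer_start() call re-arms the timer and replaces any previous expiry.

static void my_arm_relative(struct hrtimer *t, u64 delay_ns)
{
	hrtimer_start(t, ns_to_ktime(delay_ns), HRTIMER_MODE_REL);
}

static void my_arm_absolute(struct hrtimer *t, u64 delay_ns)
{
	/* for a CLOCK_MONOTONIC timer, the absolute expiry is based on ktime_get() */
	hrtimer_start(t, ktime_add_ns(ktime_get(), delay_ns), HRTIMER_MODE_ABS);
}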
struct lego_port_device
*ev3_output_port_register(struct ev3_output_port_platform_data *pdata,
			  struct device *parent)
{
	struct ev3_output_port_data *data;
	struct pwm_device *pwm;
	int err;

	if (WARN(!pdata, "Platform data is required."))
		return ERR_PTR(-EINVAL);

	data = kzalloc(sizeof(struct ev3_output_port_data), GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	data->id = pdata->id;
	data->analog = get_legoev3_analog();
	if (IS_ERR(data->analog)) {
		dev_err(parent, "Could not get legoev3-analog device.\n");
		err = PTR_ERR(data->analog);
		goto err_request_legoev3_analog;
	}

	data->gpio[GPIO_PIN1].gpio	= pdata->pin1_gpio;
	data->gpio[GPIO_PIN1].flags	= GPIOF_IN;
	data->gpio[GPIO_PIN1].label	= "pin1";

	data->gpio[GPIO_PIN2].gpio	= pdata->pin2_gpio;
	data->gpio[GPIO_PIN2].flags	= GPIOF_IN;
	data->gpio[GPIO_PIN2].label	= "pin2";

	data->gpio[GPIO_PIN5].gpio	= pdata->pin5_gpio;
	data->gpio[GPIO_PIN5].flags	= GPIOF_IN;
	data->gpio[GPIO_PIN5].label	= "pin5";

	data->gpio[GPIO_PIN5_INT].gpio	= pdata->pin5_int_gpio;
	data->gpio[GPIO_PIN5_INT].flags	= GPIOF_IN;
	data->gpio[GPIO_PIN5_INT].label	= "pin5_tacho";

	data->gpio[GPIO_PIN6_DIR].gpio	= pdata->pin6_dir_gpio;
	data->gpio[GPIO_PIN6_DIR].flags	= GPIOF_IN;
	data->gpio[GPIO_PIN6_DIR].label	= "pin6";

	err = gpio_request_array(data->gpio, ARRAY_SIZE(data->gpio));
	if (err) {
		dev_err(parent, "Requesting GPIOs failed.\n");
		goto err_gpio_request_array;
	}

	data->out_port.name = ev3_output_port_type.name;
	snprintf(data->out_port.port_name, LEGO_PORT_NAME_SIZE, "out%c",
		 data->id + 'A');
	pwm = pwm_get(NULL, data->out_port.port_name);
	if (IS_ERR(pwm)) {
		dev_err(parent, "Could not get pwm! (%ld)\n", PTR_ERR(pwm));
		err = PTR_ERR(pwm);
		goto err_pwm_get;
	}

	err = pwm_config(pwm, 0, NSEC_PER_SEC / 10000);
	if (err) {
		dev_err(parent,
			"Failed to set pwm duty percent and frequency! (%d)\n",
			err);
		goto err_pwm_config;
	}

	err = pwm_enable(pwm);
	if (err) {
		dev_err(parent, "Failed to start pwm! (%d)\n", err);
		goto err_pwm_start;
	}
	/* This lets us set the pwm duty cycle in an atomic context */
	pm_runtime_irq_safe(pwm->chip->dev);
	data->pwm = pwm;

	data->out_port.num_modes = NUM_EV3_OUTPUT_PORT_MODE;
	data->out_port.mode_info = legoev3_output_port_mode_info;
	data->out_port.set_mode = ev3_output_port_set_mode;
	data->out_port.set_device = ev3_output_port_set_device;
	data->out_port.get_status = ev3_output_port_get_status;
	data->out_port.dc_motor_ops = &ev3_output_port_motor_ops;
	data->out_port.context = data;
	err = lego_port_register(&data->out_port, &ev3_output_port_type, parent);
	if (err) {
		dev_err(parent, "Failed to register lego_port_device. (%d)\n",
			err);
		goto err_lego_port_register;
	}

	INIT_WORK(&data->change_uevent_work, ev3_output_port_change_uevent_work);
	INIT_WORK(&data->work, NULL);

	data->con_state = CON_STATE_INIT;

	hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	data->timer.function = ev3_output_port_timer_callback;
	hrtimer_start(&data->timer, ktime_set(0, OUTPUT_PORT_POLL_NS),
		      HRTIMER_MODE_REL);

	return &data->out_port;

err_lego_port_register:
	pwm_disable(pwm);
err_pwm_start:
err_pwm_config:
	pwm_put(pwm);
err_pwm_get:
	gpio_free_array(data->gpio, ARRAY_SIZE(data->gpio));
err_gpio_request_array:
	put_legoev3_analog(data->analog);
err_request_legoev3_analog:
	kfree(data);

	return ERR_PTR(err);
}
static ssize_t k3g_set_delay(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct k3g_data *k3g_data  = dev_get_drvdata(dev);
	int odr_value = ODR105_BW25;
	int res = 0;
	int i;
	u64 delay_ns;
	u8 ctrl;

	res = strict_strtoll(buf, 10, &delay_ns);
	if (res < 0)
		return res;

	mutex_lock(&k3g_data->lock);
	if (!k3g_data->interruptible)
		hrtimer_cancel(&k3g_data->timer);
	else
		disable_irq(k3g_data->client->irq);

	/* round to the nearest supported ODR that is equal to or greater than
	 * the requested value
	 */
	for (i = 0; i < ARRAY_SIZE(odr_delay_table); i++) {
		if (delay_ns < odr_delay_table[i].delay_ns)
			break;
	}
	if (i > 0)
		i--;

	odr_value = odr_delay_table[i].odr;
	delay_ns = odr_delay_table[i].delay_ns;
	k3g_data->time_to_read = delay_ns;
	k3g_data->entries = 1;

	if (delay_ns >= odr_delay_table[3].delay_ns) {
		if (delay_ns >= MAX_DELAY) {
			k3g_data->entries = MAX_ENTRY;
			delay_ns = MAX_DELAY;
		} else {
			do_div(delay_ns, odr_delay_table[3].delay_ns);
			k3g_data->entries = delay_ns;
		}
		k3g_data->time_to_read = odr_delay_table[3].delay_ns;
	}

	if (odr_value != (k3g_data->ctrl_regs[0] & ODR_MASK)) {
		ctrl = (k3g_data->ctrl_regs[0] & ~ODR_MASK);
		ctrl |= odr_value;
		k3g_data->ctrl_regs[0] = ctrl;
		res = i2c_smbus_write_byte_data(k3g_data->client,
						CTRL_REG1, ctrl);
	}

	/* we see noise in the first sample or two after we
	 * change rates.  this delay helps eliminate that noise.
	 */
	msleep((u32)delay_ns * 2 / NSEC_PER_MSEC);

	/* (re)start fifo */
	k3g_restart_fifo(k3g_data);

	if (!k3g_data->interruptible) {
		delay_ns = k3g_data->entries * k3g_data->time_to_read;
		k3g_data->polling_delay = ns_to_ktime(delay_ns);
		if (k3g_data->enable)
			hrtimer_start(&k3g_data->timer,
				k3g_data->polling_delay, HRTIMER_MODE_REL);
	} else
		enable_irq(k3g_data->client->irq);

	mutex_unlock(&k3g_data->lock);

	return size;
}
static int constant_flashlight_ioctl(MUINT32 cmd, MUINT32 arg)
{
	int i4RetValue = 0;
	int iFlashType = (int)FLASHLIGHT_NONE;
	int ior;
	int iow;
	int iowr;
	ior = _IOR(FLASHLIGHT_MAGIC,0, int);
	iow = _IOW(FLASHLIGHT_MAGIC,0, int);
	iowr = _IOWR(FLASHLIGHT_MAGIC,0, int);
	PK_DBG("constant_flashlight_ioctl() line=%d cmd=%d, ior=%d, iow=%d iowr=%d arg=%d\n",__LINE__, cmd, ior, iow, iowr, arg);
	PK_DBG("constant_flashlight_ioctl() line=%d cmd-ior=%d, cmd-iow=%d cmd-iowr=%d arg=%d\n",__LINE__, cmd-ior, cmd-iow, cmd-iowr, arg);
    switch(cmd)
    {

        case FLASH_IOC_SET_TIME_OUT_TIME_MS:
                PK_DBG("FLASH_IOC_SET_TIME_OUT_TIME_MS: %d\n",arg);
                g_timeOutTimeMs=arg;
                break;

    	case FLASH_IOC_SET_DUTY :
    		PK_DBG("FLASHLIGHT_DUTY: %d\n",arg);
    		g_duty=arg;
    		FL_dim_duty(arg);
    		break;

    	case FLASH_IOC_SET_STEP:
    		PK_DBG("FLASH_IOC_SET_STEP: %d\n",arg);
    		g_step=arg;
    		FL_step(arg);
    		break;

    	case FLASH_IOC_SET_ONOFF :
    		PK_DBG("FLASHLIGHT_ONOFF: %d\n",arg);
    		if(arg==1)
    		{
	            if(g_timeOutTimeMs!=0)
	            {
	            	ktime_t ktime;
                        ktime = ktime_set( 0, g_timeOutTimeMs*1000000 );
                        hrtimer_start( &g_timeOutTimer, ktime, HRTIMER_MODE_REL );
	            }
	            FL_enable();
	            g_strobe_On=1;
    		}
    		else
    		{
	            FL_disable();
	            hrtimer_cancel( &g_timeOutTimer );
	            g_strobe_On=0;
    		}
    		break;
        case FLASHLIGHTIOC_G_FLASHTYPE:
            iFlashType = FLASHLIGHT_LED_CONSTANT;
            if(copy_to_user((void __user *) arg , (void*)&iFlashType , _IOC_SIZE(cmd)))
            {
                PK_DBG("[strobe_ioctl] ioctl copy to user failed\n");
                return -EFAULT;
            }
            break;
        default :
    		PK_DBG(" No such command \n");
    		i4RetValue = -EPERM;
    		break;
    }
    return i4RetValue;
}
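A reduced, hypothetical sketch of the software-timeout pattern in the FLASH_IOC_SET_ONOFF case above: arm a one-shot safety timer when the strobe turns on and cancel it when the strobe turns off.

struct my_strobe {
	struct hrtimer timeout_timer;	/* callback is assumed to switch the LED off */
	unsigned int timeout_ms;
};

static void my_strobe_on(struct my_strobe *s)
{
	if (s->timeout_ms)
		hrtimer_start(&s->timeout_timer, ms_to_ktime(s->timeout_ms),
			      HRTIMER_MODE_REL);
	/* enable the hardware here */
}

static void my_strobe_off(struct my_strobe *s)
{
	/* disable the hardware here */
	hrtimer_cancel(&s->timeout_timer);
}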
Example #16
int flashlight_control(int mode)
{
	int ret = 0;
	uint32_t flash_ns = ktime_to_ns(ktime_get());

#if 0 /* disable flash_adj_value check now */
	if (this_fl_str->flash_adj_value == 2) {
		printk(KERN_WARNING "%s: force disable function!\n", __func__);
		return -EIO;
	}
#endif
	spin_lock_irqsave(&this_fl_str->spin_lock,
						this_fl_str->spinlock_flags);
	if (this_fl_str->mode_status == FL_MODE_FLASH) {
		hrtimer_cancel(&this_fl_str->timer);
		wake_unlock(&this_fl_str->wake_lock);
		flashlight_turn_off();
	}
	switch (mode) {
	case FL_MODE_OFF:
		flashlight_turn_off();
	break;
	case FL_MODE_TORCH:
		flashlight_hw_command(3, 1);
		flashlight_hw_command(0, 15);
		flashlight_hw_command(2, 4);
		this_fl_str->mode_status = FL_MODE_TORCH;
		this_fl_str->fl_lcdev.brightness = LED_HALF;
	break;
	case FL_MODE_TORCH_LED_A:
		flashlight_hw_command(3, 1);
		flashlight_hw_command(0, 15);
		flashlight_hw_command(2, 3);
		this_fl_str->mode_status = FL_MODE_TORCH_LED_A;
		this_fl_str->fl_lcdev.brightness = 1;
	break;
	case FL_MODE_TORCH_LED_B:
		flashlight_hw_command(3, 1);
		flashlight_hw_command(0, 15);
		flashlight_hw_command(2, 2);
		this_fl_str->mode_status = FL_MODE_TORCH_LED_B;
		this_fl_str->fl_lcdev.brightness = 2;
	break;
	case FL_MODE_FLASH:
		flashlight_hw_command(2, 4);
		gpio_direction_output(this_fl_str->gpio_flash, 1);
		this_fl_str->mode_status = FL_MODE_FLASH;
		this_fl_str->fl_lcdev.brightness = LED_FULL;
		hrtimer_start(&this_fl_str->timer,
			ktime_set(this_fl_str->flash_sw_timeout_ms / 1000,
				(this_fl_str->flash_sw_timeout_ms % 1000) *
					NSEC_PER_MSEC), HRTIMER_MODE_REL);
		wake_lock(&this_fl_str->wake_lock);
	break;
	case FL_MODE_PRE_FLASH:
		flashlight_hw_command(3, 1);
		flashlight_hw_command(0, 9);
		flashlight_hw_command(2, 4);
		this_fl_str->mode_status = FL_MODE_PRE_FLASH;
		this_fl_str->fl_lcdev.brightness = LED_HALF + 1;
	break;
	case FL_MODE_TORCH_LEVEL_1:
		flashlight_hw_command(3, 8);
		flashlight_hw_command(0, 15);
		flashlight_hw_command(2, 4);
		this_fl_str->mode_status = FL_MODE_TORCH_LEVEL_1;
		this_fl_str->fl_lcdev.brightness = LED_HALF - 2;
	break;
	case FL_MODE_TORCH_LEVEL_2:
		flashlight_hw_command(3, 4);
		flashlight_hw_command(0, 15);
		flashlight_hw_command(2, 4);
		this_fl_str->mode_status = FL_MODE_TORCH_LEVEL_2;
		this_fl_str->fl_lcdev.brightness = LED_HALF - 1;
	break;
        case FL_MODE_DEATH_RAY:
		pr_info("%s: death ray\n", __func__);
		hrtimer_cancel(&this_fl_str->timer);
		gpio_direction_output(this_fl_str->gpio_flash, 0);
		udelay(40);
		gpio_direction_output(this_fl_str->gpio_flash, 1);
		this_fl_str->mode_status = 0;
		this_fl_str->fl_lcdev.brightness = 3;
		wake_lock(&this_fl_str->wake_lock);
	break;
	default:
		printk(KERN_ERR "%s: unknown flash_light flags: %d\n",
							__func__, mode);
		ret = -EINVAL;
	break;
	}

	printk(KERN_DEBUG "%s: mode: %d, %u\n", FLASHLIGHT_NAME, mode,
		flash_ns/(1000*1000));

	spin_unlock_irqrestore(&this_fl_str->spin_lock,
						this_fl_str->spinlock_flags);
	return ret;
}
Example #17
static enum hrtimer_restart gpio_event_input_timer_func(struct hrtimer *timer)
{
	int i;
	int pressed;
	struct gpio_input_state *ds =
		container_of(timer, struct gpio_input_state, timer);
	unsigned gpio_flags = ds->info->flags;
	unsigned npolarity;
	int nkeys = ds->info->keymap_size;
	const struct gpio_event_direct_entry *key_entry;
	struct gpio_key_state *key_state;
	unsigned long irqflags;
	uint8_t debounce;
	bool sync_needed;

#if 0
	key_entry = kp->keys_info->keymap;
	key_state = kp->key_state;
	for (i = 0; i < nkeys; i++, key_entry++, key_state++)
		pr_info("gpio_read_detect_status %d %d\n", key_entry->gpio,
			gpio_read_detect_status(key_entry->gpio));
#endif
	key_entry = ds->info->keymap;
	key_state = ds->key_state;
	sync_needed = false;
	spin_lock_irqsave(&ds->irq_lock, irqflags);
	for (i = 0; i < nkeys; i++, key_entry++, key_state++) {
		debounce = key_state->debounce;
		if (debounce & DEBOUNCE_WAIT_IRQ)
			continue;
		if (key_state->debounce & DEBOUNCE_UNSTABLE) {
			debounce = key_state->debounce = DEBOUNCE_UNKNOWN;
			enable_irq(gpio_to_irq(key_entry->gpio));
			if (gpio_flags & GPIOEDF_PRINT_KEY_UNSTABLE)
				KEY_LOGI("gpio_keys_scan_keys: key %x-%x, %d "
					"(%d) continue debounce\n",
					ds->info->type, key_entry->code,
					i, key_entry->gpio);
		}
		npolarity = !(gpio_flags & GPIOEDF_ACTIVE_HIGH);
		pressed = gpio_get_value(key_entry->gpio) ^ npolarity;
		if (debounce & DEBOUNCE_POLL) {
			if (pressed == !(debounce & DEBOUNCE_PRESSED)) {
				ds->debounce_count++;
				key_state->debounce = DEBOUNCE_UNKNOWN;
				if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
					KEY_LOGI("gpio_keys_scan_keys: key %x-"
						"%x, %d (%d) start debounce\n",
						ds->info->type, key_entry->code,
						i, key_entry->gpio);
			}
			continue;
		}
		if (pressed && (debounce & DEBOUNCE_NOTPRESSED)) {
			if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
				KEY_LOGI("gpio_keys_scan_keys: key %x-%x, %d "
					"(%d) debounce pressed 1\n",
					ds->info->type, key_entry->code,
					i, key_entry->gpio);
			key_state->debounce = DEBOUNCE_PRESSED;
			continue;
		}
		if (!pressed && (debounce & DEBOUNCE_PRESSED)) {
			if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
				KEY_LOGI("gpio_keys_scan_keys: key %x-%x, %d "
					"(%d) debounce pressed 0\n",
					ds->info->type, key_entry->code,
					i, key_entry->gpio);
			key_state->debounce = DEBOUNCE_NOTPRESSED;
			continue;
		}
		
		ds->debounce_count--;
		if (ds->use_irq)
			key_state->debounce |= DEBOUNCE_WAIT_IRQ;
		else
			key_state->debounce |= DEBOUNCE_POLL;
		if (gpio_flags & GPIOEDF_PRINT_KEYS)
			KEY_LOGI("gpio_keys_scan_keys: key %x-%x, %d (%d) "
				"changed to %d\n", ds->info->type,
				key_entry->code, i, key_entry->gpio, pressed);
#ifdef CONFIG_POWER_KEY_LED
		handle_power_key_led(key_entry->code, pressed);
#endif
#ifdef CONFIG_POWER_KEY_CLR_RESET
		handle_power_key_reset(key_entry->code, pressed);
#endif
		handle_power_key_state(key_entry->code, pressed);

		input_event(ds->input_devs->dev[key_entry->dev], ds->info->type,
			    key_entry->code, pressed);
		sync_needed = true;
	}
	if (sync_needed) {
		for (i = 0; i < ds->input_devs->count; i++)
			input_sync(ds->input_devs->dev[i]);
	}

#if 0
	key_entry = kp->keys_info->keymap;
	key_state = kp->key_state;
	for (i = 0; i < nkeys; i++, key_entry++, key_state++) {
		pr_info("gpio_read_detect_status %d %d\n", key_entry->gpio,
			gpio_read_detect_status(key_entry->gpio));
	}
#endif

	if (ds->debounce_count)
		hrtimer_start(timer, ds->info->debounce_time, HRTIMER_MODE_REL);
	else if (!ds->use_irq)
		hrtimer_start(timer, ds->info->poll_time, HRTIMER_MODE_REL);
	else
		wake_unlock(&ds->wake_lock);

	spin_unlock_irqrestore(&ds->irq_lock, irqflags);

	return HRTIMER_NORESTART;
}
static int jz_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct jz_pcm_runtime_data *prtd = substream->runtime->private_data;

#ifdef CONFIG_JZ_ASOC_DMA_HRTIMER_MODE
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
#endif

#ifdef CONFIG_JZ_ASOC_DMA_AUTO_CLR_DRT_MEM
	size_t buffer_bytes = snd_pcm_lib_buffer_bytes(substream);
#endif
	int ret;

	DMA_SUBSTREAM_MSG(substream,"%s enter cmd %d\n", __func__, cmd);
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
#ifndef CONFIG_JZ_ASOC_DMA_HRTIMER_MODE
		if (atomic_read(&prtd->stopped_pending))
			return -EPIPE;
#endif
		DMA_SUBSTREAM_MSG(substream,"start trigger\n");
		ret = jz_asoc_dma_prepare_and_submit(substream);
		if (ret)
			return ret;
		dma_async_issue_pending(prtd->dma_chan);
#ifdef CONFIG_JZ_ASOC_DMA_HRTIMER_MODE
		atomic_set(&prtd->stopped, 0);
		hrtimer_start(&prtd->hr_timer, prtd->expires , HRTIMER_MODE_REL);
#endif
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		DMA_SUBSTREAM_MSG(substream,"stop trigger\n");
#ifdef CONFIG_JZ_ASOC_DMA_HRTIMER_MODE
		atomic_set(&prtd->stopped, 1);
		/* Make sure there is no data transfer on the AHB bus before
		 * we stop the DMA; wait for TUR or ROR to happen
		 */
		if (cpu_dai->driver->ops->trigger) {
			ret = cpu_dai->driver->ops->trigger(substream, cmd, cpu_dai);
			if (ret < 0)
				return ret;
		}
		dmaengine_terminate_all(prtd->dma_chan);
#else
		/* Make sure there is no data transfer on the AHB bus before
		 * we stop the DMA; wait for the DMA callback to happen
		 */
		if (dmaengine_terminate_all(prtd->dma_chan)) {
			prtd->stopped_cmd = cmd;
			atomic_set(&prtd->stopped_pending, 1);
			schedule_delayed_work(&prtd->dwork_stop_dma, prtd->delayed_jiffies);
		}
#endif
#ifdef CONFIG_JZ_ASOC_DMA_AUTO_CLR_DRT_MEM
		printk(KERN_DEBUG"show the time memset1 %d\n", buffer_bytes);
		memset(snd_pcm_get_ptr(substream, 0), 0, buffer_bytes);
		printk(KERN_DEBUG"show the time memset2 %d\n", buffer_bytes);
#endif
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
Example #19
int gpio_event_input_func(struct gpio_event_input_devs *input_devs,
			struct gpio_event_info *info, void **data, int func)
{
	int ret;
	int i;
	unsigned long irqflags;
	struct gpio_event_input_info *di;
	struct gpio_input_state *ds = *data;
	struct kobject *keyboard_kobj;

	di = container_of(info, struct gpio_event_input_info, info);

#ifdef CONFIG_POWER_KEY_CLR_RESET
	gis = di;
#endif

	if (func == GPIO_EVENT_FUNC_SUSPEND) {
		if (ds->use_irq)
			for (i = 0; i < di->keymap_size; i++)
				disable_irq(gpio_to_irq(di->keymap[i].gpio));
#ifndef CONFIG_MFD_MAX8957
		hrtimer_cancel(&ds->timer);
#endif
		return 0;
	}
	if (func == GPIO_EVENT_FUNC_RESUME) {
		spin_lock_irqsave(&ds->irq_lock, irqflags);
		if (ds->use_irq)
			for (i = 0; i < di->keymap_size; i++)
				enable_irq(gpio_to_irq(di->keymap[i].gpio));
#ifndef CONFIG_MFD_MAX8957
		hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
#endif
		spin_unlock_irqrestore(&ds->irq_lock, irqflags);
		return 0;
	}

	if (func == GPIO_EVENT_FUNC_INIT) {
		if (ktime_to_ns(di->poll_time) <= 0)
			di->poll_time = ktime_set(0, 20 * NSEC_PER_MSEC);

		*data = ds = kzalloc(sizeof(*ds) + sizeof(ds->key_state[0]) *
					di->keymap_size, GFP_KERNEL);
		if (ds == NULL) {
			ret = -ENOMEM;
			KEY_LOGE("KEY_ERR: %s: "
				"Failed to allocate private data\n", __func__);
			goto err_ds_alloc_failed;
		}
		ds->debounce_count = di->keymap_size;
		ds->input_devs = input_devs;
		ds->info = di;
		wake_lock_init(&ds->wake_lock, WAKE_LOCK_SUSPEND, "gpio_input");
#ifdef CONFIG_MFD_MAX8957
		wake_lock_init(&ds->key_pressed_wake_lock, WAKE_LOCK_SUSPEND, "pwr_key_pressed");
#endif
#ifdef CONFIG_POWER_KEY_CLR_RESET
		wake_lock_init(&key_reset_clr_wake_lock, WAKE_LOCK_SUSPEND, "gpio_input_pwr_clear");
#endif
		spin_lock_init(&ds->irq_lock);
		if (board_build_flag() == 0)
			ds->debug_log = 0;
		else
			ds->debug_log = 1;

		for (i = 0; i < di->keymap_size; i++) {
			int dev = di->keymap[i].dev;
			if (dev >= input_devs->count) {
				KEY_LOGE("KEY_ERR: %s: bad device "
					"index %d >= %d for key code %d\n",
					__func__, dev, input_devs->count,
					di->keymap[i].code);
				ret = -EINVAL;
				goto err_bad_keymap;
			}
			input_set_capability(input_devs->dev[dev], di->type,
					     di->keymap[i].code);
			ds->key_state[i].ds = ds;
			ds->key_state[i].debounce = DEBOUNCE_UNKNOWN;
		}

		for (i = 0; i < di->keymap_size; i++) {
			ret = gpio_request(di->keymap[i].gpio, "gpio_kp_in");
			if (ret) {
				KEY_LOGE("KEY_ERR: %s: gpio_request "
					"failed for %d\n", __func__, di->keymap[i].gpio);
				goto err_gpio_request_failed;
			}
			ret = gpio_direction_input(di->keymap[i].gpio);
			if (ret) {
				KEY_LOGE("KEY_ERR: %s: "
					"gpio_direction_input failed for %d\n",
					__func__, di->keymap[i].gpio);
				goto err_gpio_configure_failed;
			}
		}

		if (di->setup_input_gpio)
			di->setup_input_gpio();
#ifdef CONFIG_MFD_MAX8957
		ki_queue = create_singlethread_workqueue("ki_queue");
#endif

		ret = gpio_event_input_request_irqs(ds);

		keyboard_kobj = kobject_create_and_add("keyboard", NULL);
		if (keyboard_kobj == NULL) {
			KEY_LOGE("KEY_ERR: %s: subsystem_register failed\n", __func__);
			ret = -ENOMEM;
			return ret;
		}
		if (sysfs_create_file(keyboard_kobj, &dev_attr_vol_wakeup.attr))
			KEY_LOGE("KEY_ERR: %s: sysfs_create_file "
					"return %d\n", __func__, ret);
		wakeup_bitmask = 0;
		set_wakeup = 0;
		spin_lock_irqsave(&ds->irq_lock, irqflags);
		ds->use_irq = ret == 0;

		KEY_LOGI("GPIO Input Driver: Start gpio inputs for %s%s in %s "
			"mode\n", input_devs->dev[0]->name,
			(input_devs->count > 1) ? "..." : "",
			ret == 0 ? "interrupt" : "polling");

#ifndef CONFIG_MFD_MAX8957
		hrtimer_init(&ds->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		ds->timer.function = gpio_event_input_timer_func;
		hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
#endif
		spin_unlock_irqrestore(&ds->irq_lock, irqflags);
		return 0;
	}

	ret = 0;
	spin_lock_irqsave(&ds->irq_lock, irqflags);
#ifndef CONFIG_MFD_MAX8957
	hrtimer_cancel(&ds->timer);
#endif
	if (ds->use_irq) {
		for (i = di->keymap_size - 1; i >= 0; i--) {
			free_irq(gpio_to_irq(di->keymap[i].gpio),
				 &ds->key_state[i]);
		}
	}
	spin_unlock_irqrestore(&ds->irq_lock, irqflags);

	for (i = di->keymap_size - 1; i >= 0; i--) {
err_gpio_configure_failed:
		gpio_free(di->keymap[i].gpio);
err_gpio_request_failed:
		;
	}
err_bad_keymap:
	wake_lock_destroy(&ds->wake_lock);
#ifdef CONFIG_MFD_MAX8957
	wake_lock_destroy(&ds->key_pressed_wake_lock);
#endif
#ifdef CONFIG_POWER_KEY_CLR_RESET
	wake_lock_destroy(&key_reset_clr_wake_lock);
#endif
	kfree(ds);
err_ds_alloc_failed:
	return ret;
}
Example #20
static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = ktime_set(0, completion_nsec);

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}
Example #21
static void msmfb_pan_update(struct fb_info *info, uint32_t left, uint32_t top,
			     uint32_t eright, uint32_t ebottom,
			     uint32_t yoffset, int pan_display)
{
	struct msmfb_info *msmfb = info->par;
	struct msm_panel_data *panel = msmfb->panel;
	unsigned long irq_flags;
	int sleeping;
	int retry = 1;

	DLOG(SHOW_UPDATES, "update %d %d %d %d %d %d\n",
		left, top, eright, ebottom, yoffset, pan_display);
restart:
	spin_lock_irqsave(&msmfb->update_lock, irq_flags);

	/* if we are sleeping, on a pan_display wait 10ms (to throttle back
	 * drawing), otherwise return */
	if (msmfb->sleeping == SLEEPING) {
		DLOG(SUSPEND_RESUME, "drawing while asleep\n");
		spin_unlock_irqrestore(&msmfb->update_lock, irq_flags);
		if (pan_display)
			wait_event_interruptible_timeout(msmfb->frame_wq,
				msmfb->sleeping != SLEEPING, HZ/10);
		return;
	}

	sleeping = msmfb->sleeping;
	/* on a full update, if the last frame has not completed, wait for it */
	if ((pan_display && msmfb->frame_requested != msmfb->frame_done) ||
			    sleeping == UPDATING) {
		int ret;
		spin_unlock_irqrestore(&msmfb->update_lock, irq_flags);
		ret = wait_event_interruptible_timeout(msmfb->frame_wq,
			msmfb->frame_done == msmfb->frame_requested &&
			msmfb->sleeping != UPDATING, 5 * HZ);
		if (ret <= 0 && (msmfb->frame_requested != msmfb->frame_done ||
				 msmfb->sleeping == UPDATING)) {
			if (retry && panel->request_vsync &&
			    (sleeping == AWAKE)) {
				panel->request_vsync(panel,
					&msmfb->vsync_callback);
				retry = 0;
				printk(KERN_WARNING "msmfb_pan_display timeout "
					"rerequest vsync\n");
			} else {
				printk(KERN_WARNING "msmfb_pan_display timeout "
					"waiting for frame start, %d %d\n",
					msmfb->frame_requested,
					msmfb->frame_done);
				return;
			}
		}
		goto restart;
	}


	msmfb->frame_requested++;
	/* if necessary, update the y offset; if this is the
	 * first full update on resume, set the sleeping state */
	if (pan_display) {
		msmfb->yoffset = yoffset;
		if (left == 0 && top == 0 && eright == info->var.xres &&
		    ebottom == info->var.yres) {
			if (sleeping == WAKING) {
				msmfb->update_frame = msmfb->frame_requested;
				DLOG(SUSPEND_RESUME, "full update starting\n");
				msmfb->sleeping = UPDATING;
			}
		}
	}

	/* set the update request */
	if (left < msmfb->update_info.left)
		msmfb->update_info.left = left;
	if (top < msmfb->update_info.top)
		msmfb->update_info.top = top;
	if (eright > msmfb->update_info.eright)
		msmfb->update_info.eright = eright;
	if (ebottom > msmfb->update_info.ebottom)
		msmfb->update_info.ebottom = ebottom;
	DLOG(SHOW_UPDATES, "update queued %d %d %d %d %d\n",
		msmfb->update_info.left, msmfb->update_info.top,
		msmfb->update_info.eright, msmfb->update_info.ebottom,
		msmfb->yoffset);
	spin_unlock_irqrestore(&msmfb->update_lock, irq_flags);

	/* if the panel is all the way on, wait for vsync; otherwise sleep
	 * for 16 ms (long enough for the dma to reach the panel) and then begin dma */
	msmfb->vsync_request_time = ktime_get();
	if (panel->request_vsync && (sleeping == AWAKE)) {
		panel->request_vsync(panel, &msmfb->vsync_callback);
	} else {
		if (!hrtimer_active(&msmfb->fake_vsync)) {
			hrtimer_start(&msmfb->fake_vsync,
				      ktime_set(0, NSEC_PER_SEC/60),
				      HRTIMER_MODE_REL);
		}
	}
}
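The fallback-vsync branch above only arms the timer if it is not already pending; a tiny hypothetical sketch of that guard (re-arming an active hrtimer would otherwise keep replacing its expiry):

static void my_kick_fake_vsync(struct hrtimer *t)
{
	if (!hrtimer_active(t))
		hrtimer_start(t, ktime_set(0, NSEC_PER_SEC / 60), HRTIMER_MODE_REL);
}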
Example #22
static void optjoy_spi_work_func(struct work_struct *work)
{
	struct optjoy_drv_data *optjoy_data = container_of(work, struct optjoy_drv_data, work);
	uint8_t nx, ny, mot_val;
	u8 sq,pxsum, shu_up,shu_dn;
	u16 shutter;
	int8_t dx, dy;
	int i;
	unsigned int keycode = 0;	
	bool check_env = false;

	u16 oj_sht_tbl[12]= {0,500,750,1000,1250,1500,1750,2000,2250,2500,2750,3000};
	u8  oj_pxsum_tbl[12] = {0,0,40,50,60,70,80,80,80,90,90,90};
	u8  oj_sq_tbl[12] = {0,0,35,35,35,35,35,35,35,35,35,35};


	/* reading motion */
   	mot_val = optjoy_spi_read_byte(OJT_MOT);
	if(!(mot_val & 0x80) || lock_oj_event)
		return;


   	sq = optjoy_spi_read_byte(OJT_SQ);
   	shu_up = optjoy_spi_read_byte(OJT_SHUTTER_UP);
   	shu_dn = optjoy_spi_read_byte(OJT_SHUTTER_DOWN);
	shutter = (shu_up << 8) | shu_dn;
   	pxsum = optjoy_spi_read_byte(OJT_PIXEL_SUM);
	nx = optjoy_spi_read_byte(OJT_DELT_Y);
	ny = optjoy_spi_read_byte(OJT_DELT_X);

	for(i=1;i<12;i++)
	{
		if( ((oj_sht_tbl[i-1] <= shutter) && (shutter < oj_sht_tbl[i])) && 
		((oj_pxsum_tbl[i]<=pxsum) && (oj_sq_tbl[i] <=sq)) )
		{
			gprintk("[OJ_KEY] valid environment \n");
			check_env = true;
			
			break;
		}

	}

	if(!check_env)
	{
		gprintk("[OJ_KEY] invalid environment \n");
		return;
	}



	dx = (int8_t)nx;
	dy = ((int8_t)ny) * (-1);


	sum_x = sum_x + dx;
	sum_y = sum_y + dy;

	gprintk("dx=%d, dy=%d , sum_x = %d , sum_y = %d \n",dx, dy,sum_x ,sum_y);		
	if(sum_x > SUM_X_THRESHOLD) keycode = SEC_KEY_DOWN;
	else if(sum_x < -SUM_X_THRESHOLD) keycode = SEC_KEY_UP;
	else if(sum_y > SUM_Y_THRESHOLD) keycode = SEC_KEY_LEFT;
	else if(sum_y < -SUM_Y_THRESHOLD) keycode = SEC_KEY_RIGHT;
	else keycode = 0;

	if (keycode) {

		input_report_key(optjoy_data->input_dev, keycode, 1);
		input_report_key(optjoy_data->input_dev, keycode, 0);
		input_sync(optjoy_data->input_dev);

		hrtimer_cancel(&optjoy_data->timer_touchlock); 

		printk("[opt_joy] key code: %d (sum_x: %d, sum_y: %d)\n",keycode,sum_x,sum_y);
		sum_x = sum_y = 0;
		
		sending_oj_event = ACTIVE;

		hrtimer_start(&optjoy_data->timer_touchlock, ktime_set(0,500000000), HRTIMER_MODE_REL);
	}

	if (optjoy_data->use_irq)
		enable_irq(IRQ_OJT_INT);
}
/**
 * shm_write_msg() - write message to shared memory
 * @shrm:	pointer to the shrm device information structure
 * @l2_header:	L2 header
 * @addr:	pointer to the message
 * @length:	length of the message to be written
 *
 * This function is called from net or char interface driver write operation.
 * Prior to calling this function the message is copied from the user space
 * buffer to the kernel buffer. Based on the L2 header, this function routes
 * the message to the respective channel and FIFO. It then calls the
 * FIFO write function, where the message is written to the physical device.
 */
int shm_write_msg(struct shrm_dev *shrm, u8 l2_header,
					void *addr, u32 length)
{
	u8 channel = 0;
	int ret;

	dev_dbg(shrm->dev, "%s IN\n", __func__);

	if (boot_state != BOOT_DONE) {
		dev_err(shrm->dev,
				"error: boot not done, cannot write message\n");
		ret = -ENODEV;
		goto out;
	}

	if ((l2_header == L2_HEADER_ISI) ||
			(l2_header == L2_HEADER_RPC) ||
			(l2_header == L2_HEADER_SECURITY) ||
			(l2_header == L2_HEADER_COMMON_SIMPLE_LOOPBACK) ||
			(l2_header == L2_HEADER_COMMON_ADVANCED_LOOPBACK) ||
			(l2_header == L2_HEADER_IPCCTRL) ||
			(l2_header == L2_HEADER_IPCDATA)) {
		channel = 0;
		if (shrm_common_tx_state == SHRM_SLEEP_STATE)
			shrm_common_tx_state = SHRM_PTR_FREE;
		else if (shrm_common_tx_state == SHRM_IDLE)
			shrm_common_tx_state = SHRM_PTR_FREE;

	} else if ((l2_header == L2_HEADER_AUDIO) ||
			(l2_header == L2_HEADER_AUDIO_SIMPLE_LOOPBACK) ||
			(l2_header == L2_HEADER_AUDIO_ADVANCED_LOOPBACK)) {
		if (shrm_audio_tx_state == SHRM_SLEEP_STATE)
			shrm_audio_tx_state = SHRM_PTR_FREE;
		else if (shrm_audio_tx_state == SHRM_IDLE)
			shrm_audio_tx_state = SHRM_PTR_FREE;

		channel = 1;
	} else {
		ret = -ENODEV;
		goto out;
	}
	ret = shm_write_msg_to_fifo(shrm, channel, l2_header, addr, length);
	if (ret < 0) {
		dev_err(shrm->dev, "write message to fifo failed\n");
		if (ret == -EAGAIN) {
			/* Start a timer so as to handle this gently */
			if(!atomic_read(&fifo_full)) {
				atomic_set(&fifo_full, 1);
				hrtimer_start(&fifo_full_timer, ktime_set(
						FIFO_FULL_TIMEOUT, 0),
						HRTIMER_MODE_REL);
			}
		}
		return ret;
	}
	/*
	 * notify only if new msg copied is the only unread one
	 * otherwise it means that reading process is ongoing
	 */
	if (is_the_only_one_unread_message(shrm, channel, length)) {

		/* Send Message Pending Notification to CMT */
		if (channel == 0)
			queue_work(shrm->shm_common_ch_wr_wq,
					&shrm->send_ac_msg_pend_notify_0);
		else
			queue_work(shrm->shm_audio_ch_wr_wq,
					&shrm->send_ac_msg_pend_notify_1);

	}

	dev_dbg(shrm->dev, "%s OUT\n", __func__);
	return 0;

out:
	return ret;
}
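A hypothetical sketch of the arm-once backoff used for the -EAGAIN case above: a flag ensures only the first full-FIFO error starts the retry timer, and the timer callback is expected to clear the flag. (The original reads then sets the flag; atomic_xchg() would close that small window.)

static atomic_t my_fifo_full = ATOMIC_INIT(0);
static struct hrtimer my_fifo_full_timer;	/* callback clears my_fifo_full and retries the write */

static void my_handle_fifo_full(unsigned int timeout_s)
{
	if (!atomic_xchg(&my_fifo_full, 1))
		hrtimer_start(&my_fifo_full_timer, ktime_set(timeout_s, 0),
			      HRTIMER_MODE_REL);
}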
Example #24
static int __devinit optjoy_spi_probe(struct platform_device *pdev)
{
	struct optjoy_drv_data *optjoy_data;
	int ret = 0;

	gprintk("start.\n");

	optjoy_gpio_init();  

	optjoy_workqueue = create_singlethread_workqueue("optjoy_workqueue");  
	if (optjoy_workqueue == NULL){
		printk(KERN_ERR "[optjoy_spi_probe] create_singlethread_workqueue failed.\n");
		ret = -ENOMEM;
		goto err_create_workqueue_failed;
	}

	/* alloc driver data */
	optjoy_data = kzalloc(sizeof(struct optjoy_drv_data), GFP_KERNEL);
	if (!optjoy_data) {
		printk(KERN_ERR "[optjoy_spi_probe] kzalloc error\n");
		ret = -ENOMEM;
		goto err_alloc_data_failed;
	}
  	
	optjoy_data->input_dev = input_allocate_device();
	if (optjoy_data->input_dev == NULL) {
		printk(KERN_ERR "[optjoy_spi_probe] Failed to allocate input device\n");
		ret = -ENOMEM;
		goto err_input_dev_alloc_failed;
	}


	/* workqueue initialize */
	INIT_WORK(&optjoy_data->work, optjoy_spi_work_func);

	optjoy_hw_init();

	optjoy_data->input_dev->name = "optjoy_device";
	//optjoy_data->input_dev->phys = "optjoy_device/input2";

	set_bit(EV_KEY, optjoy_data->input_dev->evbit);

	set_bit(SEC_KEY_LEFT, optjoy_data->input_dev->keybit);
	set_bit(SEC_KEY_RIGHT, optjoy_data->input_dev->keybit);
	set_bit(SEC_KEY_UP, optjoy_data->input_dev->keybit);	
	set_bit(SEC_KEY_DOWN, optjoy_data->input_dev->keybit);

	optjoy_data->input_dev->keycode = optjoy_keycode;

	ret = input_register_device(optjoy_data->input_dev);
	if (ret) {
		printk(KERN_ERR "[optjoy_spi_probe] Unable to register %s input device\n", optjoy_data->input_dev->name);
		goto err_input_register_device_failed;
	}

#if 0  //TEMP
	/* IRQ setting */
	ret = request_irq(IRQ_OJT_INT, optjoy_spi_irq_handler, 0, "optjoy_device", optjoy_data);
	if (!ret) {
		optjoy_data->use_irq = 1;
		gprintk("Start INTERRUPT mode!\n");
	}
	else {
		gprintk(KERN_ERR "[optjoy_spi_probe] unable to request_irq\n");
		optjoy_data->use_irq = 0;
	}       
#else
	optjoy_data->use_irq = 0;
#endif

	/* timer init & start (if not INTR mode...) */
	if (!optjoy_data->use_irq) {
		hrtimer_init(&optjoy_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		optjoy_data->timer.function = optjoy_spi_timer_func;
		hrtimer_start(&optjoy_data->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
	}

	hrtimer_init(&optjoy_data->timer_touchlock, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	optjoy_data->timer_touchlock.function = optjoy_spi_timer_func_touchlock;
	

#ifdef CONFIG_HAS_EARLYSUSPEND
	optjoy_data->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 2;
	optjoy_data->early_suspend.suspend = optjoy_spi_early_suspend;
	optjoy_data->early_suspend.resume = optjoy_spi_late_resume;
	register_early_suspend(&optjoy_data->early_suspend);
#endif	/* CONFIG_HAS_EARLYSUSPEND */

	return 0;

err_input_register_device_failed:
	input_free_device(optjoy_data->input_dev);

err_input_dev_alloc_failed:
	kfree(optjoy_data);

err_alloc_data_failed:
	/* the workqueue was created above; do not leak it on later failures */
	destroy_workqueue(optjoy_workqueue);

err_create_workqueue_failed:
	return ret;
}
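
/*
 * Polling-callback sketch (assumption, not the driver's real function): when
 * use_irq is 0 the probe above arms optjoy_data->timer with
 * optjoy_spi_timer_func. A common shape for such a callback is to queue the
 * work item and re-arm itself; the "_sketch" suffix and the 10 ms period are
 * purely illustrative.
 */
static enum hrtimer_restart optjoy_spi_timer_func_sketch(struct hrtimer *timer)
{
	struct optjoy_drv_data *optjoy_data =
		container_of(timer, struct optjoy_drv_data, timer);

	/* defer the actual sensor I/O to process context */
	queue_work(optjoy_workqueue, &optjoy_data->work);

	/* re-arm for the next poll */
	hrtimer_forward_now(timer, ktime_set(0, 10 * NSEC_PER_MSEC));
	return HRTIMER_RESTART;
}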
Beispiel #25
0
static void mcs6000_work(struct work_struct *work)
{
	int x1=0, y1 = 0;
#ifdef LG_FW_MULTI_TOUCH
	int x2=0, y2 = 0;
	static int pre_x1, pre_x2, pre_y1, pre_y2;
	static unsigned int s_input_type = NON_TOUCHED_STATE;
#endif
	unsigned int input_type;
	unsigned int key_touch;	
	unsigned char read_buf[READ_NUM];

	static int key_pressed = 0;
	static int touch_pressed = 0;

	struct mcs6000_ts_device *dev 
		= container_of(to_delayed_work(work), struct mcs6000_ts_device, work);

	dev->pendown = !gpio_get_value(dev->intr_gpio);

	/* read the registers of MCS6000 IC */
	if ( i2c_smbus_read_i2c_block_data(dev->client, MCS6000_TS_INPUT_INFO, READ_NUM, read_buf) < 0) {
		printk(KERN_ERR "%s touch ic read error\n", __FUNCTION__);
		goto touch_retry;
	}

	input_type = read_buf[0] & 0x0f;
	key_touch = (read_buf[0] & 0xf0) >> 4;

	x1 = y1 = 0;
#ifdef LG_FW_MULTI_TOUCH
	x2 = y2 = 0;
#endif
	x1 = (read_buf[1] & 0xf0) << 4;
	y1 = (read_buf[1] & 0x0f) << 8;

	x1 |= read_buf[2];	
	y1 |= read_buf[3];		

#ifdef LG_FW_MULTI_TOUCH
	if(input_type == MULTI_POINT_TOUCH) {
		s_input_type = input_type;
		x2 = (read_buf[5] & 0xf0) << 4;
		y2 = (read_buf[5] & 0x0f) << 8;
		x2 |= read_buf[6];
		y2 |= read_buf[7];
	}
#endif

	if (dev->pendown) { /* touch pressed case */
#ifdef LG_FW_HARDKEY_BLOCK
		if(dev->hardkey_block == 0)
#endif
		if(key_touch && key_pressed != key_touch) {
			if(key_pressed)
				mcs6000_key_event_touch(key_pressed, RELEASED, dev);
			mcs6000_key_event_touch(key_touch, PRESSED, dev);
			key_pressed = key_touch;
		}

		if(input_type) {
			touch_pressed = 1;

#ifdef LG_FW_MULTI_TOUCH
			if(input_type == MULTI_POINT_TOUCH) {
				mcs6000_multi_ts_event_touch(x1, y1, x2, y2, PRESSED, dev);
				pre_x1 = x1;
				pre_y1 = y1;
				pre_x2 = x2;
				pre_y2 = y2;
			}
			else if(input_type == SINGLE_POINT_TOUCH) {
				mcs6000_multi_ts_event_touch(x1, y1, -1, -1, PRESSED, dev);
				s_input_type = SINGLE_POINT_TOUCH;				
			}
#else
			if(input_type == SINGLE_POINT_TOUCH) {
				mcs6000_single_ts_event_touch(x1, y1, PRESSED, dev);
			}
#endif
#ifdef LG_FW_HARDKEY_BLOCK
			dev->hardkey_block = 1;
#endif
		}
	} 
	else { /* touch released case */
		if(key_pressed) {
			mcs6000_key_event_touch(key_pressed, RELEASED, dev);
			key_pressed = 0;
		}

		if(touch_pressed) {
#ifdef LG_FW_MULTI_TOUCH
			if(s_input_type == MULTI_POINT_TOUCH) {
				DMSG("%s: multi touch release...(%d, %d), (%d, %d)\n", __FUNCTION__,pre_x1,pre_y1,pre_x2,pre_y2);
				mcs6000_multi_ts_event_touch(pre_x1, pre_y1, pre_x2, pre_y2, RELEASED, dev);
				s_input_type = NON_TOUCHED_STATE; 
				pre_x1 = -1; pre_y1 = -1; pre_x2 = -1; pre_y2 = -1;
			} else {
				DMSG("%s: single touch release... %d, %d\n", __FUNCTION__, x1, y1);
				mcs6000_multi_ts_event_touch(x1, y1, -1, -1, RELEASED, dev);
			}
#else
			DMSG("%s: single release... %d, %d\n", __FUNCTION__, x1, y1);
			mcs6000_single_ts_event_touch (x1, y1, RELEASED, dev);
#endif
			/* reset the press flag in both single- and multi-touch builds */
			touch_pressed = 0;
#ifdef LG_FW_HARDKEY_BLOCK
			hrtimer_cancel(&dev->touch_timer);
			hrtimer_start(&dev->touch_timer, ktime_set(0, 800),
			HRTIMER_MODE_REL);
#endif
		}
	}

touch_retry:
	if (dev->pendown) {
		//ret = schedule_delayed_work(&dev->work, msecs_to_jiffies(TS_POLLING_TIME));
		queue_delayed_work(dev->ts_wq, &dev->work, msecs_to_jiffies(TS_POLLING_TIME));
	} else {
		enable_irq(dev->num_irq);
		DMSG("%s: irq enable\n", __FUNCTION__);
	}
}
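
/*
 * Companion IRQ-handler sketch (assumption, not the driver's actual top
 * half): mcs6000_work() above re-enables dev->num_irq only once the pen is
 * up, which implies the interrupt handler disables the line and defers the
 * I2C reads to the delayed work. The "_sketch" suffix marks it as
 * illustrative.
 */
static irqreturn_t mcs6000_ts_irq_handler_sketch(int irq, void *dev_id)
{
	struct mcs6000_ts_device *dev = dev_id;

	/* mask the line until the work function has drained the touch */
	disable_irq_nosync(dev->num_irq);
	queue_delayed_work(dev->ts_wq, &dev->work, 0);

	return IRQ_HANDLED;
}
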
static void mdp_dma_schedule(struct msm_fb_data_type *mfd, uint32 term)
{
	/*
	 * dma2 configure VSYNC block
	 * vsync supported on Primary LCD only for now
	 */
	int32 mdp_lcd_rd_cnt;
	uint32 usec_wait_time;
	uint32 start_y;

	/*
	 * ToDo: if we can move the HRT timer callback to a workqueue, we can
	 * move the DMA2 power-on under mdp_pipe_kickoff().
	 * This would save power during the hrtimer wait.
	 * However, if the latency of the context switch (hrt irq -> workqueue)
	 * is too big, we will miss the vsync timing.
	 */
	if (term == MDP_DMA2_TERM)
		mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);

	mdp_dma2_update_time_in_usec =
	    MDP_KTIME2USEC(mdp_dma2_last_update_time);

	if ((!mfd->ibuf.vsync_enable) || (!mfd->panel_info.lcd.vsync_enable)
	    || (mfd->use_mdp_vsync)) {
		mdp_pipe_kickoff(term, mfd);
		return;
	}
	/* SW vsync logic starts here */

	/* get current rd counter */
	mdp_lcd_rd_cnt = mdp_get_lcd_line_counter(mfd);
	if (mdp_dma2_update_time_in_usec != 0) {
		uint32 num, den;

		/*
		 * roi width boundary calculation to know the size of pixel
		 * width that MDP can send faster or slower than LCD read
		 * pointer
		 */

		num = mdp_last_dma2_update_width * mdp_last_dma2_update_height;
		den =
		    (((mfd->panel_info.lcd.refx100 * mfd->total_lcd_lines) /
		      1000) * (mdp_dma2_update_time_in_usec / 100)) / 1000;

		if (den == 0)
			mfd->vsync_width_boundary[mdp_last_dma2_update_width] =
			    mfd->panel_info.xres + 1;
		else
			mfd->vsync_width_boundary[mdp_last_dma2_update_width] =
			    (int)(num / den);
	}

	if (mfd->vsync_width_boundary[mdp_last_dma2_update_width] >
	    mdp_curr_dma2_update_width) {
		/* MDP wrp is faster than LCD rdp */
		mdp_lcd_rd_cnt += mdp_lcd_rd_cnt_offset_fast;
	} else {
		/* MDP wrp is slower than LCD rdp */
		mdp_lcd_rd_cnt -= mdp_lcd_rd_cnt_offset_slow;
	}

	if (mdp_lcd_rd_cnt < 0)
		mdp_lcd_rd_cnt = mfd->total_lcd_lines + mdp_lcd_rd_cnt;
	else if (mdp_lcd_rd_cnt > mfd->total_lcd_lines)
		mdp_lcd_rd_cnt = mdp_lcd_rd_cnt - mfd->total_lcd_lines - 1;

	/* get wrt pointer position */
	start_y = mfd->ibuf.dma_y;

	/* measure line difference between start_y and rd counter */
	if (start_y > mdp_lcd_rd_cnt) {
		/*
		 * refx100 is the LCD refresh rate already multiplied by 100,
		 * hence the /100 in the denominator below; *1000000 converts
		 * seconds to usec.
		 */

		if ((start_y - mdp_lcd_rd_cnt) <=
		    mdp_vsync_usec_wait_line_too_short)
			usec_wait_time = 0;
		else
			usec_wait_time =
			    ((start_y -
			      mdp_lcd_rd_cnt) * 1000000) /
			    ((mfd->total_lcd_lines *
			      mfd->panel_info.lcd.refx100) / 100);
	} else {
		if ((start_y + (mfd->total_lcd_lines - mdp_lcd_rd_cnt)) <=
		    mdp_vsync_usec_wait_line_too_short)
			usec_wait_time = 0;
		else
			usec_wait_time =
			    ((start_y +
			      (mfd->total_lcd_lines -
			       mdp_lcd_rd_cnt)) * 1000000) /
			    ((mfd->total_lcd_lines *
			      mfd->panel_info.lcd.refx100) / 100);
	}
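	/*
	 * Worked example (illustrative numbers, not from a specific panel):
	 * with total_lcd_lines = 500 and refx100 = 6000 (60 Hz * 100), the
	 * read pointer advances (500 * 6000) / 100 = 30000 lines per second.
	 * If start_y is 100 lines ahead of mdp_lcd_rd_cnt, the wait becomes
	 * (100 * 1000000) / 30000 ~= 3333 usec, i.e. roughly 3.3 ms before
	 * the DMA can be kicked off without tearing.
	 */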

	mdp_last_dma2_update_width = mdp_curr_dma2_update_width;
	mdp_last_dma2_update_height = mdp_curr_dma2_update_height;

	if (usec_wait_time == 0) {
		mdp_pipe_kickoff(term, mfd);
	} else {
		ktime_t wait_time;

		wait_time.tv.sec = 0;
		wait_time.tv.nsec = usec_wait_time * 1000;

		if (msm_fb_debug_enabled) {
			vt = ktime_get_real();
			mdp_expected_usec_wait = usec_wait_time;
		}
		hrtimer_start(&mfd->dma_hrtimer, wait_time, HRTIMER_MODE_REL);
	}
}
Beispiel #27
0
static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
{
	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
	unsigned long rcu_delta_jiffies;
	ktime_t last_update, expires, now;
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
	u64 time_delta;
	int cpu;

	cpu = smp_processor_id();
	ts = &per_cpu(tick_cpu_sched, cpu);

	now = tick_nohz_start_idle(cpu, ts);

	/*
	 * If this cpu is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the cpu which runs the tick timer next. If we don't drop
	 * this here the jiffies might be stale and do_timer() never
	 * invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
		return;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		return;

	if (need_resched())
		return;

	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
		static int ratelimit;

		if (ratelimit < 10) {
			printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
			       (unsigned int) local_softirq_pending());
			ratelimit++;
		}
		return;
	}

	ts->idle_calls++;
	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqbegin(&xtime_lock);
		last_update = last_jiffies_update;
		last_jiffies = jiffies;
		time_delta = timekeeping_max_deferment();
	} while (read_seqretry(&xtime_lock, seq));

	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) ||
	    arch_needs_cpu(cpu)) {
		next_jiffies = last_jiffies + 1;
		delta_jiffies = 1;
	} else {
		/* Get the next timer wheel timer */
		next_jiffies = get_next_timer_interrupt(last_jiffies);
		delta_jiffies = next_jiffies - last_jiffies;
		if (rcu_delta_jiffies < delta_jiffies) {
			next_jiffies = last_jiffies + rcu_delta_jiffies;
			delta_jiffies = rcu_delta_jiffies;
		}
	}
	/*
	 * Do not stop the tick, if we are only one off
	 * or if the cpu is required for rcu
	 */
	if (!ts->tick_stopped && delta_jiffies == 1)
		goto out;

	/* Schedule the tick, if we are at least one jiffie off */
	if ((long)delta_jiffies >= 1) {

		/*
		 * If this cpu is the one which updates jiffies, then
		 * give up the assignment and let it be taken by the
		 * cpu which runs the tick timer next, which might be
		 * this cpu as well. If we don't drop this here the
		 * jiffies might be stale and do_timer() never
		 * invoked. Keep track of the fact that it was the one
		 * which had the do_timer() duty last. If this cpu is
		 * the one which had the do_timer() duty last, we
		 * limit the sleep time to the timekeeping
		 * max_deferment value which we retrieved
		 * above. Otherwise we can sleep as long as we want.
		 */
		if (cpu == tick_do_timer_cpu) {
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
			ts->do_timer_last = 1;
		} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
			time_delta = KTIME_MAX;
			ts->do_timer_last = 0;
		} else if (!ts->do_timer_last) {
			time_delta = KTIME_MAX;
		}

		/*
		 * calculate the expiry time for the next timer wheel
		 * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
		 * that there is no timer pending or at least extremely
		 * far into the future (12 days for HZ=1000). In this
		 * case we set the expiry to the end of time.
		 */
		if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
			/*
			 * Calculate the time delta for the next timer event.
			 * If the time delta exceeds the maximum time delta
			 * permitted by the current clocksource then adjust
			 * the time delta accordingly to ensure the
			 * clocksource does not wrap.
			 */
			time_delta = min_t(u64, time_delta,
					   tick_period.tv64 * delta_jiffies);
		}
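		/*
		 * Worked example (illustrative): with HZ=1000 the tick period
		 * is 1,000,000 ns, so delta_jiffies = 4000 gives a candidate
		 * sleep of 4 seconds; min_t() then keeps the smaller of that
		 * and the timekeeping_max_deferment() value read earlier, so
		 * the clocksource cannot wrap while the tick is stopped.
		 */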

		if (time_delta < KTIME_MAX)
			expires = ktime_add_ns(last_update, time_delta);
		else
			expires.tv64 = KTIME_MAX;

		/* Skip reprogramming the event if it's not changed */
		if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
			goto out;

		/*
		 * nohz_stop_sched_tick can be called several times before
		 * the nohz_restart_sched_tick is called. This happens when
		 * interrupts arrive which do not cause a reschedule. In the
		 * first call we save the current tick time, so we can restart
		 * the scheduler tick in nohz_restart_sched_tick.
		 */
		if (!ts->tick_stopped) {
			select_nohz_load_balancer(1);

			ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
			ts->tick_stopped = 1;
			ts->idle_jiffies = last_jiffies;
		}

		ts->idle_sleeps++;

		/* Mark expires */
		ts->idle_expires = expires;

		/*
		 * If the expiration time == KTIME_MAX, then
		 * in this case we simply stop the tick timer.
		 */
		if (unlikely(expires.tv64 == KTIME_MAX)) {
			if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
				hrtimer_cancel(&ts->sched_timer);
			goto out;
		}

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer, expires,
				      HRTIMER_MODE_ABS_PINNED);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				goto out;
		} else if (!tick_program_event(expires, 0))
			goto out;
		/*
		 * We are past the event already. So we crossed a
		 * jiffie boundary. Update jiffies and raise the
		 * softirq.
		 */
		tick_do_update_jiffies64(ktime_get());
	}
	raise_softirq_irqoff(TIMER_SOFTIRQ);
out:
	ts->next_jiffies = next_jiffies;
	ts->last_jiffies = last_jiffies;
}
Beispiel #28
0
static void mdp_dma_schedule(struct msm_fb_data_type *mfd, uint32 term)
{
	int32 mdp_lcd_rd_cnt;
	uint32 usec_wait_time;
	uint32 start_y;

	if (term == MDP_DMA2_TERM)
		mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);

	mdp_dma2_update_time_in_usec = ktime_to_us(mdp_dma2_last_update_time);

	if ((!mfd->ibuf.vsync_enable) || (!mfd->panel_info.lcd.vsync_enable)
	    || (mfd->use_mdp_vsync)) {
		mdp_pipe_kickoff(term, mfd);
		return;
	}

	mdp_lcd_rd_cnt = mdp_get_lcd_line_counter(mfd);
	if (mdp_dma2_update_time_in_usec != 0) {
		uint32 num, den;


		num = mdp_last_dma2_update_width * mdp_last_dma2_update_height;
		den =
		    (((mfd->panel_info.lcd.refx100 * mfd->total_lcd_lines) /
		      1000) * (mdp_dma2_update_time_in_usec / 100)) / 1000;

		if (den == 0)
			mfd->vsync_width_boundary[mdp_last_dma2_update_width] =
			    mfd->panel_info.xres + 1;
		else
			mfd->vsync_width_boundary[mdp_last_dma2_update_width] =
			    (int)(num / den);
	}

	if (mfd->vsync_width_boundary[mdp_last_dma2_update_width] >
	    mdp_curr_dma2_update_width) {
		
		mdp_lcd_rd_cnt += mdp_lcd_rd_cnt_offset_fast;
	} else {
		
		mdp_lcd_rd_cnt -= mdp_lcd_rd_cnt_offset_slow;
	}

	if (mdp_lcd_rd_cnt < 0)
		mdp_lcd_rd_cnt = mfd->total_lcd_lines + mdp_lcd_rd_cnt;
	else if (mdp_lcd_rd_cnt > mfd->total_lcd_lines)
		mdp_lcd_rd_cnt = mdp_lcd_rd_cnt - mfd->total_lcd_lines - 1;

	
	start_y = mfd->ibuf.dma_y;

	
	if (start_y > mdp_lcd_rd_cnt) {

		if ((start_y - mdp_lcd_rd_cnt) <=
		    mdp_vsync_usec_wait_line_too_short)
			usec_wait_time = 0;
		else
			usec_wait_time =
			    ((start_y -
			      mdp_lcd_rd_cnt) * 1000000) /
			    ((mfd->total_lcd_lines *
			      mfd->panel_info.lcd.refx100) / 100);
	} else {
		if ((start_y + (mfd->total_lcd_lines - mdp_lcd_rd_cnt)) <=
		    mdp_vsync_usec_wait_line_too_short)
			usec_wait_time = 0;
		else
			usec_wait_time =
			    ((start_y +
			      (mfd->total_lcd_lines -
			       mdp_lcd_rd_cnt)) * 1000000) /
			    ((mfd->total_lcd_lines *
			      mfd->panel_info.lcd.refx100) / 100);
	}

	mdp_last_dma2_update_width = mdp_curr_dma2_update_width;
	mdp_last_dma2_update_height = mdp_curr_dma2_update_height;

	if (usec_wait_time == 0) {
		mdp_pipe_kickoff(term, mfd);
	} else {
		ktime_t wait_time;

		wait_time = ns_to_ktime(usec_wait_time * 1000);

		if (msm_fb_debug_enabled) {
			vt = ktime_get_real();
			mdp_expected_usec_wait = usec_wait_time;
		}
		hrtimer_start(&mfd->dma_hrtimer, wait_time, HRTIMER_MODE_REL);
	}
}
Beispiel #29
0
int gpio_event_matrix_func(struct gpio_event_input_devs *input_devs,
	struct gpio_event_info *info, void **data, int func)
{
	int i;
	int err;
	int key_count;
	struct gpio_kp *kp;
	struct gpio_event_matrix_info *mi;

	mi = container_of(info, struct gpio_event_matrix_info, info);
	if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME) {
		/* TODO: disable scanning */
		return 0;
	}

	if (func == GPIO_EVENT_FUNC_INIT) {
		if (mi->keymap == NULL ||
		   mi->input_gpios == NULL ||
		   mi->output_gpios == NULL) {
			err = -ENODEV;
			pr_err("gpiomatrix: Incomplete pdata\n");
			goto err_invalid_platform_data;
		}
		key_count = mi->ninputs * mi->noutputs;

		*data = kp = kzalloc(sizeof(*kp) + sizeof(kp->keys_pressed[0]) *
				     BITS_TO_LONGS(key_count), GFP_KERNEL);
		if (kp == NULL) {
			err = -ENOMEM;
			pr_err("gpiomatrix: Failed to allocate private data\n");
			goto err_kp_alloc_failed;
		}
		kp->input_devs = input_devs;
		kp->keypad_info = mi;
		//set_bit(EV_KEY, input_dev->evbit);
		for (i = 0; i < key_count; i++) {
			unsigned short keyentry = mi->keymap[i];
			unsigned short keycode = keyentry & MATRIX_KEY_MASK;
			unsigned short dev = keyentry >> MATRIX_CODE_BITS;
			if (dev >= input_devs->count) {
				pr_err("gpiomatrix: bad device index %d >= "
					"%d for key code %d\n",
					dev, input_devs->count, keycode);
				err = -EINVAL;
				goto err_bad_keymap;
			}

			if (keycode && keycode <= KEY_MAX)
				input_set_capability(input_devs->dev[dev],
							EV_KEY, keycode);
		}

		for (i = 0; i < mi->noutputs; i++) {
			err = gpio_request(mi->output_gpios[i], "gpio_kp_out");
			if (err) {
				pr_err("gpiomatrix: gpio_request failed for "
					"output %d\n", mi->output_gpios[i]);
				goto err_request_output_gpio_failed;
			}
			if (gpio_cansleep(mi->output_gpios[i])) {
				pr_err("gpiomatrix: unsupported output gpio %d,"
					" can sleep\n", mi->output_gpios[i]);
				err = -EINVAL;
				goto err_output_gpio_configure_failed;
			}
			if (mi->flags & GPIOKPF_DRIVE_INACTIVE)
				err = gpio_direction_output(mi->output_gpios[i],
					!(mi->flags & GPIOKPF_ACTIVE_HIGH));
			else
				err = gpio_direction_input(mi->output_gpios[i]);
			if (err) {
				pr_err("gpiomatrix: gpio_configure failed for "
					"output %d\n", mi->output_gpios[i]);
				goto err_output_gpio_configure_failed;
			}
		}
		for (i = 0; i < mi->ninputs; i++) {
			err = gpio_request(mi->input_gpios[i], "gpio_kp_in");
			if (err) {
				pr_err("gpiomatrix: gpio_request failed for "
					"input %d\n", mi->input_gpios[i]);
				goto err_request_input_gpio_failed;
			}
			err = gpio_direction_input(mi->input_gpios[i]);
			if (err) {
				pr_err("gpiomatrix: gpio_direction_input failed"
					" for input %d\n", mi->input_gpios[i]);
				goto err_gpio_direction_input_failed;
			}
		}
		kp->current_output = mi->noutputs;
		kp->key_state_changed = 1;

		hrtimer_init(&kp->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		kp->timer.function = gpio_keypad_timer_func;
		wake_lock_init(&kp->wake_lock, WAKE_LOCK_SUSPEND, "gpio_kp");
		err = gpio_keypad_request_irqs(kp);
		kp->use_irq = err == 0;

		pr_info("GPIO Matrix Keypad Driver: Start keypad matrix for "
			"%s%s in %s mode\n", input_devs->dev[0]->name,
			(input_devs->count > 1) ? "..." : "",
			kp->use_irq ? "interrupt" : "polling");

		if (kp->use_irq)
			wake_lock(&kp->wake_lock);
		hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
		return 0;
	}

	err = 0;
	kp = *data;

	if (kp->use_irq)
		for (i = mi->noutputs - 1; i >= 0; i--)
			free_irq(gpio_to_irq(mi->input_gpios[i]), kp);

	hrtimer_cancel(&kp->timer);
	wake_lock_destroy(&kp->wake_lock);
	for (i = mi->noutputs - 1; i >= 0; i--) {
err_gpio_direction_input_failed:
		gpio_free(mi->input_gpios[i]);
err_request_input_gpio_failed:
		;
	}
	for (i = mi->noutputs - 1; i >= 0; i--) {
err_output_gpio_configure_failed:
		gpio_free(mi->output_gpios[i]);
err_request_output_gpio_failed:
		;
	}
err_bad_keymap:
	kfree(kp);
err_kp_alloc_failed:
err_invalid_platform_data:
	return err;
}
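
/*
 * Greatly simplified scan sketch (an assumption; the real
 * gpio_keypad_timer_func referenced above also handles debouncing, the
 * GPIOKPF_* polarity/drive flags and interrupt re-arming): drive one output
 * line at a time, sample every input and report the mapped keycode.
 * Indexing keymap[] as out * ninputs + in and the 5 us settle delay are
 * illustrative assumptions.
 */
static void gpio_keypad_scan_once_sketch(struct gpio_kp *kp)
{
	struct gpio_event_matrix_info *mi = kp->keypad_info;
	int polarity = !!(mi->flags & GPIOKPF_ACTIVE_HIGH);
	int out, in;

	for (out = 0; out < mi->noutputs; out++) {
		gpio_direction_output(mi->output_gpios[out], polarity);
		udelay(5);		/* let the line settle */

		for (in = 0; in < mi->ninputs; in++) {
			unsigned short keyentry =
				mi->keymap[out * mi->ninputs + in];
			unsigned short keycode = keyentry & MATRIX_KEY_MASK;
			unsigned short devno = keyentry >> MATRIX_CODE_BITS;
			int pressed =
				gpio_get_value(mi->input_gpios[in]) == polarity;

			if (keycode && keycode <= KEY_MAX)
				input_report_key(kp->input_devs->dev[devno],
						 keycode, pressed);
		}

		/* park the output so only one line drives at a time */
		gpio_direction_input(mi->output_gpios[out]);
	}
}
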
static enum hrtimer_restart tx_timer_func(struct hrtimer *timer)
{
	struct mem_link_device *mld;
	struct link_device *ld;
	struct modem_ctl *mc;
	int i;
	bool need_schedule;
	u16 mask;
	unsigned long flags;

	mld = container_of(timer, struct mem_link_device, tx_timer);
	ld = &mld->link_dev;
	mc = ld->mc;

	need_schedule = false;
	mask = 0;

	spin_lock_irqsave(&mc->lock, flags);

	if (unlikely(!ipc_active(mld)))
		goto exit;

#ifdef CONFIG_LINK_POWER_MANAGEMENT_WITH_FSM
	if (mld->link_active) {
		if (!mld->link_active(mld)) {
			need_schedule = true;
			goto exit;
		}
	}
#endif

	for (i = 0; i < MAX_SIPC5_DEVICES; i++) {
		struct mem_ipc_device *dev = mld->dev[i];
		int ret;

		if (unlikely(under_tx_flow_ctrl(mld, dev))) {
			ret = check_tx_flow_ctrl(mld, dev);
			if (ret < 0) {
				if (ret == -EBUSY || ret == -ETIME) {
					need_schedule = true;
					continue;
				} else {
					mem_forced_cp_crash(mld);
					need_schedule = false;
					goto exit;
				}
			}
		}

		ret = tx_frames_to_dev(mld, dev);
		if (unlikely(ret < 0)) {
			if (ret == -EBUSY || ret == -ENOSPC) {
				need_schedule = true;
				start_tx_flow_ctrl(mld, dev);
				continue;
			} else {
				mem_forced_cp_crash(mld);
				need_schedule = false;
				goto exit;
			}
		}

		if (ret > 0)
			mask |= msg_mask(dev);

		if (!skb_queue_empty(dev->skb_txq))
			need_schedule = true;
	}

	if (!need_schedule) {
		for (i = 0; i < MAX_SIPC5_DEVICES; i++) {
			if (!txq_empty(mld->dev[i])) {
				need_schedule = true;
				break;
			}
		}
	}

	if (mask)
		send_ipc_irq(mld, mask2int(mask));

exit:
	if (need_schedule) {
		ktime_t ktime = ktime_set(0, ms2ns(TX_PERIOD_MS));
		hrtimer_start(timer, ktime, HRTIMER_MODE_REL);
	}

	spin_unlock_irqrestore(&mc->lock, flags);

	return HRTIMER_NORESTART;
}
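
/*
 * Arming sketch (assumption; the surrounding link-device code is not shown):
 * since tx_timer_func() above re-arms itself only while work remains, the TX
 * path typically just needs to kick the timer once when it queues a frame.
 * The helper name is illustrative.
 */
static void kick_tx_timer_sketch(struct mem_link_device *mld)
{
	if (!hrtimer_active(&mld->tx_timer))
		hrtimer_start(&mld->tx_timer, ktime_set(0, 0),
			      HRTIMER_MODE_REL);
}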