int gpio_event_matrix_func(struct gpio_event_input_devs *input_devs, struct gpio_event_info *info, void **data, int func) { int i; int err; int key_count; int phone_call_status; int fm_radio_status; int irq; static int irq_status = 1; struct gpio_kp *kp; struct gpio_event_matrix_info *mi; mi = container_of(info, struct gpio_event_matrix_info, info); if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME) { /* TODO: disable scanning */ if (mi->detect_phone_status == 0) { if (func == GPIO_EVENT_FUNC_SUSPEND) irq_status = 0; else irq_status = 1; } else { phone_call_status = gpio_event_get_phone_call_status() & 0x01; fm_radio_status = gpio_event_get_fm_radio_status() & 0x01; KEY_LOGI("%s: mi->ninputs: %d, func&0x01 = %d, phone_call_status=%d, fm_radio_status=%d\n", __func__, mi->ninputs, func & 0x01, phone_call_status, fm_radio_status); if (irq_status != ((func & 0x01) | phone_call_status | fm_radio_status)) { irq_status = ((func & 0x01) | phone_call_status | fm_radio_status); KEY_LOGI("%s: irq_status %d \n", __func__, irq_status); } else { KEY_LOGI("%s: irq_status %d, did not change\n", __func__, irq_status); return 0; } } for (i = 0; i < mi->ninputs; i++) { irq = gpio_to_irq(mi->input_gpios[i]); err = set_irq_wake(irq, irq_status); if (err) KEY_LOGE("gpiomatrix: set_irq_wake failed ,irq_status %d ,for input irq %d,%d\n", irq_status, i, irq); else KEY_LOGD("%s: set ok,irq_status %d, irq %d = %d\n", __func__, irq_status, i, irq); } return 0; } if (func == GPIO_EVENT_FUNC_INIT) { if (mi->keymap == NULL || mi->input_gpios == NULL || mi->output_gpios == NULL) { err = -ENODEV; KEY_LOGE("gpiomatrix: Incomplete pdata\n"); goto err_invalid_platform_data; } key_count = mi->ninputs * mi->noutputs; *data = kp = kzalloc(sizeof(*kp) + sizeof(kp->keys_pressed[0]) * BITS_TO_LONGS(key_count), GFP_KERNEL); if (kp == NULL) { err = -ENOMEM; KEY_LOGE("gpiomatrix: Failed to allocate private data\n"); goto err_kp_alloc_failed; } kp->input_devs = input_devs; kp->keypad_info = mi; for (i = 0; i < key_count; i++) { unsigned short keyentry = mi->keymap[i]; unsigned short keycode = keyentry & MATRIX_KEY_MASK; unsigned short dev = keyentry >> MATRIX_CODE_BITS; if (dev >= input_devs->count) { KEY_LOGE("gpiomatrix: bad device index %d >= " "%d for key code %d\n", dev, input_devs->count, keycode); err = -EINVAL; goto err_bad_keymap; } if (keycode && keycode <= KEY_MAX) input_set_capability(input_devs->dev[dev], EV_KEY, keycode); } #ifndef CONFIG_ARCH_MSM8X60 if (mi->setup_ninputs_gpio) mi->setup_ninputs_gpio(); #else if (mi->setup_matrix_gpio) mi->setup_matrix_gpio(); #endif for (i = 0; i < mi->noutputs; i++) { err = gpio_request(mi->output_gpios[i], "gpio_kp_out"); if (err) { KEY_LOGE("gpiomatrix: gpio_request failed for " "output %d\n", mi->output_gpios[i]); goto err_request_output_gpio_failed; } if (gpio_cansleep(mi->output_gpios[i])) { KEY_LOGE("gpiomatrix: unsupported output gpio %d," " can sleep\n", mi->output_gpios[i]); err = -EINVAL; goto err_output_gpio_configure_failed; } if (mi->flags & GPIOKPF_DRIVE_INACTIVE) err = gpio_direction_output(mi->output_gpios[i], !(mi->flags & GPIOKPF_ACTIVE_HIGH)); else err = gpio_direction_input(mi->output_gpios[i]); if (err) { KEY_LOGE("gpiomatrix: gpio_configure failed for " "output %d\n", mi->output_gpios[i]); goto err_output_gpio_configure_failed; } } for (i = 0; i < mi->ninputs; i++) { err = gpio_request(mi->input_gpios[i], "gpio_kp_in"); if (err) { KEY_LOGE("gpiomatrix: gpio_request failed for " "input %d\n", mi->input_gpios[i]); goto 
err_request_input_gpio_failed; } err = gpio_direction_input(mi->input_gpios[i]); if (err) { KEY_LOGE("gpiomatrix: gpio_direction_input failed" " for input %d\n", mi->input_gpios[i]); goto err_gpio_direction_input_failed; } } kp->current_output = mi->noutputs; kp->key_state_changed = 1; #ifndef CONFIG_ARCH_MSM8X60 hrtimer_init(&kp->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); kp->timer.function = gpio_keypad_timer_func; #else km_queue = create_singlethread_workqueue("km_queue"); INIT_WORK(&kp->work, gpio_keypad_timer_func); #endif wake_lock_init(&kp->wake_lock, WAKE_LOCK_SUSPEND, "gpio_kp"); err = gpio_keypad_request_irqs(kp); kp->use_irq = err == 0; #ifndef CONFIG_ARCH_MSM8X60 kp_use_irq = kp->use_irq; #endif KEY_LOGI("GPIO Matrix Keypad Driver: Start keypad matrix for " "%s%s in %s mode\n", input_devs->dev[0]->name, (input_devs->count > 1) ? "..." : "", kp->use_irq ? "interrupt" : "polling"); if (kp->use_irq) wake_lock(&kp->wake_lock); #ifndef CONFIG_ARCH_MSM8X60 hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL); #else queue_work(km_queue, &kp->work); #endif return 0; } err = 0; kp = *data; if (kp->use_irq) for (i = mi->noutputs - 1; i >= 0; i--) free_irq(gpio_to_irq(mi->input_gpios[i]), kp); #ifndef CONFIG_ARCH_MSM8X60 hrtimer_cancel(&kp->timer); #else cancel_work_sync(&kp->work); #endif wake_lock_destroy(&kp->wake_lock); for (i = mi->noutputs - 1; i >= 0; i--) { err_gpio_direction_input_failed: gpio_free(mi->input_gpios[i]); err_request_input_gpio_failed: ; } for (i = mi->noutputs - 1; i >= 0; i--) { err_output_gpio_configure_failed: gpio_free(mi->output_gpios[i]); err_request_output_gpio_failed: ; } err_bad_keymap: kfree(kp); err_kp_alloc_failed: err_invalid_platform_data: return err; }
void shm_ac_read_notif_0_tasklet(unsigned long tasklet_data)
{
	struct shrm_dev *shrm = (struct shrm_dev *)tasklet_data;
	u32 writer_local_rptr;
	u32 writer_local_wptr;
	u32 shared_wptr;
	unsigned long flags;

	dev_dbg(shrm->dev, "%s IN\n", __func__);

	/* Update writer_local_rptr with shared_rptr */
	update_ac_common_local_rptr(shrm);
	get_writer_pointers(COMMON_CHANNEL, &writer_local_rptr,
			    &writer_local_wptr, &shared_wptr);

	if (check_modem_in_reset()) {
		dev_err(shrm->dev, "%s:Modem state reset or unknown\n",
			__func__);
		return;
	}

	if (boot_state == BOOT_INFO_SYNC) {
		/* BOOT_RESP sent by APE has been received by CMT */
		spin_lock_irqsave(&boot_lock, flags);
		boot_state = BOOT_DONE;
		spin_unlock_irqrestore(&boot_lock, flags);
		dev_info(shrm->dev, "IPC_ISA BOOT_DONE\n");

		if (shrm->msr_flag) {
#ifdef CONFIG_U8500_SHRM_DEFAULT_NET
			shrm_start_netdev(shrm->ndev);
#endif
			/* Notification of modem reinit to the SIPC layer */
			if (shrm->msr_reinit_cb)
				shrm->msr_reinit_cb(shrm->msr_cookie);
			shrm->msr_flag = 0;
			/* Multicast that the modem is online */
			nl_send_multicast_message(SHRM_NL_STATUS_MOD_ONLINE,
						  GFP_ATOMIC);
		}
	} else if (boot_state == BOOT_DONE) {
		if (writer_local_rptr != writer_local_wptr) {
			shrm_common_tx_state = SHRM_PTR_FREE;
			queue_work(shrm->shm_common_ch_wr_wq,
				   &shrm->send_ac_msg_pend_notify_0);
		} else {
			shrm_common_tx_state = SHRM_IDLE;
#ifdef CONFIG_U8500_SHRM_DEFAULT_NET
			shrm_restart_netdev(shrm->ndev);
#endif
		}
	} else {
		dev_err(shrm->dev, "Invalid boot state\n");
	}

	/* Start the timer here */
	hrtimer_start(&timer, ktime_set(0, 10 * NSEC_PER_MSEC),
		      HRTIMER_MODE_REL);
	atomic_dec(&ac_sleep_disable_count);
	dev_dbg(shrm->dev, "%s OUT\n", __func__);
}
static void mcs6000_work(struct work_struct *work) { int x1=0, y1 = 0; #ifdef LG_FW_MULTI_TOUCH int x2=0, y2 = 0; static int pre_x1, pre_x2, pre_y1, pre_y2; static unsigned int s_input_type = NON_TOUCHED_STATE; #endif unsigned int input_type; unsigned int key_touch; unsigned char read_buf[READ_NUM]; static int key_pressed = 0; static int touch_pressed = 0; struct mcs6000_ts_device *dev = container_of(to_delayed_work(work), struct mcs6000_ts_device, work); dev->pendown = !gpio_get_value(dev->intr_gpio); /* read the registers of MCS6000 IC */ if ( i2c_smbus_read_i2c_block_data(dev->client, MCS6000_TS_INPUT_INFO, READ_NUM, read_buf) < 0) { printk(KERN_ERR "%s touch ic read error\n", __FUNCTION__); goto touch_retry; } input_type = read_buf[0] & 0x0f; key_touch = (read_buf[0] & 0xf0) >> 4; x1 = y1 =0; #ifdef LG_FW_MULTI_TOUCH x2 = y2 = 0; #endif x1 = (read_buf[1] & 0xf0) << 4; y1 = (read_buf[1] & 0x0f) << 8; x1 |= read_buf[2]; y1 |= read_buf[3]; #ifdef LG_FW_MULTI_TOUCH if(input_type == MULTI_POINT_TOUCH) { s_input_type = input_type; x2 = (read_buf[5] & 0xf0) << 4; y2 = (read_buf[5] & 0x0f) << 8; x2 |= read_buf[6]; y2 |= read_buf[7]; } #endif if (dev->pendown) { /* touch pressed case */ #ifdef LG_FW_HARDKEY_BLOCK if(dev->hardkey_block == 0) #endif if(key_touch && key_pressed != key_touch) { if(key_pressed) mcs6000_key_event_touch(key_pressed, RELEASED, dev); mcs6000_key_event_touch(key_touch, PRESSED, dev); key_pressed = key_touch; } if(input_type) { touch_pressed = 1; #ifdef LG_FW_MULTI_TOUCH if(input_type == MULTI_POINT_TOUCH) { mcs6000_multi_ts_event_touch(x1, y1, x2, y2, PRESSED, dev); pre_x1 = x1; pre_y1 = y1; pre_x2 = x2; pre_y2 = y2; } else if(input_type == SINGLE_POINT_TOUCH) { mcs6000_multi_ts_event_touch(x1, y1, -1, -1, PRESSED, dev); s_input_type = SINGLE_POINT_TOUCH; } #else if(input_type == SINGLE_POINT_TOUCH) { mcs6000_single_ts_event_touch(x1, y1, PRESSED, dev); } #endif #ifdef LG_FW_HARDKEY_BLOCK dev->hardkey_block = 1; #endif } } else { /* touch released case */ if(key_pressed) { mcs6000_key_event_touch(key_pressed, RELEASED, dev); key_pressed = 0; } if(touch_pressed) { #ifdef LG_FW_MULTI_TOUCH if(s_input_type == MULTI_POINT_TOUCH) { DMSG("%s: multi touch release...(%d, %d), (%d, %d)\n", __FUNCTION__,pre_x1,pre_y1,pre_x2,pre_y2); mcs6000_multi_ts_event_touch(pre_x1, pre_y1, pre_x2, pre_y2, RELEASED, dev); s_input_type = NON_TOUCHED_STATE; pre_x1 = -1; pre_y1 = -1; pre_x2 = -1; pre_y2 = -1; } else { DMSG("%s: single touch release... %d, %d\n", __FUNCTION__, x1, y1); mcs6000_multi_ts_event_touch(x1, y1, -1, -1, RELEASED, dev); } #else DMSG("%s: single release... %d, %d\n", __FUNCTION__, x1, y1); mcs6000_single_ts_event_touch (x1, y1, RELEASED, dev); touch_pressed = 0; #endif #ifdef LG_FW_HARDKEY_BLOCK hrtimer_cancel(&dev->touch_timer); hrtimer_start(&dev->touch_timer, ktime_set(0, 800), HRTIMER_MODE_REL); #endif } } touch_retry: if (dev->pendown) { //ret = schedule_delayed_work(&dev->work, msecs_to_jiffies(TS_POLLING_TIME)); queue_delayed_work(dev->ts_wq, &dev->work,msecs_to_jiffies(TS_POLLING_TIME)); } else { enable_irq(dev->num_irq); DMSG("%s: irq enable\n", __FUNCTION__); } }
static int ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) #endif { switch (cmd) { case TSPDRV_SET_MAGIC_NUMBER: file->private_data = (void*)TSPDRV_MAGIC_NUMBER; break; case TSPDRV_ENABLE_AMP: if (wake_lock_active(&tspdrv_wakelock)) wake_unlock(&tspdrv_wakelock); wake_lock(&tspdrv_wakelock); ImmVibeSPI_ForceOut_AmpEnable(arg); #ifdef VIBE_RUNTIME_RECORD if (atomic_read(&g_bRuntimeRecord)) { DbgRecord((arg,";------- TSPDRV_ENABLE_AMP ---------\n")); } #else DbgRecorderReset((arg)); DbgRecord((arg,";------- TSPDRV_ENABLE_AMP ---------\n")); #endif break; case TSPDRV_DISABLE_AMP: ImmVibeSPI_ForceOut_AmpDisable(arg); #ifdef VIBE_RUNTIME_RECORD if (atomic_read(&g_bRuntimeRecord)) { DbgRecord((arg,";------- TSPDRV_DISABLE_AMP ---------\n")); } #endif if (wake_lock_active(&tspdrv_wakelock)) wake_unlock(&tspdrv_wakelock); break; case TSPDRV_GET_NUM_ACTUATORS: return NUM_ACTUATORS; case TSPDRV_SET_DBG_LEVEL: { long nDbgLevel; if (0 != copy_from_user((void *)&nDbgLevel, (const void __user *)arg, sizeof(long))) { /* Error copying the data */ DbgOut((DBL_ERROR, "copy_from_user failed to copy debug level data.\n")); return -1; } if (DBL_TEMP <= nDbgLevel && nDbgLevel <= DBL_OVERKILL) { atomic_set(&g_nDebugLevel, nDbgLevel); } else { DbgOut((DBL_ERROR, "Invalid debug level requested, ignored.")); } break; } case TSPDRV_GET_DBG_LEVEL: return atomic_read(&g_nDebugLevel); #ifdef VIBE_RUNTIME_RECORD case TSPDRV_SET_RUNTIME_RECORD_FLAG: { long nRecordFlag; if (0 != copy_from_user((void *)&nRecordFlag, (const void __user *)arg, sizeof(long))) { /* Error copying the data */ DbgOut((DBL_ERROR, "copy_from_user failed to copy runtime record flag.\n")); return -1; } atomic_set(&g_bRuntimeRecord, nRecordFlag); if (nRecordFlag) { int i; for (i=0; i<NUM_ACTUATORS; i++) { DbgRecorderReset((i)); } } break; } case TSPDRV_GET_RUNTIME_RECORD_FLAG: return atomic_read(&g_bRuntimeRecord); case TSPDRV_SET_RUNTIME_RECORD_BUF_SIZE: { long nRecorderBufSize; if (0 != copy_from_user((void *)&nRecorderBufSize, (const void __user *)arg, sizeof(long))) { /* Error copying the data */ DbgOut((DBL_ERROR, "copy_from_user failed to copy recorder buffer size.\n")); return -1; } if (0 == DbgSetRecordBufferSize(nRecorderBufSize)) { DbgOut((DBL_ERROR, "DbgSetRecordBufferSize failed.\n")); return -1; } break; } case TSPDRV_GET_RUNTIME_RECORD_BUF_SIZE: return DbgGetRecordBufferSize(); #endif case TSPDRV_SET_DEVICE_PARAMETER: { device_parameter deviceParam; if (0 != copy_from_user((void *)&deviceParam, (const void __user *)arg, sizeof(deviceParam))) { /* Error copying the data */ DbgOut((DBL_ERROR, "tspdrv: copy_from_user failed to copy kernel parameter data.\n")); return -1; } switch (deviceParam.nDeviceParamID) { case VIBE_KP_CFG_UPDATE_RATE_MS: /* Update the timer period */ g_nTimerPeriodMs = deviceParam.nDeviceParamValue; #ifdef CONFIG_HIGH_RES_TIMERS /* For devices using high resolution timer we need to update the ktime period value */ g_ktTimerPeriod = ktime_set(0, g_nTimerPeriodMs * 1000000); #endif break; case VIBE_KP_CFG_FREQUENCY_PARAM1: case VIBE_KP_CFG_FREQUENCY_PARAM2: case VIBE_KP_CFG_FREQUENCY_PARAM3: case VIBE_KP_CFG_FREQUENCY_PARAM4: case VIBE_KP_CFG_FREQUENCY_PARAM5: case VIBE_KP_CFG_FREQUENCY_PARAM6: #if 0 if (0 > ImmVibeSPI_ForceOut_SetFrequency(deviceParam.nDeviceIndex, deviceParam.nDeviceParamID, deviceParam.nDeviceParamValue)) { DbgOut((DBL_ERROR, "tspdrv: cannot set device frequency parameter.\n")); return -1; } #endif break; } } } return 0; }
static int acer_hs_probe(struct platform_device *pdev) { int ret; printk(KERN_INFO "[ACER-HS]: Registering ACER headset driver\n"); hr = kzalloc(sizeof(struct hs_res), GFP_KERNEL); if (!hr) return -ENOMEM; hr->debounce_time = ktime_set(0, 500000000); /* 500 ms */ INIT_WORK(&short_wq, acer_update_state_work); hr->sdev.name = "acer-hs"; hr->sdev.print_name = acer_hs_print_name; hr->sdev.print_state = acer_hs_print_state; hr->headsetOn = false; ret = switch_dev_register(&hr->sdev); if (ret < 0) { pr_err("switch_dev fail!\n"); goto err_switch_dev_register; } hr->det = HS_DET; ret = gpio_request(hr->det, "hs_detect"); if (ret < 0) { pr_err("request detect gpio fail!\n"); goto err_request_detect_gpio; } /* mic_bias_en - mic bias enable*/ hr->mic_bias_en = MIC_BIAS_EN; ret = gpio_request(hr->mic_bias_en, "MIC BIAS EN"); if (ret) { pr_err("GPIO request for MIC BIAS EN failed\n"); goto err_request_mic_bias_gpio; } /* hph_en_amp - head phone amplifier enable*/ hr->hph_amp_en = HPH_AMP_EN; ret = gpio_request(hr->hph_amp_en, "HPH AMP EN"); if (ret) { pr_err("GPIO request for HPH AMP EN failed!\n"); return ret; } hr->irq = gpio_to_irq(hr->det); if (hr->irq < 0) { ret = hr->irq; pr_err("get hs detect irq num fail!\n"); goto err_get_hs_detect_irq_num_failed; } hrtimer_init(&hr->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); hr->timer.function = detect_event_timer_func; ret = request_irq(hr->irq, hs_det_irq, IRQF_TRIGGER_FALLING, "hs_detect", NULL); if (ret < 0) { pr_err("request detect irq fail!\n"); goto err_request_detect_irq; } ret = misc_register(&acer_hs_dev); if(ret) { pr_err("acer_hs_probe: acer_hs_dev register failed!\n"); goto err_acer_hs_dev; } #if 0 /* open this section for debug usage. */ ret = set_irq_wake(hr->irq, 1); if (ret < 0) { pr_info("err_request_detect_irq fail!\n"); goto err_request_detect_irq; } #endif curstate = switch_get_state(&hr->sdev); ACER_HS_DBG("probe done.\n"); return 0; err_request_detect_irq: free_irq(hr->irq, 0); err_get_hs_detect_irq_num_failed: gpio_free(hr->det); err_request_detect_gpio: gpio_free(hr->det); err_request_mic_bias_gpio: gpio_free(hr->mic_bias_en); err_switch_dev_register: pr_err("ACER-HS: Failed to register driver\n"); err_acer_hs_dev: pr_err("ACER-HS: Failed to register MISC acer-hs driver"); return ret; }
int gpio_event_input_func(struct gpio_event_input_devs *input_devs, struct gpio_event_info *info, void **data, int func) { int ret; int i; unsigned long irqflags; struct gpio_event_input_info *di; struct gpio_input_state *ds = *data; struct kobject *keyboard_kobj; di = container_of(info, struct gpio_event_input_info, info); #ifdef CONFIG_POWER_KEY_CLR_RESET gis = di; #endif if (func == GPIO_EVENT_FUNC_SUSPEND) { if (ds->use_irq) for (i = 0; i < di->keymap_size; i++) disable_irq(gpio_to_irq(di->keymap[i].gpio)); #ifndef CONFIG_MFD_MAX8957 hrtimer_cancel(&ds->timer); #endif return 0; } if (func == GPIO_EVENT_FUNC_RESUME) { spin_lock_irqsave(&ds->irq_lock, irqflags); if (ds->use_irq) for (i = 0; i < di->keymap_size; i++) enable_irq(gpio_to_irq(di->keymap[i].gpio)); #ifndef CONFIG_MFD_MAX8957 hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL); #endif spin_unlock_irqrestore(&ds->irq_lock, irqflags); return 0; } if (func == GPIO_EVENT_FUNC_INIT) { if (ktime_to_ns(di->poll_time) <= 0) di->poll_time = ktime_set(0, 20 * NSEC_PER_MSEC); *data = ds = kzalloc(sizeof(*ds) + sizeof(ds->key_state[0]) * di->keymap_size, GFP_KERNEL); if (ds == NULL) { ret = -ENOMEM; KEY_LOGE("KEY_ERR: %s: " "Failed to allocate private data\n", __func__); goto err_ds_alloc_failed; } ds->debounce_count = di->keymap_size; ds->input_devs = input_devs; ds->info = di; wake_lock_init(&ds->wake_lock, WAKE_LOCK_SUSPEND, "gpio_input"); #ifdef CONFIG_MFD_MAX8957 wake_lock_init(&ds->key_pressed_wake_lock, WAKE_LOCK_SUSPEND, "pwr_key_pressed"); #endif #ifdef CONFIG_POWER_KEY_CLR_RESET wake_lock_init(&key_reset_clr_wake_lock, WAKE_LOCK_SUSPEND, "gpio_input_pwr_clear"); #endif spin_lock_init(&ds->irq_lock); if (board_build_flag() == 0) ds->debug_log = 0; else ds->debug_log = 1; for (i = 0; i < di->keymap_size; i++) { int dev = di->keymap[i].dev; if (dev >= input_devs->count) { KEY_LOGE("KEY_ERR: %s: bad device " "index %d >= %d for key code %d\n", __func__, dev, input_devs->count, di->keymap[i].code); ret = -EINVAL; goto err_bad_keymap; } input_set_capability(input_devs->dev[dev], di->type, di->keymap[i].code); ds->key_state[i].ds = ds; ds->key_state[i].debounce = DEBOUNCE_UNKNOWN; } for (i = 0; i < di->keymap_size; i++) { ret = gpio_request(di->keymap[i].gpio, "gpio_kp_in"); if (ret) { KEY_LOGE("KEY_ERR: %s: gpio_request " "failed for %d\n", __func__, di->keymap[i].gpio); goto err_gpio_request_failed; } ret = gpio_direction_input(di->keymap[i].gpio); if (ret) { KEY_LOGE("KEY_ERR: %s: " "gpio_direction_input failed for %d\n", __func__, di->keymap[i].gpio); goto err_gpio_configure_failed; } } if (di->setup_input_gpio) di->setup_input_gpio(); #ifdef CONFIG_MFD_MAX8957 ki_queue = create_singlethread_workqueue("ki_queue"); #endif ret = gpio_event_input_request_irqs(ds); keyboard_kobj = kobject_create_and_add("keyboard", NULL); if (keyboard_kobj == NULL) { KEY_LOGE("KEY_ERR: %s: subsystem_register failed\n", __func__); ret = -ENOMEM; return ret; } if (sysfs_create_file(keyboard_kobj, &dev_attr_vol_wakeup.attr)) KEY_LOGE("KEY_ERR: %s: sysfs_create_file " "return %d\n", __func__, ret); wakeup_bitmask = 0; set_wakeup = 0; spin_lock_irqsave(&ds->irq_lock, irqflags); ds->use_irq = ret == 0; KEY_LOGI("GPIO Input Driver: Start gpio inputs for %s%s in %s " "mode\n", input_devs->dev[0]->name, (input_devs->count > 1) ? "..." : "", ret == 0 ? 
"interrupt" : "polling"); #ifndef CONFIG_MFD_MAX8957 hrtimer_init(&ds->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ds->timer.function = gpio_event_input_timer_func; hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL); #endif spin_unlock_irqrestore(&ds->irq_lock, irqflags); return 0; } ret = 0; spin_lock_irqsave(&ds->irq_lock, irqflags); #ifndef CONFIG_MFD_MAX8957 hrtimer_cancel(&ds->timer); #endif if (ds->use_irq) { for (i = di->keymap_size - 1; i >= 0; i--) { free_irq(gpio_to_irq(di->keymap[i].gpio), &ds->key_state[i]); } } spin_unlock_irqrestore(&ds->irq_lock, irqflags); for (i = di->keymap_size - 1; i >= 0; i--) { err_gpio_configure_failed: gpio_free(di->keymap[i].gpio); err_gpio_request_failed: ; } err_bad_keymap: wake_lock_destroy(&ds->wake_lock); #ifdef CONFIG_MFD_MAX8957 wake_lock_destroy(&ds->key_pressed_wake_lock); #endif #ifdef CONFIG_POWER_KEY_CLR_RESET wake_lock_destroy(&key_reset_clr_wake_lock); #endif kfree(ds); err_ds_alloc_failed: return ret; }
static void cc2520_sack_start_timer(void)
{
	ktime_t kt;

	kt = ktime_set(0, 1000 * ack_timeout);
	hrtimer_start(&timeout_timer, kt, HRTIMER_MODE_REL);
}
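A relative one-shot start like cc2520_sack_start_timer() only works if the timer was initialised elsewhere with a callback attached. The following is a minimal sketch of that assumed pairing; the callback and setup-helper names are hypothetical, while hrtimer_init(), the .function field and the HRTIMER_NORESTART return value are standard hrtimer API.

/* Sketch only: assumed setup for the one-shot ACK timeout above,
 * using the timeout_timer declared elsewhere in the driver. */
static enum hrtimer_restart cc2520_sack_timeout_cb(struct hrtimer *timer)	/* hypothetical name */
{
	/* handle the missing ACK here (retransmit, report failure, ...) */
	return HRTIMER_NORESTART;	/* one-shot: do not re-arm */
}

static void cc2520_sack_timer_setup(void)	/* hypothetical name */
{
	hrtimer_init(&timeout_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timeout_timer.function = cc2520_sack_timeout_cb;
}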
tEplKernel EplTimerHighReskModifyTimerNs(tEplTimerHdl *pTimerHdl_p, unsigned long long ullTimeNs_p, tEplTimerkCallback pfnCallback_p, unsigned long ulArgument_p, BOOL fContinuously_p) { tEplKernel Ret; unsigned int uiIndex; tEplTimerHighReskTimerInfo *pTimerInfo; ktime_t RelTime; Ret = kEplSuccessful; // check pointer to handle if (pTimerHdl_p == NULL) { Ret = kEplTimerInvalidHandle; goto Exit; } if (*pTimerHdl_p == 0) { // no timer created yet // search free timer info structure pTimerInfo = &EplTimerHighReskInstance_l.m_aTimerInfo[0]; for (uiIndex = 0; uiIndex < TIMER_COUNT; uiIndex++, pTimerInfo++) { if (pTimerInfo->m_EventArg.m_TimerHdl == 0) { // free structure found break; } } if (uiIndex >= TIMER_COUNT) { // no free structure found Ret = kEplTimerNoTimerCreated; goto Exit; } pTimerInfo->m_EventArg.m_TimerHdl = HDL_INIT(uiIndex); } else { uiIndex = HDL_TO_IDX(*pTimerHdl_p); if (uiIndex >= TIMER_COUNT) { // invalid handle Ret = kEplTimerInvalidHandle; goto Exit; } pTimerInfo = &EplTimerHighReskInstance_l.m_aTimerInfo[uiIndex]; } /* * increment timer handle * (if timer expires right after this statement, the user * would detect an unknown timer handle and discard it) */ pTimerInfo->m_EventArg.m_TimerHdl = HDL_INC(pTimerInfo->m_EventArg.m_TimerHdl); *pTimerHdl_p = pTimerInfo->m_EventArg.m_TimerHdl; // reject too small time values if ((fContinuously_p && (ullTimeNs_p < TIMER_MIN_VAL_CYCLE)) || (!fContinuously_p && (ullTimeNs_p < TIMER_MIN_VAL_SINGLE))) { Ret = kEplTimerNoTimerCreated; goto Exit; } pTimerInfo->m_EventArg.m_ulArg = ulArgument_p; pTimerInfo->m_pfnCallback = pfnCallback_p; pTimerInfo->m_fContinuously = fContinuously_p; pTimerInfo->m_ullPeriod = ullTimeNs_p; /* * HRTIMER_MODE_REL does not influence general handling of this timer. * It only sets relative mode for this start operation. * -> Expire time is calculated by: Now + RelTime * hrtimer_start also skips pending timer events. * The state HRTIMER_STATE_CALLBACK is ignored. * We have to cope with that in our callback function. */ RelTime = ktime_add_ns(ktime_set(0, 0), ullTimeNs_p); hrtimer_start(&pTimerInfo->m_Timer, RelTime, HRTIMER_MODE_REL); Exit: return Ret; }
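A side note on the relative-expiry construction above: ktime_add_ns(ktime_set(0, 0), ullTimeNs_p) simply builds a ktime_t from a nanosecond count. A sketch of the more direct form, assuming ns_to_ktime() from <linux/ktime.h> is available in the target kernel; the helper name is hypothetical.

/* Sketch (hypothetical helper): same value as
 * ktime_add_ns(ktime_set(0, 0), time_ns). */
static inline ktime_t epl_rel_time_from_ns(unsigned long long time_ns)
{
	return ns_to_ktime(time_ns);
}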
void netmap_mitigation_start(struct nm_generic_mit *mit)
{
	hrtimer_start(&mit->mit_timer, ktime_set(0, netmap_generic_mit),
		      HRTIMER_MODE_REL);
}
static enum hrtimer_restart ev3_output_port_timer_callback(struct hrtimer *timer) { struct ev3_output_port_data *data = container_of(timer, struct ev3_output_port_data, timer); enum motor_type prev_motor_type = data->motor_type; unsigned new_pin_state_flags = 0; unsigned new_pin5_mv = 0; hrtimer_forward_now(timer, ktime_set(0, OUTPUT_PORT_POLL_NS)); data->timer_loop_cnt++; switch(data->con_state) { case CON_STATE_INIT: if (!data->motor) { ev3_output_port_float(data); data->timer_loop_cnt = 0; data->motor_type = MOTOR_NONE; data->con_state = CON_STATE_INIT_SETTLE; } break; case CON_STATE_INIT_SETTLE: if (data->timer_loop_cnt >= SETTLE_CNT) { data->timer_loop_cnt = 0; data->con_state = CON_STATE_NO_DEV; } break; case CON_STATE_NO_DEV: new_pin5_mv = legoev3_analog_out_pin5_value(data->analog, data->id); if (gpio_get_value(data->gpio[GPIO_PIN6_DIR].gpio)) new_pin_state_flags |= BIT(PIN_STATE_FLAG_PIN6_HIGH); if ((new_pin5_mv < PIN5_BALANCE_LOW) || (new_pin5_mv > PIN5_BALANCE_HIGH)) new_pin_state_flags |= BIT(PIN_STATE_FLAG_PIN5_LOADED); if (new_pin_state_flags != data->pin_state_flags) { data->pin_state_flags = new_pin_state_flags; data->timer_loop_cnt = 0; } if (data->pin_state_flags && (data->timer_loop_cnt >= ADD_CNT)) { data->pin5_float_mv = new_pin5_mv; data->timer_loop_cnt = 0; gpio_direction_output(data->gpio[GPIO_PIN6_DIR].gpio, 0); data->con_state = CON_STATE_PIN6_SETTLE; } break; case CON_STATE_PIN6_SETTLE: new_pin5_mv = legoev3_analog_out_pin5_value(data->analog, data->id); if (data->timer_loop_cnt >= SETTLE_CNT) { data->pin5_low_mv = new_pin5_mv; data->timer_loop_cnt = 0; gpio_direction_input(data->gpio[GPIO_PIN6_DIR].gpio); data->con_state = CON_STATE_CONNECTED; } break; case CON_STATE_CONNECTED: /* * Make a temporary variable that we can use to determine the relative * difference between pin5_float_mv and pin5_low_mv */ new_pin5_mv = ADC_REF + data->pin5_float_mv - data->pin5_low_mv; if ((new_pin5_mv > (ADC_REF - 50)) && (new_pin5_mv < (ADC_REF + 50))) { // The pin5 values are the same, let's see what we have! 
if ((data->pin5_float_mv >= PIN5_BALANCE_LOW) && (data->pin5_float_mv <= PIN5_BALANCE_HIGH) && (data->pin_state_flags & (0x01 << PIN_STATE_FLAG_PIN6_HIGH))) { /* NXT TOUCH SENSOR, NXT SOUND SENSOR or NEW UART SENSOR */ data->motor_type = MOTOR_ERR; data->con_state = CON_STATE_WAITING_FOR_DISCONNECT; } else if (data->pin5_float_mv < PIN5_NEAR_GND) { /* NEW DUMB SENSOR */ data->motor_type = MOTOR_ERR; data->con_state = CON_STATE_WAITING_FOR_DISCONNECT; } else if ((data->pin5_float_mv >= PIN5_LIGHT_LOW) && (data->pin5_float_mv <= PIN5_LIGHT_HIGH)) { /* NXT LIGHT SENSOR */ data->motor_type = MOTOR_ERR; data->con_state = CON_STATE_WAITING_FOR_DISCONNECT; } else if ((data->pin5_float_mv >= PIN5_IIC_LOW) && (data->pin5_float_mv <= PIN5_IIC_HIGH)) { /* NXT IIC SENSOR */ data->motor_type = MOTOR_ERR; data->con_state = CON_STATE_WAITING_FOR_DISCONNECT; } else if (data->pin5_float_mv < PIN5_BALANCE_LOW) { data->motor_type = MOTOR_TACHO; if (data->pin5_float_mv > PIN5_MINITACHO_HIGH2) { data->motor_id = LEGO_EV3_LARGE_MOTOR; } else if (data->pin5_float_mv > PIN5_MINITACHO_LOW2) { data->motor_id = LEGO_EV3_MEDIUM_MOTOR; } else { data->motor_id = LEGO_EV3_LARGE_MOTOR; } data->con_state = CON_STATE_DEVICE_CONNECTED; } else { gpio_direction_output(data->gpio[GPIO_PIN5].gpio, 1); data->timer_loop_cnt = 0; data->con_state = CON_STATE_PIN5_SETTLE; } /* Value5Float is NOT equal to Value5Low */ } else if ((data->pin5_float_mv > PIN5_NEAR_GND) && (data->pin5_float_mv < PIN5_BALANCE_LOW)) { /* NEW ACTUATOR */ data->motor_type = MOTOR_ERR; data->con_state = CON_STATE_WAITING_FOR_DISCONNECT; } else { data->motor_type = MOTOR_ERR; data->con_state = CON_STATE_WAITING_FOR_DISCONNECT; } break; case CON_STATE_PIN5_SETTLE: /* Update connection type, may need to force pin5 low to determine motor type */ if (data->timer_loop_cnt >= SETTLE_CNT) { data->pin5_low_mv = legoev3_analog_out_pin5_value(data->analog, data->id); data->timer_loop_cnt = 0; gpio_direction_output(data->gpio[GPIO_PIN5].gpio, 0); if (data->pin5_low_mv < PIN5_MINITACHO_LOW1) { data->motor_type = MOTOR_ERR; } else { data->motor_type = MOTOR_TACHO; if (data->pin5_low_mv < PIN5_MINITACHO_HIGH1) data->motor_id = LEGO_EV3_MEDIUM_MOTOR; else data->motor_id = LEGO_EV3_LARGE_MOTOR; } data->con_state = CON_STATE_DEVICE_CONNECTED; } break; case CON_STATE_DEVICE_CONNECTED: data->timer_loop_cnt = 0; if (data->motor_type != MOTOR_ERR && !work_busy(&data->work)) { INIT_WORK(&data->work, ev3_output_port_register_motor); schedule_work(&data->work); data->con_state = CON_STATE_WAITING_FOR_DISCONNECT; } break; case CON_STATE_WAITING_FOR_DISCONNECT: new_pin5_mv = legoev3_analog_out_pin5_value(data->analog, data->id); if ((new_pin5_mv < PIN5_BALANCE_LOW) || (new_pin5_mv > PIN5_BALANCE_HIGH)) data->timer_loop_cnt = 0; if ((data->timer_loop_cnt >= REMOVE_CNT) && !work_busy(&data->work) && data) { INIT_WORK(&data->work, ev3_output_port_unregister_motor); schedule_work(&data->work); data->con_state = CON_STATE_INIT; } break; default: data->con_state = CON_STATE_INIT; break; } /* * data->tacho_motor_type determines the status for the lego-port class * so we need to trigger a change uevent when it changes. */ if (prev_motor_type != data->motor_type) schedule_work(&data->change_uevent_work); return HRTIMER_RESTART; }
struct lego_port_device *ev3_output_port_register(struct ev3_output_port_platform_data *pdata, struct device *parent) { struct ev3_output_port_data *data; struct pwm_device *pwm; int err; if (WARN(!pdata, "Platform data is required.")) return ERR_PTR(-EINVAL); data = kzalloc(sizeof(struct ev3_output_port_data), GFP_KERNEL); if (!data) return ERR_PTR(-ENOMEM); data->id = pdata->id; data->analog = get_legoev3_analog(); if (IS_ERR(data->analog)) { dev_err(parent, "Could not get legoev3-analog device.\n"); err = PTR_ERR(data->analog); goto err_request_legoev3_analog; } data->gpio[GPIO_PIN1].gpio = pdata->pin1_gpio; data->gpio[GPIO_PIN1].flags = GPIOF_IN; data->gpio[GPIO_PIN1].label = "pin1"; data->gpio[GPIO_PIN2].gpio = pdata->pin2_gpio; data->gpio[GPIO_PIN2].flags = GPIOF_IN; data->gpio[GPIO_PIN2].label = "pin2"; data->gpio[GPIO_PIN5].gpio = pdata->pin5_gpio; data->gpio[GPIO_PIN5].flags = GPIOF_IN; data->gpio[GPIO_PIN5].label = "pin5"; data->gpio[GPIO_PIN5_INT].gpio = pdata->pin5_int_gpio; data->gpio[GPIO_PIN5_INT].flags = GPIOF_IN; data->gpio[GPIO_PIN5_INT].label = "pin5_tacho"; data->gpio[GPIO_PIN6_DIR].gpio = pdata->pin6_dir_gpio; data->gpio[GPIO_PIN6_DIR].flags = GPIOF_IN; data->gpio[GPIO_PIN6_DIR].label = "pin6"; err = gpio_request_array(data->gpio, ARRAY_SIZE(data->gpio)); if (err) { dev_err(parent, "Requesting GPIOs failed.\n"); goto err_gpio_request_array; } data->out_port.name = ev3_output_port_type.name; snprintf(data->out_port.port_name, LEGO_PORT_NAME_SIZE, "out%c", data->id + 'A'); pwm = pwm_get(NULL, data->out_port.port_name); if (IS_ERR(pwm)) { dev_err(parent, "Could not get pwm! (%ld)\n", PTR_ERR(pwm)); err = PTR_ERR(pwm); goto err_pwm_get; } err = pwm_config(pwm, 0, NSEC_PER_SEC / 10000); if (err) { dev_err(parent, "Failed to set pwm duty percent and frequency! (%d)\n", err); goto err_pwm_config; } err = pwm_enable(pwm); if (err) { dev_err(parent, "Failed to start pwm! (%d)\n", err); goto err_pwm_start; } /* This lets us set the pwm duty cycle in an atomic context */ pm_runtime_irq_safe(pwm->chip->dev); data->pwm = pwm; data->out_port.num_modes = NUM_EV3_OUTPUT_PORT_MODE; data->out_port.mode_info = legoev3_output_port_mode_info; data->out_port.set_mode = ev3_output_port_set_mode; data->out_port.set_device = ev3_output_port_set_device; data->out_port.get_status = ev3_output_port_get_status; data->out_port.dc_motor_ops = &ev3_output_port_motor_ops; data->out_port.context = data; err = lego_port_register(&data->out_port, &ev3_output_port_type, parent); if (err) { dev_err(parent, "Failed to register lego_port_device. (%d)\n", err); goto err_lego_port_register; } INIT_WORK(&data->change_uevent_work, ev3_output_port_change_uevent_work); INIT_WORK(&data->work, NULL); data->con_state = CON_STATE_INIT; hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); data->timer.function = ev3_output_port_timer_callback; hrtimer_start(&data->timer, ktime_set(0, OUTPUT_PORT_POLL_NS), HRTIMER_MODE_REL); return &data->out_port; err_lego_port_register: pwm_disable(pwm); err_pwm_start: err_pwm_config: pwm_put(pwm); err_pwm_get: gpio_free_array(data->gpio, ARRAY_SIZE(data->gpio)); err_gpio_request_array: put_legoev3_analog(data->analog); err_request_legoev3_analog: kfree(data); return ERR_PTR(err); }
static enum hrtimer_restart tx_timer_func(struct hrtimer *timer) { struct mem_link_device *mld; struct link_device *ld; struct modem_ctl *mc; int i; bool need_schedule; u16 mask; unsigned long flags; mld = container_of(timer, struct mem_link_device, tx_timer); ld = &mld->link_dev; mc = ld->mc; need_schedule = false; mask = 0; spin_lock_irqsave(&mc->lock, flags); if (unlikely(!ipc_active(mld))) goto exit; #ifdef CONFIG_LINK_POWER_MANAGEMENT_WITH_FSM if (mld->link_active) { if (!mld->link_active(mld)) { need_schedule = true; goto exit; } } #endif for (i = 0; i < MAX_SIPC5_DEVICES; i++) { struct mem_ipc_device *dev = mld->dev[i]; int ret; if (unlikely(under_tx_flow_ctrl(mld, dev))) { ret = check_tx_flow_ctrl(mld, dev); if (ret < 0) { if (ret == -EBUSY || ret == -ETIME) { need_schedule = true; continue; } else { mem_forced_cp_crash(mld); need_schedule = false; goto exit; } } } ret = tx_frames_to_dev(mld, dev); if (unlikely(ret < 0)) { if (ret == -EBUSY || ret == -ENOSPC) { need_schedule = true; start_tx_flow_ctrl(mld, dev); continue; } else { mem_forced_cp_crash(mld); need_schedule = false; goto exit; } } if (ret > 0) mask |= msg_mask(dev); if (!skb_queue_empty(dev->skb_txq)) need_schedule = true; } if (!need_schedule) { for (i = 0; i < MAX_SIPC5_DEVICES; i++) { if (!txq_empty(mld->dev[i])) { need_schedule = true; break; } } } if (mask) send_ipc_irq(mld, mask2int(mask)); exit: if (need_schedule) { ktime_t ktime = ktime_set(0, ms2ns(TX_PERIOD_MS)); hrtimer_start(timer, ktime, HRTIMER_MODE_REL); } spin_unlock_irqrestore(&mc->lock, flags); return HRTIMER_NORESTART; }
/* Do state change timing delay. */ static void at86rf230_async_state_delay(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; struct at86rf2xx_chip_data *c = lp->data; bool force = false; ktime_t tim; /* The force state changes are will show as normal states in the * state status subregister. We change the to_state to the * corresponding one and remember if it was a force change, this * differs if we do a state change from STATE_BUSY_RX_AACK. */ switch (ctx->to_state) { case STATE_FORCE_TX_ON: ctx->to_state = STATE_TX_ON; force = true; break; case STATE_FORCE_TRX_OFF: ctx->to_state = STATE_TRX_OFF; force = true; break; default: break; } switch (ctx->from_state) { case STATE_TRX_OFF: switch (ctx->to_state) { case STATE_RX_AACK_ON: tim = ktime_set(0, c->t_off_to_aack * NSEC_PER_USEC); /* state change from TRX_OFF to RX_AACK_ON to do a * calibration, we need to reset the timeout for the * next one. */ lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT; goto change; case STATE_TX_ARET_ON: case STATE_TX_ON: tim = ktime_set(0, c->t_off_to_tx_on * NSEC_PER_USEC); /* state change from TRX_OFF to TX_ON or ARET_ON to do * a calibration, we need to reset the timeout for the * next one. */ lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT; goto change; default: break; } break; case STATE_BUSY_RX_AACK: switch (ctx->to_state) { case STATE_TRX_OFF: case STATE_TX_ON: /* Wait for worst case receiving time if we * didn't make a force change from BUSY_RX_AACK * to TX_ON or TRX_OFF. */ if (!force) { tim = ktime_set(0, (c->t_frame + c->t_p_ack) * NSEC_PER_USEC); goto change; } break; default: break; } break; /* Default value, means RESET state */ case STATE_P_ON: switch (ctx->to_state) { case STATE_TRX_OFF: tim = ktime_set(0, c->t_reset_to_off * NSEC_PER_USEC); goto change; default: break; } break; default: break; } /* Default delay is 1us in the most cases */ udelay(1); at86rf230_async_state_timer(&ctx->timer); return; change: hrtimer_start(&ctx->timer, tim, HRTIMER_MODE_REL); }
int mma8452_update_odr(struct gs_mma8452_data *mma, int poll_interval_ms)
{
	int err = -1;
	int i;
	u8 val;
	unsigned long interval;

	interval = poll_interval_ms * 1000;

	/* Pick the slowest ODR whose cutoff still covers the requested interval */
	for (i = ARRAY_SIZE(mma8452_odr_table) - 1; i >= 0; i--) {
		if (mma8452_odr_table[i].cutoff_us <= interval)
			break;
	}
	if (i < 0)
		i = 0;

	/* if (mma->mma_status.active == MMA_ACTIVED) */
	if (atomic_read(&mma->chip_enabled))
		hrtimer_cancel(&mma->timer);

	val = i2c_smbus_read_byte_data(mma->client, MMA8452_CTRL_REG1);
	if (val & 0x01) {
		/* If the chip was active, put it into standby first */
		val &= 0xc6;
		err = i2c_smbus_write_byte_data(mma->client,
						MMA8452_CTRL_REG1, val);
		if (err) {
			dev_err(&mma->client->dev,
				"%s: disable write error.\n", __func__);
			goto error;
		}
	}

	val &= 0xC7;
	val |= mma8452_odr_table[i].mask;
	err = i2c_smbus_write_byte_data(mma->client, MMA8452_CTRL_REG1, val);
	if (err < 0) {
		dev_err(&mma->client->dev, "%s: odr write error.\n", __func__);
		goto error;
	}

	/* if (mma->mma_status.active == MMA_ACTIVED) */
	if (atomic_read(&mma->chip_enabled)) {
		val = i2c_smbus_read_byte_data(mma->client, MMA8452_CTRL_REG1);
		err = i2c_smbus_write_byte_data(mma->client, MMA8452_CTRL_REG1,
						val | 0x01);
		if (err) {
			dev_err(&mma->client->dev,
				"%s: enable write error.\n", __func__);
			goto error;
		}
		hrtimer_start(&mma->timer, ktime_set(0, NORMAL_TM),
			      HRTIMER_MODE_REL);
	}
	return err;

error:
	dev_err(&mma->client->dev, "update odr failed %d\n", err);
	return err;
}
static int gp2a_opt_probe(struct i2c_client *client, const struct i2c_device_id *id) { int err = 0; int i; #if USE_INTERRUPT int irq; #endif int config; int ret; int a; struct gp2a_data *gp2a; #ifdef STM_DEBUG printk(KERN_INFO "%s\n",__FUNCTION__); #endif #if defined(CONFIG_MACH_VASTO) vreg_proximity = vreg_get(NULL, "vcama"); ret = vreg_set_level(vreg_proximity, 3000); // 2800 -> 3000 H/W requeset if (ret) { printk(KERN_ERR "%s: vreg set level failed (%d)\n", __func__, ret); return -EIO; } ret = vreg_enable(vreg_proximity); if (ret) { printk(KERN_ERR "%s: vreg enable failed (%d)\n", __func__, ret); return -EIO; } #else if( board_hw_revision < 3 ) { vreg_proximity = vreg_get(0, "vcama"); if (IS_ERR(vreg_proximity)) { printk("===== [PROXIMITY] proximity IS_ERR TEST =====\n"); return PTR_ERR(vreg_proximity); } vreg_set_level(vreg_proximity, 12); // set to 3.0V voltage vreg_enable(vreg_proximity); // voltage } else { gpio_set_value(VIR_LED_EN, 1); } #endif if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { printk(KERN_INFO "[GP2A] i2c_check_functionality error\n"); err = -ENODEV; goto exit; } if ( !i2c_check_functionality(client->adapter,I2C_FUNC_SMBUS_BYTE_DATA) ) { printk(KERN_INFO "[GP2A] byte op is not permited.\n"); goto exit; } /* OK. For now, we presume we have a valid client. We now create the client structure, even though we cannot fill it completely yet. */ if (!(gp2a = kzalloc(sizeof(struct gp2a_data), GFP_KERNEL))) { err = -ENOMEM; goto exit; } memset(gp2a, 0, sizeof(struct gp2a_data)); gp2a->client = client; i2c_set_clientdata(client, gp2a); opt_i2c_client = client; if (i2c_smbus_read_byte(client) < 0) { printk(KERN_ERR "[GP2A] i2c_smbus_read_byte error!!\n"); goto exit_kfree; } else { printk("GP2A Device detected!\n"); } printk("[%s] slave addr = %x\n", __func__, client->addr); /* Input device Settings */ if(USE_INPUT_DEVICE) { gp2a->input_dev = input_allocate_device(); if (gp2a->input_dev == NULL) { pr_err("Failed to allocate input device\n"); return -ENOMEM; } gp2a->input_dev->name = "proximity"; set_bit(EV_SYN,gp2a->input_dev->evbit); set_bit(EV_ABS,gp2a->input_dev->evbit); input_set_abs_params(gp2a->input_dev, ABS_DISTANCE, 0, 1, 0, 0); err = input_register_device(gp2a->input_dev); if (err) { pr_err("Unable to register %s input device\n", gp2a->input_dev->name); input_free_device(gp2a->input_dev); kfree(gp2a); return -1; } } #if USE_INTERRUPT /* WORK QUEUE Settings */ gp2a_wq = create_singlethread_workqueue("gp2a_wq"); if (!gp2a_wq) return -ENOMEM; INIT_WORK(&gp2a->work_prox, gp2a_work_func_prox); gprintk("Workqueue Settings complete\n"); #endif /* misc device Settings */ err = misc_register(&proximity_device); if(err) { pr_err(KERN_ERR "misc_register failed - prox \n"); } /* wake lock init */ wake_lock_init(&prx_wake_lock, WAKE_LOCK_SUSPEND, "prx_wake_lock"); /* set sysfs for light sensor */ proxsensor_class = class_create(THIS_MODULE, "proxsensor"); if (IS_ERR(proxsensor_class)) pr_err("Failed to create class(proxsensor)!\n"); switch_cmd_dev = device_create(proxsensor_class, NULL, 0, NULL, "switch_cmd"); if (device_create_file(switch_cmd_dev, &dev_attr_proxsensor_file_state) < 0) pr_err("Failed to create device file(%s)!\n", dev_attr_proxsensor_file_state.attr.name); dev_set_drvdata(switch_cmd_dev,gp2a); /* ktime init */ timeA = ktime_set(0,0); timeB = ktime_set(0,0); /* gpio config */ // set in board file config = GPIO_CFG(GPIO_SENSE_OUT, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA); err = gpio_tlmm_config(config, GPIO_CFG_ENABLE); if (err) 
printk(KERN_ERR "%s: gpio_tlmm_config(%#x)=%d\n", __func__, GPIO_SENSE_OUT, err); //for(a = 0; a < 10 ; a++) //{ /* GP2A Regs INIT SETTINGS */ for(i=1;i<5;i++) { opt_i2c_write((u8)(i),&gp2a_original_image[i]); mdelay(5); mdelay(5); // printk("%d",i); } //} mdelay(2); #if USE_INTERRUPT /* INT Settings */ irq = gpio_to_irq(GPIO_SENSE_OUT); gp2a->irq = -1; set_irq_type(irq, IRQ_TYPE_EDGE_BOTH); err = request_irq(irq, gp2a_irq_handler, IRQF_DISABLED, "gp2a_int", gp2a); if (err) { printk("[GP2A] request_irq failed for gp2a\n"); goto exit_kfree; } printk("[GP2A] register irq = %d\n",irq); err = set_irq_wake(irq, 1); printk("[GP2A] register wakeup source = %d\n",err); if (err) printk("[GP2A] register wakeup source failed\n"); gp2a->irq = irq; gprintk("INT Settings complete\n"); #endif // maintain power-down mode before using sensor gp2a_off(gp2a,ALL); //++ // test for sensor /* printk("[GP2A] curr prox value = %d\n", gpio_get_value(GPIO_SENSE_OUT)); gp2a_on(gp2a,PROXIMITY); printk("[GP2A] curr prox value = %d\n", gpio_get_value(GPIO_SENSE_OUT)); //-- // maintain power-down mode before using sensor //ESD test sleep gp2a_off(gp2a,ALL); */ printk("gp2a_opt_probe is OK!!\n"); return 0; exit_kfree: kfree(gp2a); exit: return err; }
void netmap_mitigation_restart(struct nm_generic_mit *mit)
{
	hrtimer_forward_now(&mit->mit_timer,
			    ktime_set(0, netmap_generic_mit));
}
static int constant_flashlight_ioctl(MUINT32 cmd, MUINT32 arg) { int i4RetValue = 0; int iFlashType = (int)FLASHLIGHT_NONE; int ior; int iow; int iowr; ior = _IOR(FLASHLIGHT_MAGIC,0, int); iow = _IOW(FLASHLIGHT_MAGIC,0, int); iowr = _IOWR(FLASHLIGHT_MAGIC,0, int); PK_DBG("constant_flashlight_ioctl() line=%d cmd=%d, ior=%d, iow=%d iowr=%d arg=%d\n",__LINE__, cmd, ior, iow, iowr, arg); PK_DBG("constant_flashlight_ioctl() line=%d cmd-ior=%d, cmd-iow=%d cmd-iowr=%d arg=%d\n",__LINE__, cmd-ior, cmd-iow, cmd-iowr, arg); switch(cmd) { case FLASH_IOC_SET_TIME_OUT_TIME_MS: PK_DBG("FLASH_IOC_SET_TIME_OUT_TIME_MS: %d\n",arg); g_timeOutTimeMs=arg; break; case FLASH_IOC_SET_DUTY : PK_DBG("FLASHLIGHT_DUTY: %d\n",arg); g_duty=arg; FL_dim_duty(arg); break; case FLASH_IOC_SET_STEP: PK_DBG("FLASH_IOC_SET_STEP: %d\n",arg); g_step=arg; FL_step(arg); break; case FLASH_IOC_SET_ONOFF : PK_DBG("FLASHLIGHT_ONOFF: %d\n",arg); if(arg==1) { if(g_timeOutTimeMs!=0) { ktime_t ktime; ktime = ktime_set( 0, g_timeOutTimeMs*1000000 ); hrtimer_start( &g_timeOutTimer, ktime, HRTIMER_MODE_REL ); } FL_enable(); g_strobe_On=1; } else { FL_disable(); hrtimer_cancel( &g_timeOutTimer ); g_strobe_On=0; } break; case FLASHLIGHTIOC_G_FLASHTYPE: iFlashType = FLASHLIGHT_LED_CONSTANT; if(copy_to_user((void __user *) arg , (void*)&iFlashType , _IOC_SIZE(cmd))) { PK_DBG("[strobe_ioctl] ioctl copy to user failed\n"); return -EFAULT; } break; default : PK_DBG(" No such command \n"); i4RetValue = -EPERM; break; } return i4RetValue; }
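The ioctl above encodes its timeout as ktime_set(0, g_timeOutTimeMs * 1000000), keeping the whole delay in the nanosecond argument. Other snippets in this section (the PMIC vibrator and the flashlight driver below) instead split milliseconds into a (sec, nsec) pair, which keeps the nanosecond part below NSEC_PER_SEC and avoids overflowing the unsigned long nanosecond argument on 32-bit builds for delays beyond roughly four seconds. A minimal sketch of that conversion follows; the helper name is hypothetical, MSEC_PER_SEC and NSEC_PER_MSEC are standard kernel constants, and mainline kernels also provide ms_to_ktime() for the same purpose.

/* Sketch (hypothetical helper): millisecond timeout -> relative ktime_t,
 * split into (sec, nsec) as the vibrator/flashlight code below does. */
static inline ktime_t timeout_ms_to_ktime(unsigned long ms)
{
	return ktime_set(ms / MSEC_PER_SEC,
			 (ms % MSEC_PER_SEC) * NSEC_PER_MSEC);
}

/* usage (sketch):
 *	hrtimer_start(&g_timeOutTimer, timeout_ms_to_ktime(g_timeOutTimeMs),
 *		      HRTIMER_MODE_REL);
 */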
static void pm8xxx_vib_enable(struct timed_output_dev *dev, int value) { struct pm8xxx_vib *vib = container_of(dev, struct pm8xxx_vib, timed_dev); unsigned long flags; /* */ int origin_value; #ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_REST_POWER struct timeval current_tv; struct timeval interval_tv; #endif #ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_OVERDRIVE int over_ms = vib->overdrive_ms; #endif #ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_MIN_TIMEOUT spin_lock_irqsave(&vib->lock, flags); if (value == 0 && vib->pre_value <= vib->min_timeout_ms) { spin_unlock_irqrestore(&vib->lock, flags); return; } spin_unlock_irqrestore(&vib->lock, flags); #endif /* */ if(unlikely(debug_mask)) printk(KERN_INFO "pm8xxx_vib_enable value:%d\n",value); retry: spin_lock_irqsave(&vib->lock, flags); if (hrtimer_try_to_cancel(&vib->vib_timer) < 0) { spin_unlock_irqrestore(&vib->lock, flags); cpu_relax(); goto retry; } #ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_OVERDRIVE if (hrtimer_try_to_cancel(&vib->vib_overdrive_timer) < 0) { spin_unlock_irqrestore(&vib->lock, flags); cpu_relax(); goto retry; } #endif /* */ origin_value = value; if (value == 0) vib->state = 0; else { /* Set Min Timeout for normal fuction */ #ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_MIN_TIMEOUT value = (value < vib->min_timeout_ms ? vib->min_timeout_ms : value); #endif value = (value > vib->pdata->max_timeout_ms ? vib->pdata->max_timeout_ms : value); vib->state = 1; #ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_MIN_TIMEOUT vib->pre_value = value; #endif #ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_OVERDRIVE if(vib->overdrive_ms > 0 && value <= vib->overdrive_range_ms) { vib->remain_vib_ms = value - over_ms; vib->level = vib->max_level_mv / 100; vib->active_level = vib->request_level; if(unlikely(debug_mask)) printk(KERN_INFO "start overdrive over_level:%d over_ms:%d \n",vib->level,over_ms); #ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_REST_POWER do_gettimeofday(¤t_tv); if(vib->vib_state) { // vibrator is working now. struct timeval min_timeout_tv; min_timeout_tv.tv_sec = vib->min_timeout_ms / 1000; min_timeout_tv.tv_usec = (vib->min_timeout_ms % 1000) * 1000; get_timeval_interval(¤t_tv, &(vib->start_tv), &interval_tv); if(unlikely(debug_mask)) { printk(KERN_INFO "vib_state is true, cur:%ld.%06ld, sta:%ld.%06ld, itv:%ld.%06ld\n", current_tv.tv_sec, current_tv.tv_usec, vib->start_tv.tv_sec, vib->start_tv.tv_usec, interval_tv.tv_sec, interval_tv.tv_usec ); } // if greater than min_timeout, no need over drive and min time. if(compare_timeval_interval(&interval_tv, &min_timeout_tv)==1) { value = origin_value; if(unlikely(debug_mask)) printk(KERN_INFO "interval greater than min_timeout, start normal vib %dms\n",value); goto NORMAL_VIB_START; } // if less than min_timeout, need corrected value else { int interval_ms; interval_ms = (interval_tv.tv_sec * 1000) + (interval_tv.tv_usec / 1000000); if(over_ms > interval_ms) { over_ms = over_ms - interval_ms; vib->remain_vib_ms = origin_value; if(unlikely(debug_mask)) printk(KERN_INFO "interval less than min_timeout, start overdrive %dms, remain %dms\n", over_ms, vib->remain_vib_ms); goto OVERDRIVE_VIB_START; } else { value = value - interval_ms; if(unlikely(debug_mask)) printk(KERN_INFO "interval less than min_timeout, start normal vib %dms\n",value); goto NORMAL_VIB_START; } } } else { // vibrator is not working now. 
struct timeval min_stop_tv; min_stop_tv.tv_sec = vib->min_stop_ms / 1000; min_stop_tv.tv_usec = (vib->min_stop_ms % 1000) * 1000; get_timeval_interval(¤t_tv, &(vib->stop_tv), &interval_tv); if(unlikely(debug_mask)) { printk(KERN_INFO "vib_state is false, cur:%ld.%06ld, sto:%ld.%06ld, itv:%ld.%06ld\n", current_tv.tv_sec, current_tv.tv_usec, vib->stop_tv.tv_sec, vib->stop_tv.tv_usec, interval_tv.tv_sec, interval_tv.tv_usec ); } // if greater than min_stop_tv, start vibration over drive and value. if(compare_timeval_interval(&interval_tv, &min_stop_tv)==1) { if(unlikely(debug_mask)) printk(KERN_INFO "greater than min_stop_timeout, start overdrive %dms, remain %dms\n", over_ms, vib->remain_vib_ms); goto OVERDRIVE_VIB_START; } // if less than min_stop_tv, reduce over drive time. else { int interval_ms; interval_ms = (interval_tv.tv_sec * 1000) + (interval_tv.tv_usec / 1000); over_ms = interval_ms / (vib->min_stop_ms / vib->overdrive_ms) / 2; vib->remain_vib_ms = (value - over_ms) / 2; if(unlikely(debug_mask)) printk(KERN_INFO "less than min_stop_timeout, start overdrive %dms, remain %dms\n", over_ms, vib->remain_vib_ms); goto OVERDRIVE_VIB_START; } } #else goto OVERDRIVE_VIB_START; #endif } else #endif { goto NORMAL_VIB_START; } } NORMAL_VIB_START: #ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_VOL vib->level = vib->request_level; #else vib->level = vib->default_level; #endif hrtimer_start(&vib->vib_timer, ktime_set(value / 1000, (value % 1000) * 1000000), HRTIMER_MODE_REL); goto FINISH_VIB_ENABLE; #ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_OVERDRIVE OVERDRIVE_VIB_START: hrtimer_start(&vib->vib_overdrive_timer, ktime_set(over_ms / 1000, (over_ms % 1000) * 1000000), HRTIMER_MODE_REL); #endif FINISH_VIB_ENABLE: spin_unlock_irqrestore(&vib->lock, flags); schedule_work(&vib->work); }
static int lbee9qmb_rfkill_btwake_probe(struct platform_device *pdev) { struct lbee9qmb_platform_data *plat = pdev->dev.platform_data; struct rfkill *rfkill; int rc; int irq; int ret; int host_wake; if (!plat) { dev_err(&pdev->dev, "no platform data\n"); return -ENOSYS; } wake_lock_init(&bt_lpm.bt_wake_lock, WAKE_LOCK_SUSPEND, "bt_wake"); #ifdef BRCM_HOST_WAKE wake_lock_init(&bt_lpm.host_wake_lock, WAKE_LOCK_SUSPEND, "host_wake"); bt_lpm.gpio_host_wake=plat->gpio_hostwake; //spin_lock_init(&bt_lpm.bt_lock); INIT_WORK(&bt_lpm.host_wake_work, brcm_host_wake_work_func); #endif rc = gpio_request(plat->gpio_btwake, "lbee9qmb_reset_btwake"); if (rc < 0) { dev_err(&pdev->dev, "gpio_request failed\n"); return rc; } rfkill = rfkill_alloc("lbee9qmb-rfkill_btwake", &pdev->dev, RFKILL_TYPE_BLUETOOTH, &lbee9qmb_rfkill_btwake_ops, pdev); if (!rfkill) { rc = -ENOMEM; goto fail_gpio; } platform_set_drvdata(pdev, rfkill); gpio_direction_output(plat->gpio_btwake, 1); rc = rfkill_register(rfkill); if (rc < 0) goto fail_alloc; #ifdef BRCM_HOST_WAKE rc = gpio_request(plat->gpio_hostwake, "lbee9qmb_reset_hostwake"); gpio_direction_input(plat->gpio_hostwake); host_wake=gpio_get_value(bt_lpm.gpio_host_wake); irq = gpio_to_irq(plat->gpio_hostwake); bt_lpm.host_wake_irq=irq; #ifdef BRCM_WAKELOCKTIMEOUT hrtimer_init(&bt_lpm.check_hostwakeup_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); bt_lpm.check_hostwakeup_delay = ktime_set(5, 0); /* 5 sec */ bt_lpm.check_hostwakeup_timer.function = check_hostwakeup; set_irq_type(irq, IRQ_TYPE_EDGE_RISING); ret = request_irq(irq, host_wake_isr, 0, "bt host_wake", NULL); #else ret = request_irq(irq, host_wake_isr, IRQF_TRIGGER_HIGH, "bt host_wake", NULL); #endif printk(KERN_ERR "BRCM_LPM: irq=%d ret=%d HOST_WAKE=%d\n",irq,ret,host_wake); #endif return 0; fail_alloc: rfkill_destroy(rfkill); fail_gpio: gpio_free(plat->gpio_btwake); return rc; }
int flashlight_control(int mode) { int ret = 0; uint32_t flash_ns = ktime_to_ns(ktime_get()); #if 0 /* disable flash_adj_value check now */ if (this_fl_str->flash_adj_value == 2) { printk(KERN_WARNING "%s: force disable function!\n", __func__); return -EIO; } #endif spin_lock_irqsave(&this_fl_str->spin_lock, this_fl_str->spinlock_flags); if (this_fl_str->mode_status == FL_MODE_FLASH) { hrtimer_cancel(&this_fl_str->timer); wake_unlock(&this_fl_str->wake_lock); flashlight_turn_off(); } switch (mode) { case FL_MODE_OFF: flashlight_turn_off(); break; case FL_MODE_TORCH: flashlight_hw_command(3, 1); flashlight_hw_command(0, 15); flashlight_hw_command(2, 4); this_fl_str->mode_status = FL_MODE_TORCH; this_fl_str->fl_lcdev.brightness = LED_HALF; break; case FL_MODE_TORCH_LED_A: flashlight_hw_command(3, 1); flashlight_hw_command(0, 15); flashlight_hw_command(2, 3); this_fl_str->mode_status = FL_MODE_TORCH_LED_A; this_fl_str->fl_lcdev.brightness = 1; break; case FL_MODE_TORCH_LED_B: flashlight_hw_command(3, 1); flashlight_hw_command(0, 15); flashlight_hw_command(2, 2); this_fl_str->mode_status = FL_MODE_TORCH_LED_B; this_fl_str->fl_lcdev.brightness = 2; break; case FL_MODE_FLASH: flashlight_hw_command(2, 4); gpio_direction_output(this_fl_str->gpio_flash, 1); this_fl_str->mode_status = FL_MODE_FLASH; this_fl_str->fl_lcdev.brightness = LED_FULL; hrtimer_start(&this_fl_str->timer, ktime_set(this_fl_str->flash_sw_timeout_ms / 1000, (this_fl_str->flash_sw_timeout_ms % 1000) * NSEC_PER_MSEC), HRTIMER_MODE_REL); wake_lock(&this_fl_str->wake_lock); break; case FL_MODE_PRE_FLASH: flashlight_hw_command(3, 1); flashlight_hw_command(0, 9); flashlight_hw_command(2, 4); this_fl_str->mode_status = FL_MODE_PRE_FLASH; this_fl_str->fl_lcdev.brightness = LED_HALF + 1; break; case FL_MODE_TORCH_LEVEL_1: flashlight_hw_command(3, 8); flashlight_hw_command(0, 15); flashlight_hw_command(2, 4); this_fl_str->mode_status = FL_MODE_TORCH_LEVEL_1; this_fl_str->fl_lcdev.brightness = LED_HALF - 2; break; case FL_MODE_TORCH_LEVEL_2: flashlight_hw_command(3, 4); flashlight_hw_command(0, 15); flashlight_hw_command(2, 4); this_fl_str->mode_status = FL_MODE_TORCH_LEVEL_2; this_fl_str->fl_lcdev.brightness = LED_HALF - 1; break; case FL_MODE_DEATH_RAY: pr_info("%s: death ray\n", __func__); hrtimer_cancel(&this_fl_str->timer); gpio_direction_output(this_fl_str->gpio_flash, 0); udelay(40); gpio_direction_output(this_fl_str->gpio_flash, 1); this_fl_str->mode_status = 0; this_fl_str->fl_lcdev.brightness = 3; wake_lock(&this_fl_str->wake_lock); break; default: printk(KERN_ERR "%s: unknown flash_light flags: %d\n", __func__, mode); ret = -EINVAL; break; } printk(KERN_DEBUG "%s: mode: %d, %u\n", FLASHLIGHT_NAME, mode, flash_ns/(1000*1000)); spin_unlock_irqrestore(&this_fl_str->spin_lock, this_fl_str->spinlock_flags); return ret; }
static void msmfb_pan_update(struct fb_info *info, uint32_t left, uint32_t top, uint32_t eright, uint32_t ebottom, uint32_t yoffset, int pan_display) { struct msmfb_info *msmfb = info->par; struct msm_panel_data *panel = msmfb->panel; unsigned long irq_flags; int sleeping; int retry = 1; DLOG(SHOW_UPDATES, "update %d %d %d %d %d %d\n", left, top, eright, ebottom, yoffset, pan_display); restart: spin_lock_irqsave(&msmfb->update_lock, irq_flags); /* if we are sleeping, on a pan_display wait 10ms (to throttle back * drawing otherwise return */ if (msmfb->sleeping == SLEEPING) { DLOG(SUSPEND_RESUME, "drawing while asleep\n"); spin_unlock_irqrestore(&msmfb->update_lock, irq_flags); if (pan_display) wait_event_interruptible_timeout(msmfb->frame_wq, msmfb->sleeping != SLEEPING, HZ/10); return; } sleeping = msmfb->sleeping; /* on a full update, if the last frame has not completed, wait for it */ if ((pan_display && msmfb->frame_requested != msmfb->frame_done) || sleeping == UPDATING) { int ret; spin_unlock_irqrestore(&msmfb->update_lock, irq_flags); ret = wait_event_interruptible_timeout(msmfb->frame_wq, msmfb->frame_done == msmfb->frame_requested && msmfb->sleeping != UPDATING, 5 * HZ); if (ret <= 0 && (msmfb->frame_requested != msmfb->frame_done || msmfb->sleeping == UPDATING)) { if (retry && panel->request_vsync && (sleeping == AWAKE)) { panel->request_vsync(panel, &msmfb->vsync_callback); retry = 0; printk(KERN_WARNING "msmfb_pan_display timeout " "rerequest vsync\n"); } else { printk(KERN_WARNING "msmfb_pan_display timeout " "waiting for frame start, %d %d\n", msmfb->frame_requested, msmfb->frame_done); return; } } goto restart; } msmfb->frame_requested++; /* if necessary, update the y offset, if this is the * first full update on resume, set the sleeping state */ if (pan_display) { msmfb->yoffset = yoffset; if (left == 0 && top == 0 && eright == info->var.xres && ebottom == info->var.yres) { if (sleeping == WAKING) { msmfb->update_frame = msmfb->frame_requested; DLOG(SUSPEND_RESUME, "full update starting\n"); msmfb->sleeping = UPDATING; } } } /* set the update request */ if (left < msmfb->update_info.left) msmfb->update_info.left = left; if (top < msmfb->update_info.top) msmfb->update_info.top = top; if (eright > msmfb->update_info.eright) msmfb->update_info.eright = eright; if (ebottom > msmfb->update_info.ebottom) msmfb->update_info.ebottom = ebottom; DLOG(SHOW_UPDATES, "update queued %d %d %d %d %d\n", msmfb->update_info.left, msmfb->update_info.top, msmfb->update_info.eright, msmfb->update_info.ebottom, msmfb->yoffset); spin_unlock_irqrestore(&msmfb->update_lock, irq_flags); /* if the panel is all the way on wait for vsync, otherwise sleep * for 16 ms (long enough for the dma to panel) and then begin dma */ msmfb->vsync_request_time = ktime_get(); if (panel->request_vsync && (sleeping == AWAKE)) { panel->request_vsync(panel, &msmfb->vsync_callback); } else { if (!hrtimer_active(&msmfb->fake_vsync)) { hrtimer_start(&msmfb->fake_vsync, ktime_set(0, NSEC_PER_SEC/60), HRTIMER_MODE_REL); } } }
static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = ktime_set(0, completion_nsec);

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}
static enum hrtimer_restart sbd_tx_timer_func(struct hrtimer *timer) { struct mem_link_device *mld; struct link_device *ld; struct modem_ctl *mc; struct sbd_link_device *sl; int i; bool need_schedule; u16 mask; unsigned long flags = 0; mld = container_of(timer, struct mem_link_device, sbd_tx_timer); ld = &mld->link_dev; mc = ld->mc; sl = &mld->sbd_link_dev; need_schedule = false; mask = 0; spin_lock_irqsave(&mc->lock, flags); if (unlikely(!ipc_active(mld))) { spin_unlock_irqrestore(&mc->lock, flags); goto exit; } spin_unlock_irqrestore(&mc->lock, flags); if (mld->link_active) { if (!mld->link_active(mld)) { need_schedule = true; goto exit; } } for (i = 0; i < sl->num_channels; i++) { struct sbd_ring_buffer *rb = sbd_id2rb(sl, i, TX); int ret; ret = tx_frames_to_rb(rb); if (unlikely(ret < 0)) { if (ret == -EBUSY || ret == -ENOSPC) { need_schedule = true; mask = MASK_SEND_DATA; continue; } else { modemctl_notify_event(MDM_CRASH_INVALID_RB); need_schedule = false; goto exit; } } if (ret > 0) mask = MASK_SEND_DATA; if (!skb_queue_empty(&rb->skb_q)) need_schedule = true; } if (!need_schedule) { for (i = 0; i < sl->num_channels; i++) { struct sbd_ring_buffer *rb; rb = sbd_id2rb(sl, i, TX); if (!rb_empty(rb)) { need_schedule = true; break; } } } if (mask) { spin_lock_irqsave(&mc->lock, flags); if (unlikely(!ipc_active(mld))) { spin_unlock_irqrestore(&mc->lock, flags); need_schedule = false; goto exit; } send_ipc_irq(mld, mask2int(mask)); spin_unlock_irqrestore(&mc->lock, flags); } exit: if (need_schedule) { ktime_t ktime = ktime_set(0, ms2ns(TX_PERIOD_MS)); hrtimer_start(timer, ktime, HRTIMER_MODE_REL); } return HRTIMER_NORESTART; }
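Both tx_timer_func() and sbd_tx_timer_func() above re-arm themselves by calling hrtimer_start() from inside the handler and returning HRTIMER_NORESTART. The other common idiom, used by ev3_output_port_timer_callback() earlier in this section, advances the timer's own expiry and lets the core re-arm it. A minimal sketch of that alternative, assuming the TX_PERIOD_MS constant and ms2ns() helper from the snippets above; the callback name is hypothetical.

/* Sketch (hypothetical callback): forward-and-restart idiom for a periodic
 * hrtimer, roughly equivalent to re-calling hrtimer_start() in the handler. */
static enum hrtimer_restart example_tx_poll_cb(struct hrtimer *timer)
{
	/* ... flush pending TX frames here ... */
	hrtimer_forward_now(timer, ktime_set(0, ms2ns(TX_PERIOD_MS)));
	return HRTIMER_RESTART;	/* core re-arms at the forwarded expiry */
}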
static void optjoy_spi_work_func(struct work_struct *work)
{
	struct optjoy_drv_data *optjoy_data =
		container_of(work, struct optjoy_drv_data, work);
	uint8_t nx, ny, mot_val;
	u8 sq, pxsum, shu_up, shu_dn;
	u16 shutter;
	int8_t dx, dy;
	int i;
	unsigned int keycode = 0;
	bool check_env = false;
	u16 oj_sht_tbl[12] = {0, 500, 750, 1000, 1250, 1500, 1750, 2000, 2250, 2500, 2750, 3000};
	u8 oj_pxsum_tbl[12] = {0, 0, 40, 50, 60, 70, 80, 80, 80, 90, 90, 90};
	u8 oj_sq_tbl[12] = {0, 0, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35};

	/* reading motion */
	mot_val = optjoy_spi_read_byte(OJT_MOT);
	if (!(mot_val & 0x80) || lock_oj_event)
		return;

	sq = optjoy_spi_read_byte(OJT_SQ);
	shu_up = optjoy_spi_read_byte(OJT_SHUTTER_UP);
	shu_dn = optjoy_spi_read_byte(OJT_SHUTTER_DOWN);
	shutter = (shu_up << 8) | shu_dn;
	pxsum = optjoy_spi_read_byte(OJT_PIXEL_SUM);
	nx = optjoy_spi_read_byte(OJT_DELT_Y);
	ny = optjoy_spi_read_byte(OJT_DELT_X);

	/* accept the sample only if shutter, pixel sum and surface quality
	 * fall inside a valid band of the calibration tables */
	for (i = 1; i < 12; i++) {
		if (((oj_sht_tbl[i-1] <= shutter) && (shutter < oj_sht_tbl[i])) &&
		    ((oj_pxsum_tbl[i] <= pxsum) && (oj_sq_tbl[i] <= sq))) {
			gprintk("[OJ_KEY] valid environment\n");
			check_env = true;
			break;
		}
	}
	if (!check_env) {
		gprintk("[OJ_KEY] invalid environment\n");
		return;
	}

	/* sensor X/Y are swapped and Y is inverted relative to the key axes */
	dx = (int8_t)nx;
	dy = ((int8_t)ny) * (-1);
	sum_x = sum_x + dx;
	sum_y = sum_y + dy;
	gprintk("dx=%d, dy=%d, sum_x=%d, sum_y=%d\n", dx, dy, sum_x, sum_y);

	if (sum_x > SUM_X_THRESHOLD)
		keycode = SEC_KEY_DOWN;
	else if (sum_x < -SUM_X_THRESHOLD)
		keycode = SEC_KEY_UP;
	else if (sum_y > SUM_Y_THRESHOLD)
		keycode = SEC_KEY_LEFT;
	else if (sum_y < -SUM_Y_THRESHOLD)
		keycode = SEC_KEY_RIGHT;
	else
		keycode = 0;

	if (keycode) {
		input_report_key(optjoy_data->input_dev, keycode, 1);
		input_report_key(optjoy_data->input_dev, keycode, 0);
		input_sync(optjoy_data->input_dev);

		hrtimer_cancel(&optjoy_data->timer_touchlock);
		printk(KERN_DEBUG "[opt_joy] key code: %d (sum_x: %d, sum_y: %d)\n",
			keycode, sum_x, sum_y);
		sum_x = sum_y = 0;
		sending_oj_event = ACTIVE;
		hrtimer_start(&optjoy_data->timer_touchlock,
			      ktime_set(0, 500000000), HRTIMER_MODE_REL);
	}

	if (optjoy_data->use_irq)
		enable_irq(IRQ_OJT_INT);
}
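/*
 * The SPI reads above may sleep, so sampling is done from a workqueue rather
 * than directly in timer or IRQ context. A sketch of the polling-side glue,
 * i.e. the role optjoy_spi_timer_func() plays: queue the work and re-arm.
 * The body below is an assumption about its shape (demo_* names and the
 * 10 ms poll period are hypothetical), not the vendor code.
 */
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/workqueue.h>

#define DEMO_POLL_NS	(10 * NSEC_PER_MSEC)	/* assumed 10 ms poll period */

struct demo_oj {
	struct hrtimer timer;
	struct work_struct work;
	struct workqueue_struct *wq;
};

static enum hrtimer_restart demo_oj_timer_fn(struct hrtimer *timer)
{
	struct demo_oj *oj = container_of(timer, struct demo_oj, timer);

	/* sample the sensor in process context, where SPI I/O may sleep */
	queue_work(oj->wq, &oj->work);
	hrtimer_forward_now(timer, ktime_set(0, DEMO_POLL_NS));
	return HRTIMER_RESTART;
}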
/**
 * shm_write_msg() - write message to shared memory
 * @shrm: pointer to the shrm device information structure
 * @l2_header: L2 header
 * @addr: pointer to the message
 * @length: length of the message to be written
 *
 * This function is called from the net or char interface driver write
 * operation. Prior to calling this function the message is copied from the
 * user space buffer to the kernel buffer. Based on the L2 header, this
 * function routes the message to the respective channel and FIFO, then calls
 * the FIFO write function where the message is written to the physical device.
 */
int shm_write_msg(struct shrm_dev *shrm, u8 l2_header, void *addr, u32 length)
{
	u8 channel = 0;
	int ret;

	dev_dbg(shrm->dev, "%s IN\n", __func__);

	if (boot_state != BOOT_DONE) {
		dev_err(shrm->dev, "error: called before boot is done\n");
		ret = -ENODEV;
		goto out;
	}

	if ((l2_header == L2_HEADER_ISI) ||
	    (l2_header == L2_HEADER_RPC) ||
	    (l2_header == L2_HEADER_SECURITY) ||
	    (l2_header == L2_HEADER_COMMON_SIMPLE_LOOPBACK) ||
	    (l2_header == L2_HEADER_COMMON_ADVANCED_LOOPBACK) ||
	    (l2_header == L2_HEADER_IPCCTRL) ||
	    (l2_header == L2_HEADER_IPCDATA)) {
		channel = 0;
		if (shrm_common_tx_state == SHRM_SLEEP_STATE)
			shrm_common_tx_state = SHRM_PTR_FREE;
		else if (shrm_common_tx_state == SHRM_IDLE)
			shrm_common_tx_state = SHRM_PTR_FREE;
	} else if ((l2_header == L2_HEADER_AUDIO) ||
		   (l2_header == L2_HEADER_AUDIO_SIMPLE_LOOPBACK) ||
		   (l2_header == L2_HEADER_AUDIO_ADVANCED_LOOPBACK)) {
		if (shrm_audio_tx_state == SHRM_SLEEP_STATE)
			shrm_audio_tx_state = SHRM_PTR_FREE;
		else if (shrm_audio_tx_state == SHRM_IDLE)
			shrm_audio_tx_state = SHRM_PTR_FREE;
		channel = 1;
	} else {
		ret = -ENODEV;
		goto out;
	}

	ret = shm_write_msg_to_fifo(shrm, channel, l2_header, addr, length);
	if (ret < 0) {
		dev_err(shrm->dev, "write message to fifo failed\n");
		if (ret == -EAGAIN) {
			/* Start a timer so as to handle this gently */
			if (!atomic_read(&fifo_full)) {
				atomic_set(&fifo_full, 1);
				hrtimer_start(&fifo_full_timer,
					ktime_set(FIFO_FULL_TIMEOUT, 0),
					HRTIMER_MODE_REL);
			}
		}
		return ret;
	}
	/*
	 * notify only if the newly copied msg is the only unread one,
	 * otherwise a reading process is already ongoing
	 */
	if (is_the_only_one_unread_message(shrm, channel, length)) {
		/* Send Message Pending Notification to CMT */
		if (channel == 0)
			queue_work(shrm->shm_common_ch_wr_wq,
					&shrm->send_ac_msg_pend_notify_0);
		else
			queue_work(shrm->shm_audio_ch_wr_wq,
					&shrm->send_ac_msg_pend_notify_1);
	}
	dev_dbg(shrm->dev, "%s OUT\n", __func__);
	return 0;

out:
	return ret;
}
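/*
 * The expiry side of fifo_full_timer is not part of this listing. A plausible
 * shape is sketched below, assuming the handler simply clears the fifo_full
 * flag so that the next shm_write_msg() attempt retries the FIFO. The names
 * mirror the ones above, but the body is an assumption, not the real handler.
 */
#include <linux/hrtimer.h>
#include <linux/atomic.h>

static atomic_t demo_fifo_full = ATOMIC_INIT(0);

static enum hrtimer_restart demo_fifo_full_timer_fn(struct hrtimer *timer)
{
	/* back-off window elapsed: allow writers to try the FIFO again */
	atomic_set(&demo_fifo_full, 0);
	return HRTIMER_NORESTART;
}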
static int __devinit optjoy_spi_probe(struct platform_device *pdev)
{
	struct optjoy_drv_data *optjoy_data;
	int ret = 0;

	gprintk("start.\n");

	optjoy_gpio_init();

	optjoy_workqueue = create_singlethread_workqueue("optjoy_workqueue");
	if (optjoy_workqueue == NULL) {
		printk(KERN_ERR "[optjoy_spi_probe] create_singlethread_workqueue failed.\n");
		ret = -ENOMEM;
		goto err_create_workqueue_failed;
	}

	/* alloc driver data */
	optjoy_data = kzalloc(sizeof(struct optjoy_drv_data), GFP_KERNEL);
	if (!optjoy_data) {
		printk(KERN_ERR "[optjoy_spi_probe] kzalloc error\n");
		ret = -ENOMEM;
		goto err_alloc_data_failed;
	}

	optjoy_data->input_dev = input_allocate_device();
	if (optjoy_data->input_dev == NULL) {
		printk(KERN_ERR "[optjoy_spi_probe] Failed to allocate input device\n");
		ret = -ENOMEM;
		goto err_input_dev_alloc_failed;
	}

	/* workqueue initialize */
	INIT_WORK(&optjoy_data->work, optjoy_spi_work_func);

	optjoy_hw_init();

	optjoy_data->input_dev->name = "optjoy_device";
	/* optjoy_data->input_dev->phys = "optjoy_device/input2"; */

	set_bit(EV_KEY, optjoy_data->input_dev->evbit);
	set_bit(SEC_KEY_LEFT, optjoy_data->input_dev->keybit);
	set_bit(SEC_KEY_RIGHT, optjoy_data->input_dev->keybit);
	set_bit(SEC_KEY_UP, optjoy_data->input_dev->keybit);
	set_bit(SEC_KEY_DOWN, optjoy_data->input_dev->keybit);

	optjoy_data->input_dev->keycode = optjoy_keycode;

	ret = input_register_device(optjoy_data->input_dev);
	if (ret) {
		printk(KERN_ERR "[optjoy_spi_probe] Unable to register %s input device\n",
			optjoy_data->input_dev->name);
		goto err_input_register_device_failed;
	}

#if 0	/* TEMP: interrupt mode disabled for now */
	/* IRQ setting */
	ret = request_irq(IRQ_OJT_INT, optjoy_spi_irq_handler, 0,
			  "optjoy_device", optjoy_data);
	if (!ret) {
		optjoy_data->use_irq = 1;
		gprintk("Start INTERRUPT mode!\n");
	} else {
		gprintk("[optjoy_spi_probe] unable to request_irq\n");
		optjoy_data->use_irq = 0;
	}
#else
	optjoy_data->use_irq = 0;
#endif

	/* timer init & start (polling mode only) */
	if (!optjoy_data->use_irq) {
		hrtimer_init(&optjoy_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		optjoy_data->timer.function = optjoy_spi_timer_func;
		hrtimer_start(&optjoy_data->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
	}

	hrtimer_init(&optjoy_data->timer_touchlock, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	optjoy_data->timer_touchlock.function = optjoy_spi_timer_func_touchlock;

#ifdef CONFIG_HAS_EARLYSUSPEND
	optjoy_data->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 2;
	optjoy_data->early_suspend.suspend = optjoy_spi_early_suspend;
	optjoy_data->early_suspend.resume = optjoy_spi_late_resume;
	register_early_suspend(&optjoy_data->early_suspend);
#endif /* CONFIG_HAS_EARLYSUSPEND */

	return 0;

err_input_register_device_failed:
	input_free_device(optjoy_data->input_dev);
err_input_dev_alloc_failed:
	kfree(optjoy_data);
err_alloc_data_failed:
	destroy_workqueue(optjoy_workqueue);
err_create_workqueue_failed:
	return ret;
}
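/*
 * The probe above acquires a workqueue, an input device and two hrtimers.
 * A hedged sketch of the matching remove path follows; the vendor driver's
 * actual teardown is not shown in this listing, so this is only an assumption
 * about what would need to be undone, in reverse order of acquisition, and it
 * assumes the probe also stored its private data with platform_set_drvdata().
 */
static int demo_optjoy_remove(struct platform_device *pdev)
{
	struct optjoy_drv_data *optjoy_data = platform_get_drvdata(pdev);

	hrtimer_cancel(&optjoy_data->timer_touchlock);
	if (!optjoy_data->use_irq)
		hrtimer_cancel(&optjoy_data->timer);
	cancel_work_sync(&optjoy_data->work);
	destroy_workqueue(optjoy_workqueue);
	input_unregister_device(optjoy_data->input_dev);
	kfree(optjoy_data);
	return 0;
}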
static int __init msm_serial_probe(struct platform_device *pdev)
{
	struct msm_port *msm_port;
	struct resource *resource;
	struct uart_port *port;
#if defined(CONFIG_KERNEL_MOTOROLA) || defined(CONFIG_MACH_CALGARY)
	struct vreg *vreg;
#endif

	if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
		return -ENXIO;

	printk(KERN_INFO "msm_serial: detected port #%d\n", pdev->id);

#if defined(CONFIG_KERNEL_MOTOROLA) || defined(CONFIG_MACH_CALGARY)
	/* Calgary uses VREG_USIM (RUIM1) for the UART3 block */
	if (pdev->id == 2) {
		vreg = vreg_get(0, "ruim");
		if (IS_ERR(vreg))
			printk(KERN_ERR "%s: vreg get failed for VREG_RUIM\n",
				__func__);
		else if (vreg_set_level(vreg, 2200))
			printk(KERN_ERR "%s: vreg set level failed for VREG_RUIM\n",
				__func__);
		else if (vreg_enable(vreg))
			printk(KERN_ERR "%s: vreg enable failed for VREG_RUIM\n",
				__func__);
		else
			printk(KERN_INFO "%s: VREG_RUIM enabled for RS232\n",
				__func__);
	}
#endif

	port = get_port_from_line(pdev->id);
	port->dev = &pdev->dev;
	msm_port = UART_TO_MSM(port);

	msm_port->clk = clk_get(&pdev->dev, "uart_clk");
	if (unlikely(IS_ERR(msm_port->clk)))
		return PTR_ERR(msm_port->clk);
	port->uartclk = clk_get_rate(msm_port->clk);

	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(!resource))
		return -ENXIO;
	port->mapbase = resource->start;

	port->irq = platform_get_irq(pdev, 0);
	if (unlikely(port->irq < 0))
		return -ENXIO;

	platform_set_drvdata(pdev, port);

	if (unlikely(set_irq_wake(port->irq, 1)))
		return -ENXIO;

#ifdef CONFIG_SERIAL_MSM_RX_WAKEUP
	if (port->line == 0)	/* BT is serial device 0 */
		if (unlikely(set_irq_wake(MSM_GPIO_TO_INT(45), 1)))
			return -ENXIO;
#endif

#ifdef CONFIG_SERIAL_MSM_CLOCK_CONTROL
	msm_port->clk_state = MSM_CLK_PORT_OFF;
	hrtimer_init(&msm_port->clk_off_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	msm_port->clk_off_timer.function = msm_serial_clock_off;
	msm_port->clk_off_delay = ktime_set(0, 1000000);	/* 1 ms */
#endif

	return uart_add_one_port(&msm_uart_driver, port);
}
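/*
 * msm_serial_clock_off() itself is not shown in this listing. Under
 * CONFIG_SERIAL_MSM_CLOCK_CONTROL the pattern it implements is lazy clock
 * gating: once the port goes idle, a short hrtimer delay is armed, and the
 * clock is switched off only if the port is still idle when the timer fires.
 * A hedged, generic sketch of that pattern follows (demo_* names are
 * hypothetical, not the msm_serial implementation).
 */
#include <linux/hrtimer.h>
#include <linux/clk.h>

struct demo_uart {
	struct hrtimer clk_off_timer;
	ktime_t clk_off_delay;
	struct clk *clk;
	bool busy;
};

static enum hrtimer_restart demo_clock_off_fn(struct hrtimer *timer)
{
	struct demo_uart *du = container_of(timer, struct demo_uart,
					    clk_off_timer);

	if (du->busy) {
		/* still transferring: check again after another delay */
		hrtimer_forward_now(timer, du->clk_off_delay);
		return HRTIMER_RESTART;
	}
	clk_disable(du->clk);
	return HRTIMER_NORESTART;
}

static void demo_uart_idle(struct demo_uart *du)
{
	hrtimer_start(&du->clk_off_timer, du->clk_off_delay, HRTIMER_MODE_REL);
}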
/* * This function gets called when a POSIX.1b interval timer expires. It * is used as a callback from the kernel internal timer. The * run_timer_list code ALWAYS calls with interrupts on. * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers. */ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer) { struct k_itimer *timr; unsigned long flags; int si_private = 0; enum hrtimer_restart ret = HRTIMER_NORESTART; timr = container_of(timer, struct k_itimer, it.real.timer); spin_lock_irqsave(&timr->it_lock, flags); if (timr->it.real.interval.tv64 != 0) si_private = ++timr->it_requeue_pending; if (posix_timer_event(timr, si_private)) { /* * signal was not sent because of sig_ignor * we will not get a call back to restart it AND * it should be restarted. */ if (timr->it.real.interval.tv64 != 0) { ktime_t now = hrtimer_cb_get_time(timer); /* * FIXME: What we really want, is to stop this * timer completely and restart it in case the * SIG_IGN is removed. This is a non trivial * change which involves sighand locking * (sigh !), which we don't want to do late in * the release cycle. * * For now we just let timers with an interval * less than a jiffie expire every jiffie to * avoid softirq starvation in case of SIG_IGN * and a very small interval, which would put * the timer right back on the softirq pending * list. By moving now ahead of time we trick * hrtimer_forward() to expire the timer * later, while we still maintain the overrun * accuracy, but have some inconsistency in * the timer_gettime() case. This is at least * better than a starved softirq. A more * complex fix which solves also another related * inconsistency is already in the pipeline. */ #ifdef CONFIG_HIGH_RES_TIMERS { ktime_t kj = ktime_set(0, NSEC_PER_SEC / HZ); if (timr->it.real.interval.tv64 < kj.tv64) now = ktime_add(now, kj); } #endif timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, timr->it.real.interval); ret = HRTIMER_RESTART; ++timr->it_requeue_pending; } } unlock_timer(timr, flags); return ret; }
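/*
 * posix_timer_fn() relies on hrtimer_forward() to step over intervals that
 * expired while the signal could not be delivered, accumulating them as an
 * overrun count instead of firing once per missed period. A minimal sketch of
 * that idiom with a hypothetical periodic timer (demo_* names are not part of
 * the POSIX timer code):
 */
#include <linux/hrtimer.h>
#include <linux/ktime.h>

struct demo_periodic {
	struct hrtimer timer;
	ktime_t interval;
	unsigned long overruns;
};

static enum hrtimer_restart demo_periodic_fn(struct hrtimer *timer)
{
	struct demo_periodic *p = container_of(timer, struct demo_periodic,
					       timer);
	u64 missed;

	/* number of whole intervals the expiry had to be advanced to pass "now" */
	missed = hrtimer_forward(timer, hrtimer_cb_get_time(timer), p->interval);
	p->overruns += missed;

	return HRTIMER_RESTART;
}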
/* * row_get_ioprio_class_to_serve() - Return the next I/O priority * class to dispatch requests from * @rd: pointer to struct row_data * @force: flag indicating if forced dispatch * * This function returns the next I/O priority class to serve * {IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE}. * If there are no more requests in scheduler or if we're idling on some queue * IOPRIO_CLASS_NONE will be returned. * If idling is scheduled on a lower priority queue than the one that needs * to be served, it will be canceled. * */ static int row_get_ioprio_class_to_serve(struct row_data *rd, int force) { int i; int ret = IOPRIO_CLASS_NONE; if (!rd->nr_reqs[READ] && !rd->nr_reqs[WRITE]) { row_log(rd->dispatch_queue, "No more requests in scheduler"); goto check_idling; } /* First, go over the high priority queues */ for (i = 0; i < ROWQ_REG_PRIO_IDX; i++) { if (!list_empty(&rd->row_queues[i].fifo)) { if (hrtimer_active(&rd->rd_idle_data.hr_timer)) { if (hrtimer_try_to_cancel( &rd->rd_idle_data.hr_timer) >= 0) { row_log(rd->dispatch_queue, "Canceling delayed work on %d. RT pending", rd->rd_idle_data.idling_queue_idx); rd->rd_idle_data.idling_queue_idx = ROWQ_MAX_PRIO; } } if (row_regular_req_pending(rd) && (rd->reg_prio_starvation.starvation_counter >= rd->reg_prio_starvation.starvation_limit)) ret = IOPRIO_CLASS_BE; else if (row_low_req_pending(rd) && (rd->low_prio_starvation.starvation_counter >= rd->low_prio_starvation.starvation_limit)) ret = IOPRIO_CLASS_IDLE; else ret = IOPRIO_CLASS_RT; goto done; } } /* * At the moment idling is implemented only for READ queues. * If enabled on WRITE, this needs updating */ if (hrtimer_active(&rd->rd_idle_data.hr_timer)) { row_log(rd->dispatch_queue, "Delayed work pending. Exiting"); goto done; } check_idling: /* Check for (high priority) idling and enable if needed */ for (i = 0; i < ROWQ_REG_PRIO_IDX && !force; i++) { if (rd->row_queues[i].idle_data.begin_idling && row_queues_def[i].idling_enabled) goto initiate_idling; } /* Regular priority queues */ for (i = ROWQ_REG_PRIO_IDX; i < ROWQ_LOW_PRIO_IDX; i++) { if (list_empty(&rd->row_queues[i].fifo)) { /* We can idle only if this is not a forced dispatch */ if (rd->row_queues[i].idle_data.begin_idling && !force && row_queues_def[i].idling_enabled) goto initiate_idling; } else { if (row_low_req_pending(rd) && (rd->low_prio_starvation.starvation_counter >= rd->low_prio_starvation.starvation_limit)) ret = IOPRIO_CLASS_IDLE; else ret = IOPRIO_CLASS_BE; goto done; } } if (rd->nr_reqs[READ] || rd->nr_reqs[WRITE]) ret = IOPRIO_CLASS_IDLE; goto done; initiate_idling: hrtimer_start(&rd->rd_idle_data.hr_timer, ktime_set(0, rd->rd_idle_data.idle_time_ms * NSEC_PER_MSEC), HRTIMER_MODE_REL); rd->rd_idle_data.idling_queue_idx = i; row_log_rowq(rd, i, "Scheduled delayed work on %d. exiting", i); done: return ret; }
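/*
 * The idling logic above only clears its bookkeeping when
 * hrtimer_try_to_cancel() returns >= 0: 0 means the timer was not queued,
 * 1 means it was queued and has been removed, and -1 means the callback is
 * executing right now, so cancellation must not be assumed. A small sketch of
 * that return-value check in isolation (demo name, not the row scheduler's):
 */
#include <linux/hrtimer.h>

static bool demo_cancel_idle_timer(struct hrtimer *idle_timer)
{
	int ret;

	if (!hrtimer_active(idle_timer))
		return true;		/* nothing armed, nothing to do */

	ret = hrtimer_try_to_cancel(idle_timer);
	if (ret >= 0)
		return true;		/* 0: was not queued, 1: cancelled */

	return false;			/* -1: callback currently running */
}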
int acer_hs_butt_init(void)
{
	int ret = 0;

	hr = kzalloc(sizeof(struct hs_butt_data), GFP_KERNEL);
	if (!hr)
		return -ENOMEM;

	hr->btn_debounce_time = ktime_set(0, DEBOUNCE_TIME);
	hr->sdev.name = "acer_hs_butt";
	hr->det = GPIO_HS_DET;
	hr->butt = GPIO_HS_BUTT;
	hr->irq = MSM_GPIO_TO_INT(hr->butt);

	ret = switch_dev_register(&hr->sdev);
	if (ret < 0) {
		pr_err("switch_dev fail!\n");
		goto err_switch_dev_register;
	}
	pr_debug("### hs_butt_switch_dev success register ###\n");

	if (gpio_is_valid(hr->butt)) {
		ret = gpio_request(hr->butt, "hs_butt_detect");
		if (ret) {
			pr_err("%s: unable to request reset gpio %d\n",
				__func__, hr->butt);
			goto err_request_butt_gpio;
		}
		ret = gpio_direction_input(hr->butt);
		if (ret) {
			pr_err("%s: unable to set direction for gpio %d\n",
				__func__, hr->butt);
			goto err_set_butt_gpio_direction;
		}
	}

	/* init work queue and button debounce timer */
	INIT_WORK(&work, rpc_call_work);
	hrtimer_init(&hr->btn_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hr->btn_timer.function = button_event_timer_func;

	ret = request_irq(hr->irq, hs_butt_interrupt, IRQF_TRIGGER_FALLING,
			  "hs_butt", NULL);
	if (ret < 0) {
		pr_err("err_request_butt_irq fail!\n");
		goto err_request_butt_irq;
	}
	pr_debug("[HS-BUTT] IRQ_%d requested for hs_butt\n", hr->irq);

	pr_debug("[HS-BUTT] Probe done\n");
	return 0;

err_request_butt_irq:
err_set_butt_gpio_direction:
	gpio_free(hr->butt);
err_request_butt_gpio:
	switch_dev_unregister(&hr->sdev);
err_switch_dev_register:
	kfree(hr);
	pr_err("[HS-BUTT] Probe error\n");
	return ret;
}
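/*
 * hs_butt_interrupt() and button_event_timer_func() are not part of this
 * listing. The usual shape of such a pair is sketched below as an assumption:
 * the falling-edge IRQ only (re)arms the debounce hrtimer, and the timer
 * callback samples the GPIO once the contacts have settled. The demo_* names,
 * the input device and the KEY_MEDIA keycode are hypothetical.
 */
#include <linux/hrtimer.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/input.h>

struct demo_button {
	struct hrtimer debounce_timer;
	ktime_t debounce_time;
	int gpio;
	struct input_dev *input;
};

static irqreturn_t demo_button_irq(int irq, void *dev_id)
{
	struct demo_button *btn = dev_id;

	/* defer the GPIO read until the line has stopped bouncing */
	hrtimer_start(&btn->debounce_timer, btn->debounce_time,
		      HRTIMER_MODE_REL);
	return IRQ_HANDLED;
}

static enum hrtimer_restart demo_button_debounce_fn(struct hrtimer *timer)
{
	struct demo_button *btn =
		container_of(timer, struct demo_button, debounce_timer);
	int pressed = !gpio_get_value(btn->gpio);	/* active low */

	input_report_key(btn->input, KEY_MEDIA, pressed);
	input_sync(btn->input);
	return HRTIMER_NORESTART;
}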