/**
 * hdaps_set_power - enable or disable power to the accelerometer.
 * Returns zero on success and negative error code on failure. Can sleep.
 */
static int hdaps_set_power(int on)
{
	/* EC command 0x14 takes a single on/off argument byte */
	struct thinkpad_ec_row args =
		{ .mask = 0x0003, .val = {0x14, on?0x01:0x00} };
	/* only the status byte (val[0xF]) of the reply is checked */
	struct thinkpad_ec_row data = { .mask = 0x8000 };
	int ret = thinkpad_ec_read_row(&args, &data);
	if (ret)
		return ret;
	if (data.val[0xF] != 0x00)
		return -EIO; /* EC reported a non-zero status code */
	return 0;
}

/**
 * hdaps_set_ec_config - set accelerometer parameters.
 * @ec_rate: embedded controller sampling rate
 * @order: embedded controller running average filter order
 * (Normally we have @ec_rate = sampling_rate * oversampling_ratio.)
 * Returns zero on success and negative error code on failure. Can sleep.
 */
static int hdaps_set_ec_config(int ec_rate, int order)
{
	/* EC command 0x10: 16-bit rate, low byte first, then filter order */
	struct thinkpad_ec_row args = { .mask = 0x000F,
		.val = {0x10, (u8)ec_rate, (u8)(ec_rate>>8), order} };
	struct thinkpad_ec_row data = { .mask = 0x8000 };
	int ret = thinkpad_ec_read_row(&args, &data);
	printk(KERN_DEBUG "hdaps: setting ec_rate=%d, filter_order=%d\n",
	       ec_rate, order);
	if (ret)
		return ret;
	/* map the EC status byte to an errno */
	if (data.val[0xF] == 0x03) {
		printk(KERN_WARNING "hdaps: config param out of range\n");
		return -EINVAL;
	}
	if (data.val[0xF] == 0x06) {
		printk(KERN_WARNING "hdaps: config change already pending\n");
		return -EBUSY;
	}
	if (data.val[0xF] != 0x00) {
		printk(KERN_WARNING "hdaps: config change error, ret=%d\n",
		       data.val[0xF]);
		return -EIO;
	}
	return 0;
}

/**
 * hdaps_get_ec_config - get accelerometer parameters.
 * @ec_rate: embedded controller sampling rate
 * @order: embedded controller running average filter order
 * Returns zero on success and negative error code on failure. Can sleep.
 */
static int hdaps_get_ec_config(int *ec_rate, int *order)
{
	/* EC query 0x17/0x82 returns status, flags, rate and filter order */
	const struct thinkpad_ec_row args =
		{ .mask = 0x0003, .val = {0x17, 0x82} };
	struct thinkpad_ec_row data = { .mask = 0x801F };
	int ret = thinkpad_ec_read_row(&args, &data);
	if (ret)
		return ret;
	if (data.val[0xF] != 0x00)
		return -EIO;
	if (!(data.val[0x1] & 0x01))
		return -ENXIO; /* accelerometer polling not enabled */
	if (data.val[0x1] & 0x02)
		return -EBUSY; /* config change in progress, retry later */
	/* rate is returned little-endian across val[2] and val[3] */
	*ec_rate = data.val[0x2] | ((int)(data.val[0x3]) << 8);
	*order = data.val[0x4];
	return 0;
}

/**
 * hdaps_get_ec_mode - get EC accelerometer mode
 * Returns zero on success and negative error code on failure. Can sleep.
 */
static int hdaps_get_ec_mode(u8 *mode)
{
	const struct thinkpad_ec_row args = { .mask = 0x0001, .val = {0x13} };
	struct thinkpad_ec_row data = { .mask = 0x8002 };
	int ret = thinkpad_ec_read_row(&args, &data);
	if (ret)
		return ret;
	if (data.val[0xF] != 0x00) {
		printk(KERN_WARNING
		       "accelerometer not implemented (0x%02x)\n",
		       data.val[0xF]);
		return -EIO;
	}
	*mode = data.val[0x1];
	return 0;
}

/**
 * hdaps_check_ec - checks something about the EC.
 * Follows the clean-room spec for HDAPS; we don't know what it means.
 * Returns zero on success and negative error code on failure. Can sleep.
 */
static int hdaps_check_ec(void)
{
	const struct thinkpad_ec_row args =
		{ .mask = 0x0003, .val = {0x17, 0x81} };
	struct thinkpad_ec_row data = { .mask = 0x800E };
	int ret = thinkpad_ec_read_row(&args, &data);
	if (ret)
		return ret;
	/* two known-good reply signatures are accepted */
	if (!((data.val[0x1] == 0x00 && data.val[0x2] == 0x60) || /* cleanroom spec */
	      (data.val[0x1] == 0x01 && data.val[0x2] == 0x00)) || /* seen on T61 */
	    data.val[0x3] != 0x00 || data.val[0xF] != 0x00) {
		printk(KERN_WARNING
		       "hdaps_check_ec: bad response (0x%x,0x%x,0x%x,0x%x)\n",
		       data.val[0x1], data.val[0x2],
		       data.val[0x3], data.val[0xF]);
		return -EIO;
	}
	return 0;
}

/**
 * hdaps_device_init - initialize the accelerometer.
 *
 * Call several embedded controller functions to test and initialize the
 * accelerometer.
 * Returns zero on success and negative error code on failure. Can sleep.
 */

/* convenience macro: report where in the init sequence we failed */
#define FAILED_INIT(msg) printk(KERN_ERR "hdaps init failed at: %s\n", msg)

static int hdaps_device_init(void)
{
	int ret;
	u8 mode;

	ret = thinkpad_ec_lock();
	if (ret)
		return ret;

	if (hdaps_get_ec_mode(&mode)) {
		FAILED_INIT("hdaps_get_ec_mode failed");
		goto bad;
	}

	printk(KERN_DEBUG "hdaps: initial mode latch is 0x%02x\n", mode);
	if (mode == 0x00) {
		FAILED_INIT("accelerometer not available");
		goto bad;
	}

	if (hdaps_check_ec()) {
		FAILED_INIT("hdaps_check_ec failed");
		goto bad;
	}

	if (hdaps_set_power(1)) {
		FAILED_INIT("hdaps_set_power failed");
		goto bad;
	}

	if (hdaps_set_ec_config(sampling_rate*oversampling_ratio,
				running_avg_filter_order)) {
		FAILED_INIT("hdaps_set_ec_config failed");
		goto bad;
	}

	thinkpad_ec_invalidate();
	udelay(200);

	/* Just prefetch instead of reading, to avoid ~1sec delay on load */
	ret = thinkpad_ec_prefetch_row(&ec_accel_args);
	if (ret) {
		FAILED_INIT("initial prefetch failed");
		goto bad;
	}
	goto good;

bad:
	/* drop any cached EC state before reporting failure */
	thinkpad_ec_invalidate();
	ret = -ENXIO;
good:
	stale_readout = 1;
	thinkpad_ec_unlock();
	return ret;
}

/**
 * hdaps_device_shutdown - power off the accelerometer
 * Returns nonzero on failure. Can sleep.
 */
static int hdaps_device_shutdown(void)
{
	int ret;

	ret = hdaps_set_power(0);
	if (ret) {
		printk(KERN_WARNING "hdaps: cannot power off\n");
		return ret;
	}

	/* rate 0, filter order 1: stop EC sampling */
	ret = hdaps_set_ec_config(0, 1);
	if (ret)
		printk(KERN_WARNING "hdaps: cannot stop EC sampling\n");
	return ret;
}

/* Device model stuff */

static int hdaps_probe(struct platform_device *dev)
{
	int ret;

	ret = hdaps_device_init();
	if (ret)
		return ret;

	printk(KERN_INFO "hdaps: device successfully initialized.\n");
	return 0;
}

static int hdaps_suspend(struct platform_device *dev, pm_message_t state)
{
	/* Don't do hdaps polls until resume re-initializes the sensor. */
	del_timer_sync(&hdaps_timer);
	hdaps_device_shutdown(); /* ignore errors, effect is negligible */
	return 0;
}

static int hdaps_resume(struct platform_device *dev)
{
	int ret = hdaps_device_init();
	if (ret)
		return ret;

	/* restart polling only if someone still holds the device open */
	mutex_lock(&hdaps_users_mtx);
	if (hdaps_users)
		mod_timer(&hdaps_timer, jiffies + HZ/sampling_rate);
	mutex_unlock(&hdaps_users_mtx);
	return 0;
}

static struct platform_driver hdaps_driver = {
	.probe = hdaps_probe,
	.suspend = hdaps_suspend,
	.resume = hdaps_resume,
	.driver = {
		.name = "hdaps",
		.owner = THIS_MODULE,
	},
};

/**
 * hdaps_calibrate - set our "resting" values.
 * Does its own locking.
 */
static void hdaps_calibrate(void)
{
	needs_calibration = 1;
	hdaps_update();
	/* If that fails, the mousedev poll will take care of things later. */
}

/* Timer handler for updating the input device. Runs in softirq context,
 * so avoid lenghty or blocking operations.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)
static void hdaps_mousedev_poll(unsigned long unused)
#else
static void hdaps_mousedev_poll(struct timer_list *unused)
#endif
{
	int ret;

	stale_readout = 1;

	/* Cannot sleep. Try nonblockingly. If we fail, try again later. */
	if (thinkpad_ec_try_lock())
		goto keep_active;

	ret = __hdaps_update(1); /* fast update, we're in softirq context */
	thinkpad_ec_unlock();
	/* Any of "successful", "not yet ready" and "not prefetched"? */
	if (ret != 0 && ret != -EBUSY && ret != -ENODATA) {
		/* hard failure: stop polling (timer is not re-armed) */
		printk(KERN_ERR "hdaps: poll failed, disabling updates\n");
		return;
	}

keep_active:
	/* Even if we failed now, pos_x,y may have been updated earlier: */
	input_report_abs(hdaps_idev, ABS_X, pos_x - rest_x);
	input_report_abs(hdaps_idev, ABS_Y, pos_y - rest_y);
	input_sync(hdaps_idev);
	input_report_abs(hdaps_idev_raw, ABS_X, pos_x);
	input_report_abs(hdaps_idev_raw, ABS_Y, pos_y);
	input_sync(hdaps_idev_raw);
	mod_timer(&hdaps_timer, jiffies + HZ/sampling_rate);
}

/* Sysfs Files */

static ssize_t hdaps_position_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	int ret = hdaps_update();
	if (ret)
		return ret;
	return sprintf(buf, "(%d,%d)\n", pos_x, pos_y);
}
/*
 * rx_complete - URB completion handler for received packets.
 * Runs in interrupt context.  Classifies the URB status, accounts
 * errors, and either resubmits the URB for another read or hands the
 * skb back for cleanup via the deferred bottom half.
 */
static void rx_complete (struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *) urb->context;
	struct skb_data *entry = (struct skb_data *) skb->cb;
	struct usbnet *dev = entry->dev;
	int urb_status = urb->status;

	skb_put (skb, urb->actual_length);
	entry->state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	/* success */
	case 0:
		/* runt frames are counted and routed to cleanup */
		if (skb->len < dev->net->hard_header_len) {
			entry->state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			if (netif_msg_rx_err (dev))
				devdbg (dev, "rx length %d", skb->len);
		}
		break;

	/* stalls need manual reset. this is rare ... except that
	 * when going through USB 2.0 TTs, unplug appears this way.
	 * we avoid the highspeed version of the ETIMEDOUT/EILSEQ
	 * storm, recovering as needed.
	 */
	case -EPIPE:
		dev->net->stats.rx_errors++;
		usbnet_defer_kevent (dev, EVENT_RX_HALT);
		// FALLTHROUGH

	/* software-driven interface shutdown */
	case -ECONNRESET:	/* async unlink */
	case -ESHUTDOWN:	/* hardware gone */
		if (netif_msg_ifdown (dev))
			devdbg (dev, "rx shutdown, code %d", urb_status);
		goto block;

	/* we get controller i/o faults during khubd disconnect() delays.
	 * throttle down resubmits, to avoid log floods; just temporarily,
	 * so we still recover when the fault isn't a khubd delay.
	 */
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		if (!timer_pending (&dev->delay)) {
			mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
			if (netif_msg_link (dev))
				devdbg (dev, "rx throttle %d", urb_status);
		}
block:
		/* stash the urb on the entry and suppress the
		 * resubmit/free path at the bottom of this function */
		entry->state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		// FALLTHROUGH

	default:
		entry->state = rx_cleanup;
		dev->net->stats.rx_errors++;
		if (netif_msg_rx_err (dev))
			devdbg (dev, "rx status %d", urb_status);
		break;
	}

	defer_bh(dev, skb, &dev->rxq);

	/* urb is still ours only if the switch above didn't keep it */
	if (urb) {
		if (netif_running (dev->net)
		    && !test_bit (EVENT_RX_HALT, &dev->flags)) {
			rx_submit (dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb (urb);
	}
	if (netif_msg_rx_err (dev))
		devdbg (dev, "no read resubmitted");
}
/*
 * add_timer - start a timer at its preset ->expires value.
 * Thin wrapper that delegates to mod_timer().
 * NOTE(review): unlike the stock kernel add_timer(), this variant does
 * not assert that the timer is not already pending -- confirm callers
 * never re-add a pending timer.
 */
void add_timer(struct timer_list *t)
{
	mod_timer(t, t->expires);
}
static void po188_work_func(struct work_struct *work) { /*static int voltage[TIMES];//remember the last 3 value static int iVolIndex=0; static int iSumCount=0; int i = 0; int iSum = 0; int iAveVol = 0;*/ int iVoltage_new = 0; int iVoltage_last = 0; int ret = 0; ret = po188_read_adc(PO188_ADC_CHANNEL); if (ret < 0) //read value failed. invalid value, don't report. { PO188_ERRMSG("po188 have not read adc value"); msleep(5000); //sleep 5s //delay 1s to read. mod_timer(&po188_driver.timer, jiffies + msecs_to_jiffies(po188_driver.delay_time)); return; } //iVoltage_new = po188_get_converted_level(ret); iVoltage_new = ret; /* voltage[iVolIndex]=po188_driver.voltage_now; iVolIndex = (iVolIndex+1)%TIMES; if (iSumCount<TIMES) { iSumCount++; } for(i=0; i<iSumCount; i++)// count the current average value { iSum += voltage[i]; } //remember the last 3 average voltage value iAveVol = iSum / iSumCount; */ spin_lock(&po188_driver.s_lock); po188_driver.voltage_now = iVoltage_new; iVoltage_last = po188_driver.last_voltage; spin_unlock(&po188_driver.s_lock); //check if it's reach the threshold data and report to hal level /* if ( (po188_driver.last_voltage - iAveVol >= PO188_REPORT_THRESHOLD) || (iAveVol - po188_driver.last_voltage >= PO188_REPORT_THRESHOLD) ) { po188_report_data(); } */ // if ( (iVoltage_last - iVoltage_new >= PO188_REPORT_THRESHOLD) || // (iVoltage_new - iVoltage_last >= PO188_REPORT_THRESHOLD) ) { po188_report_data(iVoltage_new); spin_lock(&po188_driver.s_lock); po188_driver.last_voltage = iVoltage_new; spin_unlock(&po188_driver.s_lock); } mod_timer(&po188_driver.timer, jiffies + msecs_to_jiffies(po188_driver.delay_time)); }
/* Thermal check routine */ static inline void dovetemp_checktemp(unsigned long data) { int temp = dovetemp_read_temp(); u32 mc, mc2, reg; unsigned long ints; if (temp >= LIMIT_SHUTDOWN) { /* Shutdown as much as possible */ printk(KERN_CRIT "Reached maximum thermal allowed. Shutting down\n"); local_irq_save(ints); /* Disable external USB current limiter */ gpio_direction_output(1, 0); local_cpu_freq_scale(CPU_CLOCK_SLOW); /* eSata - 40mA */ writel(0x009B1215, DOVE_SB_REGS_VIRT_BASE + 0x0a2050); /* PCI-E clocks - 10mA */ writel(0x0003007f, DOVE_SB_REGS_VIRT_BASE + 0x0d0058); /* SMI power down phy - ~20mA on fast ethernet */ writel(0x00010800, DOVE_SB_REGS_VIRT_BASE + 0x072004); msleep(100); /* gigE MAC - digital part */ writel(0x00000002, DOVE_SB_REGS_VIRT_BASE + 0x0720b0); /* PCI-E clocks and gigE I/Os = 10mA */ writel(0x0003007f, DOVE_SB_REGS_VIRT_BASE + 0x0d0058); /* disable GPU and Video engine isolators */ reg = readl(PMU_ISO_CTRL_REG); reg &= ~(PMU_ISO_GPU_MASK | PMU_ISO_VIDEO_MASK); writel(reg, PMU_ISO_CTRL_REG); /* reset GPU and video engine units */ reg = readl(PMU_SW_RST_CTRL_REG); reg &= ~(PMU_SW_RST_GPU_MASK | PMU_SW_RST_VIDEO_MASK); writel(reg, PMU_SW_RST_CTRL_REG); /* power off GPU and video engine*/ reg = readl(PMU_PWR_SUPLY_CTRL_REG); reg |= (PMU_PWR_GPU_PWR_DWN_MASK | PMU_PWR_VPU_PWR_DWN_OFFS); writel(reg, PMU_PWR_SUPLY_CTRL_REG); /* Set PWM to no-select - Will reduce LED power consumption */ reg = readl(DOVE_SB_REGS_VIRT_BASE + 0x0d0208); reg &= ~0xf00; reg |= 0x100; writel(reg, DOVE_SB_REGS_VIRT_BASE + 0x0d0208); /* Set GPIO 18 to 1 */ reg = readl(DOVE_SB_REGS_VIRT_BASE + 0x0d0400); reg |= (1 << 18); writel(reg, DOVE_SB_REGS_VIRT_BASE + 0x0d0400); /* USB 0 and 1 phy shutdown */ writel(0xff000160, DOVE_SB_REGS_VIRT_BASE + 0x050400); writel(0xff000160, DOVE_SB_REGS_VIRT_BASE + 0x051400); /* Clock gating of unused south bridge units */ writel(0xff3800c4, DOVE_SB_REGS_VIRT_BASE + 0x0d0038); /* Disable all interrupts besides uart0 */ mc = 
readl(IRQ_MASK_LOW); mc2 = readl(IRQ_MASK_HIGH); writel(IRQ_MASK_LOW, 0x80); writel(IRQ_MASK_HIGH, 0x0); printk(KERN_CRIT "Just before entering deep sleep\n"); /* The following will put DDR in self-refresh mode */ #if 0 /* Currently unsupported */ /* Put DDR in self-refresh */ #endif __asm__ __volatile__("wfi\n"); /* Should never reach here */ panic("Should never reach here function\n"); local_irq_restore(ints); } if (temp >= LIMIT1_UP && !warning_flag) { warning_flag = 1; printk(KERN_WARNING "Die temperature exceeded limit (%d mili C)\n", temp); /* Limit CPU speed to DDR speed */ local_cpu_freq_scale(CPU_CLOCK_SLOW); } if (temp <= LIMIT1_DOWN && warning_flag) { warning_flag = 0; printk(KERN_INFO "Die temperature went below limit (%d mili C)\n", temp); /* Remove CPU speed limit */ local_cpu_freq_scale(CPU_CLOCK_TURBO); } if (warning_flag) { /* * Make sure external frequency scaling mechanism, if being * used, didn't change CPU freq. */ local_cpu_freq_scale(CPU_CLOCK_SLOW); } mod_timer(&thermal_check, jiffies + (HZ)); }
/*
 * corgikbd_probe - probe routine for the Corgi matrix keyboard.
 *
 * Allocates driver and input-device state, initializes the rescan and
 * hinge timers, registers the input device, arms the hinge-scan timer
 * and claims the key-sense GPIO interrupts.  Returns 0 on success or
 * -ENOMEM if either allocation fails.
 */
static int __init corgikbd_probe(struct platform_device *pdev)
{
	struct corgikbd *corgikbd;
	struct input_dev *input_dev;
	int i, err = -ENOMEM;

	corgikbd = kzalloc(sizeof(struct corgikbd), GFP_KERNEL);
	input_dev = input_allocate_device();
	if (!corgikbd || !input_dev)
		goto fail;

	platform_set_drvdata(pdev, corgikbd);

	corgikbd->input = input_dev;
	spin_lock_init(&corgikbd->lock);

	/* Init Keyboard rescan timer */
	init_timer(&corgikbd->timer);
	corgikbd->timer.function = corgikbd_timer_callback;
	corgikbd->timer.data = (unsigned long) corgikbd;

	/* Init Hinge Timer */
	init_timer(&corgikbd->htimer);
	corgikbd->htimer.function = corgikbd_hinge_timer;
	corgikbd->htimer.data = (unsigned long) corgikbd;

	corgikbd->suspend_jiffies=jiffies;

	memcpy(corgikbd->keycode, corgikbd_keycode, sizeof(corgikbd->keycode));

	input_dev->name = "Corgi Keyboard";
	input_dev->phys = "corgikbd/input0";
	input_dev->id.bustype = BUS_HOST;
	input_dev->id.vendor = 0x0001;
	input_dev->id.product = 0x0001;
	input_dev->id.version = 0x0100;
	input_dev->dev.parent = &pdev->dev;

	input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) |
		BIT_MASK(EV_PWR) | BIT_MASK(EV_SW);
	input_dev->keycode = corgikbd->keycode;
	input_dev->keycodesize = sizeof(unsigned char);
	input_dev->keycodemax = ARRAY_SIZE(corgikbd_keycode);

	for (i = 0; i < ARRAY_SIZE(corgikbd_keycode); i++)
		set_bit(corgikbd->keycode[i], input_dev->keybit);
	clear_bit(0, input_dev->keybit);
	/* lid, tablet-mode and headphone switches */
	set_bit(SW_LID, input_dev->swbit);
	set_bit(SW_TABLET_MODE, input_dev->swbit);
	set_bit(SW_HEADPHONE_INSERT, input_dev->swbit);

	err = input_register_device(corgikbd->input);
	if (err)
		goto fail;

	mod_timer(&corgikbd->htimer,
		  jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));

	/* Setup sense interrupts - RisingEdge Detect, sense lines as inputs */
	for (i = 0; i < CORGI_KEY_SENSE_NUM; i++) {
		pxa_gpio_mode(CORGI_GPIO_KEY_SENSE(i) | GPIO_IN);
		/* request failures are only logged; probe continues */
		if (request_irq(CORGI_IRQ_GPIO_KEY_SENSE(i), corgikbd_interrupt,
				IRQF_DISABLED | IRQF_TRIGGER_RISING,
				"corgikbd", corgikbd))
			printk(KERN_WARNING
			       "corgikbd: Can't get IRQ: %d!\n", i);
	}

	/* Set Strobe lines as outputs - set high */
	for (i = 0; i < CORGI_KEY_STROBE_NUM; i++)
		pxa_gpio_mode(CORGI_GPIO_KEY_STROBE(i) | GPIO_OUT | GPIO_DFLT_HIGH);

	/* Setup the headphone jack as an input */
	pxa_gpio_mode(CORGI_GPIO_AK_INT | GPIO_IN);

	return 0;

fail:
	/* both helpers accept NULL, so a partial allocation is safe here */
	input_free_device(input_dev);
	kfree(corgikbd);
	return err;
}
/*
 * s3c_keypad_probe - probe routine for the S3C matrix keypad.
 *
 * Maps the controller registers, enables its clock, configures the
 * keypad GPIOs, builds and registers the input device, initializes the
 * scan timer and (on old HW revisions) requests the keypad IRQ.
 * Returns 0 on success or a negative errno; all resources acquired up
 * to the failure point are released.
 *
 * BUGFIXES vs. original:
 *  - a failed input_register_device() jumped to a label that called
 *    free_irq() twice on IRQs that were never requested, through a
 *    still-uninitialized 'keypad_irq' pointer (undefined behavior);
 *  - the err_irq path called input_free_device() on a device that was
 *    already registered; input_unregister_device() must be used there;
 *  - a partial allocation failure leaked whichever of s3c_keypad /
 *    input_dev had been allocated successfully.
 */
static int __init s3c_keypad_probe(struct platform_device *pdev)
{
	struct resource *res, *keypad_mem, *keypad_irq;
	struct input_dev *input_dev;
	struct s3c_keypad *s3c_keypad;
	int ret, size;
	int key, code;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&pdev->dev,"no memory resource specified\n");
		return -ENOENT;
	}

	size = (res->end - res->start) + 1;
	keypad_mem = request_mem_region(res->start, size, pdev->name);
	if (keypad_mem == NULL) {
		dev_err(&pdev->dev, "failed to get memory region\n");
		ret = -ENOENT;
		goto err_req;
	}

	key_base = ioremap(res->start, size);
	if (key_base == NULL) {
		printk(KERN_ERR "Failed to remap register block\n");
		ret = -ENOMEM;
		goto err_map;
	}

	keypad_clock = clk_get(&pdev->dev, "keypad");
	if (IS_ERR(keypad_clock)) {
		dev_err(&pdev->dev, "failed to find keypad clock source\n");
		ret = PTR_ERR(keypad_clock);
		goto err_clk;
	}
	clk_enable(keypad_clock);

	s3c_keypad = kzalloc(sizeof(struct s3c_keypad), GFP_KERNEL);
	input_dev = input_allocate_device();
	if (!s3c_keypad || !input_dev) {
		ret = -ENOMEM;
		goto err_alloc;	/* frees whichever allocation succeeded */
	}

	platform_set_drvdata(pdev, s3c_keypad);
	s3c_keypad->dev = input_dev;

	writel(KEYIFCON_INIT, key_base+S3C_KEYIFCON);
	writel(KEYIFFC_DIV, key_base+S3C_KEYIFFC);

	/* Set GPIO Port for keypad mode and pull-up disable*/
	s3c_setup_keypad_cfg_gpio(KEYPAD_ROWS, KEYPAD_COLUMNS);

	writel(KEYIFCOL_CLEAR, key_base+S3C_KEYIFCOL);

	/* create and register the input driver */
	set_bit(EV_KEY, input_dev->evbit);
	/*Commenting the generation of repeat events*/
	//set_bit(EV_REP, input_dev->evbit);
	s3c_keypad->nr_rows = KEYPAD_ROWS;
	s3c_keypad->no_cols = KEYPAD_COLUMNS;
	s3c_keypad->total_keys = MAX_KEYPAD_NR;

	for (key = 0; key < s3c_keypad->total_keys; key++) {
		code = s3c_keypad->keycodes[key] = keypad_keycode[key];
		if (code <= 0)
			continue;
		set_bit(code & KEY_MAX, input_dev->keybit);
	}

#if defined(CONFIG_ARIES_VER_B3)
	set_bit(26 & KEY_MAX, input_dev->keybit);
#endif

	input_dev->name = DEVICE_NAME;
	input_dev->phys = "s3c-keypad/input0";
	input_dev->id.bustype = BUS_HOST;
	input_dev->id.vendor = 0x0001;
	input_dev->id.product = 0x0001;
	input_dev->id.version = 0x0001;
	input_dev->keycode = keypad_keycode;

	ret = input_register_device(input_dev);
	if (ret) {
		printk("Unable to register s3c-keypad input device!!!\n");
		goto err_alloc;
	}

	/* Scan timer init */
	init_timer(&keypad_timer);
	keypad_timer.function = keypad_timer_handler;
	keypad_timer.data = (unsigned long)s3c_keypad;

	/* For IRQ_KEYPAD */
	keypad_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (keypad_irq == NULL) {
		dev_err(&pdev->dev, "no irq resource specified\n");
		ret = -ENOENT;
		goto err_irq;
	}

	if (HWREV < 0x2) { /* yhkim block it for Verizon ATLAS. */
		ret = request_irq(keypad_irq->start, s3c_keypad_isr,
				  IRQF_DISABLED, DEVICE_NAME, (void *) pdev);
		if (ret) {
			printk("request_irq failed (IRQ_KEYPAD) !!!\n");
			ret = -EIO;
			goto err_irq;
		}

		keypad_timer.expires = jiffies + (HZ/10);
		if (is_timer_on == FALSE) {
			add_timer(&keypad_timer);
			is_timer_on = TRUE;
		} else {
			mod_timer(&keypad_timer,keypad_timer.expires);
		}
	}

	s3c_keygpio_isr_setup((void *)s3c_keypad);
	printk( DEVICE_NAME " Initialized\n");

	/* sysfs file failure is logged but not fatal */
	if (device_create_file(&pdev->dev, &dev_attr_key_pressed) < 0) {
		printk("%s s3c_keypad_probe\n",__FUNCTION__);
		pr_err("Failed to create device file(%s)!\n",
		       dev_attr_key_pressed.attr.name);
	}

	return 0;

err_irq:
	/* the input device is registered by now; unregistering it also
	 * frees it, so input_free_device() must NOT be called here */
	input_unregister_device(input_dev);
	kfree(s3c_keypad);
	goto err_clk_off;

err_alloc:
	input_free_device(input_dev);	/* both helpers accept NULL */
	kfree(s3c_keypad);
err_clk_off:
	clk_disable(keypad_clock);
	clk_put(keypad_clock);
err_clk:
	iounmap(key_base);
err_map:
	release_resource(keypad_mem);
	kfree(keypad_mem);
err_req:
	return ret;
}
/* * Assumption: * AC power can't be switched to USB w/o system reboot * and vice-versa */ static void state_machine_work(struct work_struct *work) { struct stmp3xxx_info *info = container_of(work, struct stmp3xxx_info, sm_work); mutex_lock(&info->sm_lock); handle_battery_voltage_changes(info); check_and_handle_5v_connection(info); if ((info->sm_5v_connection_status != _5v_connected_verified) || !(info->regulator)) { mod_timer(&info->sm_timer, jiffies + msecs_to_jiffies(100)); goto out; } /* if we made it here, we have a verified 5v connection */ if (is_ac_online()) { if (info->is_ac_online) goto done; /* ac supply connected */ dev_info(info->dev, "changed power connection to ac/5v.\n)"); dev_info(info->dev, "5v current limit set to %u.\n", NON_USB_5V_SUPPLY_CURRENT_LIMIT_MA); info->is_ac_online = 1; info->is_usb_online = 0; ddi_power_set_4p2_ilimit( NON_USB_5V_SUPPLY_CURRENT_LIMIT_MA); ddi_bc_SetCurrentLimit( NON_USB_5V_SUPPLY_CURRENT_LIMIT_MA /*mA*/); if (regulator_set_current_limit(info->regulator, 0, NON_USB_5V_SUPPLY_CURRENT_LIMIT_MA*1000)) { dev_err(info->dev, "reg_set_current(%duA) failed\n", NON_USB_5V_SUPPLY_CURRENT_LIMIT_MA*1000); } ddi_bc_SetEnable(); goto done; } if (!is_usb_online()) goto out; if (info->is_usb_online & USB_REG_SET) goto done; info->is_ac_online = 0; info->is_usb_online |= USB_ONLINE; if (!(info->is_usb_online & USB_N_SEND)) { info->is_usb_online |= USB_N_SEND; } dev_dbg(info->dev, "%s: charge current set to %dmA\n", __func__, POWERED_USB_5V_CURRENT_LIMIT_MA); if (regulator_set_current_limit(info->regulator, 0, POWERED_USB_5V_CURRENT_LIMIT_MA*1000)) { dev_err(info->dev, "reg_set_current(%duA) failed\n", POWERED_USB_5V_CURRENT_LIMIT_MA*1000); } else { ddi_bc_SetCurrentLimit(POWERED_USB_5V_CURRENT_LIMIT_MA/*mA*/); ddi_bc_SetEnable(); } if (info->is_usb_online & USB_SM_RESTART) { info->is_usb_online &= ~USB_SM_RESTART; ddi_bc_SetEnable(); } info->is_usb_online |= USB_REG_SET; dev_info(info->dev, "changed power connection to usb/5v 
present\n"); done: ddi_bc_StateMachine(); out: mutex_unlock(&info->sm_lock); }
/*
 * mpc8xxx_wdt_timer_ping - software keepalive for the MPC8xxx watchdog.
 * Feeds the watchdog and re-arms this timer at half the timeout period,
 * i.e. twice as often as strictly required, for safety margin.
 */
static void mpc8xxx_wdt_timer_ping(unsigned long arg)
{
	unsigned long interval = HZ * timeout_sec / 2;

	mpc8xxx_wdt_keepalive();
	mod_timer(&wdt_timer, jiffies + interval);
}
/*
 * tsc2005_setup - one-time device setup for the TSC2005 touchscreen.
 * @ts: driver state (SPI device already bound)
 * @pdata: board-supplied fudge factors, axis limits and ESD parameters
 *
 * Initializes timers and work items, allocates and registers the input
 * device, claims the touch IRQ, starts scanning and optionally arms the
 * ESD watchdog timer.  Returns 0 on success or a negative errno.
 *
 * Fixes vs. original:
 *  - dropped the redundant init_timer(&ts->penup_timer) call made right
 *    before setup_timer(), which performs the full initialization itself;
 *  - the error path after a failed input_register_device() now disables
 *    IRQ wake before freeing the IRQ, undoing set_irq_wake(..., 1).
 */
static int __devinit tsc2005_setup(struct tsc2005 *ts,
				   struct tsc2005_platform_data *pdata)
{
	int r;
	int x_max;
	int y_max;

	mutex_init(&ts->mutex);

	tsc2005_setup_spi_xfer(ts);

	setup_timer(&ts->penup_timer, tsc2005_penup_timer, (unsigned long)ts);
	INIT_WORK(&ts->penup_work, tsc2005_penup_work);

	/* board-specific tuning, with defaults for anything unset */
	ts->fudge_x = pdata->ts_x_fudge ? : 4;
	ts->fudge_y = pdata->ts_y_fudge ? : 8;
	ts->fudge_p = pdata->ts_pressure_fudge ? : 2;
	x_max = pdata->ts_x_max ? : MAX_12BIT;
	y_max = pdata->ts_y_max ? : MAX_12BIT;
	ts->p_max = pdata->ts_pressure_max ? : MAX_12BIT;
	ts->ts_pressure = 1200;
	ts->x_plate_ohm = pdata->ts_x_plate_ohm;
	ts->esd_timeout = pdata->esd_timeout_ms;
	ts->set_reset = pdata->set_reset;

	ts->idev = input_allocate_device();
	if (ts->idev == NULL)
		return -ENOMEM;
	ts->idev->name = "TSC2005 touchscreen";
	snprintf(ts->phys, sizeof(ts->phys), "%s/input-ts",
		 dev_name(&ts->spi->dev));
	ts->idev->phys = ts->phys;
	ts->idev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY);
	ts->idev->absbit[0] = BIT(ABS_X) | BIT(ABS_Y) | BIT(ABS_PRESSURE);
	ts->idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);

	input_set_abs_params(ts->idev, ABS_X, 0, x_max, ts->fudge_x, 0);
	input_set_abs_params(ts->idev, ABS_Y, 0, y_max, ts->fudge_y, 0);
	input_set_abs_params(ts->idev, ABS_PRESSURE, 0, ts->p_max,
			     ts->fudge_p, 0);

	r = request_threaded_irq(ts->spi->irq, tsc2005_irq_handler,
				 tsc2005_irq_thread, IRQF_TRIGGER_RISING,
				 "tsc2005", ts);
	if (r) {
		dev_err(&ts->spi->dev, "request_threaded_irq(): %d\n", r);
		goto err1;
	}
	set_irq_wake(ts->spi->irq, 1);

	r = input_register_device(ts->idev);
	if (r) {
		dev_err(&ts->spi->dev, "input_register_device(): %d\n", r);
		goto err2;
	}

	/* sysfs failure is non-fatal: the device still works */
	r = sysfs_create_group(&ts->spi->dev.kobj, &tsc2005_attr_group);
	if (r)
		dev_warn(&ts->spi->dev, "sysfs entry creation failed: %d\n", r);

	tsc2005_start_scan(ts);

	if (!ts->esd_timeout || !ts->set_reset)
		goto done;

	/* start the optional ESD watchdog */
	setup_timer(&ts->esd_timer, tsc2005_esd_timer, (unsigned long)ts);
	INIT_WORK(&ts->esd_work, tsc2005_esd_work);
	mod_timer(&ts->esd_timer,
		  round_jiffies(jiffies + msecs_to_jiffies(ts->esd_timeout)));

done:
	return 0;

err2:
	set_irq_wake(ts->spi->irq, 0);	/* undo the wake enable above */
	free_irq(ts->spi->irq, ts);
err1:
	input_free_device(ts->idev);
	return r;
}
/*
 * can_stat_update - per-second CAN statistics refresh (timer callback).
 *
 * Recomputes total and current rx/tx rates and match ratios for the
 * given network namespace, updates the recorded maxima, clears the
 * per-second deltas and re-arms itself one second later.  Counters are
 * reset (via can_init_stats) on user request, jiffies wrap, or when a
 * counter approaches the overflow range of calc_rate().
 */
void can_stat_update(struct timer_list *t)
{
	struct net *net = from_timer(net, t, can.can_stattimer);
	struct s_stats *can_stats = net->can.can_stats;
	unsigned long j = jiffies; /* snapshot */

	/* restart counting in timer context on user request */
	if (user_reset)
		can_init_stats(net);

	/* restart counting on jiffies overflow */
	if (j < can_stats->jiffies_init)
		can_init_stats(net);

	/* prevent overflow in calc_rate() */
	if (can_stats->rx_frames > (ULONG_MAX / HZ))
		can_init_stats(net);

	/* prevent overflow in calc_rate() */
	if (can_stats->tx_frames > (ULONG_MAX / HZ))
		can_init_stats(net);

	/* matches overflow - very improbable */
	if (can_stats->matches > (ULONG_MAX / 100))
		can_init_stats(net);

	/* calc total values */
	if (can_stats->rx_frames)
		can_stats->total_rx_match_ratio = (can_stats->matches * 100) /
			can_stats->rx_frames;

	can_stats->total_tx_rate = calc_rate(can_stats->jiffies_init, j,
					     can_stats->tx_frames);
	can_stats->total_rx_rate = calc_rate(can_stats->jiffies_init, j,
					     can_stats->rx_frames);

	/* calc current values */
	if (can_stats->rx_frames_delta)
		can_stats->current_rx_match_ratio =
			(can_stats->matches_delta * 100) /
			can_stats->rx_frames_delta;

	can_stats->current_tx_rate = calc_rate(0, HZ, can_stats->tx_frames_delta);
	can_stats->current_rx_rate = calc_rate(0, HZ, can_stats->rx_frames_delta);

	/* check / update maximum values */
	if (can_stats->max_tx_rate < can_stats->current_tx_rate)
		can_stats->max_tx_rate = can_stats->current_tx_rate;

	if (can_stats->max_rx_rate < can_stats->current_rx_rate)
		can_stats->max_rx_rate = can_stats->current_rx_rate;

	if (can_stats->max_rx_match_ratio < can_stats->current_rx_match_ratio)
		can_stats->max_rx_match_ratio = can_stats->current_rx_match_ratio;

	/* clear values for 'current rate' calculation */
	can_stats->tx_frames_delta = 0;
	can_stats->rx_frames_delta = 0;
	can_stats->matches_delta = 0;

	/* restart timer (one second) */
	mod_timer(&net->can.can_stattimer, round_jiffies(jiffies + HZ));
}
/*
 * tsc2005_irq_thread - threaded IRQ handler for touch samples.
 *
 * Reads one X/Y/Z1/Z2 sample over SPI, validates it, downsamples by
 * averaging TS_SAMPLES readings (after the initial pen-down event),
 * converts Z1/Z2 to a touch pressure, filters jitter against the last
 * reported rectangle and reports the event.  Also re-arms the pen-up
 * timer and, if configured, the ESD watchdog timer.
 */
static irqreturn_t tsc2005_irq_thread(int irq, void *_ts)
{
	struct tsc2005 *ts = _ts;
	unsigned int pressure, pressure_limit, inside_rect;
	u32 x;
	u32 y;
	u32 z1;
	u32 z2;

	mutex_lock(&ts->mutex);

	if (unlikely(ts->disable_depth))
		goto out;

	/* read the coordinates */
	spi_sync(ts->spi, &ts->spi_read_msg);
	x = ts->spi_x.spi_rx;
	y = ts->spi_y.spi_rx;
	z1 = ts->spi_z1.spi_rx;
	z2 = ts->spi_z2.spi_rx;

	/* validate position */
	if (unlikely(x > MAX_12BIT || y > MAX_12BIT))
		goto out;

	/* skip coords if the pressure components are out of range */
	if (unlikely(z1 == 0 || z2 > MAX_12BIT || z1 >= z2))
		goto out;

	/* skip point if this is a pen down with the exact same values as
	 * the value before pen-up - that implies SPI fed us stale data */
	if (!ts->pen_down && ts->in_x == x && ts->in_y == y &&
	    ts->in_z1 == z1 && ts->in_z2 == z2)
		goto out;

	/* At this point we are happy we have a valid and useful reading.
	 * Remember it for later comparisons. We may now begin downsampling */
	ts->in_x = x;
	ts->in_y = y;
	ts->in_z1 = z1;
	ts->in_z2 = z2;

	/* don't run average on the "pen down" event */
	if (ts->sample_sent) {
		ts->avg_x += x;
		ts->avg_y += y;
		ts->avg_z1 += z1;
		ts->avg_z2 += z2;

		if (++ts->sample_cnt < TS_SAMPLES)
			goto out;

		x = ts->avg_x / TS_SAMPLES;
		y = ts->avg_y / TS_SAMPLES;
		z1 = ts->avg_z1 / TS_SAMPLES;
		z2 = ts->avg_z2 / TS_SAMPLES;
	}

	/* reset the averaging accumulators for the next window */
	ts->sample_cnt = 0;
	ts->avg_x = 0;
	ts->avg_y = 0;
	ts->avg_z1 = 0;
	ts->avg_z2 = 0;

	/* compute touch pressure resistance using equation #1 */
	pressure = x * (z2 - z1) / z1;
	pressure = pressure * ts->x_plate_ohm / 4096;

	/* pen-down events get the stricter ts_pressure limit */
	pressure_limit = ts->sample_sent ? ts->p_max : ts->ts_pressure;
	if (unlikely(pressure > pressure_limit)) {
		/* printk(KERN_ERR "skipping ts event, pressure(%u) > pressure_limit(%u)\n", pressure, pressure_limit); */
		goto out;
	}

	/* Discard the event if it still is within the previous rect -
	 * unless the pressure is clearly harder, but then use previous
	 * x,y position. If any coordinate deviates enough, fudging
	 * of all three will still take place in the input layer.
	 */
	inside_rect = (ts->sample_sent &&
		       x > (int)ts->out_x - ts->fudge_x &&
		       x < (int)ts->out_x + ts->fudge_x &&
		       y > (int)ts->out_y - ts->fudge_y &&
		       y < (int)ts->out_y + ts->fudge_y);
	if (inside_rect)
		x = ts->out_x, y = ts->out_y;

	if (!inside_rect || pressure < (ts->out_p - ts->fudge_p)) {
		tsc2005_update_pen_state(ts, x, y, pressure);
		ts->sample_sent = 1;
		ts->out_x = x;
		ts->out_y = y;
		ts->out_p = pressure;
	}

	if (ts->sample_sent) {
		/* set the penup timer */
		mod_timer(&ts->penup_timer,
			  jiffies + msecs_to_jiffies(TSC2005_PENUP_TIME_MS));

		if (ts->esd_timeout) {
			/* update the watchdog timer */
			mod_timer(&ts->esd_timer, round_jiffies(jiffies +
				  msecs_to_jiffies(ts->esd_timeout)));
		}
	}

out:
	mutex_unlock(&ts->mutex);
	return IRQ_HANDLED;
}
/*
 * ieee80211_process_addba_resp - handle a received ADDBA Response frame.
 *
 * Validates the dialog token against the pending TX aggregation state,
 * stops the ADDBA response timer and, on a successful status with a
 * nonzero buffer size, records the negotiated parameters and activates
 * the aggregation session (and its optional inactivity timer).  On any
 * failure status the BA session is torn down as declined.  Runs under
 * sta->ampdu_mlme.mtx.
 */
void ieee80211_process_addba_resp(struct ieee80211_local *local,
				  struct sta_info *sta,
				  struct ieee80211_mgmt *mgmt,
				  size_t len)
{
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_txq *txq;
	u16 capab, tid;
	u8 buf_size;
	bool amsdu;

	/* unpack the capability field: A-MSDU bit, TID and buffer size */
	capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
	amsdu = capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK;
	tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
	buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
	buf_size = min(buf_size, local->hw.max_tx_aggregation_subframes);

	txq = sta->sta.txq[tid];
	if (!amsdu && txq)
		set_bit(IEEE80211_TXQ_NO_AMSDU, &to_txq_info(txq)->flags);

	mutex_lock(&sta->ampdu_mlme.mtx);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	if (!tid_tx)
		goto out;

	if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) {
		ht_dbg(sta->sdata, "wrong addBA response token, %pM tid %d\n",
		       sta->sta.addr, tid);
		goto out;
	}

	del_timer_sync(&tid_tx->addba_resp_timer);

	ht_dbg(sta->sdata, "switched off addBA timer for %pM tid %d\n",
	       sta->sta.addr, tid);

	/*
	 * addba_resp_timer may have fired before we got here, and
	 * caused WANT_STOP to be set. If the stop then was already
	 * processed further, STOPPING might be set.
	 */
	if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
	    test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		ht_dbg(sta->sdata,
		       "got addBA resp for %pM tid %d but we already gave up\n",
		       sta->sta.addr, tid);
		goto out;
	}

	/*
	 * IEEE 802.11-2007 7.3.1.14:
	 * In an ADDBA Response frame, when the Status Code field
	 * is set to 0, the Buffer Size subfield is set to a value
	 * of at least 1.
	 */
	if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
			== WLAN_STATUS_SUCCESS && buf_size) {
		if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED,
				     &tid_tx->state)) {
			/* ignore duplicate response */
			goto out;
		}

		tid_tx->buf_size = buf_size;
		tid_tx->amsdu = amsdu;

		if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))
			ieee80211_agg_tx_operational(local, sta, tid);

		sta->ampdu_mlme.addba_req_num[tid] = 0;

		/* arm the inactivity timer if the peer asked for a timeout */
		if (tid_tx->timeout) {
			mod_timer(&tid_tx->session_timer,
				  TU_TO_EXP_TIME(tid_tx->timeout));
			tid_tx->last_tx = jiffies;
		}

	} else {
		___ieee80211_stop_tx_ba_session(sta, tid, AGG_STOP_DECLINED);
	}

out:
	mutex_unlock(&sta->ampdu_mlme.mtx);
}
void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) { struct tid_ampdu_tx *tid_tx; struct ieee80211_local *local = sta->local; struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_ampdu_params params = { .sta = &sta->sta, .action = IEEE80211_AMPDU_TX_START, .tid = tid, .buf_size = 0, .amsdu = false, .timeout = 0, }; int ret; tid_tx = rcu_dereference_protected_tid_tx(sta, tid); /* * Start queuing up packets for this aggregation session. * We're going to release them once the driver is OK with * that. */ clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state); ieee80211_agg_stop_txq(sta, tid); /* * Make sure no packets are being processed. This ensures that * we have a valid starting sequence number and that in-flight * packets have been flushed out and no packets for this TID * will go into the driver during the ampdu_action call. */ synchronize_net(); params.ssn = sta->tid_seq[tid] >> 4; ret = drv_ampdu_action(local, sdata, ¶ms); if (ret) { ht_dbg(sdata, "BA request denied - HW unavailable for %pM tid %d\n", sta->sta.addr, tid); spin_lock_bh(&sta->lock); ieee80211_agg_splice_packets(sdata, tid_tx, tid); ieee80211_assign_tid_tx(sta, tid, NULL); ieee80211_agg_splice_finish(sdata, tid); spin_unlock_bh(&sta->lock); ieee80211_agg_start_txq(sta, tid, false); kfree_rcu(tid_tx, rcu_head); return; } /* activate the timer for the recipient's addBA response */ mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL); ht_dbg(sdata, "activated addBA response timer on %pM tid %d\n", sta->sta.addr, tid); spin_lock_bh(&sta->lock); sta->ampdu_mlme.last_addba_req_time[tid] = jiffies; sta->ampdu_mlme.addba_req_num[tid]++; spin_unlock_bh(&sta->lock); /* send AddBA request */ ieee80211_send_addba_request(sdata, sta->sta.addr, tid, tid_tx->dialog_token, params.ssn, IEEE80211_MAX_AMPDU_BUF, tid_tx->timeout); } /* * After accepting the AddBA Response we activated a timer, * resetting it after each frame that we send. 
*/ static void sta_tx_agg_session_timer_expired(unsigned long data) { /* not an elegant detour, but there is no choice as the timer passes * only one argument, and various sta_info are needed here, so init * flow in sta_info_create gives the TID as data, while the timer_to_id * array gives the sta through container_of */ u8 *ptid = (u8 *)data; u8 *timer_to_id = ptid - *ptid; struct sta_info *sta = container_of(timer_to_id, struct sta_info, timer_to_tid[0]); struct tid_ampdu_tx *tid_tx; unsigned long timeout; rcu_read_lock(); tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[*ptid]); if (!tid_tx || test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { rcu_read_unlock(); return; } timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout); if (time_is_after_jiffies(timeout)) { mod_timer(&tid_tx->session_timer, timeout); rcu_read_unlock(); return; } rcu_read_unlock(); ht_dbg(sta->sdata, "tx session timer expired on %pM tid %d\n", sta->sta.addr, (u16)*ptid); ieee80211_stop_tx_ba_session(&sta->sta, *ptid); }
static void on_receive_block(struct r3964_info *pInfo) { unsigned int length; struct r3964_client_info *pClient; struct r3964_block_header *pBlock; length = pInfo->rx_position; /* compare byte checksum characters: */ if (pInfo->flags & R3964_BCC) { if (pInfo->bcc != pInfo->last_rx) { TRACE_PE("checksum error - got %x but expected %x", pInfo->last_rx, pInfo->bcc); pInfo->flags |= R3964_CHECKSUM; } } /* check for errors (parity, overrun,...): */ if (pInfo->flags & R3964_ERROR) { TRACE_PE("on_receive_block - transmission failed error %x", pInfo->flags & R3964_ERROR); put_char(pInfo, NAK); flush(pInfo); if (pInfo->nRetry < R3964_MAX_RETRIES) { pInfo->state = R3964_WAIT_FOR_RX_REPEAT; pInfo->nRetry++; mod_timer(&pInfo->tmr, jiffies + R3964_TO_RX_PANIC); } else { TRACE_PE("on_receive_block - failed after max retries"); pInfo->state = R3964_IDLE; } return; } /* received block; submit DLE: */ put_char(pInfo, DLE); flush(pInfo); del_timer_sync(&pInfo->tmr); TRACE_PS(" rx success: got %d chars", length); /* prepare struct r3964_block_header: */ pBlock = kmalloc(length + sizeof(struct r3964_block_header), GFP_KERNEL); TRACE_M("on_receive_block - kmalloc %p", pBlock); if (pBlock == NULL) return; pBlock->length = length; pBlock->data = ((unsigned char *)pBlock) + sizeof(struct r3964_block_header); pBlock->locks = 0; pBlock->next = NULL; pBlock->owner = NULL; memcpy(pBlock->data, pInfo->rx_buf, length); /* queue block into rx_queue: */ add_rx_queue(pInfo, pBlock); /* notify attached client processes: */ for (pClient = pInfo->firstClient; pClient; pClient = pClient->next) { if (pClient->sig_flags & R3964_SIG_DATA) { add_msg(pClient, R3964_MSG_DATA, length, R3964_OK, pBlock); } } wake_up_interruptible(&pInfo->tty->read_wait); pInfo->state = R3964_IDLE; trigger_transmit(pInfo); }
/* * We've spun up the disk and we're in laptop mode: schedule writeback * of all dirty data a few seconds from now. If the flush is already scheduled * then push it back - the user is still using the disk. */ void laptop_io_completion(struct backing_dev_info *info) { mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode); }
/*
 * receive_char - feed one received byte into the R3964 protocol state
 * machine.
 *
 * Dispatches on the current link state; STX/DLE/ETX/NAK control bytes
 * drive the handshake, everything else is payload in R3964_RECEIVING.
 * Note the deliberate gotos: start_receiving is shared between the IDLE
 * and TX_REQUEST (init-conflict) paths, and char_to_buf handles the
 * DLE-DLE escape sequence.
 */
static void receive_char(struct r3964_info *pInfo, const unsigned char c)
{
	switch (pInfo->state) {
	case R3964_TX_REQUEST:
		if (c == DLE) {
			TRACE_PS("TX_REQUEST - got DLE");

			pInfo->state = R3964_TRANSMITTING;
			pInfo->tx_position = 0;

			transmit_block(pInfo);
		} else if (c == STX) {
			/* both sides requested TX at once: init conflict */
			if (pInfo->nRetry == 0) {
				TRACE_PE("TX_REQUEST - init conflict");
				if (pInfo->priority == R3964_SLAVE) {
					goto start_receiving;
				}
			} else {
				TRACE_PE("TX_REQUEST - secondary init "
					"conflict!? Switching to SLAVE mode "
					"for next rx.");
				goto start_receiving;
			}
		} else {
			TRACE_PE("TX_REQUEST - char != DLE: %x", c);
			retry_transmit(pInfo);
		}
		break;
	case R3964_TRANSMITTING:
		if (c == NAK) {
			TRACE_PE("TRANSMITTING - got NAK");
			retry_transmit(pInfo);
		} else {
			TRACE_PE("TRANSMITTING - got invalid char");

			/* wait the character-delay time before retrying */
			pInfo->state = R3964_WAIT_ZVZ_BEFORE_TX_RETRY;
			mod_timer(&pInfo->tmr, jiffies + R3964_TO_ZVZ);
		}
		break;
	case R3964_WAIT_FOR_TX_ACK:
		if (c == DLE) {
			TRACE_PS("WAIT_FOR_TX_ACK - got DLE");
			remove_from_tx_queue(pInfo, R3964_OK);

			pInfo->state = R3964_IDLE;
			trigger_transmit(pInfo);
		} else {
			retry_transmit(pInfo);
		}
		break;
	case R3964_WAIT_FOR_RX_REPEAT:
		/* FALLTHROUGH */
	case R3964_IDLE:
		if (c == STX) {
			/* Prevent rx_queue from overflow: */
			if (pInfo->blocks_in_rx_queue >=
			    R3964_MAX_BLOCKS_IN_RX_QUEUE) {
				TRACE_PE("IDLE - got STX but no space in "
						"rx_queue!");
				pInfo->state = R3964_WAIT_FOR_RX_BUF;
				mod_timer(&pInfo->tmr,
					  jiffies + R3964_TO_NO_BUF);
				break;
			}
start_receiving:
			/* Ok, start receiving: */
			TRACE_PS("IDLE - got STX");
			pInfo->rx_position = 0;
			pInfo->last_rx = 0;
			pInfo->flags &= ~R3964_ERROR;
			pInfo->state = R3964_RECEIVING;
			mod_timer(&pInfo->tmr, jiffies + R3964_TO_ZVZ);
			pInfo->nRetry = 0;
			/* DLE acknowledges the peer's STX */
			put_char(pInfo, DLE);
			flush(pInfo);
			pInfo->bcc = 0;
		}
		break;
	case R3964_RECEIVING:
		if (pInfo->rx_position < RX_BUF_SIZE) {
			/* running XOR checksum over every received byte */
			pInfo->bcc ^= c;

			if (c == DLE) {
				/* DLE DLE is an escaped literal DLE byte */
				if (pInfo->last_rx == DLE) {
					pInfo->last_rx = 0;
					goto char_to_buf;
				}
				pInfo->last_rx = DLE;
				break;
			} else if ((c == ETX) && (pInfo->last_rx == DLE)) {
				/* DLE ETX terminates the block */
				if (pInfo->flags & R3964_BCC) {
					pInfo->state = R3964_WAIT_FOR_BCC;
					mod_timer(&pInfo->tmr,
						  jiffies + R3964_TO_ZVZ);
				} else {
					on_receive_block(pInfo);
				}
			} else {
				pInfo->last_rx = c;
char_to_buf:
				pInfo->rx_buf[pInfo->rx_position++] = c;
				mod_timer(&pInfo->tmr, jiffies + R3964_TO_ZVZ);
			}
		}
		/* else: overflow-msg? BUF_SIZE>MTU; should not happen? */
		break;
	case R3964_WAIT_FOR_BCC:
		/* this byte is the checksum; on_receive_block compares it */
		pInfo->last_rx = c;
		on_receive_block(pInfo);
		break;
	}
}
static int __init ws2401_dpi_spi_probe(struct spi_device *spi) { int ret = 0; struct ws2401_dpi *lcd = container_of(spi->dev.driver, struct ws2401_dpi, spi_drv.driver); dev_dbg(&spi->dev, "panel ws2401_dpi spi being probed\n"); dev_set_drvdata(&spi->dev, lcd); /* ws2401_dpi lcd panel uses 3-wire 9bits SPI Mode. */ spi->bits_per_word = 9; ret = spi_setup(spi); if (ret < 0) { dev_err(&spi->dev, "spi setup failed.\n"); goto out; } lcd->spi = spi; /* * if lcd panel was on from bootloader like u-boot then * do not lcd on. */ if (!lcd->pd->platform_enabled) { /* * if lcd panel was off from bootloader then * current lcd status is powerdown and then * it enables lcd panel. */ lcd->power = FB_BLANK_POWERDOWN; ws2401_dpi_power(lcd, FB_BLANK_UNBLANK); } else { lcd->power = FB_BLANK_UNBLANK; lcd->ldi_state = LDI_STATE_ON; } #ifdef ESD_OPERATION lcd->esd_workqueue = create_singlethread_workqueue("esd_workqueue"); if (!lcd->esd_workqueue) { dev_info(lcd->dev, "esd_workqueue create fail\n"); return -ENOMEM; } INIT_WORK(&(lcd->esd_work), esd_work_func); lcd->esd_port = ESD_PORT_NUM; if (request_threaded_irq(GPIO_TO_IRQ(lcd->esd_port), NULL, esd_interrupt_handler, IRQF_TRIGGER_RISING, "esd_interrupt", lcd)) { dev_info(lcd->dev, "esd irq request fail\n"); free_irq(GPIO_TO_IRQ(lcd->esd_port), NULL); lcd->lcd_connected = 0; } else { /* low is normal. On PBA esd_port coule be HIGH */ if (!gpio_get_value(lcd->esd_port)) { dev_info(lcd->dev, "esd irq enabled on booting\n"); lcd->esd_enable = 1; lcd->lcd_connected = 1; } else { dev_info(lcd->dev, "esd irq disabled on booting\n"); disable_irq(GPIO_TO_IRQ(lcd->esd_port)); lcd->esd_enable = 0; lcd->lcd_connected = 0; } } dev_info(lcd->dev, "%s esd work success\n"); #ifdef ESD_TEST pdpi = lcd; setup_timer(&lcd->esd_test_timer, est_test_timer_func, 0); mod_timer(&lcd->esd_test_timer, jiffies + (3*HZ)); #endif #endif dev_dbg(&spi->dev, "ws2401_dpi spi has been probed.\n"); out: return ret; }
static void cpufreq_interactive_timer(unsigned long data) { u64 delta_idle; u64 update_time; u64 *cpu_time_in_idle; u64 *cpu_idle_exit_time; struct timer_list *t; u64 now_idle = get_cpu_idle_time_us(data, &update_time); cpu_time_in_idle = &per_cpu(time_in_idle, data); cpu_idle_exit_time = &per_cpu(idle_exit_time, data); if (update_time == *cpu_idle_exit_time) return; delta_idle = cputime64_sub(now_idle, *cpu_time_in_idle); /* Scale up if there were no idle cycles since coming out of idle */ if (delta_idle == 0) { if (policy->cur == policy->max) return; if (nr_running() < 1) return; target_freq = policy->max; cpumask_set_cpu(data, &work_cpumask); queue_work(up_wq, &freq_scale_work); return; } /* * There is a window where if the cpu utlization can go from low to high * between the timer expiring, delta_idle will be > 0 and the cpu will * be 100% busy, preventing idle from running, and this timer from * firing. So setup another timer to fire to check cpu utlization. * Do not setup the timer if there is no scheduled work. */ t = &per_cpu(cpu_timer, data); if (!timer_pending(t) && nr_running() > 0) { *cpu_time_in_idle = get_cpu_idle_time_us( data, cpu_idle_exit_time); mod_timer(t, jiffies + 2); } if (policy->cur == policy->min) return; /* * Do not scale down unless we have been at this frequency for the * minimum sample time. */ if (cputime64_sub(update_time, freq_change_time) < min_sample_time) return; target_freq = policy->min; cpumask_set_cpu(data, &work_cpumask); queue_work(down_wq, &freq_scale_work); }
void mydev_timer_func(unsigned long data) { struct mydev_struct *dev= (struct mydev_struct*) data; pr_info("TIMER: current: %lu, time spent %lu\n", jiffies, jiffies - loading_time); mod_timer(&dev->timer, jiffies + dev->timeout); }
/* * start timer which controls leds state */ static void pcan_start_led_timer(struct pcan_pccard *card) { if (!timer_pending(&card->led_timer)) mod_timer(&card->led_timer, jiffies + HZ); }
void bl_timer_callback(unsigned long data) { schedule_work(&blink_work); mod_timer(&blink_timer, jiffies + msecs_to_jiffies(BLINK_INTERVAL)); }
/*
 * po188_ioctl - userspace control interface for the PO188 light sensor.
 * @file:  open file handle (unused).
 * @cmd:   one of the LIGHTSENSOR_IOCTL_* commands.
 * @param: user pointer to an int (enable flag or delay in ms).
 *
 * ENABLE starts/stops sampling (regulator + ADC channel + poll timer),
 * GET_ENABLED/GET_DELAY read state back, SET_DELAY changes the polling
 * period. Returns 0, -EFAULT on bad user pointer, -EINVAL on unknown cmd,
 * or the underlying regulator/ADC error.
 */
static long po188_ioctl(struct file* file, unsigned int cmd, unsigned long param)
{
	int ret = 0;
	int flags = 0;
	void __user *argp = (void __user *)param;
	int iDelayTime = 0;

	switch (cmd) {
	case LIGHTSENSOR_IOCTL_ENABLE:
		if (copy_from_user(&flags, argp, sizeof(flags))) {
			ret = -EFAULT;
		} else {
			/* note: flags other than 0/1 are silently ignored */
			if ( 0==flags ) {
				PO188_DMSG("active disable pol88\n");
				spin_lock(&po188_driver.s_lock);
				po188_driver.status_on = false;
				spin_unlock(&po188_driver.s_lock);
				/* drain any in-flight sampling work */
				cancel_work_sync(&po188_cb_work);
				flush_workqueue(po188_driver.po188_wq);
				/*
				 * NOTE(review): del_timer() does not wait for
				 * a concurrently running handler; del_timer_sync()
				 * may be safer here - confirm against the timer
				 * callback.
				 */
				del_timer(&po188_driver.timer);
				ret = k3_adc_close_channal(PO188_ADC_CHANNEL);
				if (ret < 0) {
					PO188_ERRMSG("k3_adc_close_channal error\n");
				}
				ret = regulator_disable(gPo188Regulator);
				if (ret < 0) {
					PO188_ERRMSG("disable po188 vcc drive error");
				}
			} else if (1 == flags) {
				PO188_DMSG("active enable pol88\n");
				/* power first, then ADC, then start polling */
				ret = regulator_enable(gPo188Regulator);
				if (ret < 0) {
					PO188_ERRMSG("enable po188 vcc drive error");
					return ret;//regulator_enable error, return.
				}
				ret = k3_adc_open_channel(PO188_ADC_CHANNEL);
				if (ret < 0) {
					PO188_ERRMSG("k3_adc_open_channel error\n");
					/* undo the regulator on failure */
					regulator_disable(gPo188Regulator);
					return ret;
				}
				mod_timer(&po188_driver.timer,
					  jiffies + msecs_to_jiffies(po188_driver.delay_time));
				spin_lock(&po188_driver.s_lock);
				po188_driver.status_on = true;
				spin_unlock(&po188_driver.s_lock);
			}
		}
		break;
	case LIGHTSENSOR_IOCTL_GET_ENABLED:
		spin_lock(&po188_driver.s_lock);
		flags = po188_driver.status_on;
		spin_unlock(&po188_driver.s_lock);
		if (copy_to_user(argp, &flags, sizeof(flags))) {
			ret = -EFAULT;
		}
		break;
	case LIGHTSENSOR_IOCTL_GET_DELAY:
		spin_lock(&po188_driver.s_lock);
		iDelayTime = po188_driver.delay_time;
		spin_unlock(&po188_driver.s_lock);
		if (copy_to_user(argp, &iDelayTime, sizeof(iDelayTime))) {
			ret = -EFAULT;
		}
		break;
	case LIGHTSENSOR_IOCTL_SET_DELAY:
		if (copy_from_user(&iDelayTime, argp, sizeof(iDelayTime))) {
			ret = -EFAULT;
		} else {
			/* takes effect the next time the timer is re-armed */
			spin_lock(&po188_driver.s_lock);
			po188_driver.delay_time = iDelayTime;
			spin_unlock(&po188_driver.s_lock);
		}
		break;
	default:
		PO188_ERRMSG("CMD INVALID.\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}
void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) { struct tid_ampdu_tx *tid_tx; struct ieee80211_local *local = sta->local; struct ieee80211_sub_if_data *sdata = sta->sdata; u16 start_seq_num; int ret; tid_tx = rcu_dereference_protected_tid_tx(sta, tid); /* * Start queuing up packets for this aggregation session. * We're going to release them once the driver is OK with * that. */ clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state); /* * Make sure no packets are being processed. This ensures that * we have a valid starting sequence number and that in-flight * packets have been flushed out and no packets for this TID * will go into the driver during the ampdu_action call. */ synchronize_net(); start_seq_num = sta->tid_seq[tid] >> 4; ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START, &sta->sta, tid, &start_seq_num, 0); if (ret) { #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "BA request denied - HW unavailable for" " tid %d\n", tid); #endif spin_lock_bh(&sta->lock); ieee80211_agg_splice_packets(local, tid_tx, tid); ieee80211_assign_tid_tx(sta, tid, NULL); ieee80211_agg_splice_finish(local, tid); spin_unlock_bh(&sta->lock); kfree_rcu(tid_tx, rcu_head); return; } /* activate the timer for the recipient's addBA response */ mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL); #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid); #endif spin_lock_bh(&sta->lock); sta->ampdu_mlme.last_addba_req_time[tid] = jiffies; sta->ampdu_mlme.addba_req_num[tid]++; spin_unlock_bh(&sta->lock); /* send AddBA request */ ieee80211_send_addba_request(sdata, sta->sta.addr, tid, tid_tx->dialog_token, start_seq_num, local->hw.max_tx_aggregation_subframes, tid_tx->timeout); }
static void musb_port_suspend(struct musb *musb, bool do_suspend) { struct usb_otg *otg = musb->xceiv->otg; u8 power; void __iomem *mbase = musb->mregs; if (!is_host_active(musb)) return; /* NOTE: this doesn't necessarily put PHY into low power mode, * turning off its clock; that's a function of PHY integration and * MUSB_POWER_ENSUSPEND. PHY may need a clock (sigh) to detect * SE0 changing to connect (J) or wakeup (K) states. */ power = musb_readb(mbase, MUSB_POWER); if (do_suspend) { int retries = 10000; power &= ~MUSB_POWER_RESUME; power |= MUSB_POWER_SUSPENDM; musb_writeb(mbase, MUSB_POWER, power); /* Needed for OPT A tests */ power = musb_readb(mbase, MUSB_POWER); while (power & MUSB_POWER_SUSPENDM) { power = musb_readb(mbase, MUSB_POWER); if (retries-- < 1) break; } dev_dbg(musb->controller, "Root port suspended, power %02x\n", power); musb->port1_status |= USB_PORT_STAT_SUSPEND; switch (musb->xceiv->state) { case OTG_STATE_A_HOST: musb->xceiv->state = OTG_STATE_A_SUSPEND; musb->is_active = otg->host->b_hnp_enable; if (musb->is_active) mod_timer(&musb->otg_timer, jiffies + msecs_to_jiffies( OTG_TIME_A_AIDL_BDIS)); musb_platform_try_idle(musb, 0); break; case OTG_STATE_B_HOST: musb->xceiv->state = OTG_STATE_B_WAIT_ACON; musb->is_active = otg->host->b_hnp_enable; musb_platform_try_idle(musb, 0); break; default: dev_dbg(musb->controller, "bogus rh suspend? %s\n", usb_otg_state_string(musb->xceiv->state)); } } else if (power & MUSB_POWER_SUSPENDM) { power &= ~MUSB_POWER_SUSPENDM; power |= MUSB_POWER_RESUME; musb_writeb(mbase, MUSB_POWER, power); dev_dbg(musb->controller, "Root port resuming, power %02x\n", power); /* later, GetPortStatus will stop RESUME signaling */ musb->port1_status |= MUSB_PORT_STAT_RESUME; musb->rh_timer = jiffies + msecs_to_jiffies(20); } }
void ieee80211_process_addba_resp(struct ieee80211_local *local, struct sta_info *sta, struct ieee80211_mgmt *mgmt, size_t len) { struct tid_ampdu_tx *tid_tx; u16 capab, tid; u8 buf_size; capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab); tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6; mutex_lock(&sta->ampdu_mlme.mtx); tid_tx = rcu_dereference_protected_tid_tx(sta, tid); if (!tid_tx) goto out; if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) { #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid); #endif goto out; } del_timer_sync(&tid_tx->addba_resp_timer); #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid); #endif /* * addba_resp_timer may have fired before we got here, and * caused WANT_STOP to be set. If the stop then was already * processed further, STOPPING might be set. */ if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) || test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "got addBA resp for tid %d but we already gave up\n", tid); #endif goto out; } /* * IEEE 802.11-2007 7.3.1.14: * In an ADDBA Response frame, when the Status Code field * is set to 0, the Buffer Size subfield is set to a value * of at least 1. */ if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) == WLAN_STATUS_SUCCESS && buf_size) { if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) { /* ignore duplicate response */ goto out; } tid_tx->buf_size = buf_size; if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)) ieee80211_agg_tx_operational(local, sta, tid); sta->ampdu_mlme.addba_req_num[tid] = 0; if (tid_tx->timeout) mod_timer(&tid_tx->session_timer, TU_TO_EXP_TIME(tid_tx->timeout)); } else { ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR, true); } out: mutex_unlock(&sta->ampdu_mlme.mtx); }
static void rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires) { queue->timer_list.expires = expires; mod_timer(&queue->timer_list.timer, expires); }
void rx_complete (struct urb *urb) { struct sk_buff *skb = (struct sk_buff *) urb->context; struct skb_data *entry = (struct skb_data *) skb->cb; struct usbnet *dev = entry->dev; int urb_status = urb->status; enum skb_state state; skb_put (skb, urb->actual_length); state = rx_done; entry->urb = NULL; if (enable_tx_rx_debug && (urb_status != -ECONNRESET)) netdev_info(dev->net, "[RMNET_D]rx_c, status: %d\n", urb_status); switch (urb_status) { case 0: if (skb->len < dev->net->hard_header_len) { state = rx_cleanup; dev->net->stats.rx_errors++; dev->net->stats.rx_length_errors++; netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len); if (enable_tx_rx_debug) netdev_info(dev->net, "[RMNET_D] Error: rx length %d\n", skb->len); } break; case -EPIPE: dev->net->stats.rx_errors++; usbnet_defer_kevent (dev, EVENT_RX_HALT); case -ECONNRESET: case -ESHUTDOWN: netif_dbg(dev, ifdown, dev->net, "rx shutdown, code %d\n", urb_status); goto block; case -EPROTO: case -ETIME: case -EILSEQ: dev->net->stats.rx_errors++; if (!timer_pending (&dev->delay)) { mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES); netif_dbg(dev, link, dev->net, "rx throttle %d\n", urb_status); } block: state = rx_cleanup; entry->urb = urb; urb = NULL; break; case -EOVERFLOW: dev->net->stats.rx_over_errors++; default: state = rx_cleanup; dev->net->stats.rx_errors++; netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status); break; } state = defer_bh(dev, skb, &dev->rxq, state); if (urb) { if (netif_running (dev->net) && !test_bit (EVENT_RX_HALT, &dev->flags) && state != unlink_start) { rx_submit (dev, urb, GFP_ATOMIC); #ifdef HTC_PM_DBG if (usb_pm_debug_enabled) usb_mark_intf_last_busy(dev->intf, true); #endif usb_mark_last_busy(dev->udev); return; } usb_free_urb (urb); } netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n"); }
/*
 * qib_handle_e_ibstatuschanged - react to an IB link state change.
 * @ppd: port data for the port that changed
 * @ibcs: raw IBC status value the change was decoded from
 *
 * Decodes logical and physical link state, lets the chip-specific
 * f_ib_updown() hook intercept up/down transitions, updates the lflags
 * bookkeeping under lflags_lock, and signals IB_EVENT_PORT_ERR /
 * IB_EVENT_PORT_ACTIVE to consumers as appropriate.
 *
 * FIX: mod_timer() takes an ABSOLUTE jiffies expiry; the original passed
 * the bare relative value msecs_to_jiffies(75), which is (almost always)
 * in the past and made the symbol-error-clear timer fire immediately
 * instead of 75 ms later as the comment intends. Now based on jiffies.
 */
void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
{
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;
	u32 lstate;
	u8 ltstate;
	enum ib_event_type ev = 0;

	lstate = dd->f_iblink_state(ibcs); /* linkstate */
	ltstate = dd->f_ibphys_portstate(ibcs);

	/*
	 * If linkstate transitions into INIT from any of the various down
	 * states, or if it transitions from any of the up (INIT or better)
	 * states into any of the down states (except link recovery), then
	 * call the chip-specific code to take appropriate actions.
	 *
	 * ppd->lflags could be 0 if this is the first time the interrupt
	 * handlers has been called but the link is already up.
	 */
	if (lstate >= IB_PORT_INIT &&
	    (!ppd->lflags || (ppd->lflags & QIBL_LINKDOWN)) &&
	    ltstate == IB_PHYSPORTSTATE_LINKUP) {
		/* transitioned to UP */
		if (dd->f_ib_updown(ppd, 1, ibcs))
			goto skip_ibchange; /* chip-code handled */
	} else if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
		   QIBL_LINKACTIVE | QIBL_IB_FORCE_NOTIFY)) {
		if (ltstate != IB_PHYSPORTSTATE_LINKUP &&
		    ltstate <= IB_PHYSPORTSTATE_CFG_TRAIN &&
		    dd->f_ib_updown(ppd, 0, ibcs))
			goto skip_ibchange; /* chip-code handled */
		qib_set_uevent_bits(ppd, _QIB_EVENT_LINKDOWN_BIT);
	}

	if (lstate != IB_PORT_DOWN) {
		/* lstate is INIT, ARMED, or ACTIVE */
		if (lstate != IB_PORT_ACTIVE) {
			*ppd->statusp &= ~QIB_STATUS_IB_READY;
			/* leaving ACTIVE counts as a port error */
			if (ppd->lflags & QIBL_LINKACTIVE)
				ev = IB_EVENT_PORT_ERR;
			spin_lock_irqsave(&ppd->lflags_lock, flags);
			if (lstate == IB_PORT_ARMED) {
				ppd->lflags |= QIBL_LINKARMED | QIBL_LINKV;
				ppd->lflags &= ~(QIBL_LINKINIT |
					QIBL_LINKDOWN | QIBL_LINKACTIVE);
			} else {
				ppd->lflags |= QIBL_LINKINIT | QIBL_LINKV;
				ppd->lflags &= ~(QIBL_LINKARMED |
					QIBL_LINKDOWN | QIBL_LINKACTIVE);
			}
			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
			/* start a 75msec timer to clear symbol errors */
			mod_timer(&ppd->symerr_clear_timer,
				  jiffies + msecs_to_jiffies(75));
		} else if (ltstate == IB_PHYSPORTSTATE_LINKUP &&
			   !(ppd->lflags & QIBL_LINKACTIVE)) {
			/* active, but not active defered */
			qib_hol_up(ppd); /* useful only for 6120 now */
			*ppd->statusp |=
				QIB_STATUS_IB_READY | QIB_STATUS_IB_CONF;
			qib_clear_symerror_on_linkup((unsigned long)ppd);
			spin_lock_irqsave(&ppd->lflags_lock, flags);
			ppd->lflags |= QIBL_LINKACTIVE | QIBL_LINKV;
			ppd->lflags &= ~(QIBL_LINKINIT |
				QIBL_LINKDOWN | QIBL_LINKARMED);
			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
			if (dd->flags & QIB_HAS_SEND_DMA)
				qib_sdma_process_event(ppd,
					qib_sdma_event_e30_go_running);
			ev = IB_EVENT_PORT_ACTIVE;
			dd->f_setextled(ppd, 1);
		}
	} else { /* down */
		if (ppd->lflags & QIBL_LINKACTIVE)
			ev = IB_EVENT_PORT_ERR;
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_LINKDOWN | QIBL_LINKV;
		ppd->lflags &= ~(QIBL_LINKINIT |
			QIBL_LINKACTIVE | QIBL_LINKARMED);
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		*ppd->statusp &= ~QIB_STATUS_IB_READY;
	}

skip_ibchange:
	ppd->lastibcstat = ibcs;
	if (ev)
		signal_ib_event(ppd, ev);
}
static irqreturn_t ts_interrupt(int irq, void *dev_id) { u32 avgs, x, y, lx, ly; u32 num_op, num_samp; u32 status; struct ts *ts = dev_id; status = readl_relaxed(TSSC_REG(STATUS)); avgs = readl_relaxed(TSSC_REG(AVG12)); x = avgs & 0xFFFF; y = avgs >> 16; /* For pen down make sure that the data just read is still valid. * The DATA bit will still be set if the ARM9 hasn't clobbered * the TSSC. If it's not set, then it doesn't need to be cleared * here, so just return. */ if (!(readl_relaxed(TSSC_REG(CTL)) & TSSC_CTL_DATA)) goto out; /* Data has been read, OK to clear the data flag */ writel_relaxed(TSSC_CTL_STATE, TSSC_REG(CTL)); /* barrier: Write to complete before the next sample */ mb(); /* Valid samples are indicated by the sample number in the status * register being the number of expected samples and the number of * samples collected being zero (this check is due to ADC contention). */ num_op = (status & TSSC_STS_OPN_BMSK) >> TSSC_STS_OPN_SHIFT; num_samp = (status & TSSC_STS_NUMSAMP_BMSK) >> TSSC_STS_NUMSAMP_SHFT; if ((num_op == TSSC_NUMBER_OF_OPERATIONS) && (num_samp == 0)) { /* TSSC can do Z axis measurment, but driver doesn't support * this yet. */ /* * REMOVE THIS: * These x, y co-ordinates adjustments will be removed once * Android framework adds calibration framework. */ #ifdef CONFIG_ANDROID_TOUCHSCREEN_MSM_HACKS lx = ts->x_max - x; ly = ts->y_max - y; #else lx = x; ly = y; #endif ts_update_pen_state(ts, lx, ly, 255); /* kick pen up timer - to make sure it expires again(!) */ mod_timer(&ts->timer, jiffies + msecs_to_jiffies(TS_PENUP_TIMEOUT_MS)); } else printk(KERN_INFO "Ignored interrupt: {%3d, %3d}," " op = %3d samp = %3d\n", x, y, num_op, num_samp); out: return IRQ_HANDLED; }