/*
 * s5p_idpram_prepare_suspend - negotiate iDPRAM power-down with the CP
 * (modem) before the AP suspends.
 *
 * @dpld: internal DPRAM link device
 *
 * Sends INT_CMD_IDPRAM_SUSPEND_REQ to the CP (up to 3 attempts) and waits
 * for the CP to answer on pm_data->down_cmpl.  On a SUSPEND_ACK the PM
 * state advances to IDPRAM_PM_DPRAM_POWER_DOWN; otherwise the PM lock is
 * dropped again and a 500 ms wakelock is held so the failure can be
 * observed/handled before the system sleeps.
 *
 * Always returns 0 — the outcome is reported via pm_data->pm_state.
 */
static int s5p_idpram_prepare_suspend(struct dpram_link_device *dpld)
{
	struct link_device *ld = &dpld->ld;
	struct idpram_pm_data *pm_data = &dpld->pm_data;
	struct modem_ctl *mc = dpld->ld.mc;
	struct completion *cmpl;
	unsigned long timeout;
	unsigned long rest;
	int cnt = 0;
	u16 cmd = INT_CMD(INT_CMD_IDPRAM_SUSPEND_REQ);

	mif_info("+++\n");

	pm_data->pm_state = IDPRAM_PM_SUSPEND_PREPARE;
	/* cleared so the interrupt handler's answer is unambiguous below */
	pm_data->last_msg = 0;

	s5p_idpram_set_pm_lock(dpld, 1);

	/*
	 * Because, if dpram was powered down, cp dpram random intr was
	 * ocurred. so, fixed by muxing cp dpram intr pin to GPIO output
	 * high,..
	 */
	gpio_set_value(dpld->gpio_int2cp, 1);
	s3c_gpio_cfgpin(dpld->gpio_int2cp, S3C_GPIO_OUTPUT);

	/* prevent PDA_ACTIVE status is low */
	gpio_set_value(mc->gpio_pda_active, 1);

	cmpl = &pm_data->down_cmpl;
	timeout = IDPRAM_SUSPEND_REQ_TIMEOUT;
	cnt = 0;
	/* Retry the suspend request until the CP completes down_cmpl,
	 * giving up after three consecutive timeouts. */
	do {
		init_completion(cmpl);
		mif_info("send IDPRAM_SUSPEND_REQ (0x%X)\n", cmd);
		dpld->send_intr(dpld, cmd);
		rest = wait_for_completion_timeout(cmpl, timeout);
		if (rest == 0) {
			cnt++;
			mif_err("timeout!!! (count = %d)\n", cnt);
			if (cnt >= 3) {
				mif_err("ERR! no response from CP\n");
				break;
			}
		}
	} while (rest == 0);

	/* last_msg is written by the interrupt path when the CP replies */
	switch (pm_data->last_msg) {
	case INT_CMD(INT_CMD_IDPRAM_SUSPEND_ACK):
		mif_info("recv IDPRAM_SUSPEND_ACK (0x%X)\n", pm_data->last_msg);
		pm_data->pm_state = IDPRAM_PM_DPRAM_POWER_DOWN;
		break;

	default:
		/* No (or unexpected) reply: keep the AP awake briefly and
		 * release the PM lock taken above so suspend can proceed
		 * or be retried cleanly. */
		mif_err("ERR! %s down or not ready!!! (intr 0x%04X)\n",
			ld->name, dpld->recv_intr(dpld));
		timeout = msecs_to_jiffies(500);
		wake_lock_timeout(&pm_data->hold_wlock, timeout);
		s5p_idpram_set_pm_lock(dpld, 0);
		break;
	}

	mif_info("---\n");
	return 0;
}
/*
 * mmc_rescan - card-detect worker (paranoid-SD-init variant).
 *
 * Re-checks a registered card, and otherwise probes for a newly inserted
 * card in the order SDIO -> SD -> MMC.  With CONFIG_MMC_PARANOID_SD_INIT
 * a failed probe (other than -ENOMEDIUM) is retried up to 2 times.
 * The per-host wakelock is extended whenever a card came or went so
 * userspace has time to react.
 */
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	u32 ocr;
	int err = 0;	/* must be initialised: the retry check at "out:" reads it */
	int extend_wakelock = 0;
#ifdef CONFIG_MMC_PARANOID_SD_INIT
	int retries = 2;
#endif

	mmc_bus_get(host);

	/* if there is a card registered, check whether it is still present */
	if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead) {
		host->bus_ops->detect(host);

		/* If the card was removed the bus will be marked
		 * as dead - extend the wakelock so userspace
		 * can respond */
		if (host->bus_dead)
			extend_wakelock = 1;
	}

	mmc_bus_put(host);

	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/* detect a newly inserted card */

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

retry:
	mmc_claim_host(host);

	mmc_power_up(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/*
	 * First we search for SDIO...
	 * NOTE: on success the mmc_attach_* helpers release the claimed
	 * host themselves, which is why the explicit mmc_release_host()
	 * only appears on the nothing-found path below.
	 */
	err = mmc_send_io_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sdio(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...then normal SD...
	 */
	err = mmc_send_app_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sd(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...and finally MMC.
	 */
	err = mmc_send_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_mmc(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/* no card answered: release the host and power the slot down */
	mmc_release_host(host);
	mmc_power_off(host);

out:
#ifdef CONFIG_MMC_PARANOID_SD_INIT
	/* some cards need a second (or third) initialisation attempt */
	if (err && (err != -ENOMEDIUM) && retries) {
		printk(KERN_INFO "%s: Re-scan card rc = %d (retries = %d)\n",
		       mmc_hostname(host), err, retries);
		retries--;
		goto retry;
	}
#endif
	if (extend_wakelock)
		wake_lock_timeout(&host->wakelock, 5 * HZ);
	else
		wake_unlock(&host->wakelock);

	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}
/*
 * mmc_rescan - card-detect worker (shared-wakelock refcount variant).
 *
 * Like the stock rescan, but when the controller reports card-absent via
 * get_cd() an already-registered card is torn down immediately, and the
 * delayed-work wakelock is shared between hosts through a plain counter.
 *
 * NOTE(review): "--wakelock_refs" below is a non-atomic read-modify-write
 * on what appears to be a file-scope counter shared by several hosts'
 * workers — presumably a race if two rescans finish concurrently; the
 * atomic_dec_return() used by a sibling revision of this function looks
 * like the intended fix.  Confirm against the counter's declaration.
 */
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	u32 ocr;
	int err;
	int extend_wakelock = 0;

	mmc_bus_get(host);

	/* if there is a card registered, check whether it is still present */
	if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead) {
		if (host->ops->get_cd && host->ops->get_cd(host) == 0) {
			/* controller says no card: detach the dead card now */
			if (host->bus_ops->remove)
				host->bus_ops->remove(host);
			mmc_claim_host(host);
			mmc_detach_bus(host);
			mmc_release_host(host);
		} else
			host->bus_ops->detect(host);
	}

	/* If the card was removed the bus will be marked
	 * as dead - extend the wakelock so userspace
	 * can respond */
	if (host->bus_dead)
		extend_wakelock = 1;

	mmc_bus_put(host);

	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/* detect a newly inserted card */

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

	mmc_claim_host(host);

	mmc_power_up(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/*
	 * First we search for SDIO...
	 */
	printk(KERN_DEBUG "*** DEBUG : First we search for SDIO...(%d)***\n", host->index);
	err = mmc_send_io_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sdio(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...then normal SD...
	 */
	printk(KERN_DEBUG "*** DEBUG : ...then normal SD...(%d) ***\n", host->index);
	err = mmc_send_app_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sd(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...and finally MMC.
	 */
	printk(KERN_DEBUG "*** DEBUG : ...and finally MMC. (%d)***\n", host->index);
	err = mmc_send_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_mmc(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	mmc_release_host(host);
	mmc_power_off(host);

out:
	/* last worker out handles the wakelock; others just drop their ref */
	if (--wakelock_refs > 0) {
		pr_debug("%s: other host want the wakelock\n", mmc_hostname(host));
	} else {
#if defined(CONFIG_MMC_BLOCK_DEFERRED_RESUME) && defined(CONFIG_MACH_S5PC110_P1)
		wake_lock_timeout(&mmc_delayed_work_wake_lock, 3*HZ);
#else /* CONFIG_MMC_BLOCK_DEFERRED_RESUME && CONFIG_MACH_S5PC110_P1 */
		if (extend_wakelock)
			wake_lock_timeout(&mmc_delayed_work_wake_lock, HZ / 2);
		else
			wake_unlock(&mmc_delayed_work_wake_lock);
#endif /* CONFIG_MMC_BLOCK_DEFERRED_RESUME && CONFIG_MACH_S5PC110_P1 */
	}

	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}
/*
 * mmc_rescan - card-detect worker (atomic shared-wakelock variant).
 *
 * Identical probing flow to the refcount variant above, but the shared
 * delayed-work wakelock is managed with atomic_dec_return(), and the last
 * worker out always arms a 1 s timeout instead of unlocking directly.
 */
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	u32 ocr;
	int err;
	int extend_wakelock = 0;

	mmc_bus_get(host);

	/* if there is a card registered, check whether it is still present */
	if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead) {
		if (host->ops->get_cd && host->ops->get_cd(host) == 0) {
			/* controller says no card: detach the dead card now */
			if (host->bus_ops->remove)
				host->bus_ops->remove(host);
			mmc_claim_host(host);
			mmc_detach_bus(host);
			mmc_release_host(host);
		} else
			host->bus_ops->detect(host);
	}

	/* If the card was removed the bus will be marked
	 * as dead - extend the wakelock so userspace
	 * can respond */
	if (host->bus_dead)
		extend_wakelock = 1;

	mmc_bus_put(host);

	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/* detect a newly inserted card */

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

	mmc_claim_host(host);

	mmc_power_up(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/*
	 * First we search for SDIO...
	 */
	printk(KERN_DEBUG "*** DEBUG : First we search for SDIO...(%d)***\n", host->index);
	err = mmc_send_io_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sdio(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...then normal SD...
	 */
	printk(KERN_DEBUG "*** DEBUG : ...then normal SD...(%d) ***\n", host->index);
	err = mmc_send_app_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sd(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...and finally MMC.
	 */
	printk(KERN_DEBUG "*** DEBUG : ...and finally MMC. (%d)***\n", host->index);
	err = mmc_send_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_mmc(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	mmc_release_host(host);
	mmc_power_off(host);

out:
#if 0
	//if (extend_wakelock)
	//	wake_lock_timeout(&mmc_delayed_work_wake_lock, HZ / 2);
	//else
	//	wake_unlock(&mmc_delayed_work_wake_lock);
#else
	/* last worker out arms the timeout; others just drop their ref */
	if (atomic_dec_return(&wakelock_refs) > 0) {
		printk(KERN_DEBUG "Another host want the wakelock : %d\n",
		       atomic_read(&wakelock_refs));
	} else {
		printk(KERN_DEBUG "mmc%d: wake_lock_timeout 1sec %d\n",
		       host->index, atomic_read(&wakelock_refs));
		wake_lock_timeout(&mmc_delayed_work_wake_lock, msecs_to_jiffies(1000));
	}
#endif

	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}
int ccci_df_to_ccci_callback(unsigned int rxq_no) { int ret, hc_ret; bool is_xcmd = false; struct sk_buff * skb = NULL; CCCI_BUFF_T *ccci_h = NULL; XBOOT_CMD *p_xcmd = NULL; KAL_UINT32 port_id = CCCI_PORT_CTRL; static KAL_UINT32 rx_err_cnt[CCCI_PORT_NUM_MAX] = {0}; #ifdef __EEMCS_EXPT_SUPPORT__ EEMCS_EXCEPTION_STATE mode = EEMCS_EX_INVALID; #endif #if defined (DBG_FEATURE_ADD_CCCI_SEQNO) KAL_INT16 channel, seq_num, assert_bit; #endif DEBUG_LOG_FUNCTION_ENTRY; /* Step 1. read skb from swq */ skb = hif_dl_read_swq(rxq_no); if(skb == NULL) { DBGLOG(CCCI, DBG, "ccci_df_to_ccci_callback read NULL skb on %d", rxq_no); if(is_exception_mode(&mode)) return KAL_FAIL; else KAL_ASSERT(NULL != skb); } /* Step 2. call handle complete */ hc_ret = hif_dl_pkt_handle_complete(rxq_no); KAL_ASSERT(0 == hc_ret); wake_lock_timeout(&eemcs_wake_lock, HZ/2); // Using 0.5s wake lock /* Step 3. buffer type */ if (rxq_no == RXQ_Q0) { //is_xcmd = is_xboot_command(skb); p_xcmd = (XBOOT_CMD *)skb->data; if (p_xcmd->magic == (KAL_UINT32)MAGIC_MD_CMD) { if (check_device_state() >= EEMCS_MOLY_HS_P1) { DBGLOG(CCCI, ERR, "can't recv xBoot cmd when EEMCS state=%d", check_device_state()); } else { is_xcmd = true; } } } if (is_xcmd) { /* Step 4. 
callback to xBoot */ CDEV_LOG(port_id, CCCI, INF, "XBOOT_CMD: 0x%08X, 0x%08X, 0x%08X, 0x%08X",\ p_xcmd->magic, p_xcmd->msg_id, p_xcmd->status, p_xcmd->reserved[0]); ret = ccci_port_info[port_id].ch.rx_cb(skb, 0); } else { ccci_h = (CCCI_BUFF_T *)skb->data; port_id = ccci_ch_to_port(ccci_h->channel); CDEV_LOG(port_id, CCCI, INF, "CCCI_H: 0x%08X, 0x%08X, 0x%08X, 0x%08X",\ ccci_h->data[0],ccci_h->data[1],ccci_h->channel, ccci_h->reserved); /*check rx sequence number for expect*/ #if defined (DBG_FEATURE_ADD_CCCI_SEQNO) channel = ccci_h->channel; seq_num = ccci_h->seq_num; assert_bit = ccci_h->assert_bit; DBGLOG(CCCI, DBG, "Port%d CCCI_H: data[0]=0x%08X, data[1]=0x%08X, ch=0x%02X, seqno=0x%02X, assert=%d, resv=0x%08X(0x%08X, 0x%08X, 0x%08X)",\ port_id, ccci_h->data[0],ccci_h->data[1],ccci_h->channel, ccci_h->seq_num, \ ccci_h->assert_bit, ccci_h->reserved, channel, seq_num, assert_bit); if(((seq_num - ccci_seqno_tbl[channel].seqno[RX]) & 0x7FFF) != 1 && assert_bit) { DBGLOG(CCCI, ERR, "Port%d seqno out-of-order(0x%02X->0x%02X): data[0]=0x%08X, data[1]=0x%08X, ch=0x%02X, seqno=0x%02X, assert=%d, resv=0x%08X", \ port_id, seq_num, ccci_seqno_tbl[channel].seqno[RX], ccci_h->data[0], ccci_h->data[1], \ ccci_h->channel, ccci_h->seq_num, ccci_h->assert_bit, ccci_h->reserved); hif_force_md_assert_swint(); } ccci_seqno_tbl[channel].seqno[RX] = seq_num; #endif /* Step 4. 
callback to CCCI device */ if(NULL != ccci_port_info[port_id].ch.rx_cb){ #ifdef __EEMCS_EXPT_SUPPORT__ if(is_exception_mode(&mode)) { if(!is_valid_exception_port(port_id, true)) { ret = KAL_FAIL; dev_kfree_skb(skb); eemcs_ccci_release_rx_skb(port_id, 1, skb); eemcs_expt_ccci_rx_drop(port_id); DBGLOG(CCCI, ERR, "PKT DROP when PORT%d(rxq=%d) at md exception", \ port_id, rxq_no); goto _end; } else { ret = ccci_port_info[port_id].ch.rx_cb(skb, 0); } } else #endif { ret = ccci_port_info[port_id].ch.rx_cb(skb, 0); } rx_err_cnt[port_id] = 0; } else { ret = KAL_FAIL; dev_kfree_skb(skb); eemcs_ccci_release_rx_skb(port_id, 1, skb); if (rx_err_cnt[port_id]%20 == 0) { DBGLOG(CCCI, ERR, "PKT DROP when PORT%d rx callback(ch=%d) not registered", \ port_id, ccci_h->channel); } rx_err_cnt[port_id]++; eemcs_update_statistics(0, port_id, RX, DROP); } eemcs_update_statistics(0, port_id, RX, NORMAL); } _end: DEBUG_LOG_FUNCTION_LEAVE; return ret; }
static int ccmni_rx_callback(int md_id, int rx_ch, struct sk_buff *skb, void *priv_data) { ccmni_ctl_block_t *ctlb = ccmni_ctl_blk[md_id]; // struct ccci_header *ccci_h = (struct ccci_header*)skb->data; ccmni_instance_t *ccmni = NULL; struct net_device *dev = NULL; int pkt_type, skb_len, ccmni_idx; if (unlikely(ctlb == NULL || ctlb->ccci_ops == NULL)) { CCMNI_ERR_MSG(md_id, "invalid CCMNI ctrl/ops struct for RX_CH(%d)\n", rx_ch); dev_kfree_skb(skb); return -1; } ccmni_idx = get_ccmni_idx_from_ch(md_id, rx_ch); if (unlikely(ccmni_idx < 0)) { CCMNI_ERR_MSG(md_id, "CCMNI rx(%d) skb ch error\n", rx_ch); dev_kfree_skb(skb); return -1; } ccmni = ctlb->ccmni_inst[ccmni_idx]; dev = ccmni->dev; // skb_pull(skb, sizeof(struct ccci_header)); pkt_type = skb->data[0] & 0xF0; ccmni_make_etherframe(skb->data-ETH_HLEN, dev->dev_addr, pkt_type); skb_set_mac_header(skb, -ETH_HLEN); skb->dev = dev; if(pkt_type == 0x60) { skb->protocol = htons(ETH_P_IPV6); } else { skb->protocol = htons(ETH_P_IP); } skb->ip_summed = CHECKSUM_NONE; skb_len = skb->len; if (unlikely(ccmni_debug_level&CCMNI_DBG_LEVEL_RX)) { CCMNI_INF_MSG(md_id, "[RX]CCMNI%d(rx_ch=%d) recv data_len=%d\n", ccmni_idx, rx_ch, skb->len); } if (unlikely(ccmni_debug_level&CCMNI_DBG_LEVEL_RX_SKB)) { ccmni_dbg_skb_header(ccmni->md_id, false, skb); } if(likely(ctlb->ccci_ops->md_ability & MODEM_CAP_NAPI)) { netif_receive_skb(skb); } else { #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) if(!in_interrupt()) { netif_rx_ni(skb); } else { netif_rx(skb); } #else netif_rx(skb); #endif } dev->stats.rx_packets++; dev->stats.rx_bytes += skb_len; wake_lock_timeout(&ctlb->ccmni_wakelock, HZ); return 0; }
/*
 * android_ar6k_check_wow_status - decide whether an incoming WMI event or
 * data frame should hold the host awake while in wake-on-wireless.
 *
 * @ar:      adapter soft context
 * @skb:     the received event/frame (may be NULL)
 * @isEvent: true for WMI events, false for data frames
 *
 * Scans every connected virtual device.  Wakes the host for
 * connect/disconnect events, and for unicast IP/EAPOL/RSN-preauth/WAPI
 * frames; ARP and DHCP-request/broadcast handling depends on AP mode.
 * When a wake is needed, a 3 s wakelock is taken and (optionally) the
 * LCD backlight is forced on via sysfs.
 */
void android_ar6k_check_wow_status(AR_SOFTC_T *ar, struct sk_buff *skb, A_BOOL isEvent)
{
	AR_SOFTC_DEV_T *arPriv;
	A_UINT8 i;
	A_BOOL needWake = FALSE;

	for (i = 0; i < num_device; i++) {
		arPriv = ar->arDev[i];
		/* only consider traffic while the screen is off (when
		 * early-suspend support is compiled in) and the vdev is
		 * associated */
		if (
#ifdef CONFIG_HAS_EARLYSUSPEND
		    screen_is_off &&
#endif
		    skb && arPriv->arConnected) {
			if (isEvent) {
				if (A_NETBUF_LEN(skb) >= sizeof(A_UINT16)) {
					A_UINT16 cmd = *(const A_UINT16 *)A_NETBUF_DATA(skb);
					switch (cmd) {
					case WMI_CONNECT_EVENTID:
					case WMI_DISCONNECT_EVENTID:
						needWake = TRUE;
						break;
					default:
						/* dont wake lock the system for other event */
						break;
					}
				}
			} else if (A_NETBUF_LEN(skb) >= sizeof(ATH_MAC_HDR)) {
				ATH_MAC_HDR *datap = (ATH_MAC_HDR *)A_NETBUF_DATA(skb);
				if (!IEEE80211_IS_MULTICAST(datap->dstMac)) {
					/* unicast frame: wake for the protocols
					 * userspace must answer promptly */
					switch (A_BE2CPU16(datap->typeOrLen)) {
					case 0x0800: /* IP */
					case 0x888e: /* EAPOL */
					case 0x88c7: /* RSN_PREAUTH */
					case 0x88b4: /* WAPI */
						needWake = TRUE;
						break;
					case 0x0806:
						/* ARP is not important to hold wake lock */
						needWake = (arPriv->arNetworkType == AP_NETWORK);
						break;
					default:
						break;
					}
				} else if (!IEEE80211_IS_BROADCAST(datap->dstMac)) {
					if (A_NETBUF_LEN(skb) >= 14+20) {
						/* check if it is mDNS packets
						 * (dst IP 224.0.0.251 -> last octet
						 * 0xFB matches the 0xf8 mask test) */
						A_UINT8 *dstIpAddr =
							(A_UINT8 *)(A_NETBUF_DATA(skb)+14+20-4);
						struct net_device *ndev = arPriv->arNetDev;
						needWake = ((dstIpAddr[3] & 0xf8) == 0xf8) &&
							(arPriv->arNetworkType == AP_NETWORK ||
							 (ndev->flags & IFF_ALLMULTI ||
							  ndev->flags & IFF_MULTICAST));
					}
				} else if (arPriv->arNetworkType == AP_NETWORK) {
					/* broadcast frame while acting as AP */
					switch (A_BE2CPU16(datap->typeOrLen)) {
					case 0x0800: /* IP */
						if (A_NETBUF_LEN(skb) >= 14+20+2) {
							A_UINT16 dstPort =
								*(A_UINT16 *)(A_NETBUF_DATA(skb)+14+20);
							dstPort = A_BE2CPU16(dstPort);
							needWake = (dstPort == 0x43); /* dhcp request */
						}
						break;
					case 0x0806:
						needWake = TRUE;
						/* NOTE(review): missing break — falls
						 * through to default, which is harmless
						 * here but looks unintentional */
					default:
						break;
					}
				}
			}
		}
	}
	if (needWake) {
#ifdef CONFIG_HAS_WAKELOCK
		/* keep host wake up if there is any event and packate comming in*/
		wake_lock_timeout(&ar6k_wow_wake_lock, 3*HZ);
#endif
		if (wowledon) {
			/* force the system awake and the backlight on via sysfs */
			char buf[32];
			int len = sprintf(buf, "on");
			android_readwrite_file("/sys/power/state", NULL, buf, len);

			len = sprintf(buf, "%d", 127);
			android_readwrite_file("/sys/class/leds/lcd-backlight/brightness",
					       NULL, buf, len);
		}
	}
}
/*
 * pm8058_drvx_led_brightness_set - led_classdev brightness hook for the
 * PM8058 DRVx current-sink LEDs.
 *
 * @led_cdev:   class device embedded in struct pm8058_led_data
 * @brightness: requested level, clamped to [LED_OFF, LED_FULL]
 *
 * Non-zero brightness configures the PWM (optionally via a LUT ramp-in
 * when PM8058_LED_LTU_EN is set); zero brightness either starts a LUT
 * fade-out (finished later by led_delayed_work) or disables the PWM and
 * zeroes the sink current.
 *
 * NOTE(review): "duites_size" is presumably a misspelling of "duties_size"
 * in the platform struct — it can only be renamed together with the
 * header that declares it.
 */
extern void pm8058_drvx_led_brightness_set(struct led_classdev *led_cdev,
					   enum led_brightness brightness)
{
	struct pm8058_led_data *ldata;
	int *pduties;
	int id, mode;
	int lut_flag;
	int milliamps;
	int enable = 0;

	ldata = container_of(led_cdev, struct pm8058_led_data, ldev);

	/* stop any running PWM/fade before reprogramming */
	pwm_disable(ldata->pwm_led);
	cancel_delayed_work_sync(&ldata->led_delayed_work);

	id = bank_to_id(ldata->bank);
	/* keypad LED uses PWM1 directly; DRVx banks map from bank 4 upward */
	mode = (id == PM_PWM_LED_KPD) ? PM_PWM_CONF_PWM1 :
					PM_PWM_CONF_PWM1 + (ldata->bank - 4);

	/* clamp to the valid LED range */
	brightness = (brightness > LED_FULL) ? LED_FULL : brightness;
	brightness = (brightness < LED_OFF) ? LED_OFF : brightness;
	LED_INFO_LOG("%s: bank %d brightness %d +\n", __func__,
		     ldata->bank, brightness);
	enable = (brightness) ? 1 : 0;

	if (strcmp(ldata->ldev.name, "charming-led") == 0)
		charming_led_enable(enable);

	/* one-shot ramps only: strip looping/reverse LUT flags */
	lut_flag = ldata->lut_flag & ~(PM_PWM_LUT_LOOP | PM_PWM_LUT_REVERSE);
	virtual_key_state = enable;

	/* while the flash sequence owns the button backlight, do nothing */
	if (flag_hold_virtual_key == 1) {
		LED_INFO_LOG("%s, Return control by button_backlight flash \n", __func__);
		return;
	}

	if (brightness) {
		/* scale sink current with brightness when dynamic mode is on */
		milliamps = (ldata->flags & PM8058_LED_DYNAMIC_BRIGHTNESS_EN) ?
			    ldata->out_current * brightness / LED_FULL :
			    ldata->out_current;
		pm8058_pwm_config_led(ldata->pwm_led, id, mode, milliamps);
		if (ldata->flags & PM8058_LED_LTU_EN) {
			/* fade-in: play the first half of the duty table */
			pduties = &duty_array[ldata->start_index];
			pm8058_pwm_lut_config(ldata->pwm_led,
					      ldata->period_us,
					      pduties,
					      ldata->duty_time_ms,
					      ldata->start_index,
					      ldata->duites_size,
					      0, 0,
					      lut_flag);
			pm8058_pwm_lut_enable(ldata->pwm_led, 0);
			pm8058_pwm_lut_enable(ldata->pwm_led, 1);
		} else {
			/* plain always-on PWM (duty == period) */
			pwm_config(ldata->pwm_led, 64000, 64000);
			pwm_enable(ldata->pwm_led);
		}
	} else {
		if (ldata->flags & PM8058_LED_LTU_EN) {
			/* fade-out: keep the system awake until the ramp and
			 * the deferred power-down work complete */
			wake_lock_timeout(&pmic_led_wake_lock, HZ*2);
			pduties = &duty_array[ldata->start_index +
					      ldata->duites_size];
			pm8058_pwm_lut_config(ldata->pwm_led,
					      ldata->period_us,
					      pduties,
					      ldata->duty_time_ms,
					      ldata->start_index +
					      ldata->duites_size,
					      ldata->duites_size,
					      0, 0,
					      lut_flag);
			pm8058_pwm_lut_enable(ldata->pwm_led, 1);
			queue_delayed_work(g_led_work_queue,
					   &ldata->led_delayed_work,
					   msecs_to_jiffies(ldata->duty_time_ms *
							    ldata->duites_size));
			LED_INFO_LOG("%s: bank %d fade out brightness %d -\n",
				     __func__, ldata->bank, brightness);
			return;
		} else
			pwm_disable(ldata->pwm_led);
		/* cut the sink current entirely */
		pm8058_pwm_config_led(ldata->pwm_led, id, mode, 0);
	}
	LED_INFO_LOG("%s: bank %d brightness %d -\n", __func__,
		     ldata->bank, brightness);
}
/*
 * usb_detect_irq_handler - VBUS/cable-detect interrupt.
 *
 * Holds a 10 s wakelock so the system cannot suspend while the cable
 * state is being resolved, then defers the actual handling to
 * wakeup_work after a ~100 ms debounce delay.
 */
static irqreturn_t usb_detect_irq_handler(int irq, void *dev_id)
{
	wake_lock_timeout(&usb_wakelock, 10 * HZ);
	schedule_delayed_work(&wakeup_work, HZ / 10);
	return IRQ_HANDLED;
}
/*
 * baseband_xmm_set_power_status - drive the XMM modem link power state
 * machine (L0 active, L2 suspended, L2TOL0 resuming).
 *
 * This function holds xmm_lock for the state transition; it temporarily
 * drops the lock around xmm_power_l2_resume(), which must run unlocked.
 */
void baseband_xmm_set_power_status(unsigned int status)
{
	struct baseband_power_platform_data *data = xmm_power_drv_data.pdata;
	int value = 0;
	unsigned long flags;

	/* no-op if we are already in the requested state */
	if (baseband_xmm_get_power_status() == status)
		return;

	/* avoid prints inside spinlock */
	if (status <= BBXMM_PS_L2)
		pr_info("%s\n", status == BBXMM_PS_L0 ? "L0" : "L2");

	spin_lock_irqsave(&xmm_lock, flags);
	switch (status) {
	case BBXMM_PS_L0:
		baseband_xmm_powerstate = status;
		/* keep the AP awake while the link comes up */
		if (!wake_lock_active(&wakelock))
			wake_lock_timeout(&wakelock, HZ*2);

		/* pull hsic_active high for enumeration */
		value = gpio_get_value(data->modem.xmm.ipc_hsic_active);
		if (!value) {
			pr_debug("L0 gpio set ipc_hsic_active=1 ->\n");
			gpio_set_value(data->modem.xmm.ipc_hsic_active, 1);
		}
		/* first transition after boot also powers the modem on */
		if (modem_power_on) {
			modem_power_on = false;
			baseband_modem_power_on(data);
		}

		/* cp acknowledgment for ap L2->L0 wake */
		if (!modem_acked_resume)
			pr_err("%s: CP didn't ack usb-resume\n", __func__);
		value = gpio_get_value(data->modem.xmm.ipc_bb_wake);
		if (value) {
			/* clear the slave wakeup request */
			gpio_set_value(data->modem.xmm.ipc_bb_wake, 0);
			pr_debug("gpio bb_wake done low\n");
		}
		break;
	case BBXMM_PS_L2:
		modem_acked_resume = false;
		if (wakeup_pending) {
			/* a wake raced with the suspend: resume immediately.
			 * xmm_power_l2_resume() must not run under xmm_lock. */
			spin_unlock_irqrestore(&xmm_lock, flags);
			pr_debug("%s: wakeup pending\n", __func__);
			xmm_power_l2_resume();
			spin_lock_irqsave(&xmm_lock, flags);
			break;
		} else {
			if (wake_lock_active(&wakelock))
				wake_unlock(&wakelock);
			modem_sleep_flag = true;
		}
		baseband_xmm_powerstate = status;
		break;
	case BBXMM_PS_L2TOL0:
		pr_debug("L2TOL0\n");
		system_suspending = false;
		wakeup_pending = false;
		/* do this only from L2 state */
		if (baseband_xmm_powerstate == BBXMM_PS_L2) {
			baseband_xmm_powerstate = status;
			spin_unlock_irqrestore(&xmm_lock, flags);
			xmm_power_l2_resume();
			spin_lock_irqsave(&xmm_lock, flags);
		}
		baseband_xmm_powerstate = status;
		break;
	default:
		baseband_xmm_powerstate = status;
		break;
	}
	spin_unlock_irqrestore(&xmm_lock, flags);
	pr_debug("BB XMM POWER STATE = %d\n", status);
}
/*
 * msm_hsusb_request_host - OTG-driven host-controller request dispatcher.
 *
 * @handle:  the msmusb_hcd (passed as void* from the OTG layer)
 * @request: REQUEST_START/STOP/RESUME, plus HNP suspend/resume when
 *           CONFIG_USB_OTG is enabled
 *
 * START brings up clocks, PHY, VBUS and registers the HCD; STOP reverses
 * that and keeps a short wakelock so teardown can finish before suspend.
 * The HNP cases hand the integrated PHY between host and peripheral roles.
 */
static void msm_hsusb_request_host(void *handle, int request)
{
	struct msmusb_hcd *mhcd = handle;
	struct usb_hcd *hcd = mhcd_to_hcd(mhcd);
	struct msm_usb_host_platform_data *pdata = mhcd->pdata;
	struct msm_otg *otg = container_of(mhcd->xceiv, struct msm_otg, phy);
#ifdef CONFIG_USB_OTG
	struct usb_device *udev = hcd->self.root_hub;
#endif
	struct device *dev = hcd->self.controller;

	switch (request) {
#ifdef CONFIG_USB_OTG
	case REQUEST_HNP_SUSPEND:
		/* disable Root hub auto suspend. As hardware is configured
		 * for peripheral mode, mark hardware is not available.
		 */
		if (PHY_TYPE(pdata->phy_info) == USB_PHY_INTEGRATED) {
			pm_runtime_disable(&udev->dev);
			/* Mark root hub as disconnected. This would
			 * protect suspend/resume via sysfs. */
			udev->state = USB_STATE_NOTATTACHED;
			clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
			hcd->state = HC_STATE_HALT;
			pm_runtime_put_noidle(dev);
			pm_runtime_suspend(dev);
		}
		break;
	case REQUEST_HNP_RESUME:
		if (PHY_TYPE(pdata->phy_info) == USB_PHY_INTEGRATED) {
			pm_runtime_get_noresume(dev);
			pm_runtime_resume(dev);
			/* reset/restart the controller with its IRQ masked */
			disable_irq(hcd->irq);
			ehci_msm_reset(hcd);
			ehci_msm_run(hcd);
			set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
			pm_runtime_enable(&udev->dev);
			udev->state = USB_STATE_CONFIGURED;
			enable_irq(hcd->irq);
		}
		break;
#endif
	case REQUEST_RESUME:
		usb_hcd_resume_root_hub(hcd);
		break;
	case REQUEST_START:
		if (mhcd->running)
			break;
		pm_runtime_get_noresume(dev);
		pm_runtime_resume(dev);
		wake_lock(&mhcd->wlock);
		msm_xusb_pm_qos_update(mhcd, 1);
		msm_xusb_enable_clks(mhcd);
		/* the PHY clock is only needed while the HCD registers */
		if (PHY_TYPE(pdata->phy_info) == USB_PHY_INTEGRATED)
			if (otg->set_clk)
				otg->set_clk(mhcd->xceiv, 1);
		if (pdata->vbus_power)
			pdata->vbus_power(pdata->phy_info, 1);
		if (pdata->config_gpio)
			pdata->config_gpio(1);
		usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
		mhcd->running = 1;
		if (PHY_TYPE(pdata->phy_info) == USB_PHY_INTEGRATED)
			if (otg->set_clk)
				otg->set_clk(mhcd->xceiv, 0);
		break;
	case REQUEST_STOP:
		if (!mhcd->running)
			break;
		mhcd->running = 0;
		/* come out of lpm before deregistration */
		if (PHY_TYPE(pdata->phy_info) == USB_PHY_SERIAL_PMIC) {
			usb_lpm_exit(hcd);
			/* if the exit work was queued, run it synchronously */
			if (cancel_work_sync(&(mhcd->lpm_exit_work)))
				usb_lpm_exit_w(&mhcd->lpm_exit_work);
		}
		usb_remove_hcd(hcd);
		if (pdata->config_gpio)
			pdata->config_gpio(0);
		if (pdata->vbus_power)
			pdata->vbus_power(pdata->phy_info, 0);
		msm_xusb_disable_clks(mhcd);
		/* brief grace period for teardown before allowing suspend */
		wake_lock_timeout(&mhcd->wlock, HZ/2);
		msm_xusb_pm_qos_update(mhcd, 0);
		pm_runtime_put_noidle(dev);
		pm_runtime_suspend(dev);
		break;
	}
}
/* Do the work for AP/CP initiated L2->L0 */
/*
 * Distinguishes who initiated the resume by sampling ipc_ap_wake:
 *  - high: AP-initiated — raise bb_wake and block (interruptibly, with
 *    retries) until the CP signals host wakeup via drv->hostwake;
 *  - low: CP-initiated — defer the actual resume to l2_resume_work.
 *
 * Must be called from sleepable context; it refuses to run in irq/atomic
 * context because it may wait.  Takes a 2 s wakelock up front so the
 * system cannot suspend mid-resume.
 */
static void xmm_power_l2_resume(void)
{
	struct baseband_power_platform_data *pdata = xmm_power_drv_data.pdata;
	struct xmm_power_data *drv = &xmm_power_drv_data;
	int value;
	int delay = 1000; /* maxmum delay in msec */
	unsigned long flags;
	int ret, rcount = 0;

	pr_debug("%s\n", __func__);

	if (!pdata)
		return;

	/* erroneous remote-wakeup might call this from irq */
	if (in_interrupt() || in_atomic()) {
		pr_err("%s: not allowed in interrupt\n", __func__);
		return;
	}

	/* claim the wakelock here to avoid any system suspend */
	if (!wake_lock_active(&wakelock))
		wake_lock_timeout(&wakelock, HZ*2);

	spin_lock_irqsave(&xmm_lock, flags);
	modem_sleep_flag = false;
	wakeup_pending = false;

	value = gpio_get_value(pdata->modem.xmm.ipc_ap_wake);
	if (value) {
		/* set the slave wakeup request - bb_wake high */
		drv->hostwake = 0;
		gpio_set_value(pdata->modem.xmm.ipc_bb_wake, 1);
		spin_unlock_irqrestore(&xmm_lock, flags);
		pr_info("AP L2->L0\n");
retry:
		/* wait for cp */
		pr_debug("waiting for host wakeup from CP...\n");
		ret = wait_event_interruptible_timeout(drv->bb_wait,
				drv->hostwake == 1, msecs_to_jiffies(delay));
		if (ret == 0) {
			/* timed out: CP never answered */
			pr_info("!!AP L2->L0 Failed\n");
			return;
		}
		if (ret == -ERESTARTSYS) {
			/* interrupted by a signal: retry a bounded number
			 * of times before giving up */
			if (rcount >= 5) {
				pr_info("!!AP L2->L0 Failed\n");
				return;
			}
			pr_debug("%s: caught signal\n", __func__);
			rcount++;
			goto retry;
		}
		pr_debug("Get gpio host wakeup low <-\n");
	} else {
		/* CP already drove ap_wake low: let the workqueue resume us */
		cp_initiated_l2tol0 = false;
		queue_work(workqueue, &l2_resume_work);
		spin_unlock_irqrestore(&xmm_lock, flags);
		pr_info("CP L2->L0\n");
	}
}
/*
 * mmc_rescan - card-detect worker (FIH/FXX board variant).
 *
 * Standard detect/probe flow (SDIO -> SD -> MMC), with two board-specific
 * additions at the end: host 0 mirrors its bind state into storage_state
 * for USB mass-storage handling, and under CONFIG_FIH_FXX host 0 holds
 * two 2 s wakelocks to work around card-detect failures during suspend.
 */
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	u32 ocr;
	int err;
	int extend_wakelock = 0;

	mmc_bus_get(host);

	/* if there is a card registered, check whether it is still present */
	if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead)
		host->bus_ops->detect(host);

	/* If the card was removed the bus will be marked
	 * as dead - extend the wakelock so userspace
	 * can respond */
	if (host->bus_dead)
		extend_wakelock = 1;

	mmc_bus_put(host);

	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/* detect a newly inserted card */

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

	mmc_claim_host(host);

	mmc_power_up(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/*
	 * First we search for SDIO...
	 */
	err = mmc_send_io_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sdio(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...then normal SD...
	 */
	err = mmc_send_app_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sd(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...and finally MMC.
	 */
	err = mmc_send_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_mmc(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	mmc_release_host(host);
	mmc_power_off(host);

out:
	/* FIH_FOX, BillHJChang { 20090904, Modify for UMS currect handle */
	if (host->index == 0) {
		/* host 0 is the external SD slot: publish whether a card is
		 * bound so the USB mass-storage path can see it */
		if (host->bus_ops == NULL)
			storage_state = false;
		else
			storage_state = true;
		printk(KERN_INFO "%s: (storage_state : %d)\n", __func__, storage_state);
	}
	/* FIH_F0X, BillHJChang } */

	/* FIH, BillHJChang, 2009/11/20 { */
	/* [FXX_CR], issue of card detect fail in suspend mode */
#ifdef CONFIG_FIH_FXX
	if (host->index == 0) {
		wake_lock_timeout(&mmc_delayed_work_wake_lock, HZ * 2);
		wake_lock_timeout(&sdcard_idle_wake_lock, HZ * 2);
	} else {
		wake_unlock(&mmc_delayed_work_wake_lock);
		wake_unlock(&sdcard_idle_wake_lock);
	}
#else
	if (extend_wakelock)
		wake_lock_timeout(&mmc_delayed_work_wake_lock, HZ / 2);
	else
		wake_unlock(&mmc_delayed_work_wake_lock);
#endif
	/* } FIH, BillHJChang, 2009/11/20 */

	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}
/*
 * wl1271_rx_handle_data - turn one raw frame from the wlcore aggregation
 * buffer into an skb and queue it for mac80211.
 *
 * @wl:       wlcore device
 * @data:     start of the raw buffer (descriptor + payload)
 * @length:   raw buffer length
 * @rx_align: how the payload is aligned in the buffer (drives the
 *            2-byte reserve/pull needed to 4-byte-align the IP header)
 * @hlid:     out: host link id the frame belongs to
 *
 * Returns 1 if the frame carries a data payload, 0 otherwise, or a
 * negative error (PLT mode, bad length, decrypt failure, OOM).
 */
static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
				 enum wl_rx_buf_align rx_align, u8 *hlid)
{
	struct wl1271_rx_descriptor *desc;
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	u8 *buf;
	u8 beacon = 0;
	u8 is_data = 0;
	u8 reserved = 0, offset_to_data = 0;
	u16 seq_num;
	u32 pkt_data_len;

	/*
	 * In PLT mode we seem to get frames and mac80211 warns about them,
	 * workaround this by not retrieving them at all.
	 */
	if (unlikely(wl->plt))
		return -EINVAL;

	pkt_data_len = wlcore_hw_get_rx_packet_len(wl, data, length);
	if (!pkt_data_len) {
		wl1271_error("Invalid packet arrived from HW. length %d",
			     length);
		return -EINVAL;
	}

	if (rx_align == WLCORE_RX_BUF_UNALIGNED)
		reserved = RX_BUF_ALIGN;
	else if (rx_align == WLCORE_RX_BUF_PADDED)
		offset_to_data = RX_BUF_ALIGN;

	/* the data read starts with the descriptor */
	desc = (struct wl1271_rx_descriptor *) data;

	if (desc->packet_class == WL12XX_RX_CLASS_LOGGER) {
		/* firmware log chunk, not an 802.11 frame: hand it to the
		 * fwlog reader and stop here */
		size_t len = length - sizeof(*desc);
		wl12xx_copy_fwlog(wl, data + sizeof(*desc), len);
		wake_up_interruptible(&wl->fwlog_waitq);
		return 0;
	}

	/* discard corrupted packets */
	if (desc->status & WL1271_RX_DESC_DECRYPT_FAIL) {
		hdr = (void *)(data + sizeof(*desc) + offset_to_data);
		wl1271_warning("corrupted packet in RX: status: 0x%x len: %d",
			       desc->status & WL1271_RX_DESC_STATUS_MASK,
			       pkt_data_len);
		wl1271_dump((DEBUG_RX|DEBUG_CMD), "PKT: ", data + sizeof(*desc),
			    min(pkt_data_len,
				ieee80211_hdrlen(hdr->frame_control)));
		return -EINVAL;
	}

	/* skb length not including rx descriptor */
	skb = __dev_alloc_skb(pkt_data_len + reserved, GFP_KERNEL);
	if (!skb) {
		wl1271_error("Couldn't allocate RX frame");
		return -ENOMEM;
	}

	/* reserve the unaligned payload(if any) */
	skb_reserve(skb, reserved);

	buf = skb_put(skb, pkt_data_len);

	/*
	 * Copy packets from aggregation buffer to the skbs without rx
	 * descriptor and with packet payload aligned care. In case of unaligned
	 * packets copy the packets in offset of 2 bytes guarantee IP header
	 * payload aligned to 4 bytes.
	 */
	memcpy(buf, data + sizeof(*desc), pkt_data_len);
	if (rx_align == WLCORE_RX_BUF_PADDED)
		skb_pull(skb, RX_BUF_ALIGN);

	*hlid = desc->hlid;

	hdr = (struct ieee80211_hdr *)skb->data;
	if (ieee80211_is_beacon(hdr->frame_control))
		beacon = 1;
	if (ieee80211_is_data_present(hdr->frame_control))
		is_data = 1;

	wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
	wlcore_hw_set_rx_csum(wl, desc, skb);

	seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
	wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d hlid %d", skb,
		     skb->len - desc->pad_len,
		     beacon ? "beacon" : "",
		     seq_num, *hlid);

	/* hand off to the deferred netstack worker */
	skb_queue_tail(&wl->deferred_rx_queue, skb);
	queue_work(wl->freezable_wq, &wl->netstack_work);

#ifdef CONFIG_HAS_WAKELOCK
	/* let the frame some time to propagate to user-space */
	wake_lock_timeout(&wl->rx_wake, HZ);
#endif

	return is_data;
}
void mmc_rescan(struct work_struct *work) { struct mmc_host *host = container_of(work, struct mmc_host, detect.work); u32 ocr; int err; mmc_bus_get(host); if (host->bus_ops == NULL) { /* * Only we can add a new handler, so it's safe to * release the lock here. */ mmc_bus_put(host); if (host->ops->get_cd && host->ops->get_cd(host) == 0) goto out; mmc_claim_host(host); mmc_power_up(host); mmc_go_idle(host); mmc_send_if_cond(host, host->ocr_avail); /* * First we search for SDIO... */ err = mmc_send_io_op_cond(host, 0, &ocr); if (!err) { if (mmc_attach_sdio(host, ocr)) mmc_power_off(host); goto out; } /* * ...then normal SD... */ err = mmc_send_app_op_cond(host, 0, &ocr); if (!err) { if (mmc_attach_sd(host, ocr)) mmc_power_off(host); goto out; } /* * ...and finally MMC. */ err = mmc_send_op_cond(host, 0, &ocr); if (!err) { if (mmc_attach_mmc(host, ocr)) mmc_power_off(host); goto out; } mmc_release_host(host); mmc_power_off(host); } else { if (host->bus_ops->detect && !host->bus_dead) host->bus_ops->detect(host); mmc_bus_put(host); } out: /* give userspace some time to react */ wake_lock_timeout(&mmc_delayed_work_wake_lock, HZ / 2); if (host->caps & MMC_CAP_NEEDS_POLL) mmc_schedule_delayed_work(&host->detect, HZ); }
/*
 * tab3_mic_id - WM1811 accessory-identification callback.
 *
 * @data:   wm1811_machine_priv set up at probe time
 * @status: WM8958_MIC_DETECT_3 status bits from the codec
 *
 * Classifies the attached accessory: bit 0x400 while still detecting
 * means a high-impedance path (headset with mic), bit 0x4 a low one
 * (mic-less headphone).  Reports the result through the mic-detect jack
 * and, for headphones with jackdet available, hands removal detection
 * back to the jackdet logic and drops MICBIAS2 when externally capped.
 */
static void tab3_mic_id(void *data, u16 status)
{
	struct wm1811_machine_priv *wm1811 = data;
	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(wm1811->codec);
	int report;
	int reg;
	bool present;

	/* keep the system awake long enough for userspace to see the event */
	wake_lock_timeout(&wm1811->jackdet_wake_lock, 5 * HZ);

	/* Either nothing present or just starting detection */
	if (!(status & WM8958_MICD_STS)) {
		if (!wm8994->jackdet) {
			/* If nothing present then clear our statuses */
			dev_dbg(wm1811->codec->dev, "Detected open circuit\n");
			wm8994->jack_mic = false;
			wm8994->mic_detecting = true;

			tab3_micd_set_rate(wm1811->codec);

			snd_soc_jack_report(wm8994->micdet[0].jack, 0,
					    wm8994->btn_mask |
					    SND_JACK_HEADSET);
		}
		/*ToDo*/
		/*return;*/
	}

	/* If the measurement is showing a high impedence we've got a
	 * microphone.
	 */
	if (wm8994->mic_detecting && (status & 0x400)) {
		dev_info(wm1811->codec->dev, "Detected microphone\n");

		wm8994->mic_detecting = false;
		wm8994->jack_mic = true;

		tab3_micd_set_rate(wm1811->codec);

		snd_soc_jack_report(wm8994->micdet[0].jack, SND_JACK_HEADSET,
				    SND_JACK_HEADSET);
	}

	if (wm8994->mic_detecting && status & 0x4) {
		dev_info(wm1811->codec->dev, "Detected headphone\n");
		wm8994->mic_detecting = false;

		tab3_micd_set_rate(wm1811->codec);

		snd_soc_jack_report(wm8994->micdet[0].jack, SND_JACK_HEADPHONE,
				    SND_JACK_HEADSET);

		/* If we have jackdet that will detect removal */
		if (wm8994->jackdet) {
			mutex_lock(&wm8994->accdet_lock);

			/* mic-detect is no longer needed for a headphone */
			snd_soc_update_bits(wm1811->codec, WM8958_MIC_DETECT_1,
					    WM8958_MICD_ENA, 0);

			if (wm8994->active_refcount) {
				snd_soc_update_bits(wm1811->codec,
					WM8994_ANTIPOP_2,
					WM1811_JACKDET_MODE_MASK,
					WM1811_JACKDET_MODE_AUDIO);
			}

			mutex_unlock(&wm8994->accdet_lock);

			if (wm8994->pdata->jd_ext_cap) {
				mutex_lock(&wm1811->codec->mutex);
				snd_soc_dapm_disable_pin(&wm1811->codec->dapm,
							 "MICBIAS2");
				snd_soc_dapm_sync(&wm1811->codec->dapm);
				mutex_unlock(&wm1811->codec->mutex);
			}
		}
	}
}
/*
 * mmc_rescan() - delayed-work card (re)probe, board variant with a
 * dedicated card-detect GPIO and a WLAN quirk on host index 1.
 *
 * When card-detect reports "no card", any registered bus handler is torn
 * down immediately; otherwise the registered handler re-detects the card
 * (except on host 1 while q_wlan_flag is set -- that host carries WLAN).
 * If no handler is registered afterwards, probes SDIO, then SD, then MMC.
 */
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	u32 ocr;
	int err;
	int extend_wakelock = 0;

	mmc_bus_get(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0) {
		/* card-detect says "no card": tear down any registered bus */
		if (host->bus_ops && !host->bus_dead) {
			if (host->bus_ops->remove)
				host->bus_ops->remove(host);
			mmc_claim_host(host);
			mmc_detach_bus(host);
			mmc_release_host(host);
		}
		extend_wakelock = 1;
	} else {
		if ((host->bus_ops != NULL) && host->bus_ops->detect &&
		    !host->bus_dead) {
			if (host->index == 1) {
				/* host 1 carries WLAN; skip detect while
				 * q_wlan_flag is set */
				if (!q_wlan_flag)
					host->bus_ops->detect(host);
				printk("[MMC]> host name %s, q_wlan_flag: %d\n",
					mmc_hostname(host), q_wlan_flag);
			} else {
				host->bus_ops->detect(host);
				printk("[MMC]> host name %s, q_wlan_flag: %d\n",
					mmc_hostname(host), q_wlan_flag);
			}
		}
		/* removed card -> bus marked dead; hold the wakelock so
		 * userspace can respond */
		if (host->bus_dead)
			extend_wakelock = 1;
	}
#if 0
	/* if there is a card registered, check whether it is still present */
	if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead)
		host->bus_ops->detect(host);

	/* If the card was removed the bus will be marked
	 * as dead - extend the wakelock so userspace
	 * can respond */
	if (host->bus_dead)
		extend_wakelock = 1;
#endif
	mmc_bus_put(host);

	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/* detect a newly inserted card */

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

	mmc_claim_host(host);

	mmc_power_up(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/*
	 * First we search for SDIO...
	 */
	err = mmc_send_io_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sdio(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...then normal SD...
	 */
	err = mmc_send_app_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sd(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...and finally MMC.
	 */
	err = mmc_send_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_mmc(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/* nothing answered: release the host and power the slot down */
	mmc_release_host(host);
	mmc_power_off(host);

out:
	if (extend_wakelock)
		wake_lock_timeout(&mmc_delayed_work_wake_lock, HZ / 2);
	else
		wake_unlock(&mmc_delayed_work_wake_lock);

	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);

	/* board hooks: report presence and re-arm the card-detect IRQ */
	if (host->ops->notify_card_present) {
		if (host->card) {
			host->ops->notify_card_present(host,
				mmc_card_present(host->card));
		} else {
			host->ops->notify_card_present(host, 0);
		}
	}
	if (host->ops->enable_cd_irq) {
		host->ops->enable_cd_irq(host);
	}
}
/*
 * smdhsic_pm_resume_AP() - bring the HSIC link out of runtime suspend
 * before an AP-initiated transfer.
 *
 * Loops on the device's runtime-PM status:
 *  - RPM_SUSPENDED:  request a slave wakeup and retry (bounded);
 *  - RPM_SUSPENDING/RPM_RESUMING: poll until the transition settles (bounded);
 *  - RPM_ACTIVE:     success.
 *
 * Returns 0 on success, -ENODEV if no USB device is bound, -EAGAIN while
 * system suspend is in progress, -ETIMEDOUT when the retry budgets are
 * exhausted (after requesting link recovery), -EIO on an unexpected state.
 */
int smdhsic_pm_resume_AP(void)
{
	int r;
	int expire = 500;		/* slave-wakeup completion timeout, ms */
	int pending_spin = 20;		/* max polls in SUSPENDING/RESUMING */
	int suspended_spin = 20;	/* max wake retries from SUSPENDED */
	struct completion done;
	struct device *dev;

	if (!g_usbdev.usbdev) {
		smdctl_request_connection_recover(true);
		return -ENODEV;
	}

	dev = &g_usbdev.usbdev->dev;

retry:
	/* Hold Wake lock, when TX...*/
	wake_lock_timeout(&g_usbdev.tx_wlock, msecs_to_jiffies(500));

	/* dpm_suspending can be set during RPM STATUS changing */
	if (g_usbdev.hsic && g_usbdev.hsic->dpm_suspending)
		return -EAGAIN;

	switch (dev->power.runtime_status) {
	case RPM_SUSPENDED:
		pr_debug("%s: HSIC suspended\n", __func__);
		init_completion(&done);
		r = smdctl_request_slave_wakeup(&done);
		if (r <= 0 && !wait_for_completion_timeout(&done,
						msecs_to_jiffies(expire))) {
			pr_err("%s: HSIC Resume timeout %d\n", __func__,
				expire);
			/* clear the pending wakeup request; a second
			 * failure counts toward connection recovery */
			r = smdctl_request_slave_wakeup(NULL);
			if (r <= 0) {
				if (g_usbdev.hsic &&
				    g_usbdev.hsic->resume_failcnt++ > 5) {
					g_usbdev.hsic->resume_failcnt = 0;
					smdctl_request_connection_recover(true);
				}
				return -ETIMEDOUT;
			}
		}
		if (suspended_spin-- <= 0) {
			if (g_usbdev.hsic &&
			    g_usbdev.hsic->resume_failcnt++ > 5) {
				g_usbdev.hsic->resume_failcnt = 0;
				smdctl_request_connection_recover(true);
			}
			return -ETIMEDOUT;
		}
		msleep(100);
		goto retry;

	case RPM_SUSPENDING:
	case RPM_RESUMING:
		pr_debug("%s: HSIC status : %d spin: %d\n", __func__,
			dev->power.runtime_status, pending_spin);
		if (pending_spin == 0) {
			pr_err("%s: Modem runtime pm timeout\n", __func__);
			if (g_usbdev.hsic &&
			    g_usbdev.hsic->resume_failcnt++ > 5) {
				g_usbdev.hsic->resume_failcnt = 0;
				smdctl_request_connection_recover(true);
			}
			smdctl_reenumeration_control();
			return -ETIMEDOUT;
		}
		pending_spin--;
		usleep_range(5000, 10000);
		goto retry;

	case RPM_ACTIVE:
		if (g_usbdev.hsic)
			g_usbdev.hsic->resume_failcnt = 0;
		/* For under autosuspend timer */
		wake_lock_timeout(&g_usbdev.tx_wlock,
				msecs_to_jiffies(100));
		break;

	default:
		return -EIO;
	}
	return 0;
}
/*
 * ccmni_md_state_callback() - react to modem state transitions for the
 * CCMNI network interface mapped to @rx_ch.
 *
 * READY resets sequence counters and raises the carrier; EXCEPTION/RESET
 * drop the carrier; RX_IRQ kicks NAPI (holding the wakelock for a second);
 * TX_IRQ/TX_FULL manage the netdev queue around hardware FIFO pressure.
 */
static void ccmni_md_state_callback(int md_id, int rx_ch, MD_STATE state)
{
	ccmni_ctl_block_t *ctl = ccmni_ctl_blk[md_id];
	ccmni_instance_t *inst = NULL;
	int idx = 0;

	/* guard: control block must exist for this modem */
	if (unlikely(ctl == NULL)) {
		CCMNI_ERR_MSG(md_id, "invalid ccmni ctrl struct when rx_ch=%d md_sta=%d\n",
			rx_ch, state);
		return;
	}

	idx = get_ccmni_idx_from_ch(md_id, rx_ch);
	if (unlikely(idx < 0)) {
		CCMNI_ERR_MSG(md_id, "get error ccmni index when md_sta=%d\n", state);
		return;
	}

	inst = ctl->ccmni_inst[idx];

	/* log transitions other than the (noisy) TX events while in use */
	if ((state != TX_IRQ) && (state != TX_FULL) &&
	    (atomic_read(&inst->usage) > 0)) {
		CCMNI_INF_MSG(md_id, "md_state_cb: CCMNI%d, md_sta=%d, usage=%d\n",
			idx, state, atomic_read(&inst->usage));
	}

	if (state == READY) {
		netif_carrier_on(inst->dev);
		inst->tx_seq_num[0] = 0;
		inst->tx_seq_num[1] = 0;
		inst->rx_seq_num = 0;
	} else if (state == EXCEPTION || state == RESET) {
		netif_carrier_off(inst->dev);
	} else if (state == RX_IRQ) {
		mod_timer(&inst->timer, jiffies + HZ);
		napi_schedule(&inst->napi);
		wake_lock_timeout(&ctl->ccmni_wakelock, HZ);
	} else if (state == TX_IRQ) {
		if (netif_running(inst->dev) &&
		    netif_queue_stopped(inst->dev) &&
		    atomic_read(&inst->usage) > 0) {
			netif_wake_queue(inst->dev);
			CCMNI_INF_MSG(md_id, "md_state_cb: CCMNI%d, md_sta=TX_IRQ, usage=%d\n",
				idx, atomic_read(&inst->usage));
		}
	} else if (state == TX_FULL) {
		netif_stop_queue(inst->dev);
		CCMNI_INF_MSG(md_id, "md_state_cb: CCMNI%d, md_sta=TX_FULL, usage=%d\n",
			idx, atomic_read(&inst->usage));
	}
	/* all other states are intentionally ignored */
}
/*
 * sdchg_exynos7420_cs_use_monitor() - periodic monitor deciding whether
 * the self-discharging current source should be engaged.
 *
 * @arg:          struct sec_battery_info * (battery driver context)
 * @curr_sec:     current time in seconds (for poll-interval bookkeeping)
 * @skip_monitor: when true, read the battery condition directly from the
 *                fuel gauge instead of the cached battery fields
 *
 * Enters SDCHG_STATE_SET when temperature and battery condition exceed the
 * start thresholds (taking a wakelock), leaves it below the end thresholds,
 * and re-arms the polling alarm according to the transition taken.
 * State is applied via current_source_set(); the wakelock is released with
 * a 10s grace period once the state machine returns to NONE.
 */
static void sdchg_exynos7420_cs_use_monitor(void *arg,
			__kernel_time_t curr_sec, bool skip_monitor)
{
	struct sdchg_info_nochip_t *info = sdchg_info->nochip;
	struct sec_battery_info *battery = (struct sec_battery_info *)arg;
	int temperature = 0;
	static int battcond = 0;	/* SOC build: current (mA); else voltage */
	bool need_set_state = false;
	bool need_set_alarm = false;
	int set_alarm_time = 0;
	bool runned_by_sdchg_poll;
#ifdef SDCHG_STATE_MACHINE_RETRY_AT_END_COND
	static bool state_machine_retry = false;
#endif

	/******************************************/
	runned_by_sdchg_poll = sdchg_check_polling_time(curr_sec);

	temperature = battery->temperature;

	if (skip_monitor) {
#ifdef SDCHG_CHECK_TYPE_SOC
		/* NOTE(review): `value` is not declared in this function;
		 * presumably a file-scope power_supply_propval -- verify */
		value.intval = SEC_BATTERY_CURRENT_MA;
		psy_do_property(battery->pdata->fuelgauge_name, get,
			POWER_SUPPLY_PROP_CURRENT_NOW, value);
		battcond = value.intval;
#else
		battcond = battery->voltage_now;
#endif
	} else {
#ifdef SDCHG_CHECK_TYPE_SOC
		battcond = (short)battery->current_now;
#else
		battcond = (short)battery->voltage_avg;
#endif
	}

	/******************************************/
	if ( temperature >= sdchg_info->temp_start
		&& battcond >= SDCHG_BATTCOND_START) {
		/******************************************/
		/* hold a wakelock while discharging is required */
		if (!info->wake_lock_set) {
			wake_lock(&info->wake_lock);
			info->wake_lock_set = true;
		}
		/******************************************/
		info->need_state = SDCHG_STATE_SET;
	}
	/******************************************/
	else if (temperature <= sdchg_info->temp_end
		|| battcond <= SDCHG_BATTCOND_END) {
		info->need_state = SDCHG_STATE_NONE;
	}
	/******************************************/
	else {
		/* hysteresis band: keep discharging if already active */
		if (info->need_state != SDCHG_STATE_NONE) {
			info->need_state = SDCHG_STATE_SET;
		}
	}
	/******************************************/

	/****************************************/
	if (info->display_on) {
		if (info->need_state == SDCHG_STATE_SET)
			info->need_state = SDCHG_STATE_SET_DISPLAY_ON;
	}

#ifdef SDCHG_STATE_MACHINE_RETRY_AT_END_COND
	if (info->need_state != SDCHG_STATE_NONE)
		state_machine_retry = false;
#endif

	/****************************************/
	if (info->set_state != info->need_state) {
		need_set_state = true;
		if (info->set_state == SDCHG_STATE_NONE) {
			/* none -> discharging */
			{
				//need_set_alarm = info->state_machine_run = true;
				//set_alarm_time = SDCHG_DISCHARGING_DELAY;
				need_set_alarm = false;
				/* in discharging, follow the original monitor
				 * work cadence */
				info->state_machine_run = true;
			}
		} else {
			/* prev : discharging */
			if (info->need_state == SDCHG_STATE_NONE) {
				/* discharging -> none */
				if (sdchg_ta_attach(battery)
					|| battcond >= SDCHG_BATTCOND_START) {
					need_set_alarm = info->state_machine_run = true;
					if (temperature < SDCHG_TEMP_FOR_BATT_CHECK_DELAY)
						set_alarm_time = SDCHG_BATT_CHECK_DELAY_NORMAL;
					else
						set_alarm_time = SDCHG_BATT_CHECK_DELAY_IMMINENT;
#ifdef SDCHG_STATE_MACHINE_RETRY_AT_END_COND
					state_machine_retry = false;
#endif
				} else {
#ifdef SDCHG_STATE_MACHINE_RETRY_AT_END_COND
#ifdef SDCHG_SELF_TEST
					need_set_alarm = info->state_machine_run = true;
					if (temperature < SDCHG_TEMP_FOR_BATT_CHECK_DELAY)
						set_alarm_time = SDCHG_BATT_CHECK_DELAY_NORMAL;
					else
						set_alarm_time = SDCHG_BATT_CHECK_DELAY_IMMINENT;
#else
					if (state_machine_retry) {
						if (runned_by_sdchg_poll) {
							need_set_alarm = info->state_machine_run = false;
							state_machine_retry = false;
						} else {
							need_set_alarm = info->state_machine_run = true;
							if (temperature < SDCHG_TEMP_FOR_BATT_CHECK_DELAY)
								set_alarm_time = SDCHG_BATT_CHECK_DELAY_NORMAL;
							else
								set_alarm_time = SDCHG_BATT_CHECK_DELAY_IMMINENT;
						}
					} else {
						need_set_alarm = info->state_machine_run = true;
						if (temperature < SDCHG_TEMP_FOR_BATT_CHECK_DELAY)
							set_alarm_time = SDCHG_BATT_CHECK_DELAY_NORMAL;
						else
							set_alarm_time = SDCHG_BATT_CHECK_DELAY_IMMINENT;
						state_machine_retry = true;
					}
#endif // #ifdef SDCHG_SELF_TEST
#else
					need_set_alarm = info->state_machine_run = false;
#endif // #ifdef SDCHG_STATE_MACHINE_RETRY_AT_END_COND
				}
			} else {
				/* discharging -> discharging */
				//need_set_alarm = info->state_machine_run = true;
				//set_alarm_time = SDCHG_DISCHARGING_DELAY;
				need_set_alarm = false;
				/* in discharging, follow the original monitor
				 * work cadence */
				info->state_machine_run = true;
			}
		}
	}
	/****************************************/
	else {
		//KEEP_GOING:
		need_set_state = false;
		if (info->need_state == SDCHG_STATE_NONE) {
			/* prev : none */
			if (sdchg_ta_attach(battery)
				|| battcond >= SDCHG_BATTCOND_START) {
				need_set_alarm = info->state_machine_run = true;
				if (temperature < SDCHG_TEMP_FOR_BATT_CHECK_DELAY)
					set_alarm_time = SDCHG_BATT_CHECK_DELAY_NORMAL;
				else
					set_alarm_time = SDCHG_BATT_CHECK_DELAY_IMMINENT;
#ifdef SDCHG_STATE_MACHINE_RETRY_AT_END_COND
				state_machine_retry = false;
#endif
			} else {
#ifdef SDCHG_STATE_MACHINE_RETRY_AT_END_COND
#ifdef SDCHG_SELF_TEST
				need_set_alarm = info->state_machine_run = true;
				if (temperature < SDCHG_TEMP_FOR_BATT_CHECK_DELAY)
					set_alarm_time = SDCHG_BATT_CHECK_DELAY_NORMAL;
				else
					set_alarm_time = SDCHG_BATT_CHECK_DELAY_IMMINENT;
#else
				if (state_machine_retry) {
					if (runned_by_sdchg_poll) {
						need_set_alarm = info->state_machine_run = false;
						state_machine_retry = false;
					} else {
						need_set_alarm = info->state_machine_run = true;
						if (temperature < SDCHG_TEMP_FOR_BATT_CHECK_DELAY)
							set_alarm_time = SDCHG_BATT_CHECK_DELAY_NORMAL;
						else
							set_alarm_time = SDCHG_BATT_CHECK_DELAY_IMMINENT;
					}
				} else {
					need_set_alarm = info->state_machine_run = true;
					if (temperature < SDCHG_TEMP_FOR_BATT_CHECK_DELAY)
						set_alarm_time = SDCHG_BATT_CHECK_DELAY_NORMAL;
					else
						set_alarm_time = SDCHG_BATT_CHECK_DELAY_IMMINENT;
					state_machine_retry = true;
				}
#endif // #ifdef SDCHG_SELF_TEST
#else
				need_set_alarm = info->state_machine_run = false;
#endif // #ifdef SDCHG_STATE_MACHINE_RETRY_AT_END_COND
			}
		} else {
			/* prev : discharging */
			//need_set_alarm = info->state_machine_run = true;
			//set_alarm_time = SDCHG_DISCHARGING_DELAY;
			need_set_alarm = false;
			/* in discharging, follow the original monitor work
			 * cadence */
			info->state_machine_run = true;
		}
	}

	/****************************************/
	if (need_set_state) {
		current_source_set(info);
	}

	if (need_set_alarm)
		sdchg_set_polling_time(set_alarm_time);
	else
		sdchg_set_polling_time(0);

#ifdef SDCHG_CHECK_TYPE_SOC
	pr_info("[SDCHG][%s] soc : %d , temp : %d, state : %s\n",
		__func__, battcond, temperature,
		sdchg_state_str[info->set_state]);
#else
	pr_info("[SDCHG][%s] volt : %d , temp : %d, state : %s\n",
		__func__, battcond, temperature,
		sdchg_state_str[info->set_state]);
#endif

	if (info->set_state == SDCHG_STATE_NONE) {
		/* done discharging: keep the system up 10 more seconds,
		 * then drop our hold on the wakelock */
		if (info->wake_lock_set) {
			wake_lock_timeout(&info->wake_lock, HZ * 10);
			wake_unlock(&info->wake_lock);
			info->wake_lock_set = false;
		}
	}

	return;
}
static int hym8563_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) { struct i2c_client *client = to_i2c_client(dev); struct hym8563 *hym8563 = i2c_get_clientdata(client); struct rtc_time now, *tm = &alarm->time; u8 regs[4] = { 0, }; u8 mon_day; pr_debug("%4d-%02d-%02d(%d) %02d:%02d:%02d enabled %d\n", 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday, tm->tm_wday, tm->tm_hour, tm->tm_min, tm->tm_sec, alarm->enabled); hym8563_read_datetime(client, &now); if (alarm->enabled && now.tm_year == tm->tm_year && now.tm_mon == tm->tm_mon && now.tm_mday == tm->tm_mday && now.tm_hour == tm->tm_hour && now.tm_min == tm->tm_min && tm->tm_sec > now.tm_sec) { long timeout = tm->tm_sec - now.tm_sec + 1; pr_info("stay awake %lds\n", timeout); wake_lock_timeout(&hym8563->wake_lock, timeout * HZ); } mutex_lock(&hym8563->mutex); hym8563->alarm = *alarm; regs[0] = 0x0; hym8563_i2c_set_regs(client, RTC_CTL2, regs, 1); mon_day = rtc_month_days(tm->tm_mon, tm->tm_year + 1900); hym8563_i2c_read_regs(client, RTC_A_MIN, regs, 4); if (tm->tm_min >= 60 || tm->tm_min < 0) //set min regs[0x00] = bin2bcd(0x00) & 0x7f; else regs[0x00] = bin2bcd(tm->tm_min) & 0x7f; if (tm->tm_hour >= 24 || tm->tm_hour < 0) //set hour regs[0x01] = bin2bcd(0x00) & 0x7f; else regs[0x01] = bin2bcd(tm->tm_hour) & 0x7f; regs[0x03] = bin2bcd (tm->tm_wday) & 0x7f; /* if the input month day is bigger than the biggest day of this month, set the biggest day */ if (tm->tm_mday > mon_day) regs[0x02] = bin2bcd(mon_day) & 0x7f; else if (tm->tm_mday > 0) regs[0x02] = bin2bcd(tm->tm_mday) & 0x7f; else if (tm->tm_mday <= 0) regs[0x02] = bin2bcd(0x01) & 0x7f; hym8563_i2c_set_regs(client, RTC_A_MIN, regs, 4); hym8563_i2c_read_regs(client, RTC_A_MIN, regs, 4); hym8563_i2c_read_regs(client, RTC_CTL2, regs, 1); if (alarm->enabled == 1) regs[0] |= AIE; else regs[0] &= 0x0; hym8563_i2c_set_regs(client, RTC_CTL2, regs, 1); hym8563_i2c_read_regs(client, RTC_CTL2, regs, 1); mutex_unlock(&hym8563->mutex); return 0; }
/* Called in soft-irq context */ static void smd_net_data_handler(unsigned long arg) { struct net_device *dev = (struct net_device *) arg; struct rmnet_private *p = netdev_priv(dev); struct sk_buff *skb; void *ptr = 0; int sz; u32 opmode = p->operation_mode; unsigned long flags; for (;;) { sz = smd_cur_packet_size(p->ch); if (sz == 0) break; if (smd_read_avail(p->ch) < sz) break; if (RMNET_IS_MODE_IP(opmode) ? (sz > dev->mtu) : (sz > (dev->mtu + ETH_HLEN))) { pr_err("rmnet_recv() discarding %d len (%d mtu)\n", sz, RMNET_IS_MODE_IP(opmode) ? dev->mtu : (dev->mtu + ETH_HLEN)); ptr = 0; } else { skb = dev_alloc_skb(sz + NET_IP_ALIGN); if (skb == NULL) { pr_err("rmnet_recv() cannot allocate skb\n"); } else { skb->dev = dev; skb_reserve(skb, NET_IP_ALIGN); ptr = skb_put(skb, sz); wake_lock_timeout(&p->wake_lock, HZ / 2); if (smd_read(p->ch, ptr, sz) != sz) { pr_err("rmnet_recv() smd lied about avail?!"); ptr = 0; dev_kfree_skb_irq(skb); } else { /* Handle Rx frame format */ spin_lock_irqsave(&p->lock, flags); opmode = p->operation_mode; spin_unlock_irqrestore(&p->lock, flags); if (RMNET_IS_MODE_IP(opmode)) { /* Driver in IP mode */ skb->protocol = rmnet_ip_type_trans(skb, dev); } else { /* Driver in Ethernet mode */ skb->protocol = eth_type_trans(skb, dev); } if (RMNET_IS_MODE_IP(opmode) || count_this_packet(ptr, skb->len)) { #ifdef CONFIG_MSM_RMNET_DEBUG p->wakeups_rcv += rmnet_cause_wakeup(p); #endif p->stats.rx_packets++; p->stats.rx_bytes += skb->len; } netif_rx(skb); } continue; } } if (smd_read(p->ch, ptr, sz) != sz) pr_err("rmnet_recv() smd lied about avail?!"); } }
/*
 * mmc_rescan() - delayed-work card (re)probe, variant with a rescan-disable
 * gate and SDIO reset.
 *
 * Bails out early (under host->lock) while rescans are administratively
 * disabled.  A registered bus whose card-detect GPIO reports "no card" is
 * torn down immediately; otherwise the handler re-detects.  If no handler
 * remains, powers up, resets SDIO state and probes SDIO, SD, then MMC.
 */
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	u32 ocr;
	int err;
	unsigned long flags;
	int extend_wakelock = 0;

	spin_lock_irqsave(&host->lock, flags);

	if (host->rescan_disable) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_get(host);

	/* if there is a card registered, check whether it is still present */
	if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead) {
		if(host->ops->get_cd && host->ops->get_cd(host) == 0) {
			/* card-detect says it's gone: tear the bus down now */
			if(host->bus_ops->remove)
				host->bus_ops->remove(host);

			mmc_claim_host(host);
			mmc_detach_bus(host);
			mmc_release_host(host);
		}
		else
			host->bus_ops->detect(host);
	}

	/* If the card was removed the bus will be marked
	 * as dead - extend the wakelock so userspace
	 * can respond */
	if (host->bus_dead)
		extend_wakelock = 1;

	mmc_bus_put(host);

	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/* detect a newly inserted card */

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

	mmc_claim_host(host);

	mmc_power_up(host);
	/* clear any stale SDIO state before identification */
	sdio_reset(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/*
	 * First we search for SDIO...
	 */
	err = mmc_send_io_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sdio(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...then normal SD...
	 */
	err = mmc_send_app_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sd(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...and finally MMC.
	 */
	err = mmc_send_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_mmc(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/* nothing answered: release the host and power the slot down */
	mmc_release_host(host);
	mmc_power_off(host);

out:
	if (extend_wakelock)
		wake_lock_timeout(&mmc_delayed_work_wake_lock, HZ / 2);
	else
		wake_unlock(&mmc_delayed_work_wake_lock);

	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}
/*
 * pm8xxx_led_current_set_flagged() - drive a PM8xxx current-sink LED,
 * honoring the virtual-key hold flag and the "breathing" LUT mode.
 *
 * @blink: non-zero selects the looping blink LUT; zero selects the
 *         one-shot fade-in LUT (breathing LEDs only).
 *
 * Non-breathing LEDs are driven as a plain PWM (full duty on, disabled
 * off).  On turn-off of a breathing LED, the fade-out half of the LUT is
 * played and a delayed work clears the sink current afterwards; a 2s
 * wakelock keeps the system up long enough for the fade to complete.
 * NOTE(review): "duites_size" looks like a typo for "duties_size" in the
 * project header; left as-is since it is declared elsewhere.
 */
static void pm8xxx_led_current_set_flagged(struct led_classdev *led_cdev,
					   enum led_brightness brightness,
					   int blink)
{
	struct pm8xxx_led_data *led = container_of(led_cdev,
						struct pm8xxx_led_data, cdev);
	int rc, offset;
	u8 level;
	int *pduties;

	LED_INFO("%s, bank:%d, brightness:%d\n", __func__, led->bank, brightness);
	cancel_delayed_work_sync(&led->fade_delayed_work);

	virtual_key_state = brightness;
	/* while the virtual-key hold flag is set, the key handler owns the LED */
	if (flag_hold_virtual_key == 1) {
		LED_INFO("%s, key control \n", __func__);
		return;
	}

	if(brightness) {
		/* program the sink current for this LED bank */
		level = (led->out_current << PM8XXX_DRV_LED_CTRL_SHIFT) &
			PM8XXX_DRV_LED_CTRL_MASK;

		offset = PM8XXX_LED_OFFSET(led->id);

		led->reg &= ~PM8XXX_DRV_LED_CTRL_MASK;
		led->reg |= level;

		rc = pm8xxx_writeb(led->dev->parent,
				SSBI_REG_ADDR_LED_CTRL(offset), led->reg);
		if (rc)
			LED_ERR("%s can't set (%d) led value rc=%d\n",
				__func__, led->id, rc);

		if (led->function_flags & LED_BRETH_FUNCTION) {
			if (blink == 0) {
				buttons_led_is_on = 1;
				// no blink needed
				pduties = led->duties;
				pm8xxx_pwm_lut_config(led->pwm_led,
					led->period_us,
					pduties,
					led->duty_time_ms,
					led->start_index,
					led->duites_size,
					0, 0,
					led->lut_flag);
			} else {
				pduties = led->duties;
				// LUT_LOOP for blinking
				pm8xxx_pwm_lut_config(led->pwm_led,
					led->period_us,
					pduties,
					led->duty_time_ms, // slower, 2x
					led->start_index,
					led->duites_size * 8, // 16 duty entries -> original size * 2, + 6 * 8 zeroes for pause
					0, 0,
					PM_PWM_LUT_LOOP | PM_PWM_LUT_PAUSE_HI_EN);
			}
			/* toggle to restart the LUT from the beginning */
			pm8xxx_pwm_lut_enable(led->pwm_led, 0);
			pm8xxx_pwm_lut_enable(led->pwm_led, 1);
		} else {
			/* plain PWM: full-scale duty */
			pwm_config(led->pwm_led, 64000, 64000);
			pwm_enable(led->pwm_led);
		}
	} else {
		if (led->function_flags & LED_BRETH_FUNCTION) {
			buttons_led_is_on = 0;
			/* keep the system awake while the fade-out plays */
			wake_lock_timeout(&pmic_led_wake_lock, HZ*2);
			/* second half of the duty table is the fade-out ramp */
			pduties = led->duties + led->duites_size;
			pm8xxx_pwm_lut_config(led->pwm_led,
				led->period_us,
				pduties,
				led->duty_time_ms,
				led->start_index,
				led->duites_size,
				0, 0,
				led->lut_flag);
			pm8xxx_pwm_lut_enable(led->pwm_led, 0);
			pm8xxx_pwm_lut_enable(led->pwm_led, 1);
			/* clear the sink current once the ramp has finished */
			queue_delayed_work(g_led_work_queue,
				&led->fade_delayed_work,
				msecs_to_jiffies(led->duty_time_ms*led->duites_size));
		} else {
			pwm_disable(led->pwm_led);
			level = (0 << PM8XXX_DRV_LED_CTRL_SHIFT) &
				PM8XXX_DRV_LED_CTRL_MASK;

			offset = PM8XXX_LED_OFFSET(led->id);

			led->reg &= ~PM8XXX_DRV_LED_CTRL_MASK;
			led->reg |= level;

			rc = pm8xxx_writeb(led->dev->parent,
					SSBI_REG_ADDR_LED_CTRL(offset), led->reg);
			if (rc)
				LED_ERR("%s can't set (%d) led value rc=%d\n",
					__func__, led->id, rc);
		}
	}
}
/*
 * mdm6600_host_wake_irq_handler() - modem host-wake IRQ.
 *
 * The modem raised the host-wake line; hold a one-second wakelock so the
 * RIL gets scheduled before the system can suspend again.
 */
static irqreturn_t mdm6600_host_wake_irq_handler(int irq, void *ptr)
{
	wake_lock_timeout(&mdm6600_host_wakelock, HZ);

	return IRQ_HANDLED;
}
/*give the packet to TCP/IP*/ static void xmd_trans_packet( struct net_device *dev, int type, void *buf, int sz) { struct rmnet_private *p = netdev_priv(dev); struct sk_buff *skb; void *ptr = NULL; sz += RMNET_ETH_HDR_SIZE; #if defined (RMNET_CRITICAL_DEBUG) dynadbg_module(DYNADBG_CRIT|DYNADBG_TX,"\nRMNET: %d<\n",sz); // printk("\nRMNET: %d<\n",sz); #endif if (sz > (RMNET_MTU_SIZE + RMNET_ETH_HDR_SIZE)) { #if defined (RMNET_ERR) dynadbg_module(DYNADBG_WARN|DYNADBG_TX,"xmd_trans_packet() discarding %d pkt len\n", sz); // printk("xmd_trans_packet() discarding %d pkt len\n", sz); #endif ptr = 0; return; } else { skb = dev_alloc_skb(sz + NET_IP_ALIGN); if (skb == NULL) { #if defined (RMNET_ERR) dynadbg_module(DYNADBG_WARN|DYNADBG_TX,"xmd_trans_packet() cannot allocate skb\n"); // printk("xmd_trans_packet() cannot allocate skb\n"); #endif return; } else { skb->dev = dev; skb_reserve(skb, NET_IP_ALIGN); ptr = skb_put(skb, sz); wake_lock_timeout(&p->wake_lock, HZ / 2); /* adding ethernet header */ { /* struct ethhdr eth_hdr = {0xB6,0x91,0x24,0xa8,0x14,0x72,0xb6, 0x91,0x24,0xa8,0x14,0x72,0x08,0x0};*/ char temp[] = {0xB6,0x91,0x24,0xa8,0x14,0x72,0xb6,0x91,0x24, 0xa8,0x14,0x72,0x08,0x0}; struct ethhdr *eth_hdr = (struct ethhdr *) temp; if (type == RMNET_IPV6_VER) { eth_hdr->h_proto = 0x08DD; eth_hdr->h_proto = htons(eth_hdr->h_proto); } memcpy((void *)eth_hdr->h_dest, (void*)dev->dev_addr, sizeof(eth_hdr->h_dest)); memcpy((void *)ptr, (void *)eth_hdr, sizeof(struct ethhdr)); } memcpy(ptr + RMNET_ETH_HDR_SIZE, buf, sz - RMNET_ETH_HDR_SIZE); skb->protocol = eth_type_trans(skb, dev); if (count_this_packet(ptr, skb->len)) { #ifdef CONFIG_MSM_RMNET_DEBUG p->wakeups_rcv += rmnet_cause_wakeup(p); #endif p->stats.rx_packets++; p->stats.rx_bytes += skb->len; } netif_rx(skb); wake_unlock(&p->wake_lock); } } }
/*
 * ts27010_uart_retran_worker() - retransmit timed-out frames from the AP
 * sliding window.
 *
 * Collects the indexes whose retransmit timers expired, then, under the
 * slide-window lock, resends each frame still inside the window (restarting
 * its timer and bumping its retry count; MAX_RETRAN_TIMES failures are
 * logged as fatal).  Timers for indexes that have since left the window are
 * stopped.  A UART send failure aborts the whole pass.
 */
static void ts27010_uart_retran_worker(struct work_struct *work)
{
	int ret;
	int j;
	u8 para[SLIDE_WINDOWS_SIZE_AP];	/* per-index "timer expired" flags */
	struct ts27010_retran_info_t *retran_info;
	FUNC_ENTER();

	memset(para, 0, sizeof(u8) * SLIDE_WINDOWS_SIZE_AP);
	/* get timeout indexs and clean counter */
	ts27010_get_timer_para(s_timer_para, para);

	ts27010_slidewindow_lock(s_slide_window);
	for (j = 0; j < SLIDE_WINDOWS_SIZE_AP; j++) {
		if (para[j] && ts27010_slidewindow_is_idx_in(
			s_slide_window, j)) {
			/* need retransfer j */
			retran_info = ts27010_slidewindow_peek(
				s_slide_window, j);

			mux_print(MSG_INFO, "retransfering... index=%d, "
				"sn = %x, length=%d\n",
				j, ts27010_get_retran_sn(retran_info),
				ts27010_get_retran_len(retran_info));

			ret = ts27010_uart_driver_send(
				ts27010_get_retran_data(retran_info),
				ts27010_get_retran_len(retran_info),
				&s_mux_resend_lock);
			/* re-arm the timer whether or not the send worked */
			ts27010_mod_retran_timer(retran_info);
			if (!ret) {
				/* this frame retransfered */
				mux_print(MSG_INFO,
					"frame %x retransfered successful, "
					"length=%d\n",
					ts27010_get_retran_sn(retran_info),
					ts27010_get_retran_len(retran_info));
#ifdef CONFIG_WAKELOCK
				/*
				 * Re-transfer happened,
				 * so block system 1s for next re-send.
				 */
				wake_lock_timeout(&s_mux_resend_lock, HZ);
#endif
				if (ts27010_inc_retran_count(retran_info)
					== MAX_RETRAN_TIMES) {
					/* retran 10 times, trigger panic */
					mux_print(MSG_ERROR,
						"retrans frame %x(index %d) "
						"failed more than 10 times!\n",
						ts27010_get_retran_sn(
							retran_info), j);
					mux_uart_hexdump(MSG_ERROR,
						"dump frame",
						__func__, __LINE__,
						ts27010_get_retran_data(
							retran_info),
						ts27010_get_retran_len(
							retran_info));
					/* TODO: trigger panic or reset BP? */
				}
			} else {
				/* UART is wedged: give up this pass */
				mux_print(MSG_ERROR,
					"retran interrupted because of "
					"ipc driver error\n");
				goto EXIT;
			}
		} else if (para[j]) {
			/*
			 * since j is not in slide window,
			 * should stop this timer
			 */
			mux_print(MSG_INFO,
				"timer index %d is out of slide window "
				"head: %d, tail %d\n",
				j, ts27010_slidewindow_head(s_slide_window),
				ts27010_slidewindow_tail(s_slide_window));
			ts27010_stop_retran_timer(
				ts27010_slidewindow_peek(s_slide_window, j));
			ts27010_clear_timer_para(s_timer_para, j);
		}
	}
EXIT:
	ts27010_slidewindow_unlock(s_slide_window);
	FUNC_EXIT();
}
static void fsa9480_work_cb(struct work_struct *work) { u8 intr, intr2, intr3; struct fsa9480_usbsw *usbsw = container_of(work, struct fsa9480_usbsw, work); struct i2c_client *client = usbsw->client; wake_lock_timeout(&mUSB_suspend_wake,1*HZ); /* clear interrupt */ if(muic_type==muicTypeTI6111) { msleep(200); fsa9480_read_reg(client, FSA9480_REG_INT1, &intr); fsa9480_read_reg(client, FSA9480_REG_INT2, &intr2); fsa9480_read_reg(client, FSA9480_REG_CHG_INT, &intr3); printk("[FSA9480] %s: intr=0x%x, intr2 = 0x%X, chg_intr=0x%x \n",__func__,intr,intr2, intr3); } else { fsa9480_read_reg(client, FSA9480_REG_INT1, &intr); fsa9480_read_reg(client, FSA9480_REG_INT2, &intr2); printk("[FSA9480] %s: intr=0x%x, intr2=0x%x \n",__func__,intr,intr2); } if(intr3 & CH_DONE) //EOC disable charger // Luke { //msleep(500); // Test only //fsa9480_read_reg(client, FSA9480_REG_DEV_T1, &val1); //if(val1&=DEV_VBUS) // Check if VBUS is valid //Luke //{ #if defined(CONFIG_BATTERY_D2083) if(spa_external_event) { pr_info("%s. Send D2083_EVENT_CHARGE_FULL event\n", __func__); spa_external_event(D2083_CATEGORY_BATTERY, D2083_EVENT_CHARGE_FULL); } //} #endif } intr &= 0xffff; /* device detection */ fsa9480_detect_dev(usbsw, intr); if((intr== 0x00) && (intr3 == 0)) { printk("[FSA9480] (intr== 0x00) in work_cb !!!!!\n"); fsa9480_read_adc_value(); #if 0 // TODO: if(muic_type==muicTypeTI6111) TI_SWreset(usbsw); #endif return; } if(!(intr3 & CH_DONE)) tsu8111_check_ovp(); if( intr==0x03) // INT error case fsa9480_reset_ic(); }
/*
 * diagchar_read() - read handler for the diag char device.
 *
 * Blocks until some data type is flagged ready for the calling client,
 * then copies out whichever category is pending (memory-device logs,
 * forwarded diag traffic, masks, packets, ...), each record framed as a
 * 4-byte type word followed by payload.  Returns the number of bytes
 * copied, or -EINVAL if the caller is not a registered client.
 *
 * NOTE(review): COPY_USER_SPACE_OR_EXIT presumably bumps `ret` and jumps
 * to the `exit` label on copy_to_user failure -- verify against the macro
 * definition in this driver's header.
 */
static int diagchar_read(struct file *file, char __user *buf, size_t count,
			  loff_t *ppos)
{
	int index = -1, i = 0, ret = 0;
	int num_data = 0, data_type;
#ifdef SDQXDM_DEBUG
	struct timeval t;
#endif

	/* map the calling process to its slot in the client table */
	for (i = 0; i < driver->num_clients; i++)
		if (driver->client_map[i].pid == current->tgid)
			index = i;

	if (index == -1) {
		DIAG_ERR("%s:%s(parent:%s): tgid=%d "
				"Client PID not found in table\n", __func__,
				current->comm, current->parent->comm, current->tgid);
		for (i = 0; i < driver->num_clients; i++)
			DIAG_ERR("\t#%d: %d\n", i, driver->client_map[i].pid);
		return -EINVAL;
	}

	/* block until some data type is flagged ready for this client */
	wait_event_interruptible(driver->wait_q, driver->data_ready[index]);

	if (diag7k_debug_mask)
		DIAG_INFO("%s:%s(parent:%s): tgid=%d\n", __func__,
				current->comm, current->parent->comm, current->tgid);

	mutex_lock(&driver->diagchar_mutex);

	if ((driver->data_ready[index] & USER_SPACE_LOG_TYPE) && (driver->
					logging_mode == MEMORY_DEVICE_MODE)) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & USER_SPACE_LOG_TYPE;
		COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
		/* place holder for number of data field */
		ret += 4;

		/* drain the HDLC write-buffer pool; each entry is copied as
		 * [length][payload] and then freed */
		for (i = 0; i < driver->poolsize_write_struct; i++) {
			if (driver->buf_tbl[i].length > 0) {
#ifdef DIAG_DEBUG
				pr_debug("diag: WRITING the buf address "
						"and length is %x , %d\n",
						(unsigned int) (driver->buf_tbl[i].buf),
						driver->buf_tbl[i].length);
#endif
				num_data++;
				/* Copy the length of data being passed */
				if (copy_to_user(buf+ret, (void *)&(driver->
						buf_tbl[i].length), 4)) {
					num_data--;
					goto drop;
				}
				ret += 4;

				/* Copy the actual data being passed */
				if (copy_to_user(buf+ret, (void *)driver->
						buf_tbl[i].buf,
						driver->buf_tbl[i].length)) {
					ret -= 4;
					num_data--;
					goto drop;
				}
				ret += driver->buf_tbl[i].length;
/* a failed entry is still freed so the pool cannot leak */
drop:
#ifdef DIAG_DEBUG
				pr_debug("diag: DEQUEUE buf address and"
						" length is %x,%d\n",
						(unsigned int) (driver->buf_tbl[i].buf),
						driver->buf_tbl[i].length);
#endif
				diagmem_free(driver, (unsigned char *)
					(driver->buf_tbl[i].buf), POOL_TYPE_HDLC);
				driver->buf_tbl[i].length = 0;
				driver->buf_tbl[i].buf = 0;
			}
		}

		/* copy modem data */
		if (driver->in_busy_1 == 1) {
			num_data++;
			/*Copy the length of data being passed*/
			COPY_USER_SPACE_OR_EXIT(buf+ret,
					 (driver->write_ptr_1->length), 4);
			/*Copy the actual data being passed*/
			COPY_USER_SPACE_OR_EXIT(buf+ret,
					*(driver->buf_in_1),
					 driver->write_ptr_1->length);
			driver->in_busy_1 = 0;
		}
		if (driver->in_busy_2 == 1) {
			num_data++;
			/*Copy the length of data being passed*/
			COPY_USER_SPACE_OR_EXIT(buf+ret,
					 (driver->write_ptr_2->length), 4);
			/*Copy the actual data being passed*/
			COPY_USER_SPACE_OR_EXIT(buf+ret,
					*(driver->buf_in_2),
					 driver->write_ptr_2->length);
			driver->in_busy_2 = 0;
		}

		/* copy lpass data */
		if (driver->in_busy_qdsp_1 == 1) {
			num_data++;
			/*Copy the length of data being passed*/
			COPY_USER_SPACE_OR_EXIT(buf+ret,
				 (driver->write_ptr_qdsp_1->length), 4);
			/*Copy the actual data being passed*/
			COPY_USER_SPACE_OR_EXIT(buf+ret, *(driver->
					buf_in_qdsp_1),
					 driver->write_ptr_qdsp_1->length);
			driver->in_busy_qdsp_1 = 0;
		}
		if (driver->in_busy_qdsp_2 == 1) {
			num_data++;
			/*Copy the length of data being passed*/
			COPY_USER_SPACE_OR_EXIT(buf+ret,
				 (driver->write_ptr_qdsp_2->length), 4);
			/*Copy the actual data being passed*/
			COPY_USER_SPACE_OR_EXIT(buf+ret, *(driver->
				buf_in_qdsp_2), driver->
				write_ptr_qdsp_2->length);
			driver->in_busy_qdsp_2 = 0;
		}

		/* copy wncss data */
		if (driver->in_busy_wcnss == 1) {
			num_data++;
			/*Copy the length of data being passed*/
			COPY_USER_SPACE_OR_EXIT(buf+ret,
				 (driver->write_ptr_wcnss->length), 4);
			/*Copy the actual data being passed*/
			COPY_USER_SPACE_OR_EXIT(buf+ret, *(driver->
				buf_in_wcnss), driver->write_ptr_wcnss->length);
			driver->in_busy_wcnss = 0;
		}

		/* copy number of data fields */
		COPY_USER_SPACE_OR_EXIT(buf+4, num_data, 4);
		ret -= 4;
		driver->data_ready[index] ^= USER_SPACE_LOG_TYPE;
		/* kick the SMD readers so fresh data keeps flowing */
		if (driver->ch)
			queue_work(driver->diag_wq,
					 &(driver->diag_read_smd_work));
		if (driver->chqdsp)
			queue_work(driver->diag_wq,
					 &(driver->diag_read_smd_qdsp_work));
		if (driver->ch_wcnss)
			queue_work(driver->diag_wq,
					 &(driver->diag_read_smd_wcnss_work));
		APPEND_DEBUG('n');
		goto exit;
	} else if (driver->data_ready[index] & USER_SPACE_LOG_TYPE) {
		/* In case, the thread wakes up and the logging mode is
		   not memory device any more, the condition needs to be cleared */
		driver->data_ready[index] ^= USER_SPACE_LOG_TYPE;
	} else if (driver->data_ready[index] & USERMODE_DIAGFWD) {
		/* forwarded diag traffic: raw payloads, no length framing */
		data_type = USERMODE_DIAGFWD;
		driver->data_ready[index] ^= USERMODE_DIAGFWD;
		COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
#ifdef SDQXDM_DEBUG
		do_gettimeofday(&t);
		if (driver->in_busy_1 && t.tv_sec > driver->write_ptr_1->second + 2)
			pr_info("[diag-dbg] late pkt now: %ld.%04ld pkt: %d\n",
				t.tv_sec, t.tv_usec/1000, driver->write_ptr_1->second);
		if (driver->in_busy_2 && t.tv_sec > driver->write_ptr_2->second + 2)
			pr_info("[diag-dbg] late pkt now: %ld.%04ld pkt: %d\n",
				t.tv_sec, t.tv_usec/1000, driver->write_ptr_2->second);
#endif

#ifdef CONFIG_ARCH_MSM8960
		for (i = 0; i < driver->poolsize_write_struct; i++) {
			if (driver->buf_tbl[i].length > 0) {
#ifdef SDQXDM_DEBUG
				if (diag7k_debug_mask)
					printk(KERN_INFO "\n WRITING the buf address "
						       "and length is %x , %d\n", (unsigned int)
						(driver->buf_tbl[i].buf), driver->buf_tbl[i].length);
#endif
				if (copy_to_user(buf+ret, (void *)driver->
				buf_tbl[i].buf, driver->buf_tbl[i].length))
					break;

				ret += driver->buf_tbl[i].length;

				diagmem_free(driver, (unsigned char *)
				(driver->buf_tbl[i].buf), POOL_TYPE_HDLC);
				driver->buf_tbl[i].length = 0;
				driver->buf_tbl[i].buf = 0;
			}
		}
#endif

		/* copy modem data */
		if (driver->in_busy_1 == 1) {
			/*Copy the actual data being passed*/
			COPY_USER_SPACE_OR_EXIT(buf+ret,
					*(driver->buf_in_1),
					driver->write_ptr_1->length);
			driver->in_busy_1 = 0;
		}
		if (driver->in_busy_2 == 1) {
			/*Copy the actual data being passed*/
			COPY_USER_SPACE_OR_EXIT(buf+ret,
					*(driver->buf_in_2),
					driver->write_ptr_2->length);
			driver->in_busy_2 = 0;
		}

		/* copy q6 data */
		if (driver->in_busy_qdsp_1 == 1) {
			/*Copy the actual data being passed*/
			COPY_USER_SPACE_OR_EXIT(buf+ret, *(driver->
					buf_in_qdsp_1),
					driver->write_ptr_qdsp_1->length);
			driver->in_busy_qdsp_1 = 0;
		}
		if (driver->in_busy_qdsp_2 == 1) {
			/*Copy the actual data being passed*/
			COPY_USER_SPACE_OR_EXIT(buf+ret, *(driver->
				buf_in_qdsp_2), driver->
				write_ptr_qdsp_2->length);
			driver->in_busy_qdsp_2 = 0;
		}

		if (driver->ch)
			queue_work(driver->diag_wq,
					&(driver->diag_read_smd_work));
		if (driver->chqdsp)
			queue_work(driver->diag_wq,
					&(driver->diag_read_smd_qdsp_work));

		if (diag7k_debug_mask)
			pr_info("%s() return %d byte\n", __func__, ret);
		goto exit;
	}

	if (driver->data_ready[index] & DEINIT_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & DEINIT_TYPE;
		COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
		driver->data_ready[index] ^= DEINIT_TYPE;
		goto exit;
	}

	if (driver->data_ready[index] & MSG_MASKS_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & MSG_MASKS_TYPE;
		COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
		COPY_USER_SPACE_OR_EXIT(buf+4, *(driver->msg_masks),
							 MSG_MASK_SIZE);
		driver->data_ready[index] ^= MSG_MASKS_TYPE;
		goto exit;
	}

	if (driver->data_ready[index] & EVENT_MASKS_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & EVENT_MASKS_TYPE;
		COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
		COPY_USER_SPACE_OR_EXIT(buf+4, *(driver->event_masks),
							 EVENT_MASK_SIZE);
		driver->data_ready[index] ^= EVENT_MASKS_TYPE;
		goto exit;
	}

	if (driver->data_ready[index] & LOG_MASKS_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & LOG_MASKS_TYPE;
		COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
		COPY_USER_SPACE_OR_EXIT(buf+4, *(driver->log_masks),
							 LOG_MASK_SIZE);
		driver->data_ready[index] ^= LOG_MASKS_TYPE;
		goto exit;
	}

	if (driver->data_ready[index] & PKT_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & PKT_TYPE;
		COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
		COPY_USER_SPACE_OR_EXIT(buf+4, *(driver->pkt_buf),
							 driver->pkt_length);
		driver->data_ready[index] ^= PKT_TYPE;
		goto exit;
	}

exit:
	/* data was delivered: hold a short wakelock so userspace can run */
	if (ret)
		wake_lock_timeout(&driver->wake_lock, HZ / 2);
	mutex_unlock(&driver->diagchar_mutex);
	return ret;
}
/*
 * s5p_ap_wakeup_irq_handler() - CP-initiated AP wakeup IRQ.
 *
 * Hold the AP wakelock for five seconds so the system stays up long
 * enough to service whatever the CP woke us for.
 */
static irqreturn_t s5p_ap_wakeup_irq_handler(int irq, void *data)
{
	struct idpram_pm_data *pm = data;

	wake_lock_timeout(&pm->ap_wlock, 5 * HZ);

	return IRQ_HANDLED;
}