/* Request a reset of the Riva subsystem over SMSM. */
static void smsm_riva_reset(void)
{
	/*
	 * A per-subsystem reset-request bit does not exist yet, so all
	 * subsystem host modules assert the shared SMSM_RESET bit.
	 * The final interface is still under discussion.
	 */
	smsm_change_state(SMSM_APPS_STATE, SMSM_RESET, SMSM_RESET);
}
/**
 * Crash shutdown function, called by the restart notifier.
 *
 * Silences the DSPS watchdog IRQ, records the crash in the module-wide
 * dsps_crash_shutdown_g flag, and asserts SMSM_RESET on the DSPS state
 * entry to notify the peer processor.
 *
 * NOTE(review): 'drv' is not a parameter here, so it is presumably a
 * file-scope driver-state pointer — confirm it is initialized before
 * the restart notifier can fire.
 */
static void dsps_crash_shutdown(const struct subsys_desc *subsys)
{
	pr_debug("%s\n", __func__);
	/* Keep the watchdog from interrupting while the crash is handled. */
	disable_irq_nosync(drv->wdog_irq);
	dsps_crash_shutdown_g = 1;
	smsm_change_state(SMSM_DSPS_STATE, SMSM_RESET, SMSM_RESET);
}
static int rpcrouter_smd_remote_probe(struct platform_device *pdev) { int rc; smd_remote_xprt.xprt.name = "rpcrotuer_smd_xprt"; smd_remote_xprt.xprt.read_avail = rpcrouter_smd_remote_read_avail; smd_remote_xprt.xprt.read = rpcrouter_smd_remote_read; smd_remote_xprt.xprt.write_avail = rpcrouter_smd_remote_write_avail; smd_remote_xprt.xprt.write = rpcrouter_smd_remote_write; smd_remote_xprt.xprt.close = rpcrouter_smd_remote_close; smd_remote_xprt.xprt.priv = NULL; /* Open up SMD channel */ rc = smd_open("RPCCALL", &smd_remote_xprt.channel, NULL, rpcrouter_smd_remote_notify); if (rc < 0) return rc; smd_disable_read_intr(smd_remote_xprt.channel); msm_rpcrouter_xprt_notify(&smd_remote_xprt.xprt, RPCROUTER_XPRT_EVENT_OPEN); smsm_change_state(SMSM_APPS_STATE, 0, SMSM_RPCINIT); return 0; }
/*
 * Crash-shutdown hook for DSPS: silence the watchdog interrupt, flag
 * the crash in the driver state, then assert SMSM_RESET on the DSPS
 * state entry to notify the peer.
 */
static void dsps_crash_shutdown(const struct subsys_desc *desc)
{
	struct dsps_data *data = desc_to_drv(desc);

	disable_irq_nosync(data->wdog_irq);
	data->crash = 1;
	smsm_change_state(SMSM_DSPS_STATE, SMSM_RESET, SMSM_RESET);
}
/*
 * Deferred worker that requests the SMD loopback server once the modem
 * side of SMSM is initialized; otherwise retries in one second.
 */
static void loopback_probe_worker(struct work_struct *work)
{
	if (is_modem_smsm_inited())
		smsm_change_state(SMSM_APPS_STATE, 0, SMSM_SMD_LOOPBACK);
	else
		schedule_delayed_work(&loopback_work,
				      msecs_to_jiffies(1000));
}
/*
 * Riva crash handler: if Riva has not already reported a crash, assert
 * SMSM_RESET on the apps state entry to force it down.
 */
static void riva_crash_shutdown(const struct subsys_desc *desc)
{
	struct riva_data *drv = container_of(desc, struct riva_data,
					     subsys_desc);

	pr_err("riva crash shutdown %d\n", drv->crash);
	/*
	 * Idiom fix: test truthiness instead of comparing against 'true';
	 * the old 'drv->crash != true' misbehaves if crash ever holds a
	 * nonzero value other than 1.
	 */
	if (!drv->crash)
		smsm_change_state(SMSM_APPS_STATE, SMSM_RESET, SMSM_RESET);
}
static void loopback_probe_worker(struct work_struct *work)
{
	/*
	 * The loopback server can only be requested after the modem's
	 * SMSM is up (e.g. following a modem restart); poll once a
	 * second until then.
	 */
	if (is_modem_smsm_inited())
		smsm_change_state(SMSM_APPS_STATE, 0, SMSM_SMD_LOOPBACK);
	else
		schedule_delayed_work(&loopback_work,
				      msecs_to_jiffies(1000));
}
/*
 * Allocate DXE control blocks for the four DMA channels (TX low/high
 * priority, RX low/high priority) and program their static channel
 * parameters, then report initial ring state to the peer via SMSM.
 *
 * Returns 0 on success, -ENOMEM if any control-block allocation fails
 * (partially allocated blocks are freed before returning).
 */
int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
{
	int ret;

	wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
	wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
	wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
	wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;

	wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
	wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
	wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
	wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;

	wcn->dxe_tx_l_ch.dxe_wq = WCN36XX_DXE_WQ_TX_L;
	wcn->dxe_tx_h_ch.dxe_wq = WCN36XX_DXE_WQ_TX_H;

	wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
	wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;

	wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
	wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;

	wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
	wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;

	wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
	wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;

	/* DXE control block allocation */
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
	if (ret)
		goto out_err;

	/* TODO most probably do not need this */
	/* Initialize SMSM state Clear TX Enable RING EMPTY STATE */
	/*
	 * NOTE(review): the return value of smsm_change_state() is stored
	 * in 'ret' but never checked; the function returns 0 regardless.
	 */
	ret = smsm_change_state(SMSM_APPS_STATE,
		WCN36XX_SMSM_WLAN_TX_ENABLE,
		WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
	return 0;

out_err:
	wcn36xx_error("Failed to allocate DXE control blocks");
	wcn36xx_dxe_free_ctl_blks(wcn);
	return -ENOMEM;
}
/* SMSM reset Riva: log the event (kernel log + ASUS event log), then
 * assert SMSM_RESET on the apps state entry. */
static void smsm_riva_reset(void)
{
	pr_info(MODULE_NAME ": smsm_riva_reset, smsm_change_state(SMSM_APPS_STATE, SMSM_RESET, SMSM_RESET).\n");
	//ASUS_BSP+++ "for /data/log/ASUSEvtlog"
	ASUSEvtlog("[wcnss]: smsm_riva_reset, smsm_change_state(SMSM_APPS_STATE, SMSM_RESET, SMSM_RESET).\n");
	//ASUS_BSP--- "for /data/log/ASUSEvtlog"
	/* per SS reset request bit is not available now,
	 * all SS host modules are setting this bit.
	 * This is still under discussion. */
	smsm_change_state(SMSM_APPS_STATE, SMSM_RESET, SMSM_RESET);
}
/*
 * Open an SMD TTY port. Only a subset of indices is enabled in this
 * build (0 -> "SMD_DS", 27 -> "SMD_GPSNMEA"); the DATA1/DATA21/LOOPBACK
 * mappings are deliberately disabled below. The first opener
 * initializes the read tasklet, the wake lock, and the SMD channel.
 *
 * Returns 0 on success, -ENODEV for an unsupported index, or the error
 * from smd_open().
 */
static int smd_tty_open(struct tty_struct *tty, struct file *f)
{
	int res = 0;
	int n = tty->index;
	struct smd_tty_info *info;
	const char *name;

	if (n == 0)
		name = "SMD_DS";
//	else if (n == 7)
//		name = "DATA1";
//	else if (n == 21)
//		name = "DATA21";
	else if (n == 27)
		name = "SMD_GPSNMEA";
//	else if (n == 36)
//		name = "LOOPBACK";
	else
		return -ENODEV;

	info = smd_tty + n;

	mutex_lock(&smd_tty_lock);
	tty->driver_data = info;

	/* Only the first concurrent opener performs channel setup. */
	if (info->open_count++ == 0) {
		info->tty = tty;
		tasklet_init(&info->tty_tsklt, smd_tty_read,
			     (unsigned long)info);
		wake_lock_init(&info->wake_lock, WAKE_LOCK_SUSPEND, name);
		if (!info->ch) {
#if 0
			if (n == 36) {
				/* set smsm state to SMSM_SMD_LOOPBACK state
				** and wait allowing enough time for Modem side
				** to open the loopback port (Currently, this is
				** this is effecient than polling). */
				smsm_change_state(SMSM_APPS_STATE,
						  0, SMSM_SMD_LOOPBACK);
				msleep(100);
			}
#endif
			res = smd_open(name, &info->ch, info, smd_tty_notify);
		}
	}
	mutex_unlock(&smd_tty_lock);

	return res;
}
/**
 * @brief wpalNotifySmsm - notify SMSM to start the DXE engine and/or
 *        report the condition of the Tx ring buffer.
 *
 * @param clrSt bit(s) to be cleared on the MASK
 * @param setSt bit(s) to be set on the MASK
 *
 * @return eWLAN_PAL_STATUS_SUCCESS if the operation is successful,
 *         eWLAN_PAL_STATUS_E_FAILURE otherwise.
 */
wpt_status wpalNotifySmsm(wpt_uint32 clrSt, wpt_uint32 setSt)
{
	int rc = smsm_change_state(SMSM_APPS_STATE, clrSt, setSt);

	if (rc != 0) {
		WPAL_TRACE(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
			   "%s: smsm_change_state failed", __func__);
		return eWLAN_PAL_STATUS_E_FAILURE;
	}

	return eWLAN_PAL_STATUS_SUCCESS;
}
static int smd_tty_open(struct tty_struct *tty, struct file *f) { int res = 0; int n = tty->index; struct smd_tty_info *info; const char *name; if (n == 0) name = "DS"; else if (n == 7) name = "DATA1"; else if (n == 21) name = "DATA21"; else if (n == 27) name = "GPSNMEA"; else if (n == 36) name = "LOOPBACK"; else return -ENODEV; info = smd_tty + n; mutex_lock(&smd_tty_lock); tty->driver_data = info; if (info->open_count++ == 0) { info->tty = tty; tasklet_init(&info->tty_tsklt, smd_tty_read, (unsigned long)info); wake_lock_init(&info->wake_lock, WAKE_LOCK_SUSPEND, name); if (!info->ch) { if (n == 36) { smsm_change_state(SMSM_APPS_STATE, 0, SMSM_SMD_LOOPBACK); msleep(100); } res = smd_open(name, &info->ch, info, smd_tty_notify); } } mutex_unlock(&smd_tty_lock); return res; }
/*
 * Power collapse the Apps processor. This function executes the handshake
 * protocol with Modem.
 *
 * The sequence visible below is: check master ready -> enter
 * PWRC/PWRC_SUSPEND -> wait for master RSA -> power down -> power up ->
 * wait for RSA or PWRC_EARLY_EXIT -> enter WFPI -> wait for master RUN
 * -> enter RUN. Each smsm_change_state()/msm_pm_poll_state() pair is
 * one step of the handshake; do not reorder.
 *
 * Return value:
 * -EAGAIN: modem reset occurred or early exit from power collapse
 * -EBUSY: modem not ready for our power collapse -- no power loss
 * -ETIMEDOUT: timed out waiting for modem's handshake -- no power loss
 * 0: success
 */
static int msm_pm_power_collapse(
	bool from_idle, uint32_t sleep_delay, uint32_t sleep_limit)
{
	struct msm_pm_polled_group state_grps[2];
	unsigned long saved_acpuclk_rate;
	int collapsed = 0;
	int ret;
	int val;
	int modem_early_exit = 0;

	MSM_PM_DPRINTK(MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
		KERN_INFO, "%s(): idle %d, delay %u, limit %u\n", __func__,
		(int)from_idle, sleep_delay, sleep_limit);

	/* Bail out early if the power master (Modem) is not ready. */
	if (!(smsm_get_state(SMSM_POWER_MASTER_DEM) & DEM_MASTER_SMSM_READY)) {
		MSM_PM_DPRINTK(
			MSM_PM_DEBUG_SUSPEND | MSM_PM_DEBUG_POWER_COLLAPSE,
			KERN_INFO, "%s(): master not ready\n", __func__);
		ret = -EBUSY;
		goto power_collapse_bail;
	}

	memset(msm_pm_smem_data, 0, sizeof(*msm_pm_smem_data));

	if (cpu_is_msm8625()) {
		/* Program the SPM */
		ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_POWER_COLLAPSE,
				false);
		WARN_ON(ret);
	}

	/* Call CPR suspend only for "idlePC" case */
	if (msm_cpr_ops && from_idle)
		msm_cpr_ops->cpr_suspend();

	msm_pm_irq_extns->enter_sleep1(true, from_idle,
						&msm_pm_smem_data->irq_mask);
	msm_sirc_enter_sleep();
	msm_gpio_enter_sleep(from_idle);

	/* Publish sleep parameters to Modem through shared memory. */
	msm_pm_smem_data->sleep_time = sleep_delay;
	msm_pm_smem_data->resources_used = sleep_limit;

	/* Enter PWRC/PWRC_SUSPEND */
	if (from_idle)
		smsm_change_state(SMSM_APPS_DEM, DEM_SLAVE_SMSM_RUN,
			DEM_SLAVE_SMSM_PWRC);
	else
		smsm_change_state(SMSM_APPS_DEM, DEM_SLAVE_SMSM_RUN,
			DEM_SLAVE_SMSM_PWRC | DEM_SLAVE_SMSM_PWRC_SUSPEND);

	MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): PWRC");
	MSM_PM_DEBUG_PRINT_SLEEP_INFO();

	/* Wait for the master to enter RSA, or detect a modem reset. */
	memset(state_grps, 0, sizeof(state_grps));
	state_grps[0].group_id = SMSM_POWER_MASTER_DEM;
	state_grps[0].bits_all_set = DEM_MASTER_SMSM_RSA;
	state_grps[1].group_id = SMSM_MODEM_STATE;
	state_grps[1].bits_all_set = SMSM_RESET;
	ret = msm_pm_poll_state(ARRAY_SIZE(state_grps), state_grps);

	if (ret < 0) {
		printk(KERN_EMERG "%s(): power collapse entry "
			"timed out waiting for Modem's response\n", __func__);
		msm_pm_timeout();
	}

	if (ret == 1) {
		MSM_PM_DPRINTK(
			MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
			KERN_INFO,
			"%s(): msm_pm_poll_state detected Modem reset\n",
			__func__);
		goto power_collapse_early_exit;
	}

	/* DEM Master in RSA */
	MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): PWRC RSA");

	ret = msm_pm_irq_extns->enter_sleep2(true, from_idle);
	if (ret < 0) {
		MSM_PM_DPRINTK(
			MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
			KERN_INFO,
			"%s(): msm_irq_enter_sleep2 aborted, %d\n", __func__,
			ret);
		goto power_collapse_early_exit;
	}

	msm_pm_config_hw_before_power_down();
	MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): pre power down");

	saved_acpuclk_rate = acpuclk_power_collapse();
	MSM_PM_DPRINTK(MSM_PM_DEBUG_CLOCK, KERN_INFO,
		"%s(): change clock rate (old rate = %lu)\n", __func__,
		saved_acpuclk_rate);

	/* A zero saved rate means the clock switch failed; back out. */
	if (saved_acpuclk_rate == 0) {
		msm_pm_config_hw_after_power_up();
		goto power_collapse_early_exit;
	}

	msm_pm_boot_config_before_pc(smp_processor_id(),
			virt_to_phys(msm_pm_collapse_exit));

#ifdef CONFIG_VFP
	if (from_idle)
		vfp_pm_suspend();
#endif

#ifdef CONFIG_CACHE_L2X0
	if (!cpu_is_msm8625())
		l2cc_suspend();
	else
		apps_power_collapse = 1;
#endif

	collapsed = msm_pm_collapse();

	/*
	 * TBD: Currently recognise the MODEM early exit
	 * path by reading the MPA5_GDFS_CNT_VAL register.
	 */
	if (cpu_is_msm8625()) {
		/*
		 * on system reset, default value of MPA5_GDFS_CNT_VAL
		 * is = 0x0, later modem reprogram this value to
		 * 0x00030004. Once APPS did a power collapse and
		 * coming out of it expected value of this register
		 * always be 0x00030004. Incase if APPS sees the value
		 * as 0x00030002 consider this case as a modem early
		 * exit.
		 */
		val = __raw_readl(MSM_CFG_CTL_BASE + 0x38);
		if (val != 0x00030002)
			power_collapsed = 1;
		else
			modem_early_exit = 1;
	}

#ifdef CONFIG_CACHE_L2X0
	if (!cpu_is_msm8625())
		l2cc_resume();
	else
		apps_power_collapse = 0;
#endif

	msm_pm_boot_config_after_pc(smp_processor_id());

	if (collapsed) {
#ifdef CONFIG_VFP
		if (from_idle)
			vfp_pm_resume();
#endif
		cpu_init();
		local_fiq_enable();
	}

	MSM_PM_DPRINTK(MSM_PM_DEBUG_SUSPEND | MSM_PM_DEBUG_POWER_COLLAPSE,
		KERN_INFO,
		"%s(): msm_pm_collapse returned %d\n", __func__, collapsed);

	MSM_PM_DPRINTK(MSM_PM_DEBUG_CLOCK, KERN_INFO,
		"%s(): restore clock rate to %lu\n", __func__,
		saved_acpuclk_rate);
	if (acpuclk_set_rate(smp_processor_id(), saved_acpuclk_rate,
			SETRATE_PC) < 0)
		printk(KERN_ERR "%s(): failed to restore clock rate(%lu)\n",
			__func__, saved_acpuclk_rate);

	msm_pm_irq_extns->exit_sleep1(msm_pm_smem_data->irq_mask,
		msm_pm_smem_data->wakeup_reason,
		msm_pm_smem_data->pending_irqs);

	msm_pm_config_hw_after_power_up();
	MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): post power up");

	/* Wait for RSA or PWRC_EARLY_EXIT from the master after wakeup. */
	memset(state_grps, 0, sizeof(state_grps));
	state_grps[0].group_id = SMSM_POWER_MASTER_DEM;
	state_grps[0].bits_any_set =
		DEM_MASTER_SMSM_RSA | DEM_MASTER_SMSM_PWRC_EARLY_EXIT;
	state_grps[1].group_id = SMSM_MODEM_STATE;
	state_grps[1].bits_all_set = SMSM_RESET;
	ret = msm_pm_poll_state(ARRAY_SIZE(state_grps), state_grps);

	if (ret < 0) {
		printk(KERN_EMERG "%s(): power collapse exit "
			"timed out waiting for Modem's response\n", __func__);
		msm_pm_timeout();
	}

	if (ret == 1) {
		MSM_PM_DPRINTK(
			MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
			KERN_INFO,
			"%s(): msm_pm_poll_state detected Modem reset\n",
			__func__);
		goto power_collapse_early_exit;
	}

	/* Sanity check */
	if (collapsed && !modem_early_exit) {
		BUG_ON(!(state_grps[0].value_read & DEM_MASTER_SMSM_RSA));
	} else {
		BUG_ON(!(state_grps[0].value_read &
			DEM_MASTER_SMSM_PWRC_EARLY_EXIT));
		goto power_collapse_early_exit;
	}

	/* Enter WFPI */
	smsm_change_state(SMSM_APPS_DEM,
		DEM_SLAVE_SMSM_PWRC | DEM_SLAVE_SMSM_PWRC_SUSPEND,
		DEM_SLAVE_SMSM_WFPI);

	MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): WFPI");

	/* Wait for the master to go back to RUN. */
	memset(state_grps, 0, sizeof(state_grps));
	state_grps[0].group_id = SMSM_POWER_MASTER_DEM;
	state_grps[0].bits_all_set = DEM_MASTER_SMSM_RUN;
	state_grps[1].group_id = SMSM_MODEM_STATE;
	state_grps[1].bits_all_set = SMSM_RESET;
	ret = msm_pm_poll_state(ARRAY_SIZE(state_grps), state_grps);

	if (ret < 0) {
		printk(KERN_EMERG "%s(): power collapse WFPI "
			"timed out waiting for Modem's response\n", __func__);
		msm_pm_timeout();
	}

	if (ret == 1) {
		MSM_PM_DPRINTK(
			MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
			KERN_INFO,
			"%s(): msm_pm_poll_state detected Modem reset\n",
			__func__);
		ret = -EAGAIN;
		goto power_collapse_restore_gpio_bail;
	}

	/* DEM Master == RUN */
	MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): WFPI RUN");
	MSM_PM_DEBUG_PRINT_SLEEP_INFO();

	msm_pm_irq_extns->exit_sleep2(msm_pm_smem_data->irq_mask,
		msm_pm_smem_data->wakeup_reason,
		msm_pm_smem_data->pending_irqs);
	msm_pm_irq_extns->exit_sleep3(msm_pm_smem_data->irq_mask,
		msm_pm_smem_data->wakeup_reason,
		msm_pm_smem_data->pending_irqs);
	msm_gpio_exit_sleep();
	msm_sirc_exit_sleep();

	smsm_change_state(SMSM_APPS_DEM,
		DEM_SLAVE_SMSM_WFPI, DEM_SLAVE_SMSM_RUN);

	MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): RUN");

	smd_sleep_exit();

	if (cpu_is_msm8625()) {
		ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING,
				false);
		WARN_ON(ret);
	}

	/* Call CPR resume only for "idlePC" case */
	if (msm_cpr_ops && from_idle)
		msm_cpr_ops->cpr_resume();

	return 0;

power_collapse_early_exit:
	/* Enter PWRC_EARLY_EXIT */
	smsm_change_state(SMSM_APPS_DEM,
		DEM_SLAVE_SMSM_PWRC | DEM_SLAVE_SMSM_PWRC_SUSPEND,
		DEM_SLAVE_SMSM_PWRC_EARLY_EXIT);

	MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): EARLY_EXIT");

	/* Wait for the master to acknowledge the early exit. */
	memset(state_grps, 0, sizeof(state_grps));
	state_grps[0].group_id = SMSM_POWER_MASTER_DEM;
	state_grps[0].bits_all_set = DEM_MASTER_SMSM_PWRC_EARLY_EXIT;
	state_grps[1].group_id = SMSM_MODEM_STATE;
	state_grps[1].bits_all_set = SMSM_RESET;
	ret = msm_pm_poll_state(ARRAY_SIZE(state_grps), state_grps);

	MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): EARLY_EXIT EE");

	if (ret < 0) {
		printk(KERN_EMERG "%s(): power collapse EARLY_EXIT "
			"timed out waiting for Modem's response\n", __func__);
		msm_pm_timeout();
	}

	if (ret == 1) {
		MSM_PM_DPRINTK(
			MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
			KERN_INFO,
			"%s(): msm_pm_poll_state detected Modem reset\n",
			__func__);
	}

	/* DEM Master == RESET or PWRC_EARLY_EXIT */
	ret = -EAGAIN;

power_collapse_restore_gpio_bail:
	msm_gpio_exit_sleep();
	msm_sirc_exit_sleep();

	/* Enter RUN */
	smsm_change_state(SMSM_APPS_DEM,
		DEM_SLAVE_SMSM_PWRC | DEM_SLAVE_SMSM_PWRC_SUSPEND |
		DEM_SLAVE_SMSM_PWRC_EARLY_EXIT, DEM_SLAVE_SMSM_RUN);

	MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): RUN");

	if (collapsed)
		smd_sleep_exit();

	/* Call CPR resume only for "idlePC" case */
	if (msm_cpr_ops && from_idle)
		msm_cpr_ops->cpr_resume();

power_collapse_bail:
	if (cpu_is_msm8625()) {
		ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING,
				false);
		WARN_ON(ret);
	}

	return ret;
}
/*
 * Initialize the power management subsystem.
 *
 * On CONFIG_CPU_V7 builds, sets up a dedicated page table so cores can
 * come back up safely after power collapse. Then allocates the shared
 * memory block used for the Apps/Modem sleep handshake, initializes
 * time sync, clears the SMSM interrupt mask for the power master, and
 * registers suspend ops and statistics.
 *
 * Return value:
 * -ENODEV: initialization failed
 * 0: success
 */
static int __init msm_pm_init(void)
{
	int ret;
	int val;
	enum msm_pm_time_stats_id enable_stats[] = {
		MSM_PM_STAT_REQUESTED_IDLE,
		MSM_PM_STAT_IDLE_SPIN,
		MSM_PM_STAT_IDLE_WFI,
		MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
		MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE,
		MSM_PM_STAT_IDLE_POWER_COLLAPSE,
		MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE,
		MSM_PM_STAT_SUSPEND,
		MSM_PM_STAT_FAILED_SUSPEND,
		MSM_PM_STAT_NOT_IDLE,
	};
#ifdef CONFIG_CPU_V7
	pgd_t *pc_pgd;
	pmd_t *pmd;
	unsigned long pmdval;
	unsigned long exit_phys;

	exit_phys = virt_to_phys(msm_pm_collapse_exit);

	/* Page table for cores to come back up safely. */
	pc_pgd = pgd_alloc(&init_mm);
	if (!pc_pgd)
		return -ENOMEM;

	/* Identity-map the section containing msm_pm_collapse_exit(). */
	pmd = pmd_offset(pud_offset(pc_pgd + pgd_index(exit_phys), exit_phys),
			exit_phys);
	pmdval = (exit_phys & PGDIR_MASK) |
		     PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
	pmd[0] = __pmd(pmdval);
	pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));

	msm_saved_state_phys =
		allocate_contiguous_ebi_nomap(CPU_SAVED_STATE_SIZE *
					      num_possible_cpus(), 4);
	if (!msm_saved_state_phys)
		return -ENOMEM;
	msm_saved_state = ioremap_nocache(msm_saved_state_phys,
					  CPU_SAVED_STATE_SIZE *
					  num_possible_cpus());
	if (!msm_saved_state)
		return -ENOMEM;

	/* It is remotely possible that the code in msm_pm_collapse_exit()
	 * which turns on the MMU with this mapping is in the
	 * next even-numbered megabyte beyond the
	 * start of msm_pm_collapse_exit().
	 * Map this megabyte in as well.
	 */
	pmd[2] = __pmd(pmdval + (2 << (PGDIR_SHIFT - 1)));
	flush_pmd_entry(pmd);
	msm_pm_pc_pgd = virt_to_phys(pc_pgd);
	clean_caches((unsigned long)&msm_pm_pc_pgd, sizeof(msm_pm_pc_pgd),
		     virt_to_phys(&msm_pm_pc_pgd));
#endif

	/* Shared-memory block used for the sleep handshake with Modem. */
	msm_pm_smem_data = smem_alloc(SMEM_APPS_DEM_SLAVE_DATA,
		sizeof(*msm_pm_smem_data));
	if (msm_pm_smem_data == NULL) {
		printk(KERN_ERR "%s: failed to get smsm_data\n", __func__);
		return -ENODEV;
	}

	ret = msm_timer_init_time_sync(msm_pm_timeout);
	if (ret)
		return ret;

	ret = smsm_change_intr_mask(SMSM_POWER_MASTER_DEM, 0xFFFFFFFF, 0);
	if (ret) {
		printk(KERN_ERR "%s: failed to clear interrupt mask, %d\n",
			__func__, ret);
		return ret;
	}

	if (cpu_is_msm8625()) {
		target_type = TARGET_IS_8625;
		clean_caches((unsigned long)&target_type, sizeof(target_type),
				virt_to_phys(&target_type));

		/*
		 * Configure the MPA5_GDFS_CNT_VAL register for
		 * DBGPWRUPEREQ_OVERRIDE[17:16] = Override the
		 * DBGNOPOWERDN for each cpu.
		 * MPA5_GDFS_CNT_VAL[9:0] = Delay counter for
		 * GDFS control.
		 */
		val = 0x00030002;
		__raw_writel(val, (MSM_CFG_CTL_BASE + 0x38));

		l2x0_base_addr = MSM_L2CC_BASE;
	}

#ifdef CONFIG_MSM_MEMORY_LOW_POWER_MODE
	/* The wakeup_reason field is overloaded during initialization time
	   to signal Modem that Apps will control the low power modes of
	   the memory.
	 */
	msm_pm_smem_data->wakeup_reason = 1;
	smsm_change_state(SMSM_APPS_DEM, 0, DEM_SLAVE_SMSM_RUN);
#endif

	BUG_ON(msm_pm_modes == NULL);

	suspend_set_ops(&msm_pm_ops);

	msm_pm_mode_sysfs_add();
	msm_pm_add_stats(enable_stats, ARRAY_SIZE(enable_stats));

	atomic_set(&msm_pm_init_done, 1);
	return 0;
}
/* Time Slave State Bits */
#define SLAVE_TIME_REQUEST 0x0400
#define SLAVE_TIME_POLL 0x0800
#define SLAVE_TIME_INIT 0x1000

	/*
	 * NOTE(review): the code from here to the first closing brace is
	 * the tail of a function whose definition begins before this
	 * excerpt (presumably the CONFIG_MSM_N_WAY_SMSM variant of the
	 * slow-clock sync — confirm against the full file). It reads the
	 * modem-published slow clock from shared memory using a
	 * REQUEST/POLL handshake on SMSM_APPS_DEM.
	 */
	uint32_t *smem_clock;
	uint32_t smem_clock_val;
	uint32_t state;

	smem_clock = smem_alloc(SMEM_SMEM_SLOW_CLOCK_VALUE,
				sizeof(uint32_t));

	if (smem_clock == NULL) {
		printk(KERN_ERR "no smem clock\n");
		return 0;
	}

	state = smsm_get_state(SMSM_MODEM_STATE);
	if ((state & SMSM_INIT) == 0) {
		printk(KERN_ERR "smsm not initialized\n");
		return 0;
	}

	/* Wait for any prior time transaction on the master to drain. */
	time_start(data);
	while ((state = smsm_get_state(SMSM_TIME_MASTER_DEM)) &
		MASTER_TIME_PENDING) {
		if (time_expired(data)) {
			printk(KERN_INFO "get_smem_clock: timeout 1 still "
				"invalid state %x\n", state);
			return 0;
		}
	}

	/* Request the clock value from the time master. */
	smsm_change_state(SMSM_APPS_DEM, SLAVE_TIME_POLL | SLAVE_TIME_INIT,
		SLAVE_TIME_REQUEST);

	time_start(data);
	while (!((state = smsm_get_state(SMSM_TIME_MASTER_DEM)) &
		MASTER_TIME_PENDING)) {
		if (time_expired(data)) {
			printk(KERN_INFO "get_smem_clock: timeout 2 still "
				"invalid state %x\n", state);
			smem_clock_val = 0;
			goto sync_sclk_exit;
		}
	}

	/* Switch to POLL and wait for the value to appear in smem. */
	smsm_change_state(SMSM_APPS_DEM, SLAVE_TIME_REQUEST, SLAVE_TIME_POLL);

	time_start(data);
	do {
		smem_clock_val = *smem_clock;
	} while (smem_clock_val == 0 && !time_expired(data));

	state = smsm_get_state(SMSM_TIME_MASTER_DEM);

	if (smem_clock_val) {
		if (update != NULL)
			update(data, smem_clock_val);

		if (msm_timer_debug_mask & MSM_TIMER_DEBUG_SYNC)
			printk(KERN_INFO
				"get_smem_clock: state %x clock %u\n",
				state, smem_clock_val);
	} else {
		printk(KERN_INFO
			"get_smem_clock: timeout state %x clock %u\n",
			state, smem_clock_val);
	}

sync_sclk_exit:
	/* Return the slave state machine to INIT on every exit path. */
	smsm_change_state(SMSM_APPS_DEM, SLAVE_TIME_REQUEST | SLAVE_TIME_POLL,
		SLAVE_TIME_INIT);

	return smem_clock_val;
}

#else /* CONFIG_MSM_N_WAY_SMSM */

/*
 * Legacy (non N-way SMSM) slow-clock sync: poll the modem-published
 * slow clock out of shared memory using the TIMEINIT/TIMEWAIT bits of
 * SMSM_APPS_STATE. The caller supplies timing hooks:
 * @time_start:   (re)start the caller's timeout clock
 * @time_expired: true when the caller's timeout has elapsed
 * @update:       optional callback invoked with the obtained clock value
 * @data:         opaque context passed to the hooks
 * Returns the clock value read, or 0 on timeout/error.
 */
static uint32_t msm_timer_sync_sclk(
	void (*time_start)(struct msm_timer_sync_data_t *data),
	bool (*time_expired)(struct msm_timer_sync_data_t *data),
	void (*update)(struct msm_timer_sync_data_t *data, uint32_t clk_val),
	struct msm_timer_sync_data_t *data)
{
	uint32_t *smem_clock;
	uint32_t smem_clock_val;
	uint32_t last_state;
	uint32_t state;

	smem_clock = smem_alloc(SMEM_SMEM_SLOW_CLOCK_VALUE,
				sizeof(uint32_t));
	if (smem_clock == NULL) {
		printk(KERN_ERR "no smem clock\n");
		return 0;
	}

	last_state = state = smsm_get_state(SMSM_MODEM_STATE);
	smem_clock_val = *smem_clock;
	/* A nonzero value at entry means a stale transaction; clear it. */
	if (smem_clock_val) {
		printk(KERN_INFO "get_smem_clock: invalid start state %x "
			"clock %u\n", state, smem_clock_val);
		smsm_change_state(SMSM_APPS_STATE,
				  SMSM_TIMEWAIT, SMSM_TIMEINIT);

		time_start(data);
		while (*smem_clock != 0 && !time_expired(data))
			;

		smem_clock_val = *smem_clock;
		if (smem_clock_val) {
			printk(KERN_INFO "get_smem_clock: timeout still "
				"invalid state %x clock %u\n",
				state, smem_clock_val);
			return 0;
		}
	}

	/* Request a fresh clock value and poll for it. */
	time_start(data);
	smsm_change_state(SMSM_APPS_STATE, SMSM_TIMEINIT, SMSM_TIMEWAIT);
	do {
		smem_clock_val = *smem_clock;
		state = smsm_get_state(SMSM_MODEM_STATE);
		if (state != last_state) {
			last_state = state;
			if (msm_timer_debug_mask & MSM_TIMER_DEBUG_SYNC)
				printk(KERN_INFO
					"get_smem_clock: state %x clock %u\n",
					state, smem_clock_val);
		}
	} while (smem_clock_val == 0 && !time_expired(data));

	if (smem_clock_val) {
		if (update != NULL)
			update(data, smem_clock_val);
	} else {
		printk(KERN_INFO
			"get_smem_clock: timeout state %x clock %u\n",
			state, smem_clock_val);
	}

	/* Hand the transaction back and wait for the modem to clear it. */
	smsm_change_state(SMSM_APPS_STATE, SMSM_TIMEWAIT, SMSM_TIMEINIT);

	time_start(data);
	while (*smem_clock != 0 && !time_expired(data))
		;

	if (*smem_clock)
		printk(KERN_INFO "get_smem_clock: exit timeout state %x "
			"clock %u\n", state, *smem_clock);

	return smem_clock_val;
}
/* Assert SMSM_RESET on the apps state entry to trigger a Riva reset. */
static void smsm_riva_reset(void)
{
	smsm_change_state(SMSM_APPS_STATE, SMSM_RESET, SMSM_RESET);
}
/**
 * Crash shutdown function, called by the restart notifier.
 *
 * Records the crash in the module-wide dsps_crash_shutdown_g flag
 * (read elsewhere in this file) and asserts SMSM_RESET on the DSPS
 * state entry to notify the peer processor.
 */
static void dsps_crash_shutdown(const struct subsys_data *subsys)
{
	pr_debug("%s\n", __func__);
	dsps_crash_shutdown_g = 1;
	smsm_change_state(SMSM_DSPS_STATE, SMSM_RESET, SMSM_RESET);
}
/*
 * SMSM callback unit tests. Each test registers callback(s) for bits of
 * SMSM_APPS_STATE, toggles those bits with smsm_change_state(), and
 * verifies the callback count/old/new state via the UT_* macros. Writes
 * "Test N - PASS" lines into 'buf' (up to 'max' bytes) and returns the
 * number of bytes written. The exact expected counts (1, 2, ...) are
 * the test contract — do not change them.
 */
static int debug_test_smsm(char *buf, int max)
{
	int i = 0;
	int test_num = 0;
	int ret;

	/* Test 1: register one callback, verify it fires on both the
	 * de-assert and re-assert of SMSM_SMDINIT, then deregister and
	 * verify no further callbacks occur. */
	do {
		test_num++;
		SMSM_CB_TEST_INIT();
		ret = smsm_state_cb_register(SMSM_APPS_STATE, SMSM_SMDINIT,
				smsm_state_cb, (void *)0x1234);
		UT_EQ_INT(ret, 0);

		/* de-assert SMSM_SMDINIT to trigger a state update */
		UT_EQ_INT(smsm_cb_data.cb_count, 0);
		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, SMSM_SMDINIT, 0x0);
		UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);

		UT_EQ_INT(smsm_cb_data.cb_count, 1);
		UT_EQ_INT(smsm_cb_data.old_state & SMSM_SMDINIT, SMSM_SMDINIT);
		UT_EQ_INT(smsm_cb_data.new_state & SMSM_SMDINIT, 0x0);
		UT_EQ_INT((int)smsm_cb_data.data, 0x1234);

		/* re-assert SMSM_SMDINIT to trigger a state update */
		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_SMDINIT);
		UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 2);
		UT_EQ_INT(smsm_cb_data.old_state & SMSM_SMDINIT, 0x0);
		UT_EQ_INT(smsm_cb_data.new_state & SMSM_SMDINIT, SMSM_SMDINIT);

		ret = smsm_state_cb_deregister(SMSM_APPS_STATE, SMSM_SMDINIT,
				smsm_state_cb, (void *)0x1234);
		UT_EQ_INT(ret, 2);

		/* further state changes must not invoke the callback */
		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, SMSM_SMDINIT, 0x0);
		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_SMDINIT);
		UT_EQ_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 2);

		i += scnprintf(buf + i, max - i, "Test %d - PASS\n",
			       test_num);
	} while (0);

	/* Test 2: register the same callback/data for a second mask
	 * (expect return 1 = updated existing registration), fire both
	 * masks, deregister one mask at a time and verify counts. */
	do {
		test_num++;
		SMSM_CB_TEST_INIT();
		ret = smsm_state_cb_register(SMSM_APPS_STATE, SMSM_SMDINIT,
				smsm_state_cb, (void *)0x1234);
		UT_EQ_INT(ret, 0);
		ret = smsm_state_cb_register(SMSM_APPS_STATE, SMSM_INIT,
				smsm_state_cb, (void *)0x1234);
		UT_EQ_INT(ret, 1);

		INIT_COMPLETION(smsm_cb_completion);
		UT_EQ_INT(smsm_cb_data.cb_count, 0);
		smsm_change_state(SMSM_APPS_STATE, SMSM_SMDINIT, 0x0);
		UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 1);

		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_SMDINIT);
		UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 2);

		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, SMSM_INIT, 0x0);
		UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 3);

		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_INIT);
		UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 4);

		/* deregister the SMDINIT mask; INIT remains registered */
		ret = smsm_state_cb_deregister(SMSM_APPS_STATE, SMSM_SMDINIT,
				smsm_state_cb, (void *)0x1234);
		UT_EQ_INT(ret, 1);

		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, SMSM_SMDINIT, 0x0);
		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_SMDINIT);
		UT_EQ_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 4);

		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, SMSM_INIT, 0x0);
		UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 5);

		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_INIT);
		UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 6);

		ret = smsm_state_cb_deregister(SMSM_APPS_STATE, SMSM_INIT,
				smsm_state_cb, (void *)0x1234);
		UT_EQ_INT(ret, 2);

		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, SMSM_INIT, 0x0);
		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_INIT);
		UT_EQ_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 6);

		i += scnprintf(buf + i, max - i, "Test %d - PASS\n",
			       test_num);
	} while (0);

	/* Test 3: two registrations with distinct private data; verify
	 * each callback receives its own data pointer. */
	do {
		test_num++;
		SMSM_CB_TEST_INIT();
		ret = smsm_state_cb_register(SMSM_APPS_STATE, SMSM_SMDINIT,
				smsm_state_cb, (void *)0x1234);
		UT_EQ_INT(ret, 0);
		ret = smsm_state_cb_register(SMSM_APPS_STATE, SMSM_INIT,
				smsm_state_cb, (void *)0x3456);
		UT_EQ_INT(ret, 0);

		INIT_COMPLETION(smsm_cb_completion);
		UT_EQ_INT(smsm_cb_data.cb_count, 0);
		smsm_change_state(SMSM_APPS_STATE, SMSM_SMDINIT, 0x0);
		UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 1);
		UT_EQ_INT((int)smsm_cb_data.data, 0x1234);

		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, SMSM_INIT, 0x0);
		UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 2);
		UT_EQ_INT((int)smsm_cb_data.data, 0x3456);

		/* restore the bits for subsequent tests */
		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_SMDINIT);
		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_INIT);

		ret = smsm_state_cb_deregister(SMSM_APPS_STATE, SMSM_INIT,
				smsm_state_cb, (void *)0x3456);
		UT_EQ_INT(ret, 2);
		ret = smsm_state_cb_deregister(SMSM_APPS_STATE, SMSM_SMDINIT,
				smsm_state_cb, (void *)0x1234);
		UT_EQ_INT(ret, 2);

		i += scnprintf(buf + i, max - i, "Test %d - PASS\n",
			       test_num);
	} while (0);

	return i;
}
/*
 * Activate an SMD TTY port: bring up the owning peripheral via PIL,
 * optionally request the modem loopback server over SMSM, wait for
 * channel allocation, then open the SMD channel and wait for it to
 * come up. Cleanup on failure unwinds via the goto ladder at the
 * bottom (close channel -> kill tasklet/wake locks -> put subsystem).
 *
 * Returns 0 on success, -ENODEV for a bad index, -ETIMEDOUT on any
 * wait timeout, or a negative error from the failing call.
 */
static int smd_tty_port_activate(struct tty_port *tport,
				 struct tty_struct *tty)
{
	int res = 0;
	unsigned int n = tty->index;
	struct smd_tty_info *info;
	const char *peripheral = NULL;

	if (n >= MAX_SMD_TTYS || !smd_tty[n].smd)
		return -ENODEV;

	info = smd_tty + n;

	mutex_lock(&smd_tty_lock);
	tty->driver_data = info;

	peripheral = smd_edge_to_subsystem(smd_tty[n].smd->edge);
	if (peripheral) {
		info->pil = subsystem_get(peripheral);
		if (IS_ERR(info->pil)) {
			SMD_TTY_INFO(
				"%s failed on smd_tty device :%s subsystem_get failed for %s",
				__func__, smd_tty[n].smd->port_name,
				peripheral);

			/*
			 * Sleep, inorder to reduce the frequency of
			 * retry by user-space modules and to avoid
			 * possible watchdog bite.
			 */
			msleep((smd_tty[n].open_wait * 1000));
			res = PTR_ERR(info->pil);
			goto out;
		}

		/* Wait for the modem SMSM to be inited for the SMD
		 * Loopback channel to be allocated at the modem. Since
		 * the wait need to be done atmost once, using msleep
		 * doesn't degrade the performance. */
		if (n == LOOPBACK_IDX) {
			if (!is_modem_smsm_inited())
				msleep(5000);
			smsm_change_state(SMSM_APPS_STATE,
					  0, SMSM_SMD_LOOPBACK);
			msleep(100);
		}

		/*
		 * Wait for a channel to be allocated so we know
		 * the modem is ready enough.
		 */
		if (smd_tty[n].open_wait) {
			res = wait_for_completion_interruptible_timeout(
					&info->ch_allocated,
					msecs_to_jiffies(smd_tty[n].open_wait *
									1000));

			if (res == 0) {
				SMD_TTY_INFO(
					"Timed out waiting for SMD channel %s",
					smd_tty[n].smd->port_name);
				res = -ETIMEDOUT;
				goto release_pil;
			} else if (res < 0) {
				SMD_TTY_INFO(
					"Error waiting for SMD channel %s : %d\n",
					smd_tty[n].smd->port_name, res);
				goto release_pil;
			}
#ifdef CONFIG_MSM_SMD_TTY_DS_LEGACY
			/*
			 * on boot, process tried to open smd0 sleeps until
			 * modem is ready or timeout.
			 */
			if (n == DS_IDX) {
				/* wait for open ready status in seconds */
				pr_info("%s: checking DS modem status\n",
					__func__);
				res = wait_event_interruptible_timeout(
						info->ch_opened_wait_queue,
						info->is_dsmodem_ready,
						(smd_tty_ds_modem_wait * HZ));
				if (!res) {
					res = -ETIMEDOUT;
					pr_err("%s: timeout to wait for %s modem: %d\n",
						__func__,
						smd_tty[n].smd->port_name,
						res);
					goto release_pil;
				} else if (res < 0) {
					pr_err("%s: Error waiting for %s modem: %d\n",
						__func__,
						smd_tty[n].smd->port_name,
						res);
					goto release_pil;
				}
				pr_info("%s: DS modem is OK, open smd0..\n",
					__func__);
			}
#endif
		}
	}

	/* Per-port resources: read tasklet plus two wake locks. */
	tasklet_init(&info->tty_tsklt, smd_tty_read, (unsigned long)info);
	wake_lock_init(&info->wake_lock, WAKE_LOCK_SUSPEND,
			smd_tty[n].smd->port_name);
	scnprintf(info->ra_wake_lock_name, MAX_RA_WAKE_LOCK_NAME_LEN,
		  "SMD_TTY_%s_RA", smd_tty[n].smd->port_name);
	wake_lock_init(&info->ra_wake_lock, WAKE_LOCK_SUSPEND,
		       info->ra_wake_lock_name);

	res = smd_named_open_on_edge(smd_tty[n].smd->port_name,
				     smd_tty[n].smd->edge, &info->ch, info,
				     smd_tty_notify);
	if (res < 0) {
		SMD_TTY_INFO("%s: %s open failed %d\n",
			     __func__, smd_tty[n].smd->port_name, res);
		goto release_wl_tl;
	}

	res = wait_event_interruptible_timeout(info->ch_opened_wait_queue,
					       info->is_open, (2 * HZ));
	if (res == 0)
		res = -ETIMEDOUT;
	if (res < 0) {
		SMD_TTY_INFO("%s: wait for %s smd_open failed %d\n",
			     __func__, smd_tty[n].smd->port_name, res);
		goto close_ch;
	}
	SMD_TTY_INFO("%s with PID %u opened port %s",
		     current->comm, current->pid, smd_tty[n].smd->port_name);
	smd_disable_read_intr(info->ch);
	mutex_unlock(&smd_tty_lock);
	return 0;

close_ch:
	smd_close(info->ch);
	info->ch = NULL;

release_wl_tl:
	tasklet_kill(&info->tty_tsklt);
	wake_lock_destroy(&info->wake_lock);
	wake_lock_destroy(&info->ra_wake_lock);

release_pil:
	subsystem_put(info->pil);

out:
	mutex_unlock(&smd_tty_lock);

	return res;
}
/*
 * Autosleep worker (HTC-modified): attempt to enter the configured
 * sleep state. Aborts without suspending when wakeup events are
 * pending, counts don't match, the system isn't running, or autosleep
 * is off. Around the actual suspend it toggles HTC-specific SMSM
 * suspend/resume bits toward the modem ("garbage filter"). On a
 * wakeup with no new events it waits HZ/2 before requeueing itself.
 */
static void try_to_suspend(struct work_struct *work)
{
	unsigned int initial_count, final_count;

	if (!pm_get_wakeup_count(&initial_count, true)) {
#ifdef CONFIG_HTC_POWER_DEBUG
		pr_info("[P] suspend abort, wakeup event nonzero\n");
		htc_print_active_wakeup_sources();
#endif
		goto out;
	}

	mutex_lock(&autosleep_lock);

	if (!pm_save_wakeup_count(initial_count) ||
		system_state != SYSTEM_RUNNING) {
#ifdef CONFIG_HTC_POWER_DEBUG
		pr_info("[P] suspend abort, events not matched or being processed\n");
#endif
		mutex_unlock(&autosleep_lock);
		goto out;
	}

	if (autosleep_state == PM_SUSPEND_ON) {
#ifdef CONFIG_HTC_POWER_DEBUG
		pr_info("[P] suspend abort, autosleep_state is ON\n");
#endif
		/* Autosleep disabled: return without requeueing. */
		mutex_unlock(&autosleep_lock);
		return;
	}

	if (autosleep_state >= PM_SUSPEND_MAX)
		hibernate();
	else {
		/* Tell the modem we are suspending before pm_suspend(). */
		smsm_change_state(SMSM_APPS_STATE, HTC_SMSM_APPS_RESUME,
				  HTC_SMSM_APPS_SUSPEND);
		printk(KERN_DEBUG "Enable garbage filter\n");
#ifdef CONFIG_HTC_POWER_DEBUG
		pr_info("[R] suspend start\n");
#endif
		pm_suspend(autosleep_state);
	}

	mutex_unlock(&autosleep_lock);

	if (!pm_get_wakeup_count(&final_count, false)) {
#ifdef CONFIG_HTC_POWER_DEBUG
		pr_info("[R] resume end\n");
#endif
		/* Flip the SMSM bits back to the resumed state. */
		smsm_change_state(SMSM_APPS_STATE, HTC_SMSM_APPS_SUSPEND,
				  HTC_SMSM_APPS_RESUME);
		printk(KERN_DEBUG "Disable garbage filter\n");
		goto out;
	}

	if (final_count == initial_count) {
#ifdef CONFIG_HTC_POWER_DEBUG
		pr_info("[P] wakeup occured for an unknown reason, wait HZ/2\n");
#endif
		schedule_timeout_uninterruptible(HZ / 2);
	}

#ifdef CONFIG_HTC_POWER_DEBUG
	pr_info("[R] resume end\n");
#endif
out:
	queue_up_suspend_work();
}
/*
 * notify_modem_app_reboot - advertise an imminent APPS reboot over SMSM.
 *
 * Pulses the SMSM_APPS_REBOOT bit in the APPS state word (clear-then-set,
 * the same idiom this file uses for SMSM_RESET) so the modem side observes
 * the reboot notification.
 */
static void notify_modem_app_reboot(void)
{
	const uint32_t reboot_mask = SMSM_APPS_REBOOT;

	smsm_change_state(SMSM_APPS_STATE, reboot_mask, reboot_mask);
}
/*
 * rpcrouter_smd_remote_close - tear down the RPC router SMD transport.
 *
 * Withdraws the SMSM_RPCINIT advertisement (raised by the probe path)
 * before closing the underlying SMD channel.
 *
 * Return: result of smd_close() on the transport channel.
 */
static int rpcrouter_smd_remote_close(void)
{
	int rc;

	/* Drop the RPC-init bit first so peers stop using the channel. */
	smsm_change_state(SMSM_APPS_STATE, SMSM_RPCINIT, 0);
	rc = smd_close(smd_remote_xprt.channel);

	return rc;
}
/*
 * debug_test_smsm - self-test of the SMSM state-callback API.
 * @buf: output buffer for human-readable "Test N - PASS" results
 * @max: size of @buf
 *
 * Runs three test cases against smsm_state_cb_register/deregister using the
 * local APPS state word (each test toggles SMSM bits on APPS itself so the
 * callback fires without modem involvement):
 *   1. register one callback, verify it fires on de-assert and re-assert,
 *      then verify deregistration stops further callbacks;
 *   2. extend an existing registration with a second mask bit (register
 *      returns 1 for "updated existing entry"), verify each bit fires
 *      independently, then peel the bits off one at a time;
 *   3. two registrations with distinct @data cookies, verifying the data
 *      pointer is part of the registration identity.
 * Each UT_*_INT macro appends a failure message and breaks out of the
 * enclosing do/while(0) on mismatch; deregister's return of 2 means "entry
 * fully removed".
 *
 * Return: number of bytes written into @buf.
 */
static int debug_test_smsm(char *buf, int max)
{
	int i = 0;
	int test_num = 0;
	int ret;

	/* Test case 1 - Register new callback for notification */
	do {
		test_num++;
		SMSM_CB_TEST_INIT();
		ret = smsm_state_cb_register(SMSM_APPS_STATE, SMSM_SMDINIT,
					     smsm_state_cb, (void *)0x1234);
		UT_EQ_INT(ret, 0);

		/* de-assert SMSM_SMD_INIT to trigger state update */
		UT_EQ_INT(smsm_cb_data.cb_count, 0);
		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, SMSM_SMDINIT, 0x0);
		UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);

		UT_EQ_INT(smsm_cb_data.cb_count, 1);
		UT_EQ_INT(smsm_cb_data.old_state & SMSM_SMDINIT, SMSM_SMDINIT);
		UT_EQ_INT(smsm_cb_data.new_state & SMSM_SMDINIT, 0x0);
		UT_EQ_INT((int)smsm_cb_data.data, 0x1234);

		/* re-assert SMSM_SMD_INIT to trigger state update */
		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_SMDINIT);
		UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 2);
		UT_EQ_INT(smsm_cb_data.old_state & SMSM_SMDINIT, 0x0);
		UT_EQ_INT(smsm_cb_data.new_state & SMSM_SMDINIT, SMSM_SMDINIT);

		/* deregister callback */
		ret = smsm_state_cb_deregister(SMSM_APPS_STATE, SMSM_SMDINIT,
					       smsm_state_cb, (void *)0x1234);
		UT_EQ_INT(ret, 2);

		/* make sure state change doesn't cause any more callbacks */
		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, SMSM_SMDINIT, 0x0);
		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_SMDINIT);
		UT_EQ_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 2);

		i += scnprintf(buf + i, max - i, "Test %d - PASS\n", test_num);
	} while (0);

	/* Test case 2 - Update already registered callback */
	do {
		test_num++;
		SMSM_CB_TEST_INIT();
		ret = smsm_state_cb_register(SMSM_APPS_STATE, SMSM_SMDINIT,
					     smsm_state_cb, (void *)0x1234);
		UT_EQ_INT(ret, 0);
		/* same cb+data, new mask bit: return 1 == existing entry updated */
		ret = smsm_state_cb_register(SMSM_APPS_STATE, SMSM_INIT,
					     smsm_state_cb, (void *)0x1234);
		UT_EQ_INT(ret, 1);

		/* verify both callback bits work */
		INIT_COMPLETION(smsm_cb_completion);
		UT_EQ_INT(smsm_cb_data.cb_count, 0);
		smsm_change_state(SMSM_APPS_STATE, SMSM_SMDINIT, 0x0);
		UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 1);
		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_SMDINIT);
		UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 2);

		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, SMSM_INIT, 0x0);
		UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 3);
		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_INIT);
		UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 4);

		/* deregister 1st callback */
		ret = smsm_state_cb_deregister(SMSM_APPS_STATE, SMSM_SMDINIT,
					       smsm_state_cb, (void *)0x1234);
		UT_EQ_INT(ret, 1);

		/* SMDINIT no longer registered: toggling it must not fire */
		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, SMSM_SMDINIT, 0x0);
		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_SMDINIT);
		UT_EQ_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 4);

		/* SMSM_INIT is still registered and must keep firing */
		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, SMSM_INIT, 0x0);
		UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 5);
		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_INIT);
		UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 6);

		/* deregister 2nd callback */
		ret = smsm_state_cb_deregister(SMSM_APPS_STATE, SMSM_INIT,
					       smsm_state_cb, (void *)0x1234);
		UT_EQ_INT(ret, 2);

		/* make sure state change doesn't cause any more callbacks */
		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, SMSM_INIT, 0x0);
		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_INIT);
		UT_EQ_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 6);

		i += scnprintf(buf + i, max - i, "Test %d - PASS\n", test_num);
	} while (0);

	/* Test case 3 - Two callback registrations with different data */
	do {
		test_num++;
		SMSM_CB_TEST_INIT();
		ret = smsm_state_cb_register(SMSM_APPS_STATE, SMSM_SMDINIT,
					     smsm_state_cb, (void *)0x1234);
		UT_EQ_INT(ret, 0);
		/* different data cookie: a NEW registration, so return 0 */
		ret = smsm_state_cb_register(SMSM_APPS_STATE, SMSM_INIT,
					     smsm_state_cb, (void *)0x3456);
		UT_EQ_INT(ret, 0);

		/* verify both callbacks work */
		INIT_COMPLETION(smsm_cb_completion);
		UT_EQ_INT(smsm_cb_data.cb_count, 0);
		smsm_change_state(SMSM_APPS_STATE, SMSM_SMDINIT, 0x0);
		UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 1);
		UT_EQ_INT((int)smsm_cb_data.data, 0x1234);
		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, SMSM_INIT, 0x0);
		UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 2);
		UT_EQ_INT((int)smsm_cb_data.data, 0x3456);

		/* cleanup and unregister
		 * degregister in reverse to verify data field is
		 * being used
		 */
		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_SMDINIT);
		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_INIT);
		ret = smsm_state_cb_deregister(SMSM_APPS_STATE, SMSM_INIT,
					       smsm_state_cb, (void *)0x3456);
		UT_EQ_INT(ret, 2);
		ret = smsm_state_cb_deregister(SMSM_APPS_STATE, SMSM_SMDINIT,
					       smsm_state_cb, (void *)0x1234);
		UT_EQ_INT(ret, 2);

		i += scnprintf(buf + i, max - i, "Test %d - PASS\n", test_num);
	} while (0);

	return i;
}
/**
 * wcn36xx_dxe_tx_frame - queue one frame on a DXE TX channel.
 * @wcn:    device context
 * @skb:    payload to transmit; the matching buffer descriptor (BD) was
 *          prepared by the caller at ctl->bd_phy_addr/bd_cpu_addr of the
 *          ring head control block
 * @is_low: true to use the low-priority TX channel, false for high
 *
 * Fills two consecutive DXE descriptors — the first carries the BD, the
 * second the skb data (DMA-mapped here) — then advances the ring head.
 * If the chip is in BMPS power-save, a direct register write would not
 * wake it, so the new frame is announced over the SMSM bus instead.
 *
 * Return: 0 on success, -EINVAL if the ring state is inconsistent.
 */
int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
			 struct sk_buff *skb,
			 bool is_low)
{
	struct wcn36xx_dxe_ctl *ctl = NULL;
	struct wcn36xx_dxe_desc *desc = NULL;
	struct wcn36xx_dxe_ch *ch = NULL;

	ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;

	/* First descriptor: the BD the caller prepared for this frame. */
	ctl = ch->head_blk_ctl;
	ctl->skb = NULL;
	desc = ctl->desc;

	/* Set source address of the BD we send */
	desc->src_addr_l = ctl->bd_phy_addr;

	desc->dst_addr_l = ch->dxe_wq;
	desc->fr_len = sizeof(struct wcn36xx_tx_bd);
	desc->ctrl = ch->ctrl_bd;

	wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX");

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
			 (char *)desc, sizeof(*desc));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
			 "BD   >>> ", (char *)ctl->bd_cpu_addr,
			 sizeof(struct wcn36xx_tx_bd));

	/* Second descriptor: the skb payload itself. */
	ctl = ctl->next;
	ctl->skb = skb;
	desc = ctl->desc;
	/* Only the BD control block carries a BD; a non-NULL bd_cpu_addr
	 * here means the ring head was misaligned. */
	if (ctl->bd_cpu_addr) {
		/* TODO: Recover from this situation */
		/* Fixed message: the original said "cannot be NULL", which
		 * contradicted this check. */
		wcn36xx_error("bd_cpu_addr must be NULL for skb DXE");
		return -EINVAL;
	}

	desc->src_addr_l = dma_map_single(NULL,
					  ctl->skb->data,
					  ctl->skb->len,
					  DMA_TO_DEVICE);

	desc->dst_addr_l = ch->dxe_wq;
	desc->fr_len = ctl->skb->len;

	/* set dxe descriptor to VALID */
	desc->ctrl = ch->ctrl_skb;

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
			 (char *)desc, sizeof(*desc));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB   >>> ",
			 (char *)ctl->skb->data, ctl->skb->len);

	/* Move the head of the ring to the next empty descriptor */
	ch->head_blk_ctl = ctl->next;

	/*
	 * When connected and trying to send data frame chip can be in sleep
	 * mode and writing to the register will not wake up the chip. Instead
	 * notify chip about new frame through SMSM bus.
	 */
	if (wcn->pw_state == WCN36XX_BMPS) {
		smsm_change_state(SMSM_APPS_STATE,
				  0,
				  WCN36XX_SMSM_WLAN_TX_ENABLE);
	} else {
		/* indicate End Of Packet and generate interrupt on descriptor
		 * done.
		 */
		wcn36xx_dxe_write_register(wcn,
					   ch->reg_ctrl, ch->def_ctrl);
	}
	return 0;
}
/*
 * smd_tty_open - tty driver open() hook for an SMD-backed port.
 * @tty: tty being opened (tty->index selects the SMD port)
 * @f:   unused file pointer
 *
 * On the first open of a port this votes for the owning peripheral via
 * pil_get(), optionally kicks the modem loopback channel, waits for the
 * channel to be allocated by the modem, then opens the SMD channel and
 * waits (up to 2 s) for it to reach the open state.  Subsequent opens just
 * bump open_count.  All of this runs under smd_tty_lock.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int smd_tty_open(struct tty_struct *tty, struct file *f)
{
	int res = 0;
	unsigned int n = tty->index;
	struct smd_tty_info *info;
	const char *peripheral = NULL;

	if (n >= MAX_SMD_TTYS || !smd_tty[n].smd)
		return -ENODEV;

	info = smd_tty + n;

	mutex_lock(&smd_tty_lock);
	tty->driver_data = info;

	/* NOTE(review): open_count is incremented here but never rolled
	 * back on the error paths below — confirm the close() path
	 * compensates, otherwise the count leaks on failed opens. */
	if (info->open_count++ == 0) {
		peripheral = smd_edge_to_subsystem(smd_tty[n].smd->edge);
		if (peripheral) {
			info->pil = pil_get(peripheral);
			if (IS_ERR(info->pil)) {
				res = PTR_ERR(info->pil);
				goto out;
			}

			/* Wait for the modem SMSM to be inited for the SMD
			 * Loopback channel to be allocated at the modem. Since
			 * the wait need to be done atmost once, using msleep
			 * doesn't degrade the performance. */
			if (n == LOOPBACK_IDX) {
				if (!is_modem_smsm_inited())
					msleep(5000);
				smsm_change_state(SMSM_APPS_STATE,
						  0, SMSM_SMD_LOOPBACK);
				msleep(100);
			}

			/*
			 * Wait for a channel to be allocated so we know
			 * the modem is ready enough.
			 */
			if (smd_tty_modem_wait) {
				res = wait_for_completion_interruptible_timeout(
					&info->ch_allocated,
					msecs_to_jiffies(smd_tty_modem_wait *
							 1000));
				if (res == 0) {
					pr_err("Timed out waiting for SMD"
								" channel\n");
					res = -ETIMEDOUT;
					goto release_pil;
				} else if (res < 0) {
					pr_err("Error waiting for SMD channel:"
									" %d\n",
						res);
					goto release_pil;
				}

				res = 0;
			}
		}

		info->tty = tty;
		tasklet_init(&info->tty_tsklt, smd_tty_read,
			     (unsigned long)info);
		wake_lock_init(&info->wake_lock, WAKE_LOCK_SUSPEND,
			       smd_tty[n].smd->port_name);
		if (!info->ch) {
			res = smd_named_open_on_edge(smd_tty[n].smd->port_name,
						     smd_tty[n].smd->edge,
						     &info->ch, info,
						     smd_tty_notify);
			if (res < 0) {
				pr_err("%s: %s open failed %d\n", __func__,
				       smd_tty[n].smd->port_name, res);
				/* NOTE(review): tasklet_init/wake_lock_init
				 * above are not undone on this path —
				 * confirm that is safe on a retried open. */
				goto release_pil;
			}

			res = wait_event_interruptible_timeout(
				info->ch_opened_wait_queue,
				info->is_open, (2 * HZ));
			if (res == 0)
				res = -ETIMEDOUT;
			if (res < 0) {
				pr_err("%s: wait for %s smd_open failed %d\n",
				       __func__, smd_tty[n].smd->port_name,
				       res);
				goto release_pil;
			}
			res = 0;
		}
	}

release_pil:
	/* NOTE(review): if peripheral was NULL, info->pil was never set by
	 * this call — pil_put() here may act on a stale/uninitialized
	 * pointer; verify info->pil is zeroed at init. */
	if (res < 0)
		pil_put(info->pil);
	else
		smd_disable_read_intr(info->ch);
out:
	mutex_unlock(&smd_tty_lock);

	return res;
}
/*
 * apps_sleep - put the applications processor through an APPS_SLEEP power
 * collapse, handshaking with the modem over SMSM/DEM.
 *
 * Sequence: mask wakeup IRQs/GPIOs, program a bounded sleep duration,
 * announce DEM_SLAVE_SMSM_SLEEP to the modem, wait for the master's SLEEP
 * acknowledgement, arm the power-down hardware (A11S registers), patch the
 * reset vector at physical 0x0 to resume at msm_pm_collapse_exit(), and
 * finally call msm_pm_collapse().  Statement order is the protocol — do
 * not reorder.
 *
 * NOTE(review): saved_vector[] is captured but never restored here, the
 * ioremap() of page 0 is never unmapped, and exit_state/exit_wait_any_set
 * are computed but unused — presumably the wake-up side handles these;
 * confirm against the caller.
 */
static void apps_sleep(void)
{
	uint32_t saved_vector[2];
	unsigned int sleep_delay;
	static uint32_t *msm_pm_reset_vector;
	uint32_t enter_state;
	uint32_t enter_wait_set = 0;
	uint32_t enter_wait_clear = 0;
	uint32_t exit_state;
//	uint32_t exit_wait_clear = 0;
	uint32_t exit_wait_any_set = 0;
	int ret;
	int collapsed = 0;

	enter_state = DEM_SLAVE_SMSM_SLEEP;
	enter_wait_set = DEM_MASTER_SMSM_SLEEP;
	exit_state = DEM_SLAVE_SMSM_SLEEP_EXIT;
	exit_wait_any_set = DEM_MASTER_SMSM_SLEEP_EXIT;

	msm_irq_enter_sleep1(1, 0);
	msm_gpio_enter_sleep(0);

	/* 5 s at 192 kHz ticks. */
	sleep_delay = 192000*5; /* APPS_SLEEP does not allow infinite timeout */
	ret = smsm_set_sleep_duration(sleep_delay);
	printk("smsm_set_sleep_duration result: %d\n", ret);
	/* Leave RUN, enter the DEM SLEEP slave state. */
	ret = smsm_change_state(PM_SMSM_WRITE_STATE, PM_SMSM_WRITE_RUN,
				enter_state);
	printk("smsm_change_state result: %d\n", ret);
	msm_irq_enter_sleep2(1, 0);

	// APPS_SLEEP
	//ret = smsm_change_state(PM_SMSM_WRITE_STATE, PM_SMSM_WRITE_RUN, enter_state);
	//printk("smsm_change_state result: %d\n", ret);
	/* Block until the time master acknowledges the sleep request. */
	ret = msm_pm_wait_state(enter_wait_set, enter_wait_clear, 0, 0);
	printk("msm_pm_wait_state result: %d\n", ret);

	// msm_enter_prep_hw();
	/* Arm power-down and secure-op hardware for the collapse. */
	writel(1, A11S_PWRDOWN);
	writel(4, A11S_SECOP);
	printk("A11S_PWRDOWN: %.8x\n", readl(A11S_PWRDOWN));
	printk("A11S_SECOP: %.8x\n", readl(A11S_SECOP));
	printk("msm_sleep(): enter "
	       "A11S_CLK_SLEEP_EN %x, A11S_PWRDOWN %x, "
	       "smsm_get_state %x\n",
	       readl(A11S_CLK_SLEEP_EN), readl(A11S_PWRDOWN),
	       smsm_get_state(PM_SMSM_READ_STATE));

	/* Breadcrumb for post-mortem debugging of a failed collapse. */
	magic_num = 0xAAAA1111;
	writel(magic_num, HTC_POWER_COLLAPSE_MAGIC_NUM);
	/* Map physical page 0 so the ARM reset vector can be patched. */
	msm_pm_reset_vector = ioremap(0x0, PAGE_SIZE);
	// smsm_print_sleep_info(0);
	saved_vector[0] = msm_pm_reset_vector[0];
	saved_vector[1] = msm_pm_reset_vector[1];
	/* On wake, the CPU resets to 0x0: jump to the collapse-exit stub. */
	msm_pm_reset_vector[0] = 0xE51FF004; /* ldr pc, 4 */
	msm_pm_reset_vector[1] = virt_to_phys(msm_pm_collapse_exit);
	printk(KERN_INFO "msm_sleep(): vector %x %x -> "
	       "%x %x\n", saved_vector[0], saved_vector[1],
	       msm_pm_reset_vector[0], msm_pm_reset_vector[1]);
	collapsed = msm_pm_collapse();
	printk("pmmod: collapsed: %d\n", collapsed);
}; /* NOTE(review): stray ';' after function body — harmless, kept as-is */
/*
 * wcn36xx_msm_smsm_change_state - SMSM wrapper pinned to the APPS state.
 * @clear_mask: bits to clear in SMSM_APPS_STATE
 * @set_mask:   bits to set in SMSM_APPS_STATE
 *
 * Return: result of smsm_change_state().
 */
static int wcn36xx_msm_smsm_change_state(u32 clear_mask, u32 set_mask)
{
	int rc;

	rc = smsm_change_state(SMSM_APPS_STATE, clear_mask, set_mask);

	return rc;
}
/*
 * msm_timer_sync_sclk - read the slow clock value published by the modem
 * time master over shared memory, using the DEM SMSM handshake.
 * @time_start:   callback to (re)start a timeout window
 * @time_expired: callback returning true once the window has elapsed
 * @update:       optional callback invoked with the clock value on success
 * @data:         opaque context passed to the callbacks
 *
 * Handshake: wait for the master's TIME_PENDING bit to drop, raise
 * SLAVE_TIME_REQUEST, wait for TIME_PENDING to rise (master has written
 * SMEM_SMEM_SLOW_CLOCK_VALUE), switch to SLAVE_TIME_POLL, spin until the
 * shared value is non-zero, then clear all slave bits back to
 * SLAVE_TIME_INIT.
 *
 * Return: the slow clock value, or 0 on any failure/timeout.
 */
static uint32_t msm_timer_sync_sclk(
	void (*time_start)(struct msm_timer_sync_data_t *data),
	bool (*time_expired)(struct msm_timer_sync_data_t *data),
	void (*update)(struct msm_timer_sync_data_t *data, uint32_t clk_val),
	struct msm_timer_sync_data_t *data)
{
	/* Time Master State Bits */
	/* One pending bit per CPU; APPS' bit is indexed by SMSM_APPS_STATE. */
	#define MASTER_BITS_PER_CPU 1
	#define MASTER_TIME_PENDING \
		(0x01UL << (MASTER_BITS_PER_CPU * SMSM_APPS_STATE))

	/* Time Slave State Bits */
	#define SLAVE_TIME_REQUEST 0x0400
	#define SLAVE_TIME_POLL 0x0800
	#define SLAVE_TIME_INIT 0x1000

	uint32_t *smem_clock;
	uint32_t smem_clock_val;
	uint32_t state;

	smem_clock = smem_alloc(SMEM_SMEM_SLOW_CLOCK_VALUE, sizeof(uint32_t));
	if (smem_clock == NULL) {
		printk(KERN_ERR "no smem clock\n");
		return 0;
	}

	state = smsm_get_state(SMSM_MODEM_STATE);
	if ((state & SMSM_INIT) == 0) {
		printk(KERN_ERR "smsm not initialized\n");
		return 0;
	}

	/* Phase 1: wait for any previous request to drain (PENDING low). */
	time_start(data);
	while ((state = smsm_get_state(SMSM_TIME_MASTER_DEM)) &
		MASTER_TIME_PENDING) {
		if (time_expired(data)) {
			printk(KERN_INFO "get_smem_clock: timeout 1 still "
				"invalid state %x\n", state);
			return 0;
		}
	}

	/* Phase 2: raise REQUEST (clearing POLL/INIT) and wait for the
	 * master to acknowledge by raising PENDING. */
	smsm_change_state(SMSM_APPS_DEM, SLAVE_TIME_POLL | SLAVE_TIME_INIT,
		SLAVE_TIME_REQUEST);

	time_start(data);
	while (!((state = smsm_get_state(SMSM_TIME_MASTER_DEM)) &
		MASTER_TIME_PENDING)) {
		if (time_expired(data)) {
			printk(KERN_INFO "get_smem_clock: timeout 2 still "
				"invalid state %x\n", state);
			smem_clock_val = 0;
			goto sync_sclk_exit;
		}
	}

	/* Phase 3: switch REQUEST -> POLL and spin on the shared value. */
	smsm_change_state(SMSM_APPS_DEM, SLAVE_TIME_REQUEST, SLAVE_TIME_POLL);

	time_start(data);
	do {
		smem_clock_val = *smem_clock;
	} while (smem_clock_val == 0 && !time_expired(data));

	state = smsm_get_state(SMSM_TIME_MASTER_DEM);

	if (smem_clock_val) {
		if (update != NULL)
			update(data, smem_clock_val);

		if (msm_timer_debug_mask & MSM_TIMER_DEBUG_SYNC)
			printk(KERN_INFO
				"get_smem_clock: state %x clock %u\n",
				state, smem_clock_val);
	} else {
		printk(KERN_INFO
			"get_smem_clock: timeout state %x clock %u\n",
			state, smem_clock_val);
	}

sync_sclk_exit:
	/* Return the slave state machine to INIT regardless of outcome. */
	smsm_change_state(SMSM_APPS_DEM,
		SLAVE_TIME_REQUEST | SLAVE_TIME_POLL, SLAVE_TIME_INIT);

	return smem_clock_val;
}