void mdp4_dmae_done_dtv(void)
{
	int cndx;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;

	cndx = 0;
	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
		return;
	}

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	pr_debug("%s: cpu=%d\n", __func__, smp_processor_id());

	spin_lock(&vctrl->spin_lock);
	if (vctrl->blt_change) {
		if (pipe->ov_blt_addr) {
			mdp4_overlayproc_cfg(pipe);
			mdp4_overlay_dmae_xy(pipe);
			mdp4_dtv_blt_ov_update(pipe);
			pipe->blt_ov_done++;
			vsync_irq_enable(INTR_OVERLAY1_DONE,
						MDP_OVERLAY1_TERM);
			mdp4_stat.kickoff_ov1++;
			outpdw(MDP_BASE + 0x0008, 0); /* kickoff overlay1 engine */
		}
		vctrl->blt_change = 0;
	}

	complete_all(&vctrl->dmae_comp);
	mdp4_overlay_dma_commit(MDP4_MIXER1);
	vsync_irq_disable(INTR_DMA_E_DONE, MDP_DMA_E_TERM);
	spin_unlock(&vctrl->spin_lock);
}
/*
 * mdp4_dmap_done_lcdc: called from isr
 */
void mdp4_dmap_done_lcdc(int cndx)
{
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
		return;
	}

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	spin_lock(&vctrl->spin_lock);
	vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	if (vctrl->blt_change) {
		mdp4_overlayproc_cfg(pipe);
		mdp4_overlay_dmap_xy(pipe);
		if (pipe->ov_blt_addr) {
			mdp4_lcdc_blt_ov_update(pipe);
			pipe->ov_cnt++;
			/* Prefill one frame */
			vsync_irq_enable(INTR_OVERLAY0_DONE,
						MDP_OVERLAY0_TERM);
			/* kickoff overlay0 engine */
			mdp4_stat.kickoff_ov0++;
			vctrl->ov_koff++;	/* make up for prefill */
			outpdw(MDP_BASE + 0x0004, 0);
		}
		vctrl->blt_change = 0;
	}

	complete_all(&vctrl->dmap_comp);
	if (mdp_rev <= MDP_REV_41)
		mdp4_mixer_blend_cfg(MDP4_MIXER0);
	mdp4_overlay_dma_commit(cndx);
	spin_unlock(&vctrl->spin_lock);
}
static void ipa_mhi_rm_prod_notify(void *user_data, enum ipa_rm_event event,
	unsigned long data)
{
	IPA_MHI_FUNC_ENTRY();

	switch (event) {
	case IPA_RM_RESOURCE_GRANTED:
		IPA_MHI_DBG("IPA_RM_RESOURCE_GRANTED\n");
		complete_all(&ipa_mhi_ctx->rm_prod_granted_comp);
		break;
	case IPA_RM_RESOURCE_RELEASED:
		IPA_MHI_DBG("IPA_RM_RESOURCE_RELEASED\n");
		break;
	default:
		IPA_MHI_ERR("unexpected event %d\n", event);
		WARN_ON(1);
		break;
	}

	IPA_MHI_FUNC_EXIT();
}
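/*
 * Illustrative sketch only, not from the driver source: how a requester
 * typically pairs with ipa_mhi_rm_prod_notify() above. The helper name
 * ipa_mhi_request_prod_sketch() is hypothetical; the assumed contract is
 * that ipa_rm_request_resource() returns -EINPROGRESS until the GRANTED
 * event fires the callback, which then releases this completion.
 */
static int ipa_mhi_request_prod_sketch(void)
{
	int res;

	reinit_completion(&ipa_mhi_ctx->rm_prod_granted_comp);
	res = ipa_rm_request_resource(IPA_RM_RESOURCE_MHI_PROD);
	if (res == -EINPROGRESS) {
		/* grant arrives asynchronously via the notify callback */
		if (!wait_for_completion_timeout(
				&ipa_mhi_ctx->rm_prod_granted_comp,
				msecs_to_jiffies(1000)))
			return -ETIMEDOUT;
		res = 0;
	}

	return res;
}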
/*
 * mdp4_overlay0_done_dsi_video: called from isr
 */
void mdp4_overlay0_done_dsi_video(int cndx)
{
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;
	if (pipe == NULL)
		return;

	spin_lock(&vctrl->spin_lock);
	vsync_irq_disable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
	vctrl->ov_done++;
	complete_all(&vctrl->ov_comp);
	if (pipe->ov_blt_addr == 0) {
		spin_unlock(&vctrl->spin_lock);
		return;
	}

	mdp4_dsi_video_blt_dmap_update(pipe);
	pipe->dmap_cnt++;
	spin_unlock(&vctrl->spin_lock);
}
int sap_mlme_notifier(struct slsi_dev *sdev, unsigned long event)
{
	int i;
	struct netdev_vif *ndev_vif;

	SLSI_INFO_NODEV("Notifier event received %s\n",
			event ? "SCSC_WIFI_FAILURE_RESET" : "SCSC_WIFI_STOP");
	if ((event != SCSC_WIFI_FAILURE_RESET) && (event != SCSC_WIFI_STOP))
		return -EIO;

	switch (event) {
	case SCSC_WIFI_STOP:
		/* Stop sending signals down */
		sdev->mlme_blocked = true;
		SLSI_INFO_NODEV("MLME BLOCKED\n");

		/* cleanup all the VIFs and scan data */
		SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
		complete_all(&sdev->sig_wait.completion);
		for (i = 1; i <= CONFIG_SCSC_WLAN_MAX_INTERFACES; i++)
			if (sdev->netdev[i]) {
				ndev_vif = netdev_priv(sdev->netdev[i]);
				slsi_scan_cleanup(sdev, sdev->netdev[i]);
				SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
				slsi_vif_cleanup(sdev, sdev->netdev[i], 0);
				SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
			}
		SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
		SLSI_INFO_NODEV("Force cleaned all VIFs\n");
		break;

	case SCSC_WIFI_FAILURE_RESET:
		break;
	}

	return 0;
}
/* Perform an expiry operation */
int autofs4_expire_run(struct super_block *sb,
		       struct vfsmount *mnt,
		       struct autofs_sb_info *sbi,
		       struct autofs_packet_expire __user *pkt_p)
{
	struct autofs_packet_expire pkt;
	struct autofs_info *ino;
	struct dentry *dentry;
	int ret = 0;

	memset(&pkt, 0, sizeof(pkt));
	pkt.hdr.proto_version = sbi->version;
	pkt.hdr.type = autofs_ptype_expire;

	dentry = autofs4_expire_indirect(sb, mnt, sbi, 0);
	if (!dentry)
		return -EAGAIN;

	pkt.len = dentry->d_name.len;
	memcpy(pkt.name, dentry->d_name.name, pkt.len);
	pkt.name[pkt.len] = '\0';

	if (copy_to_user(pkt_p, &pkt, sizeof(struct autofs_packet_expire)))
		ret = -EFAULT;

	spin_lock(&sbi->fs_lock);
	ino = autofs4_dentry_ino(dentry);
	/* avoid rapid-fire expire attempts if expiry fails */
	ino->last_used = jiffies;
	ino->flags &= ~(AUTOFS_INF_EXPIRING | AUTOFS_INF_WANT_EXPIRE);
	complete_all(&ino->expire_complete);
	spin_unlock(&sbi->fs_lock);

	/* drop the reference only after the last use of the dentry */
	dput(dentry);

	return ret;
}
void mdp4_dmap_done_dsi_cmd(int cndx)
{
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	int diff;

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;
	if (pipe == NULL)
		return;

	/* blt enabled */
	spin_lock(&vctrl->spin_lock);
	vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	vctrl->dmap_done++;
	diff = vctrl->ov_done - vctrl->dmap_done;
	pr_debug("%s: ov_koff=%d ov_done=%d dmap_koff=%d dmap_done=%d cpu=%d\n",
		__func__, vctrl->ov_koff, vctrl->ov_done, vctrl->dmap_koff,
		vctrl->dmap_done, smp_processor_id());
	complete_all(&vctrl->dmap_comp);

	if (diff <= 0) {
		if (vctrl->blt_wait)
			vctrl->blt_wait = 0;
		spin_unlock(&vctrl->spin_lock);
		return;
	}

	/* kick dmap */
	mdp4_dsi_cmd_blt_dmap_update(pipe);
	pipe->dmap_cnt++;
	mdp4_stat.kickoff_dmap++;
	vctrl->dmap_koff++;
	vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	outpdw(MDP_BASE + 0x000c, 0); /* kickoff dmap engine */
	mb();
	spin_unlock(&vctrl->spin_lock);
}
static bool fw_get_filesystem_firmware(struct device *device,
				       struct firmware_buf *buf)
{
	int i;
	bool success = false;
	char *path = __getname();

	for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
		struct file *file;

		/* skip the unset customized path */
		if (!fw_path[i][0])
			continue;

		snprintf(path, PATH_MAX, "%s/%s", fw_path[i], buf->fw_id);

		file = filp_open(path, O_RDONLY, 0);
		if (IS_ERR(file))
			continue;
		success = fw_read_file_contents(file, buf);
		fput(file);
		if (success)
			break;
	}
	__putname(path);

	if (success) {
		dev_dbg(device, "firmware: direct-loading firmware %s\n",
			buf->fw_id);
		mutex_lock(&fw_lock);
		set_bit(FW_STATUS_DONE, &buf->status);
		complete_all(&buf->completion);
		mutex_unlock(&fw_lock);
	}

	return success;
}
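/*
 * Illustrative sketch only (the helper name is hypothetical, and the real
 * firmware core also handles aborts and the user-mode fallback): the
 * consumer side of buf->completion. request_firmware() ultimately blocks
 * like this until the direct load above marks the buffer done and calls
 * complete_all().
 */
static int fw_wait_for_load_sketch(struct firmware_buf *buf)
{
	wait_for_completion(&buf->completion);
	return test_bit(FW_STATUS_DONE, &buf->status) ? 0 : -ENOENT;
}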
/* Perform an expiry operation */
int autofs4_expire_run(struct super_block *sb,
		       struct vfsmount *mnt,
		       struct autofs_sb_info *sbi,
		       struct autofs_packet_expire __user *pkt_p)
{
	struct autofs_packet_expire pkt;
	struct autofs_info *ino;
	struct dentry *dentry;
	int ret = 0;

	memset(&pkt, 0, sizeof(pkt));

	pkt.hdr.proto_version = sbi->version;
	pkt.hdr.type = autofs_ptype_expire;

	dentry = autofs4_expire_indirect(sb, mnt, sbi, 0);
	if (!dentry)
		return -EAGAIN;

	pkt.len = dentry->d_name.len;
	memcpy(pkt.name, dentry->d_name.name, pkt.len);
	pkt.name[pkt.len] = '\0';

	if (copy_to_user(pkt_p, &pkt, sizeof(struct autofs_packet_expire)))
		ret = -EFAULT;

	spin_lock(&sbi->fs_lock);
	ino = autofs4_dentry_ino(dentry);
	ino->flags &= ~AUTOFS_INF_EXPIRING;
	if (!d_unhashed(dentry))
		managed_dentry_clear_transit(dentry);
	complete_all(&ino->expire_complete);
	spin_unlock(&sbi->fs_lock);

	/* drop the reference only after the last use of the dentry */
	dput(dentry);

	return ret;
}
int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
			    struct autofs_sb_info *sbi, int when)
{
	struct dentry *dentry;
	int ret = -EAGAIN;

	if (autofs_type_trigger(sbi->type))
		dentry = autofs4_expire_direct(sb, mnt, sbi, when);
	else
		dentry = autofs4_expire_indirect(sb, mnt, sbi, when);

	if (dentry) {
		struct autofs_info *ino = autofs4_dentry_ino(dentry);

		/* This is synchronous because it makes the daemon a
		 * little easier */
		ret = autofs4_wait(sbi, dentry, NFY_EXPIRE);

		spin_lock(&sbi->fs_lock);
		ino->flags &= ~AUTOFS_INF_EXPIRING;
		spin_lock(&dentry->d_lock);
		if (!ret) {
			if ((IS_ROOT(dentry) ||
			    (autofs_type_indirect(sbi->type) &&
			     IS_ROOT(dentry->d_parent))) &&
			    !(dentry->d_flags & DCACHE_NEED_AUTOMOUNT))
				__managed_dentry_set_automount(dentry);
		}
		spin_unlock(&dentry->d_lock);
		complete_all(&ino->expire_complete);
		spin_unlock(&sbi->fs_lock);
		dput(dentry);
	}

	return ret;
}
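/*
 * Illustrative sketch only, not the fs/autofs4 source: the reader side of
 * ino->expire_complete. A process walking into a dentry that is mid-expiry
 * blocks here until the expire paths above clear AUTOFS_INF_EXPIRING and
 * call complete_all(), so every sleeper is released at once.
 */
static int autofs4_expire_wait_sketch(struct dentry *dentry)
{
	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
	struct autofs_info *ino = autofs4_dentry_ino(dentry);

	spin_lock(&sbi->fs_lock);
	if (ino->flags & AUTOFS_INF_EXPIRING) {
		spin_unlock(&sbi->fs_lock);
		wait_for_completion(&ino->expire_complete);
		return 0;
	}
	spin_unlock(&sbi->fs_lock);

	return 0;
}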
/*!
 * This function sets the internal RTC time based on tm in Gregorian date.
 *
 * @param  tm           the time value to be set in the RTC
 *
 * @return  0 if successful; non-zero otherwise.
 */
static int mxc_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	struct rtc_drv_data *pdata = dev_get_drvdata(dev);
	void __iomem *ioaddr = pdata->ioaddr;
	unsigned long time;
	u64 old_time_47bit, new_time_47bit;
	int ret;

	ret = rtc_tm_to_time(tm, &time);
	if (ret != 0)
		return ret;

	old_time_47bit = (((u64)__raw_readl(ioaddr + SRTC_LPSCMR)) << 32 |
			((u64)__raw_readl(ioaddr + SRTC_LPSCLR)));
	old_time_47bit >>= SRTC_LPSCLR_LLPSC_LSH;

	__raw_writel(time, ioaddr + SRTC_LPSCMR);
	rtc_write_sync_lp(ioaddr);

	new_time_47bit = (((u64)__raw_readl(ioaddr + SRTC_LPSCMR)) << 32 |
			((u64)__raw_readl(ioaddr + SRTC_LPSCLR)));
	new_time_47bit >>= SRTC_LPSCLR_LLPSC_LSH;

	/* update the difference between previous time and new time */
	time_diff = new_time_47bit - old_time_47bit;

	/* signal all waiting threads that time changed */
	complete_all(&srtc_completion);
	/* allow signalled threads to handle the time change notification */
	schedule();
	/* reinitialize completion variable */
	INIT_COMPLETION(srtc_completion);

	return 0;
}
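/*
 * Illustrative sketch only (hypothetical helper, not the driver source):
 * what a waiter on srtc_completion looks like. complete_all() releases
 * every sleeper at once, and the setter re-arms the completion with
 * INIT_COMPLETION() for the next time change, so each waiter simply
 * blocks and then samples the published delta.
 */
static u64 srtc_wait_time_change_sketch(void)
{
	wait_for_completion(&srtc_completion);
	return time_diff;	/* delta published by mxc_rtc_set_time() */
}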
/*
 * mdp4_overlay0_done_mddi: called from isr
 */
void mdp4_overlay0_done_mddi(struct mdp_dma_data *dma)
{
	mdp_disable_irq_nosync(MDP_OVERLAY0_TERM);
	dma->busy = FALSE;
#ifdef CONFIG_SHLCDC_BOARD
	if (pending_pipe)
		complete_all(&pending_pipe->comp);
#else /* CONFIG_SHLCDC_BOARD */
	complete(&dma->comp);
#endif /* CONFIG_SHLCDC_BOARD */
	mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);

	if (busy_wait_cnt)
		busy_wait_cnt--;

	pr_debug("%s: ISR-done\n", __func__);

	if (mddi_pipe->blt_addr) {
		if (mddi_pipe->blt_cnt == 0) {
			mdp4_overlayproc_cfg(mddi_pipe);
			mdp4_overlay_dmap_xy(mddi_pipe);
			mddi_pipe->ov_cnt = 0;
			mddi_pipe->dmap_cnt = 0;
			/* BLT start from next frame */
		} else {
			mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON,
					FALSE);
			mdp4_blt_xy_update(mddi_pipe);
			outpdw(MDP_BASE + 0x000c, 0x0); /* start DMAP */
		}
		mddi_pipe->blt_cnt++;
		mddi_pipe->ov_cnt++;
	}
}
void mdp4_dmap_done_dsi_video(int cndx)
{
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
		return;
	}

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	spin_lock(&vctrl->spin_lock);
	vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	if (vctrl->blt_change &&
	    mdp_ov0_blt_ctl == MDP4_BLT_SWITCH_TG_ON_ISR) {
		mdp4_overlayproc_cfg(pipe);
		mdp4_overlay_dmap_xy(pipe);
		if (pipe->ov_blt_addr) {
			mdp4_dsi_video_blt_ov_update(pipe);
			pipe->ov_cnt++;
			vsync_irq_enable(INTR_OVERLAY0_DONE,
						MDP_OVERLAY0_TERM);
			mdp4_stat.kickoff_ov0++;
			vctrl->ov_koff++;
			outpdw(MDP_BASE + 0x0004, 0); /* kickoff overlay0 engine */
		}
		vctrl->blt_change = 0;
	}

	complete_all(&vctrl->dmap_comp);
	mdp4_overlay_dma_commit(cndx);
	spin_unlock(&vctrl->spin_lock);
}
int mdp_lcdc_off(struct platform_device *pdev)
{
	int ret = 0;
	struct msm_fb_data_type *mfd;
	uint32 timer_base = LCDC_BASE;
	uint32 block = MDP_DMA2_BLOCK;

	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);

#ifdef CONFIG_FB_MSM_MDP40
	if (mfd->panel.type == HDMI_PANEL) {
		block = MDP_DMA_E_BLOCK;
		timer_base = DTV_BASE;
	}
#endif

	mdp_histogram_ctrl_all(FALSE);

	down(&mfd->dma->mutex);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	MDP_OUTP(MDP_BASE + timer_base, 0);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
	mdp_pipe_ctrl(block, MDP_BLOCK_POWER_OFF, FALSE);
	ret = panel_next_off(pdev);
	up(&mfd->dma->mutex);
	atomic_set(&vsync_cntrl.suspend, 1);
	atomic_set(&vsync_cntrl.vsync_resume, 0);
	complete_all(&vsync_cntrl.vsync_wait);

	/* delay to let the last frame finish */
	msleep(16);

	return ret;
}
/*
 * mdp4_overlay0_done_lcdc: called from isr
 */
void mdp4_overlay0_done_lcdc(int cndx)
{
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	spin_lock(&vctrl->spin_lock);
	vsync_irq_disable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
	vctrl->ov_done++;
	complete_all(&vctrl->ov_comp);
	if (pipe->ov_blt_addr == 0) {
		spin_unlock(&vctrl->spin_lock);
		return;
	}

	if (mdp_rev <= MDP_REV_41)
		mdp4_mixer_blend_cfg(MDP4_MIXER0);

	mdp4_lcdc_blt_dmap_update(pipe);
	pipe->dmap_cnt++;
	spin_unlock(&vctrl->spin_lock);
}
/* MSC, RCP, RAP messages - mandatory for compliance */
static void mhl_cbus_isr(void)
{
	uint8_t regval;
	int req_done = FALSE;
	uint8_t sub_cmd = 0x0;
	uint8_t cmd_data = 0x0;
	int msc_msg_recved = FALSE;
	int rc = -1;

	regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x08);
	if (regval == 0xff)
		return;

	/*
	 * clear all interrupts that were raised
	 * even if we did not process
	 */
	if (regval)
		mhl_i2c_reg_write(TX_PAGE_CBUS, 0x08, regval);

	pr_debug("%s: CBUS_INT = %02x\n", __func__, regval);

	/* MSC_MSG (RCP/RAP) */
	if (regval & BIT3) {
		sub_cmd = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x18);
		cmd_data = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x19);
		msc_msg_recved = TRUE;
	}

	/* MSC_MT_ABRT/MSC_MR_ABRT/DDC_ABORT */
	if (regval & (BIT6 | BIT5 | BIT2))
		mhl_cbus_process_errors(regval);

	/* MSC_REQ_DONE */
	if (regval & BIT4)
		req_done = TRUE;

	/* Now look for interrupts on CBUS_MSC_INT2 */
	regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x1E);

	/*
	 * clear all interrupts that were raised
	 * even if we did not process
	 */
	if (regval)
		mhl_i2c_reg_write(TX_PAGE_CBUS, 0x1E, regval);

	pr_debug("%s: CBUS_MSC_INT2 = %02x\n", __func__, regval);

	/* received SET_INT */
	if (regval & BIT2) {
		uint8_t intr;

		intr = mhl_i2c_reg_read(TX_PAGE_CBUS, 0xA0);
		mhl_msc_recv_set_int(0, intr);
		pr_debug("%s: MHL_INT_0 = %02x\n", __func__, intr);

		intr = mhl_i2c_reg_read(TX_PAGE_CBUS, 0xA1);
		mhl_msc_recv_set_int(1, intr);
		pr_debug("%s: MHL_INT_1 = %02x\n", __func__, intr);

		mhl_i2c_reg_write(TX_PAGE_CBUS, 0xA0, 0xFF);
		mhl_i2c_reg_write(TX_PAGE_CBUS, 0xA1, 0xFF);
		mhl_i2c_reg_write(TX_PAGE_CBUS, 0xA2, 0xFF);
		mhl_i2c_reg_write(TX_PAGE_CBUS, 0xA3, 0xFF);
	}

	/* received WRITE_STAT */
	if (regval & BIT3) {
		uint8_t stat;

		stat = mhl_i2c_reg_read(TX_PAGE_CBUS, 0xB0);
		mhl_msc_recv_write_stat(0, stat);
		pr_debug("%s: MHL_STATUS_0 = %02x\n", __func__, stat);

		stat = mhl_i2c_reg_read(TX_PAGE_CBUS, 0xB1);
		mhl_msc_recv_write_stat(1, stat);
		pr_debug("%s: MHL_STATUS_1 = %02x\n", __func__, stat);

		mhl_i2c_reg_write(TX_PAGE_CBUS, 0xB0, 0xFF);
		mhl_i2c_reg_write(TX_PAGE_CBUS, 0xB1, 0xFF);
		mhl_i2c_reg_write(TX_PAGE_CBUS, 0xB2, 0xFF);
		mhl_i2c_reg_write(TX_PAGE_CBUS, 0xB3, 0xFF);
	}

	/* received MSC_MSG */
	if (msc_msg_recved) {
		rc = mhl_msc_recv_msc_msg(sub_cmd, cmd_data);
		if (rc)
			pr_err("MHL: mhl msc recv msc msg failed(%d)!\n", rc);
	}

	/* complete last command */
	if (req_done)
		complete_all(&mhl_msm_state->msc_cmd_done);
}
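/*
 * Illustrative sketch only (hypothetical helper; the register write used
 * to start the request is a stand-in, not the real command sequence): the
 * sender side of msc_cmd_done. A queued MSC command is written out and
 * the caller blocks until the MSC_REQ_DONE path above calls complete_all().
 */
static int mhl_msc_send_cmd_sketch(uint8_t reg, uint8_t val)
{
	INIT_COMPLETION(mhl_msm_state->msc_cmd_done);
	mhl_i2c_reg_write(TX_PAGE_CBUS, reg, val);	/* start the request */
	if (!wait_for_completion_timeout(&mhl_msm_state->msc_cmd_done,
					 msecs_to_jiffies(500)))
		return -ETIMEDOUT;

	return 0;
}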
int mdp4_dsi_cmd_off(struct platform_device *pdev)
{
	int ret = 0;
	int cndx = 0;
	struct msm_fb_data_type *mfd;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	struct vsync_update *vp;
	int undx;
	int need_wait, cnt;
	unsigned long flags;

	pr_debug("%s+: pid=%d\n", __func__, current->pid);

	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_err("%s: NO base pipe\n", __func__);
		return ret;
	}

	need_wait = 0;
	mutex_lock(&vctrl->update_lock);
	atomic_set(&vctrl->suspend, 1);

	complete_all(&vctrl->vsync_comp);

	pr_debug("%s: clk=%d pan=%d\n", __func__,
			vctrl->clk_enabled, vctrl->pan_display);
	if (vctrl->clk_enabled)
		need_wait = 1;
	mutex_unlock(&vctrl->update_lock);

	cnt = 0;
	if (need_wait) {
		while (vctrl->clk_enabled) {
			msleep(20);
			cnt++;
			if (cnt > 10)
				break;
		}
	}

	if (cnt > 10) {
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		vctrl->clk_control = 0;
		vctrl->clk_enabled = 0;
		vctrl->expire_tick = 0;
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		mipi_dsi_clk_cfg(0);
		mdp_clk_ctrl(0);
		pr_err("%s: Error, SET_CLK_OFF by force\n", __func__);
	}

	/* sanity check, free pipes besides base layer */
	mdp4_overlay_unset_mixer(pipe->mixer_num);
	mdp4_mixer_stage_down(pipe, 1);
	mdp4_overlay_pipe_free(pipe);
	vctrl->base_pipe = NULL;

	if (vctrl->vsync_enabled) {
		vsync_irq_disable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM);
		vctrl->vsync_enabled = 0;
	}

	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	if (vp->update_cnt) {
		/*
		 * pipe's iommu will be freed at next overlay play
		 * and iommu_drop statistic will be increased by one
		 */
		vp->update_cnt = 0;	/* empty queue */
	}

	pr_debug("%s-:\n", __func__);
	return ret;
}
int mdp4_dsi_video_off(struct platform_device *pdev)
{
	int ret = 0;
	int cndx = 0;
	struct msm_fb_data_type *mfd;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	struct vsync_update *vp;
	unsigned long flags;
	int undx, need_wait = 0;
#if defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WXGA_PT) \
	|| defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_FHD_INVERSE_PT) \
	|| defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_FHD_INVERSE_PT_PANEL)
	int retry_cnt = 0;
#endif

	printk(KERN_INFO "[LCD][DEBUG] %s is started..\n", __func__);

#if defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WXGA_PT) \
	|| defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_FHD_INVERSE_PT) \
	|| defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_FHD_INVERSE_PT_PANEL)
	do {
		ret = mipi_lgit_lcd_off(pdev);
		if (ret < 0) {
			panel_next_off(pdev);
			msleep(2);
			panel_next_on(pdev);
			msleep(5);
			retry_cnt++;
		} else {
			/* panel off succeeded; reset ret before leaving the loop */
			ret = 0;
			break;
		}
	} while (retry_cnt < 10);
	printk(KERN_INFO "[LCD][DEBUG] %s : mipi_lgit_lcd_off retry_cnt = %d\n",
			__func__, retry_cnt);
#endif

	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	atomic_set(&vctrl->suspend, 1);
	atomic_set(&vctrl->vsync_resume, 0);

	msleep(20);	/* >= 17 ms */

	if (vctrl->wait_vsync_cnt) {
		complete_all(&vctrl->vsync_comp);
		vctrl->wait_vsync_cnt = 0;
	}

	if (pipe->ov_blt_addr) {
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		if (vctrl->ov_koff != vctrl->ov_done)
			need_wait = 1;
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		if (need_wait)
			mdp4_dsi_video_wait4ov(0);
	}

	mdp_histogram_ctrl_all(FALSE);

	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 0);

	dsi_video_enabled = 0;

	if (vctrl->vsync_irq_enabled) {
		vctrl->vsync_irq_enabled = 0;
		vsync_irq_disable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
	}

	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	if (vp->update_cnt) {
		/*
		 * pipe's iommu will be freed at next overlay play
		 * and iommu_drop statistic will be increased by one
		 */
		vp->update_cnt = 0;	/* empty queue */
	}

	mutex_lock(&mfd->dma->ov_mutex);
	if (pipe) {
		/* sanity check, free pipes besides base layer */
		mdp4_overlay_unset_mixer(pipe->mixer_num);
		if (mfd->ref_cnt == 0) {
			/* adb stop */
			if (pipe->pipe_type == OVERLAY_TYPE_BF)
				mdp4_overlay_borderfill_stage_down(pipe);

			/* base pipe may change after borderfill_stage_down */
			pipe = vctrl->base_pipe;
			mdp4_mixer_stage_down(pipe, 1);
			mdp4_overlay_pipe_free(pipe, 1);
			vctrl->base_pipe = NULL;
		} else {
			/* system suspending */
			mdp4_mixer_stage_down(vctrl->base_pipe, 1);
			mdp4_overlay_iommu_pipe_free(
				vctrl->base_pipe->pipe_ndx, 1);
		}
	}

	/* mdp clock off */
	mdp_clk_ctrl(0);
	mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
	mutex_unlock(&mfd->dma->ov_mutex);

	printk(KERN_INFO "[LCD][DEBUG] %s is ended..\n", __func__);

	return ret;
}
/*
 * mdp4_dma_p_done_lcdc: called from isr
 */
void mdp4_dma_p_done_lcdc(void)
{
	complete_all(&lcdc_comp);
}
static int boxer_panel_enable_lcd(struct omap_dss_device *dssdev)
{
	complete_all(&panel_on);
	/* omap_pm_set_min_bus_tput(&dssdev->dev, OCP_INITIATOR_AGENT,
				    166 * 1000 * 4); */
	return 0;
}
/*
 * mdp4_external_vsync_dtv: called from isr
 */
void mdp4_external_vsync_dtv(void)
{
	complete_all(&dtv_comp);
}
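/*
 * Illustrative sketch only (hypothetical helper, not the driver source):
 * the waiter that pairs with one-line ISR completions such as
 * mdp4_external_vsync_dtv() above. The completion is re-armed first so a
 * stale complete_all() from a previous frame cannot satisfy the wait.
 */
static void mdp4_dtv_wait4vsync_sketch(void)
{
	INIT_COMPLETION(dtv_comp);
	/* enable/unmask the external vsync interrupt here (driver specific) */
	wait_for_completion_timeout(&dtv_comp, msecs_to_jiffies(100));
}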
int mdp4_lcdc_off(struct platform_device *pdev)
{
	int ret = 0;
	int cndx = 0;
	struct msm_fb_data_type *mfd;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	unsigned long flags;
	int need_wait = 0;

	pr_err("[QC_DEBUG] %s Entry\n", __func__);
	mdp4_mixer_late_commit();

	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
#if defined(CONFIG_EUR_MODEL_GT_I9210)
	mutex_lock(&mfd->dma->ov_mutex);
#endif

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

#if defined(CONFIG_EUR_MODEL_GT_I9210)
	pr_err("[QC_DEBUG] vctrl->wait_vsync_cnt: %d\n",
			vctrl->wait_vsync_cnt);
	mdp4_lcdc_wait4vsync(cndx);
	atomic_set(&vctrl->vsync_resume, 0);
#else
	atomic_set(&vctrl->suspend, 1);
	atomic_set(&vctrl->vsync_resume, 0);
	msleep(20);	/* >= 17 ms */
#endif

	complete_all(&vctrl->vsync_comp);

	if (pipe->ov_blt_addr) {
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		if (vctrl->ov_koff != vctrl->ov_done)
			need_wait = 1;
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		if (need_wait)
			mdp4_lcdc_wait4ov(0);
	}

#if !defined(CONFIG_EUR_MODEL_GT_I9210)
	MDP_OUTP(MDP_BASE + LCDC_BASE, 0);
#endif
	lcdc_enabled = 0;

	mdp_histogram_ctrl_all(FALSE);

	if (pipe) {
		/* sanity check, free pipes besides base layer */
		mdp4_overlay_unset_mixer(pipe->mixer_num);
		if (mfd->ref_cnt == 0) {
			/* adb stop */
			if (pipe->pipe_type == OVERLAY_TYPE_BF)
				mdp4_overlay_borderfill_stage_down(pipe);
			vctrl->base_pipe = NULL;
		} else {
			/* system suspending */
			mdp4_mixer_stage_down(vctrl->base_pipe, 1);
			mdp4_overlay_iommu_pipe_free(
				vctrl->base_pipe->pipe_ndx, 1);
		}
	}

#if defined(CONFIG_EUR_MODEL_GT_I9210)
	mdp4_lcdc_tg_off(vctrl);
	atomic_set(&vctrl->suspend, 1);
#endif

	/* MDP clock disable */
	mdp_clk_ctrl(0);
	mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

#if defined(CONFIG_EUR_MODEL_GT_I9210)
	mutex_unlock(&mfd->dma->ov_mutex);
	pr_err("[QC_DEBUG] %s Exit\n", __func__);
#endif
	return ret;
}
int xixfs_ResourceThreadFunction(void *lpParameter)
{
	PXIXFS_LINUX_VCB pVCB = NULL;
	PXIXFS_LINUX_META_CTX pCtx = NULL;
	PXIXCORE_META_CTX xixcoreCtx = NULL;
	int RC = 0;
#if LINUX_VERSION_25_ABOVE
	int TimeOut;
#endif
	unsigned long flags;

	DebugTrace(DEBUG_LEVEL_TRACE, (DEBUG_TARGET_FSCTL|DEBUG_TARGET_VOLINFO),
		("Enter xixfs_ResourceThreadFunction.\n"));

	/* allow only SIGKILL and SIGTERM to be delivered to this thread */
#if defined(NDAS_ORG2423) || defined(NDAS_SIGPENDING_OLD)
	spin_lock_irqsave(&current->sigmask_lock, flags);
	siginitsetinv(&current->blocked, sigmask(SIGKILL)|sigmask(SIGTERM));
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, flags);
#else
	spin_lock_irqsave(&current->sighand->siglock, flags);
	siginitsetinv(&current->blocked, sigmask(SIGKILL)|sigmask(SIGTERM));
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
#endif

#if LINUX_VERSION_25_ABOVE
	daemonize("XixMetaThread");
#else
	daemonize();
#endif

	pCtx = (PXIXFS_LINUX_META_CTX)lpParameter;
	XIXCORE_ASSERT(pCtx);

	pVCB = pCtx->VCBCtx;
	XIXFS_ASSERT_VCB(pVCB);
	xixcoreCtx = &pVCB->XixcoreVcb.MetaContext;

	while (1) {
		if (signal_pending(current))
			flush_signals(current);

#if LINUX_VERSION_25_ABOVE
		TimeOut = DEFAULT_XIXFS_UPDATEWAIT;
		RC = wait_event_timeout(pCtx->VCBMetaEvent,
			XIXCORE_TEST_FLAGS(xixcoreCtx->VCBMetaFlags,
				XIXCORE_META_FLAGS_MASK),
			TimeOut);
#else
		mod_timer(&(pCtx->VCBMetaTimeOut), jiffies + 180*HZ);
		wait_event(pCtx->VCBMetaEvent,
			XIXCORE_TEST_FLAGS(xixcoreCtx->VCBMetaFlags,
				XIXCORE_META_FLAGS_MASK));
#endif

		DebugTrace(DEBUG_LEVEL_TRACE,
			(DEBUG_TARGET_FSCTL|DEBUG_TARGET_VOLINFO),
			("!!!!! Wake up xixfs_ResourceThreadFunction.\n"));

		spin_lock(&(pCtx->MetaLock));

#if LINUX_VERSION_25_ABOVE
		if (RC == 0) {
#else
		if (XIXCORE_TEST_FLAGS(xixcoreCtx->VCBMetaFlags,
				XIXCORE_META_FLAGS_TIMEOUT)) {
			XIXCORE_CLEAR_FLAGS(xixcoreCtx->VCBMetaFlags,
				XIXCORE_META_FLAGS_TIMEOUT);
#endif
			DebugTrace(DEBUG_LEVEL_ALL,
				(DEBUG_TARGET_FSCTL|DEBUG_TARGET_VOLINFO|DEBUG_TARGET_ALL),
				("Request Call timeout : xixfs_ResourceThreadFunction.\n"));

			spin_unlock(&(pCtx->MetaLock));

			if (XIXCORE_TEST_FLAGS(xixcoreCtx->ResourceFlag,
					XIXCORE_META_RESOURCE_NEED_UPDATE)) {
				XIXCORE_CLEAR_FLAGS(xixcoreCtx->ResourceFlag,
					XIXCORE_META_RESOURCE_NEED_UPDATE);

				RC = xixfs_UpdateMetaData(pCtx);
				if (RC < 0) {
					DebugTrace(DEBUG_LEVEL_ALL,
						(DEBUG_TARGET_FSCTL|DEBUG_TARGET_VOLINFO|DEBUG_TARGET_ALL),
						("fail(0x%x) xixfs_ResourceThreadFunction --> xixfs_UpdateMetaData.\n",
						RC));
				}
			}
#if LINUX_VERSION_25_ABOVE
			continue;
		} else if (XIXCORE_TEST_FLAGS(xixcoreCtx->VCBMetaFlags,
				XIXCORE_META_FLAGS_UPDATE)) {
#else
		}

		if (XIXCORE_TEST_FLAGS(xixcoreCtx->VCBMetaFlags,
				XIXCORE_META_FLAGS_UPDATE)) {
#endif
			XIXCORE_CLEAR_FLAGS(xixcoreCtx->VCBMetaFlags,
				XIXCORE_META_FLAGS_UPDATE);

			spin_unlock(&(pCtx->MetaLock));

			if (XIXCORE_TEST_FLAGS(xixcoreCtx->ResourceFlag,
					XIXCORE_META_RESOURCE_NEED_UPDATE)) {
				XIXCORE_CLEAR_FLAGS(xixcoreCtx->ResourceFlag,
					XIXCORE_META_RESOURCE_NEED_UPDATE);

				RC = xixfs_UpdateMetaData(pCtx);
				if (RC < 0) {
					DebugTrace(DEBUG_LEVEL_ALL,
						(DEBUG_TARGET_FSCTL|DEBUG_TARGET_VOLINFO|DEBUG_TARGET_ALL),
						("fail(0x%x) xixfs_ResourceThreadFunction --> xixfs_UpdateMetaData.\n",
						RC));
				}
			}

			xixfs_wakeup_resource_waiter(pCtx);
			continue;
		} else if (XIXCORE_TEST_FLAGS(xixcoreCtx->VCBMetaFlags,
				XIXCORE_META_FLAGS_KILL_THREAD)) {
			XIXCORE_CLEAR_FLAGS(xixcoreCtx->VCBMetaFlags,
				XIXCORE_META_FLAGS_RECHECK_RESOURCES);
			XIXCORE_SET_FLAGS(xixcoreCtx->VCBMetaFlags,
				XIXCORE_META_FLAGS_INSUFFICIENT_RESOURCES);

			spin_unlock(&(pCtx->MetaLock));

			DebugTrace(DEBUG_LEVEL_ALL,
				(DEBUG_TARGET_FSCTL|DEBUG_TARGET_VOLINFO|DEBUG_TARGET_ALL),
				("Stop Thread : xixfs_ResourceThreadFunction.\n"));

			xixfs_wakeup_resource_waiter(pCtx);

#if LINUX_VERSION_25_ABOVE
			complete_all(&(pCtx->VCBMetaThreadStopCompletion));
#else
			del_timer(&(pCtx->VCBMetaTimeOut));
			xixfs_wakeup_metaThread_stop_waiter(pCtx);
#endif
			break;
		} else if (XIXCORE_TEST_FLAGS(xixcoreCtx->VCBMetaFlags,
				XIXCORE_META_FLAGS_RECHECK_RESOURCES)) {
			spin_unlock(&(pCtx->MetaLock));

			DebugTrace(DEBUG_LEVEL_ALL,
				(DEBUG_TARGET_FSCTL|DEBUG_TARGET_VOLINFO|DEBUG_TARGET_ALL),
				("get more resource : xixfs_ResourceThreadFunction.\n"));

			RC = xixfs_GetMoreCheckOutLotMap(pCtx);

			DebugTrace(DEBUG_LEVEL_ALL,
				(DEBUG_TARGET_FSCTL|DEBUG_TARGET_VOLINFO|DEBUG_TARGET_ALL),
				("End xixfs_GetMoreCheckOutLotMap.\n"));

			if (RC < 0) {
				DebugTrace(DEBUG_LEVEL_ALL,
					(DEBUG_TARGET_FSCTL|DEBUG_TARGET_VOLINFO|DEBUG_TARGET_ALL),
					("fail(0x%x) xixfs_ResourceThreadFunction --> xixfs_GetMoreCheckOutLotMap.\n",
					RC));
			} else {
				spin_lock(&(pCtx->MetaLock));
				XIXCORE_CLEAR_FLAGS(xixcoreCtx->VCBMetaFlags,
					XIXCORE_META_FLAGS_RECHECK_RESOURCES);
				spin_unlock(&(pCtx->MetaLock));

				DebugTrace(DEBUG_LEVEL_ALL,
					(DEBUG_TARGET_FSCTL|DEBUG_TARGET_VOLINFO|DEBUG_TARGET_ALL),
					("WAKE UP WAITING THREAD!!\n"));

				xixfs_wakeup_resource_waiter(pCtx);
			}
			continue;
		} else {
			DebugTrace(DEBUG_LEVEL_ALL,
				(DEBUG_TARGET_FSCTL|DEBUG_TARGET_VOLINFO|DEBUG_TARGET_ALL),
				("Request Call Unrecognized : xixfs_ResourceThreadFunction.\n"));
			spin_unlock(&(pCtx->MetaLock));
		}
	}

	DebugTrace(DEBUG_LEVEL_TRACE, (DEBUG_TARGET_FSCTL|DEBUG_TARGET_VOLINFO),
		("Exit xixfs_ResourceThreadFunction.\n"));

	return 0;
}
/*
 * mdp4_overlay0_done_lcdc: called from isr
 */
void mdp4_overlay0_done_lcdc(void)
{
	complete_all(&lcdc_comp);
}
/*
 * mdp4_primary_vsync_lcdc: called from isr
 */
void mdp4_primary_vsync_lcdc(void)
{
	complete_all(&lcdc_comp);
}
/*
 * mdp4_dma_p_done_dsi_video: called from isr
 */
void mdp4_dma_p_done_dsi_video(void)
{
	complete_all(&dsi_video_comp);
}
int mdp4_lcdc_off(struct platform_device *pdev)
{
	int ret = 0;
	int cndx = 0;
	struct msm_fb_data_type *mfd;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	unsigned long flags;
	int need_wait = 0;

	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	atomic_set(&vctrl->suspend, 1);
	atomic_set(&vctrl->vsync_resume, 0);

	msleep(20);	/* >= 17 ms */

	complete_all(&vctrl->vsync_comp);

	if (pipe->ov_blt_addr) {
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		if (vctrl->ov_koff != vctrl->ov_done)
			need_wait = 1;
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		if (need_wait)
			mdp4_lcdc_wait4ov(0);
	}

	MDP_OUTP(MDP_BASE + LCDC_BASE, 0);

	lcdc_enabled = 0;

	mdp_histogram_ctrl_all(FALSE);

	if (pipe) {
		/* sanity check, free pipes besides base layer */
		mdp4_overlay_unset_mixer(pipe->mixer_num);
		if (mfd->ref_cnt == 0) {
			/* adb stop */
			if (pipe->pipe_type == OVERLAY_TYPE_BF)
				mdp4_overlay_borderfill_stage_down(pipe);

			/* base pipe may change after borderfill_stage_down */
			pipe = vctrl->base_pipe;
			mdp4_mixer_stage_down(pipe, 1);
			mdp4_overlay_pipe_free(pipe);
			vctrl->base_pipe = NULL;
		} else {
			/* system suspending */
			mdp4_mixer_stage_down(vctrl->base_pipe, 1);
			mdp4_overlay_iommu_pipe_free(
				vctrl->base_pipe->pipe_ndx, 1);
		}
	}

	/* MDP clock disable */
	mdp_clk_ctrl(0);
	mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	return ret;
}
void *pil_get(const char *name)
{
	int ret;
	struct pil_device *pil;
	struct pil_device *pil_d;
	void *retval;
#ifdef CONFIG_MSM8960_ONLY
	static int modem_initialized = 0;
	int loop_count = 0;
#endif

	if (!name)
		return NULL;

	pil = retval = find_peripheral(name);
	if (!pil)
		return ERR_PTR(-ENODEV);

	if (!try_module_get(pil->owner)) {
		put_device(&pil->dev);
		return ERR_PTR(-ENODEV);
	}

	pil_d = pil_get(pil->desc->depends_on);
	if (IS_ERR(pil_d)) {
		retval = pil_d;
		goto err_depends;
	}

#ifdef CONFIG_MSM8960_ONLY
	if (!strcmp("modem", name)) {
		while (unlikely(!modem_initialized &&
				strcmp("rmt_storage", current->comm) &&
				loop_count++ < 10)) {
			printk("%s: %s(%d) waiting for rmt_storage %d\n",
				__func__, current->comm, current->pid,
				loop_count);
			msleep(500);
		}
	}
#endif

	mutex_lock(&pil->lock);
	if (!pil->count) {
		if (!strcmp("modem", name)) {
			printk("%s: %s(%d) for %s\n", __func__,
				current->comm, current->pid, name);
#ifdef CONFIG_MSM8960_ONLY
			modem_initialized = 1;
#endif
		}
		ret = load_image(pil);
		if (ret) {
			retval = ERR_PTR(ret);
			goto err_load;
		}
	}
	pil->count++;
	pil_set_state(pil, PIL_ONLINE);
	mutex_unlock(&pil->lock);
#if defined(CONFIG_MSM8930_ONLY)
	if (!strcmp("modem", name))
		complete_all(&pil_work_finished);
#elif defined(CONFIG_ARCH_APQ8064)
	complete_all(&pil_work_finished);
#endif
out:
	return retval;

err_load:
	mutex_unlock(&pil->lock);
	pil_put(pil_d);
err_depends:
	put_device(&pil->dev);
	module_put(pil->owner);
	goto out;
}
/*
 * Note: assumes caller has acquired <msm_rpm_irq_lock>.
 *
 * Return value:
 *   0: request acknowledgement
 *   1: notification
 *   2: spurious interrupt
 */
static int msm_rpm_process_ack_interrupt(void)
{
	uint32_t ctx_mask_ack;
	uint32_t sel_masks_ack[SEL_MASK_SIZE] = {0};

	ctx_mask_ack = msm_rpm_read(MSM_RPM_PAGE_CTRL,
			target_ctrl(MSM_RPM_CTRL_ACK_CTX_0));
	msm_rpm_read_contiguous(MSM_RPM_PAGE_CTRL,
			target_ctrl(MSM_RPM_CTRL_ACK_SEL_0),
			sel_masks_ack, msm_rpm_sel_mask_size);

	if (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_NOTIFICATION)) {
		struct msm_rpm_notification *n;
		int i;

		list_for_each_entry(n, &msm_rpm_notifications, list)
			for (i = 0; i < msm_rpm_sel_mask_size; i++)
				if (sel_masks_ack[i] & n->sel_masks[i]) {
					up(&n->sem);
					break;
				}

		msm_rpm_write_contiguous_zeros(MSM_RPM_PAGE_CTRL,
			target_ctrl(MSM_RPM_CTRL_ACK_SEL_0),
			msm_rpm_sel_mask_size);
		msm_rpm_write(MSM_RPM_PAGE_CTRL,
			target_ctrl(MSM_RPM_CTRL_ACK_CTX_0), 0);
		/* Ensure the write is complete before return */
		mb();

		return 1;
	}

	if (msm_rpm_request) {
		int i;

		*(msm_rpm_request->ctx_mask_ack) = ctx_mask_ack;
		memcpy(msm_rpm_request->sel_masks_ack, sel_masks_ack,
			sizeof(sel_masks_ack));

		for (i = 0; i < msm_rpm_request->count; i++)
			msm_rpm_request->req[i].value =
				msm_rpm_read(MSM_RPM_PAGE_ACK,
				target_enum(msm_rpm_request->req[i].id));

		msm_rpm_write_contiguous_zeros(MSM_RPM_PAGE_CTRL,
			target_ctrl(MSM_RPM_CTRL_ACK_SEL_0),
			msm_rpm_sel_mask_size);
		msm_rpm_write(MSM_RPM_PAGE_CTRL,
			target_ctrl(MSM_RPM_CTRL_ACK_CTX_0), 0);
		/* Ensure the write is complete before return */
		mb();

		if (msm_rpm_request->done)
			complete_all(msm_rpm_request->done);
#if defined(CONFIG_PANTECH_DEBUG)
#if defined(CONFIG_PANTECH_DEBUG_RPM_LOG) /* p14291_121102 */
		pantech_debug_rpm_log(0, msm_rpm_request->req->id,
			msm_rpm_request->req->value);
#endif
#endif
		msm_rpm_request = NULL;
		return 0;
	}

	return 2;
}
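/*
 * Illustrative sketch only (hypothetical helper, not the mach-msm source):
 * the sender side of msm_rpm_request->done. A caller publishes a request
 * under the irq lock referenced above, pokes the RPM, and sleeps on an
 * on-stack completion until the ack path runs complete_all().
 */
static int msm_rpm_send_and_wait_sketch(struct msm_rpm_request *req)
{
	DECLARE_COMPLETION_ONSTACK(ack);
	unsigned long flags;

	spin_lock_irqsave(&msm_rpm_irq_lock, flags);
	req->done = &ack;
	msm_rpm_request = req;	/* consumed by the ack interrupt handler */
	/* trigger the RPM-bound IPC interrupt here (target specific) */
	spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);

	wait_for_completion(&ack);
	return 0;
}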
/*
 * mdp4_primary_vsync_dsi_video: called from isr
 */
void mdp4_primary_vsync_dsi_video(void)
{
	complete_all(&dsi_video_comp);
}