static int wait_for_vfork_done(struct task_struct *child,
				struct completion *vfork)
{
	int killed;

	freezer_do_not_count();
	killed = wait_for_completion_killable(vfork);
	freezer_count();

	if (killed) {
		task_lock(child);
		child->vfork_done = NULL;
		task_unlock(child);
	}

	put_task_struct(child);
	return killed;
}

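/*
 * For context: the waiter above pairs with a signalling side in the child's
 * exit/exec path. A minimal sketch of that side, matching the kernel's
 * complete_vfork_done(); the same task_lock() protects both the waiter's
 * reset of vfork_done and this read of it:
 */
static void complete_vfork_done(struct task_struct *tsk)
{
	struct completion *vfork;

	task_lock(tsk);
	vfork = tsk->vfork_done;
	if (likely(vfork)) {
		tsk->vfork_done = NULL;
		complete(vfork);	/* wakes wait_for_vfork_done() */
	}
	task_unlock(tsk);
}
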
static void mdp4_overlay_dtv_wait4_ov_done(struct msm_fb_data_type *mfd)
{
	unsigned long flag;

	/* enable irq */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	mdp_enable_irq(MDP_OVERLAY1_TERM);
	INIT_COMPLETION(dtv_pipe->comp);
	mfd->dma->waiting = TRUE;
	outp32(MDP_INTR_CLEAR, INTR_OVERLAY1_DONE);
	mdp_intr_mask |= INTR_OVERLAY1_DONE;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	wait_for_completion_killable(&dtv_pipe->comp);

	spin_lock_irqsave(&mdp_done_lock, flag);
	mdp_disable_irq(MDP_OVERLAY1_TERM);
	spin_unlock_irqrestore(&mdp_done_lock, flag);
}

void mdp4_overlay_dtv_wait4vsync(void)
{
	unsigned long flag;

	if (!dtv_enabled)
		return;

	/* enable irq */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	mdp_enable_irq(MDP_DMA_E_TERM);
	INIT_COMPLETION(dtv_comp);
	outp32(MDP_INTR_CLEAR, INTR_EXTERNAL_VSYNC);
	mdp_intr_mask |= INTR_EXTERNAL_VSYNC;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	wait_for_completion_killable(&dtv_comp);
	mdp_disable_irq(MDP_DMA_E_TERM);
}

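/*
 * All of the MDP waiters in this file follow the same pattern: arm an
 * interrupt under mdp_spin_lock, then sleep on a completion that the MDP
 * interrupt handler signals. A minimal sketch of that handler side, assuming
 * a top-level ISR that reads and acks MDP_INTR_STATUS (the handler name and
 * the one-shot mask handling are illustrative, not the driver's verbatim
 * code; register and completion names follow the snippets above):
 */
static irqreturn_t mdp_isr_sketch(int irq, void *data)
{
	uint32 isr = inp32(MDP_INTR_STATUS);

	outp32(MDP_INTR_CLEAR, isr);	/* ack what we are about to handle */

	if (isr & INTR_EXTERNAL_VSYNC) {
		mdp_intr_mask &= ~INTR_EXTERNAL_VSYNC;	/* one-shot */
		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
		complete(&dtv_comp);	/* wakes mdp4_overlay_dtv_wait4vsync() */
	}

	return IRQ_HANDLED;
}
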
void mdp4_mddi_dma_s_kickoff(struct msm_fb_data_type *mfd,
				struct mdp4_overlay_pipe *pipe)
{
	down(&mfd->sem);
	mdp_enable_irq(MDP_DMA_S_TERM);
	mfd->dma->busy = TRUE;
	INIT_COMPLETION(pipe->dmas_comp);
	mfd->ibuf_flushed = TRUE;
	pending_pipe = pipe;

	/* start dma_s pipe */
	mdp_pipe_kickoff(MDP_DMA_S_TERM, mfd);
	up(&mfd->sem);

	/* wait until DMA finishes the current job */
	wait_for_completion_killable(&pipe->dmas_comp);
	pending_pipe = NULL;
	mdp_disable_irq(MDP_DMA_S_TERM);
}

void mdp4_atv_overlay(struct msm_fb_data_type *mfd)
{
	struct fb_info *fbi = mfd->fbi;
	uint8 *buf;
	int bpp;
	unsigned long flag;
	struct mdp4_overlay_pipe *pipe;

	if (!mfd->panel_power_on)
		return;

	/* no need to power on cmd block since it's lcdc mode */
	bpp = fbi->var.bits_per_pixel / 8;
	buf = (uint8 *) fbi->fix.smem_start;
	buf += fbi->var.xoffset * bpp +
		fbi->var.yoffset * fbi->fix.line_length;

	mutex_lock(&mfd->dma->ov_mutex);

	pipe = atv_pipe;
	pipe->srcp0_addr = (uint32) buf;
	mdp4_overlay_rgb_setup(pipe);
	mdp4_overlay_reg_flush(pipe, 1); /* rgb2 and mixer1 */

	printk(KERN_INFO "mdp4_atv_overlay: pipe=%x ndx=%d\n",
		(int)pipe, pipe->pipe_ndx);

	/* enable irq */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	mdp_enable_irq(MDP_OVERLAY1_TERM);
	INIT_COMPLETION(atv_pipe->comp);
	mfd->dma->waiting = TRUE;
	outp32(MDP_INTR_CLEAR, INTR_OVERLAY1_DONE);
	mdp_intr_mask |= INTR_OVERLAY1_DONE;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	wait_for_completion_killable(&atv_pipe->comp);
	mdp_disable_irq(MDP_OVERLAY1_TERM);
	mdp4_stat.kickoff_atv++;

	mutex_unlock(&mfd->dma->ov_mutex);
}

void mdp4_mddi_dma_busy_wait(struct msm_fb_data_type *mfd,
				struct mdp4_overlay_pipe *pipe)
{
	unsigned long flag;

	if (pipe == NULL)
		return;

	spin_lock_irqsave(&mdp_spin_lock, flag);
	if (mfd->dma->busy == TRUE) {
		INIT_COMPLETION(pipe->comp);
		pending_pipe = pipe;
	}
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	if (pending_pipe != NULL) {
		/* wait until DMA finishes the current job */
		wait_for_completion_killable(&pipe->comp);
		pending_pipe = NULL;
	}
}

int mlx5_fpga_ipsec_sa_cmd_wait(void *ctx)
{
	struct mlx5_ipsec_command_context *context = ctx;
	int res;

	res = wait_for_completion_killable(&context->complete);
	if (res) {
		mlx5_fpga_warn(context->dev,
			       "Failure waiting for IPSec command response\n");
		return -EINTR;
	}

	if (context->status == MLX5_FPGA_IPSEC_SACMD_COMPLETE)
		res = context->status_code;
	else
		res = -EIO;

	kfree(context);
	return res;
}

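/*
 * For context: the waiter above returns whatever the response handler
 * records in the command context. A minimal sketch of that signalling side,
 * under the assumption that some receive path hands us the context plus a
 * device status (the helper name and the 'syndrome' parameter are
 * illustrative, not the driver's verbatim receive path; the fields are the
 * ones consumed above):
 */
static void mlx5_ipsec_cmd_resp_sketch(struct mlx5_ipsec_command_context *context,
				       int syndrome)
{
	context->status_code = syndrome;
	context->status = MLX5_FPGA_IPSEC_SACMD_COMPLETE;
	complete(&context->complete);	/* wakes mlx5_fpga_ipsec_sa_cmd_wait() */
}
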
void mdp4_mddi_kickoff_ui(struct msm_fb_data_type *mfd,
				struct mdp4_overlay_pipe *pipe)
{
	if (mdp4_overlay_mixer_play(mddi_pipe->mixer_num) > 0) {
#ifdef MDDI_TIMER
		mddi_add_delay_timer(10);
#endif
		atomic_set(&mddi_delay_kickoff_cnt, 1);
		INIT_COMPLETION(mddi_delay_comp);
		up(&mfd->dma->ov_sem);
		wait_for_completion_killable(&mddi_delay_comp);
		down(&mfd->dma->ov_sem);
		/* semaphore was re-locked, wait for DMA completion again */
		mdp4_mddi_dma_busy_wait(mfd, pipe);
	}

	mdp4_mddi_overlay_kickoff(mfd, pipe);
}

void mdp4_writeback_overlay_kickoff(struct msm_fb_data_type *mfd,
				struct mdp4_overlay_pipe *pipe)
{
	unsigned long flag;

	spin_lock_irqsave(&mdp_spin_lock, flag);
	mdp_enable_irq(MDP_OVERLAY2_TERM);
	INIT_COMPLETION(writeback_pipe->comp);
	mfd->dma->busy = TRUE;
	outp32(MDP_INTR_CLEAR, INTR_OVERLAY2_DONE);
	mdp_intr_mask |= INTR_OVERLAY2_DONE;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	wmb();	/* make sure all registers updated */
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	/* start OVERLAY pipe */
	mdp_pipe_kickoff(MDP_OVERLAY2_TERM, mfd);
	wmb();

	pr_debug("%s: before ov done interrupt\n", __func__);
	wait_for_completion_killable(&mfd->dma->comp);
}

static int mdp_do_histogram(struct fb_info *info, struct mdp_histogram *hist)
{
	int ret = 0;

	if (!hist->frame_cnt || (hist->bin_cnt == 0))
		return -EINVAL;

	if ((mdp_rev <= MDP_REV_41 &&
			hist->bin_cnt > MDP_REV41_HIST_MAX_BIN) ||
	    (mdp_rev == MDP_REV_42 &&
			hist->bin_cnt > MDP_REV42_HIST_MAX_BIN))
		return -EINVAL;

	mutex_lock(&mdp_hist_mutex);
	if (!mdp_is_hist_data) {
		pr_err("%s - histogram not ready\n", __func__);
		ret = -EPERM;
		goto error;
	}

	if (!mdp_is_hist_start) {
		pr_err("%s histogram not started\n", __func__);
		ret = -EPERM;
		goto error;
	}

	mdp_hist_frame_cnt = hist->frame_cnt;
	mutex_unlock(&mdp_hist_mutex);

	if (wait_for_completion_killable(&mdp_hist_comp)) {
		pr_err("%s(): histogram bin collection killed", __func__);
		return -EINVAL;
	}

	mutex_lock(&mdp_hist_mutex);
	if (mdp_is_hist_data && mdp_is_hist_init)
		ret = _mdp_copy_hist_data(hist);
error:
	mutex_unlock(&mdp_hist_mutex);
	return ret;
}

static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
{
	int ret;

	if (!busy) {
		busy = true;
		init_completion(&have_data);
		register_buffer(buf, size);
	}

	if (!wait)
		return 0;

	ret = wait_for_completion_killable(&have_data);
	if (ret < 0)
		return ret;

	busy = false;
	return data_avail;
}

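/*
 * virtio_read() above blocks until the device returns entropy. The side that
 * signals it is the virtqueue callback; a minimal sketch matching virtio-rng's
 * random_recv_done(), where data_avail, have_data and busy are the globals
 * used above:
 */
static void random_recv_done(struct virtqueue *vq)
{
	/* we can get spurious callbacks, e.g. shared IRQs + virtio_pci */
	if (!virtqueue_get_buf(vq, &data_avail))
		return;

	complete(&have_data);	/* wakes the waiter in virtio_read() */
}
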
void mdp4_dsi_cmd_overlay_kickoff(struct msm_fb_data_type *mfd,
				struct mdp4_overlay_pipe *pipe)
{
	down(&mfd->sem);
	mfd->dma->busy = TRUE;
	INIT_COMPLETION(dsi_cmd_comp);
	mdp_enable_irq(MDP_OVERLAY0_TERM);

	/* Kick off overlay engine */
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);

	/* trigger dsi cmd engine */
	mipi_dsi_cmd_mdp_sw_trigger();
	up(&mfd->sem);

	/* wait until DMA finishes the current job */
	wait_for_completion_killable(&dsi_cmd_comp);
	mdp_disable_irq(MDP_OVERLAY0_TERM);
}

static void mdp4_overlay_dtv_wait4dmae(struct msm_fb_data_type *mfd)
{
	unsigned long flag;

	if (!dtv_pipe) {
		pr_debug("%s: no mixer1 base layer pipe allocated!\n",
			 __func__);
		return;
	}

	/* enable irq */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	mdp_enable_irq(MDP_DMA_E_TERM);
	INIT_COMPLETION(dtv_pipe->comp);
	mfd->dma->waiting = TRUE;
	outp32(MDP_INTR_CLEAR, INTR_DMA_E_DONE);
	mdp_intr_mask |= INTR_DMA_E_DONE;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	wait_for_completion_killable(&dtv_pipe->comp);
	mdp_disable_irq(MDP_DMA_E_TERM);
}

void mdp4_lcdc_overlay(struct msm_fb_data_type *mfd)
{
	struct fb_info *fbi = mfd->fbi;
	uint8 *buf;
	int bpp;
	unsigned long flag;
	struct mdp4_overlay_pipe *pipe;

	if (!mfd->panel_power_on)
		return;

	bpp = fbi->var.bits_per_pixel / 8;
	buf = (uint8 *) fbi->fix.smem_start;
	buf += fbi->var.xoffset * bpp +
		fbi->var.yoffset * fbi->fix.line_length;

	mutex_lock(&mfd->dma->ov_mutex);

	pipe = lcdc_pipe;
	pipe->srcp0_addr = (uint32) buf;
	mdp4_overlay_rgb_setup(pipe);
	mdp4_overlay_reg_flush(pipe, 1);

	spin_lock_irqsave(&mdp_spin_lock, flag);
	mdp_enable_irq(MDP_OVERLAY0_TERM);
	INIT_COMPLETION(lcdc_pipe->comp);
	mfd->dma->waiting = TRUE;
	outp32(MDP_INTR_CLEAR, INTR_OVERLAY0_DONE);
	mdp_intr_mask |= INTR_OVERLAY0_DONE;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	wait_for_completion_killable(&lcdc_pipe->comp);
	mdp_disable_irq(MDP_OVERLAY0_TERM);
	mdp4_stat.kickoff_lcdc++;

	mutex_unlock(&mfd->dma->ov_mutex);
}

void mdp4_overlay_lcdc_wait4vsync(struct msm_fb_data_type *mfd)
{
	unsigned long flag;

	/* enable irq */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	mdp_enable_irq(MDP_DMA2_TERM);	/* enable intr */
	INIT_COMPLETION(lcdc_comp);
	if (mfd->dma->waiting == FALSE)
		mfd->dma->waiting = TRUE;
	outp32(MDP_INTR_CLEAR, INTR_PRIMARY_VSYNC);
	mdp_intr_mask |= INTR_PRIMARY_VSYNC;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	if (!mfd->panel_power_on) {
		mfd->dma->waiting = FALSE;
		mdp_disable_irq(MDP_DMA2_TERM);
		return;
	}

	wait_for_completion_killable(&lcdc_comp);
	mdp_disable_irq(MDP_DMA2_TERM);
}

void mdp_dma2_update(struct msm_fb_data_type *mfd)
#endif
{
	unsigned long flag;

	down(&mfd->dma->mutex);
	if ((mfd) && (!mfd->dma->busy) && (mfd->panel_power_on)) {
		down(&mfd->sem);
		mfd->ibuf_flushed = TRUE;
		mdp_dma2_update_lcd(mfd);

		spin_lock_irqsave(&mdp_spin_lock, flag);
		mdp_enable_irq(MDP_DMA2_TERM);
		mfd->dma->busy = TRUE;
		INIT_COMPLETION(mfd->dma->comp);
		spin_unlock_irqrestore(&mdp_spin_lock, flag);

		/* schedule DMA to start */
		mdp_dma_schedule(mfd, MDP_DMA2_TERM);
		up(&mfd->sem);

		/* wait until DMA finishes the current job */
		/* LGE_CHANGE
		 * Add this code for screen update when dma completion is failed.
		 * 2012-03-06, [email protected]
		 */
		if (wait_for_completion_killable(&mfd->dma->comp) < 0)
			goto out;

		mdp_disable_irq(MDP_DMA2_TERM);

		/* signal if pan function is waiting for the update completion */
		if (mfd->pan_waiting) {
			mfd->pan_waiting = FALSE;
			complete(&mfd->pan_comp);
		}
	}
out:
	up(&mfd->dma->mutex);
}

void mdp4_mddi_overlay_kickoff(struct msm_fb_data_type *mfd,
				struct mdp4_overlay_pipe *pipe)
{
	struct msm_fb_panel_data *pdata =
		(struct msm_fb_panel_data *)mfd->pdev->dev.platform_data;

	down(&mfd->sem);
	mdp_enable_irq(MDP_OVERLAY0_TERM);
	mfd->dma->busy = TRUE;
	if (pdata->power_on_panel_at_pan) {
		INIT_COMPLETION(pipe->comp);
		pending_pipe = pipe;
	}

	/* start OVERLAY pipe */
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
	up(&mfd->sem);

	if (pdata->power_on_panel_at_pan) {
		wait_for_completion_killable(&pipe->comp);
		pending_pipe = NULL;
	}
}

static int mdp_do_histogram(struct fb_info *info, struct mdp_histogram *hist)
{
	int ret = 0;

	if (!hist->frame_cnt || (hist->bin_cnt == 0))
		return -EINVAL;

	if ((mdp_rev <= MDP_REV_41 &&
			hist->bin_cnt > MDP_REV41_HIST_MAX_BIN) ||
	    (mdp_rev == MDP_REV_42 &&
			hist->bin_cnt > MDP_REV42_HIST_MAX_BIN))
		return -EINVAL;

	mutex_lock(&mdp_hist_mutex);
	if (!mdp_is_hist_data) {
		ret = -EINVAL;
		goto error;
	}

	if (!mdp_is_hist_start) {
		printk(KERN_ERR "%s histogram not started\n", __func__);
		ret = -EPERM;
		goto error;
	}

	INIT_COMPLETION(mdp_hist_comp);
	mdp_hist_frame_cnt = hist->frame_cnt;
	mutex_unlock(&mdp_hist_mutex);

	wait_for_completion_killable(&mdp_hist_comp);

	mutex_lock(&mdp_hist_mutex);
	if (mdp_is_hist_data)
		ret = _mdp_copy_hist_data(hist);
error:
	mutex_unlock(&mdp_hist_mutex);
	return ret;
}

void mdp_dma2_update(struct msm_fb_data_type *mfd)
#endif
{
	unsigned long flag;

	if (!mfd)
		return;

	down(&mfd->dma->mutex);
	if ((mfd) && (!mfd->dma->busy) && (mfd->panel_power_on)) {
		down(&mfd->sem);
		mfd->ibuf_flushed = TRUE;
		mdp_dma2_update_lcd(mfd);

		spin_lock_irqsave(&mdp_spin_lock, flag);
		mdp_enable_irq(MDP_DMA2_TERM);
		mfd->dma->busy = TRUE;
		INIT_COMPLETION(mfd->dma->comp);
		spin_unlock_irqrestore(&mdp_spin_lock, flag);

		/* schedule DMA to start */
		mdp_dma_schedule(mfd, MDP_DMA2_TERM);
		up(&mfd->sem);

		/* wait until DMA finishes the current job */
		wait_for_completion_killable(&mfd->dma->comp);
		mdp_disable_irq(MDP_DMA2_TERM);

		/* signal if pan function is waiting for the update completion */
		if (mfd->pan_waiting) {
			mfd->pan_waiting = FALSE;
			complete(&mfd->pan_comp);
		}
	}
	up(&mfd->dma->mutex);
}

void mdp_dsi_video_update(struct msm_fb_data_type *mfd)
{
	struct fb_info *fbi = mfd->fbi;
	uint8 *buf;
	int bpp;
	unsigned long flag;
	int irq_block = MDP_DMA2_TERM;

	if (!mfd->panel_power_on)
		return;

	down(&mfd->dma->mutex);

	bpp = fbi->var.bits_per_pixel / 8;
	buf = (uint8 *) fbi->fix.smem_start;
	buf += calc_fb_offset(mfd, fbi, bpp);

	/* no need to power on cmd block since it's dsi mode */
	/* starting address */
	MDP_OUTP(MDP_BASE + DMA_P_BASE + 0x8, (uint32) buf);

	/* enable irq */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	mdp_enable_irq(irq_block);
	INIT_COMPLETION(mfd->dma->comp);
	mfd->dma->waiting = TRUE;
	outp32(MDP_INTR_CLEAR, LCDC_FRAME_START);
	mdp_intr_mask |= LCDC_FRAME_START;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	wait_for_completion_killable(&mfd->dma->comp);
	mdp_disable_irq(irq_block);
	up(&mfd->dma->mutex);
}

void mdp_lcdc_update(struct msm_fb_data_type *mfd)
{
	struct fb_info *fbi = mfd->fbi;
	uint8 *buf;
	int bpp;
	unsigned long flag;

	if (!mfd->panel_power_on)
		return;

	/* no need to power on cmd block since it's lcdc mode */
	bpp = fbi->var.bits_per_pixel / 8;
	buf = (uint8 *) fbi->fix.smem_start;
	buf += (fbi->var.xoffset +
		fbi->var.yoffset * fbi->var.xres_virtual) * bpp;

	/* starting address */
	MDP_OUTP(MDP_BASE + 0x90008, (uint32) buf);

	/* enable LCDC irq */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	mdp_enable_irq(MDP_DMA2_TERM);
	INIT_COMPLETION(mfd->dma->comp);
	mfd->dma->waiting = TRUE;
#ifdef CONFIG_FB_MSM_MDP40
	outp32(MDP_INTR_CLEAR, INTR_DMA_P_DONE);
	mdp_intr_mask |= INTR_DMA_P_DONE;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
#else
	outp32(MDP_INTR_CLEAR, LCDC_FRAME_START);
	mdp_intr_mask |= LCDC_FRAME_START;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
#endif
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	wait_for_completion_killable(&mfd->dma->comp);
	mdp_disable_irq(MDP_DMA2_TERM);
}

void mdp_dma2_update(struct msm_fb_data_type *mfd)
#endif
{
	unsigned long flag;
	static int first_vsync;
	int need_wait = 0;

	down(&mfd->dma->mutex);
	if ((mfd) && (mfd->panel_power_on)) {
		down(&mfd->sem);
		spin_lock_irqsave(&mdp_spin_lock, flag);
		if (mfd->dma->busy == TRUE)
			need_wait++;
		spin_unlock_irqrestore(&mdp_spin_lock, flag);

		if (need_wait)
			wait_for_completion_killable(&mfd->dma->comp);

#if defined(CONFIG_MACH_KYLEPLUS_CTC)
		/* wait until Vsync finishes the current job */
		if (first_vsync) {
			if (!wait_for_completion_killable_timeout(
					&vsync_cntrl.vsync_comp, HZ/10))
				pr_err("Timedout DMA %s %d",
					__func__, __LINE__);
		} else {
			first_vsync = 1;
		}
#endif
		/* schedule DMA to start */
		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mfd->ibuf_flushed = TRUE;
		mdp_dma2_update_lcd(mfd);

		spin_lock_irqsave(&mdp_spin_lock, flag);
		mdp_enable_irq(MDP_DMA2_TERM);
		mfd->dma->busy = TRUE;
		INIT_COMPLETION(mfd->dma->comp);
		INIT_COMPLETION(vsync_cntrl.vsync_comp);
		if (!vsync_cntrl.vsync_irq_enabled &&
				vsync_cntrl.disabled_clocks) {
			MDP_OUTP(MDP_BASE + 0x021c, 0x10); /* read pointer */
			outp32(MDP_INTR_CLEAR, MDP_PRIM_RDPTR);
			mdp_intr_mask |= MDP_PRIM_RDPTR;
			outp32(MDP_INTR_ENABLE, mdp_intr_mask);
			mdp_enable_irq(MDP_VSYNC_TERM);
			vsync_cntrl.vsync_dma_enabled = 1;
		}
		spin_unlock_irqrestore(&mdp_spin_lock, flag);

		/* schedule DMA to start */
		mdp_dma_schedule(mfd, MDP_DMA2_TERM);
		up(&mfd->sem);

#ifndef CONFIG_MACH_KYLEPLUS_CTC
		/* wait until Vsync finishes the current job */
		if (first_vsync) {
			if (!wait_for_completion_killable_timeout(
					&vsync_cntrl.vsync_comp, HZ/10))
				pr_err("Timedout DMA %s %d",
					__func__, __LINE__);
		} else {
			first_vsync = 1;
		}
#endif
		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

		/* signal if pan function is waiting for the update completion */
		if (mfd->pan_waiting) {
			mfd->pan_waiting = FALSE;
			complete(&mfd->pan_comp);
		}
	}
	up(&mfd->dma->mutex);
}

static void mdp4_overlay_dtv_wait4_ov_done(struct msm_fb_data_type *mfd)
{
	wait_for_completion_killable(&dtv_pipe->comp);
	mdp_disable_irq(MDP_OVERLAY1_TERM);
}

static int hil_dev_connect(struct serio *serio, struct serio_driver *drv)
{
	struct hil_dev *dev;
	struct input_dev *input_dev;
	uint8_t did, *idd;
	int error;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	input_dev = input_allocate_device();
	if (!dev || !input_dev) {
		error = -ENOMEM;
		goto bail0;
	}

	dev->serio = serio;
	dev->dev = input_dev;

	error = serio_open(serio, drv);
	if (error)
		goto bail0;

	serio_set_drvdata(serio, dev);

	/* Get device info. MLC driver supplies devid/status/etc. */
	init_completion(&dev->cmd_done);
	serio_write(serio, 0);
	serio_write(serio, 0);
	serio_write(serio, HIL_PKT_CMD >> 8);
	serio_write(serio, HIL_CMD_IDD);
	error = wait_for_completion_killable(&dev->cmd_done);
	if (error)
		goto bail1;

	init_completion(&dev->cmd_done);
	serio_write(serio, 0);
	serio_write(serio, 0);
	serio_write(serio, HIL_PKT_CMD >> 8);
	serio_write(serio, HIL_CMD_RSC);
	error = wait_for_completion_killable(&dev->cmd_done);
	if (error)
		goto bail1;

	init_completion(&dev->cmd_done);
	serio_write(serio, 0);
	serio_write(serio, 0);
	serio_write(serio, HIL_PKT_CMD >> 8);
	serio_write(serio, HIL_CMD_RNM);
	error = wait_for_completion_killable(&dev->cmd_done);
	if (error)
		goto bail1;

	init_completion(&dev->cmd_done);
	serio_write(serio, 0);
	serio_write(serio, 0);
	serio_write(serio, HIL_PKT_CMD >> 8);
	serio_write(serio, HIL_CMD_EXD);
	error = wait_for_completion_killable(&dev->cmd_done);
	if (error)
		goto bail1;

	did = dev->idd[0];
	idd = dev->idd + 1;

	switch (did & HIL_IDD_DID_TYPE_MASK) {
	case HIL_IDD_DID_TYPE_KB_INTEGRAL:
	case HIL_IDD_DID_TYPE_KB_ITF:
	case HIL_IDD_DID_TYPE_KB_RSVD:
	case HIL_IDD_DID_TYPE_CHAR:
		if (HIL_IDD_NUM_BUTTONS(idd) ||
		    HIL_IDD_NUM_AXES_PER_SET(*idd)) {
			printk(KERN_INFO PREFIX
				"combo devices are not supported.\n");
			goto bail1;
		}

		dev->is_pointer = false;
		hil_dev_keyboard_setup(dev);
		break;

	case HIL_IDD_DID_TYPE_REL:
	case HIL_IDD_DID_TYPE_ABS:
		dev->is_pointer = true;
		hil_dev_pointer_setup(dev);
		break;

	default:
		goto bail1;
	}

	input_dev->id.bustype = BUS_HIL;
	input_dev->id.vendor = PCI_VENDOR_ID_HP;
	input_dev->id.product = 0x0001; /* TODO: get from kbd->rsc */
	input_dev->id.version = 0x0100; /* TODO: get from kbd->rsc */
	input_dev->dev.parent = &serio->dev;

	if (!dev->is_pointer) {
		serio_write(serio, 0);
		serio_write(serio, 0);
		serio_write(serio, HIL_PKT_CMD >> 8);
		/* Enable Keyswitch Autorepeat 1 */
		serio_write(serio, HIL_CMD_EK1);
		/* No need to wait for completion */
	}

void mdp4_mddi_overlay_kickoff(struct msm_fb_data_type *mfd,
				struct mdp4_overlay_pipe *pipe)
{
#ifdef MDP4_NONBLOCKING
	unsigned long flag;

	if (pipe == mddi_pipe) {	/* base layer */
		if (mdp4_overlay_pipe_staged(pipe->mixer_num) > 1) {
			if (time_before(jiffies,
				(mddi_last_kick + mddi_kick_interval/2))) {
				mdp4_stat.kickoff_mddi_skip++;
				return;	/* let other pipe to kickoff */
			}
		}
	}

	spin_lock_irqsave(&mdp_spin_lock, flag);
	if (mfd->dma->busy == TRUE) {
		INIT_COMPLETION(pipe->comp);
		pending_pipe = pipe;
	}
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	if (pending_pipe != NULL) {
		/* wait until DMA finishes the current job */
		wait_for_completion_killable(&pipe->comp);
		pending_pipe = NULL;
	}

	down(&mfd->sem);
	mdp_enable_irq(MDP_OVERLAY0_TERM);
	mfd->dma->busy = TRUE;
	/* start OVERLAY pipe */
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
	if (pipe != mddi_pipe) {	/* non base layer */
		int intv;

		if (mddi_last_kick == 0)
			intv = 0;
		else
			intv = jiffies - mddi_last_kick;

		mddi_kick_interval += intv;
		mddi_kick_interval /= 2;	/* average */
		mddi_last_kick = jiffies;
	}
	up(&mfd->sem);
#else
	down(&mfd->sem);
	mdp_enable_irq(MDP_OVERLAY0_TERM);
	mfd->dma->busy = TRUE;
	INIT_COMPLETION(pipe->comp);
	pending_pipe = pipe;

	/* start OVERLAY pipe */
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
	up(&mfd->sem);

	/* wait until DMA finishes the current job */
	wait_for_completion_killable(&pipe->comp);
	mdp_disable_irq(MDP_OVERLAY0_TERM);
#endif
}

void mdp_pipe_kickoff(uint32 term, struct msm_fb_data_type *mfd)
{
	DISP_LOCAL_LOG_EMERG("DISP mdp_pipe_kickoff S\n");

	/* complete all the writes before starting */
	wmb();

	/* kick off PPP engine */
	if (term == MDP_PPP_TERM) {
		if (mdp_debug[MDP_PPP_BLOCK])
			jiffies_to_timeval(jiffies, &mdp_ppp_timeval);

		/* let's turn on PPP block */
		mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_ON, FALSE);

		mdp_enable_irq(term);
		INIT_COMPLETION(mdp_ppp_comp);
		mdp_ppp_waiting = TRUE;
		outpdw(MDP_BASE + 0x30, 0x1000);
		wait_for_completion_killable(&mdp_ppp_comp);
		mdp_disable_irq(term);

		if (mdp_debug[MDP_PPP_BLOCK]) {
			struct timeval now;

			jiffies_to_timeval(jiffies, &now);
			mdp_ppp_timeval.tv_usec =
				now.tv_usec - mdp_ppp_timeval.tv_usec;
			MSM_FB_DEBUG("MDP-PPP: %d\n",
				(int)mdp_ppp_timeval.tv_usec);
		}
	} else if (term == MDP_DMA2_TERM) {
		if (mdp_debug[MDP_DMA2_BLOCK]) {
			MSM_FB_DEBUG("MDP-DMA2: %d\n",
				(int)mdp_dma2_timeval.tv_usec);
			jiffies_to_timeval(jiffies, &mdp_dma2_timeval);
		}

		/* DMA update timestamp */
		mdp_dma2_last_update_time = ktime_get_real();

		/* let's turn on DMA2 block */
#if 0
		mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
#endif
#ifdef CONFIG_FB_MSM_MDP22
		outpdw(MDP_CMD_DEBUG_ACCESS_BASE + 0x0044, 0x0); /* start DMA */
#else
		mdp_lut_enable();
#ifdef CONFIG_FB_MSM_MDP40
		outpdw(MDP_BASE + 0x000c, 0x0);	/* start DMA */
#else
		outpdw(MDP_BASE + 0x0044, 0x0);	/* start DMA */
#endif
#endif
#ifdef CONFIG_FB_MSM_MDP40
	} else if (term == MDP_DMA_S_TERM) {
		mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0010, 0x0);	/* start DMA */
	} else if (term == MDP_DMA_E_TERM) {
		mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0014, 0x0);	/* start DMA */
	} else if (term == MDP_OVERLAY0_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mdp_lut_enable();
		outpdw(MDP_BASE + 0x0004, 0);
	} else if (term == MDP_OVERLAY1_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mdp_lut_enable();
		outpdw(MDP_BASE + 0x0008, 0);
	}
#else
}

void mdp_lcdc_update(struct msm_fb_data_type *mfd)
{
	struct fb_info *fbi = mfd->fbi;
	uint8 *buf;
	int bpp;
	unsigned long flag;
	uint32 dma_base;
	int irq_block = MDP_DMA2_TERM;
#ifdef CONFIG_FB_MSM_MDP40
	int intr = INTR_DMA_P_DONE;
#endif

	if (!mfd->panel_power_on)
		return;

	/* no need to power on cmd block since it's lcdc mode */
	if (!mfd->ibuf.visible_swapped) {
		bpp = fbi->var.bits_per_pixel / 8;
		buf = (uint8 *) fbi->fix.smem_start;
		buf += fbi->var.xoffset * bpp +
			fbi->var.yoffset * fbi->fix.line_length;
	} else {
		/* we've done something to update the pointer. */
		bpp = mfd->ibuf.bpp;
		buf = mfd->ibuf.buf;
	}

	dma_base = DMA_P_BASE;

#ifdef CONFIG_FB_MSM_MDP40
	if (mfd->panel.type == HDMI_PANEL) {
		intr = INTR_DMA_E_DONE;
		irq_block = MDP_DMA_E_TERM;
		dma_base = DMA_E_BASE;
	}
#endif

	/* starting address */
	MDP_OUTP(MDP_BASE + dma_base + 0x8, (uint32) buf);

	/* enable LCDC irq */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	mdp_enable_irq(irq_block);
	INIT_COMPLETION(mfd->dma->comp);
	mfd->dma->waiting = TRUE;
#ifdef CONFIG_FB_MSM_MDP40
	outp32(MDP_INTR_CLEAR, intr);
	mdp_intr_mask |= intr;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
#else
	outp32(MDP_INTR_CLEAR, LCDC_FRAME_START);
	mdp_intr_mask |= LCDC_FRAME_START;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
#endif
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	if (mfd->ibuf.vsync_enable)
		wait_for_completion_killable(&mfd->dma->comp);

	mdp_disable_irq(irq_block);
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: memory node number.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread. The thread will be stopped: use wake_up_process() to start
 * it. See also kthread_run().
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give -1.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called). The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[], ...)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		int i = 0;

		/*
		 * I got SIGKILL, but wait for 10 more seconds for completion
		 * unless chosen by the OOM killer. This delay is there as a
		 * workaround for boot failure caused by SIGKILL upon device
		 * driver initialization timeout.
		 */
		while (i++ < 10 && !test_tsk_thread_flag(current, TIF_MEMDIE))
			if (wait_for_completion_timeout(&done, HZ))
				goto ready;
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-ENOMEM);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
ready:
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task, cpu_all_mask);
	}
	kfree(create);
	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}
	/* It's safe because the task is inactive. */
	do_set_cpus_allowed(p, cpumask_of(cpu));
	p->flags |= PF_NO_SETAFFINITY;
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}

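/*
 * Typical use of the two helpers above: create the thread stopped, bind it
 * before its first wakeup, then start it explicitly. A minimal sketch; the
 * function name, thread name, and CPU number are illustrative (kthread_create()
 * is the stock wrapper for kthread_create_on_node() with node -1):
 */
static struct task_struct *worker_task;

static int start_worker_on_cpu0(int (*fn)(void *), void *arg)
{
	worker_task = kthread_create(fn, arg, "my_worker/%d", 0);
	if (IS_ERR(worker_task))
		return PTR_ERR(worker_task);

	kthread_bind(worker_task, 0);	/* must precede the first wakeup */
	wake_up_process(worker_task);
	return 0;
}
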
void mdp4_mddi_overlay_kickoff(struct msm_fb_data_type *mfd,
				struct mdp4_overlay_pipe *pipe)
{
#ifdef MDP4_NONBLOCKING
	unsigned long flag;

	/* use dma_p(overlay) pipe, change bpp into 16 */
#ifdef CONFIG_FB_MSM_BPP_SWITCH
	if (16 != mfd->panel_info.bpp)
		mdp4_switch_bpp_config(mfd, 16);
#endif

	if (pipe == mddi_pipe) {	/* base layer */
		if (mdp4_overlay_pipe_staged(pipe->mixer_num) > 1) {
			if (time_before(jiffies,
				(mddi_last_kick + mddi_kick_interval/2))) {
				mdp4_stat.kickoff_mddi_skip++;
				return;	/* let other pipe to kickoff */
			}
		}
	}

	spin_lock_irqsave(&mdp_spin_lock, flag);
	if (mfd->dma->busy == TRUE) {
		INIT_COMPLETION(pipe->comp);
		pending_pipe = pipe;
	}
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	if (pending_pipe != NULL) {
		/* wait until DMA finishes the current job */
#ifdef CONFIG_HUAWEI_KERNEL
		wait_for_completion_interruptible_timeout(&pipe->comp,
							  1 * HZ);
#else
		wait_for_completion_killable(&pipe->comp);
#endif
		pending_pipe = NULL;
	}

	down(&mfd->sem);
	mdp_enable_irq(MDP_OVERLAY0_TERM);
	mfd->dma->busy = TRUE;
	/* start OVERLAY pipe */
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
	if (pipe != mddi_pipe) {	/* non base layer */
		int intv;

		if (mddi_last_kick == 0)
			intv = 0;
		else
			intv = jiffies - mddi_last_kick;

		mddi_kick_interval += intv;
		mddi_kick_interval /= 2;	/* average */
		mddi_last_kick = jiffies;
	}
	up(&mfd->sem);
#else
	down(&mfd->sem);
	mdp_enable_irq(MDP_OVERLAY0_TERM);
	mfd->dma->busy = TRUE;
	INIT_COMPLETION(pipe->comp);
	pending_pipe = pipe;

	/* start OVERLAY pipe */
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
	up(&mfd->sem);

	/* wait until DMA finishes the current job */
#ifdef CONFIG_HUAWEI_KERNEL
	wait_for_completion_interruptible_timeout(&pipe->comp, 1 * HZ);
#else
	wait_for_completion_killable(&pipe->comp);
#endif
	mdp_disable_irq(MDP_OVERLAY0_TERM);
#endif
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: memory node number.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread. The thread will be stopped: use wake_up_process() to start
 * it. See also kthread_run().
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give -1.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called). The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[], ...)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task, cpu_all_mask);
	}
	kfree(create);
	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
		WARN_ON(1);
		return;
	}
	/* It's safe because the task is inactive. */
	do_set_cpus_allowed(p, cpumask_of(cpu));
	p->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit.
 * This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);
	get_task_struct(k);

	kthread = to_kthread(k);
	barrier(); /* it might have exited */
	if (k->vfork_done != NULL) {
		kthread->should_stop = 1;
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;

	put_task_struct(k);
	trace_sched_kthread_stop_ret(ret);

	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_HIGH_MEMORY]);

	current->flags |= PF_NOFREEZE;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	worker->task = NULL;
}
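
/*
 * For reference, the threadfn contract that kthread_stop() above relies on:
 * the thread polls kthread_should_stop() and returns, and its return value
 * is what kthread_stop() hands back to the caller. A minimal sketch; the
 * function name and the HZ sleep are illustrative:
 */
static int my_threadfn(void *data)
{
	while (!kthread_should_stop()) {
		/* do one unit of work, then sleep until there is more */
		schedule_timeout_interruptible(HZ);
	}
	return 0;	/* becomes kthread_stop()'s return value */
}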