int mdss_dsi_bta_status_check(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
{
	int ret = 0;
	unsigned long flag;

	if (ctrl_pdata == NULL) {
		pr_err("%s: Invalid input data\n", __func__);

		/*
		 * Do not return an error here, otherwise the BTA status
		 * thread will treat it as a dead-panel scenario and
		 * request a blank/unblank.
		 */
		return 0;
	}

	pr_debug("%s: Checking BTA status\n", __func__);

	mdss_dsi_clk_ctrl(ctrl_pdata, 1);
	spin_lock_irqsave(&ctrl_pdata->mdp_lock, flag);
	INIT_COMPLETION(ctrl_pdata->bta_comp);
	mdss_dsi_enable_irq(ctrl_pdata, DSI_BTA_TERM);
	spin_unlock_irqrestore(&ctrl_pdata->mdp_lock, flag);
	MIPI_OUTP(ctrl_pdata->ctrl_base + 0x098, 0x01); /* trigger */
	wmb();

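	/* >0: BTA completed; 0: timed out; <0: killed by a fatal signal */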
	ret = wait_for_completion_killable_timeout(&ctrl_pdata->bta_comp,
						DSI_BTA_EVENT_TIMEOUT);
	if (ret <= 0) {
		mdss_dsi_disable_irq(ctrl_pdata, DSI_BTA_TERM);
		pr_err("%s: DSI BTA error: %i\n", __func__, ret);
	}

	mdss_dsi_clk_ctrl(ctrl_pdata, 0);
	pr_debug("%s: BTA done with ret: %d\n", __func__, ret);

	return ret;
}
static int mdp3_dmap_histo_reset(struct mdp3_dma *dma)
{
	unsigned long flag;
	int ret;

	spin_lock_irqsave(&dma->histo_lock, flag);

	init_completion(&dma->histo_comp);

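	/* Keep clocks running (disable auto-gating) while the reset runs */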
	mdp3_dma_clk_auto_gating(dma, 0);

	MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_INTR_ENABLE, BIT(0)|BIT(1));
	MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_RESET_SEQ_START, 1);
	wmb();
	dma->histo_state = MDP3_DMA_HISTO_STATE_RESET;

	spin_unlock_irqrestore(&dma->histo_lock, flag);

	mdp3_dma_callback_enable(dma, MDP3_DMA_CALLBACK_TYPE_HIST_RESET_DONE);
	ret = wait_for_completion_killable_timeout(&dma->histo_comp,
				msecs_to_jiffies(DMA_HISTO_RESET_TIMEOUT_MS));

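	/* Map the three-way return: 0 = timeout, <0 = killed, >0 = done */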
	if (ret == 0) {
		pr_err("mdp3_dmap_histo_reset time out\n");
		ret = -ETIMEDOUT;
	} else if (ret < 0) {
		pr_err("mdp3_dmap_histo_reset interrupted\n");
	} else {
		ret = 0;
	}
	mdp3_dma_callback_disable(dma, MDP3_DMA_CALLBACK_TYPE_HIST_RESET_DONE);
	mdp3_dma_clk_auto_gating(dma, 1);

	return ret;
}
Example #3
static int hid_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	unsigned long flags;
	struct hid_time_state *time_state =
		platform_get_drvdata(to_platform_device(dev));
	int ret;

	reinit_completion(&time_state->comp_last_time);
	/* get a report with all values through requesting one value */
	sensor_hub_input_attr_get_raw_value(time_state->common_attributes.hsdev,
			HID_USAGE_SENSOR_TIME, hid_time_addresses[0],
			time_state->info[0].report_id, SENSOR_HUB_SYNC);
	/* wait for all values (event) */
	ret = wait_for_completion_killable_timeout(
			&time_state->comp_last_time, HZ*6);
	if (ret > 0) {
		/* no error */
		spin_lock_irqsave(&time_state->lock_last_time, flags);
		*tm = time_state->last_time;
		spin_unlock_irqrestore(&time_state->lock_last_time, flags);
		return 0;
	}
	if (!ret)
		return -EIO; /* timed out */
	return ret; /* killed (-ERESTARTSYS) */
}
Example #4
void mdp_lcdc_update(struct msm_fb_data_type *mfd)
{
	struct fb_info *fbi = mfd->fbi;
	uint8 *buf;
	int bpp;
	unsigned long flag;
	uint32 dma_base;
	int irq_block = MDP_DMA2_TERM;
#ifdef CONFIG_FB_MSM_MDP40
	int intr = INTR_DMA_P_DONE;
#endif

	if (!mfd->panel_power_on)
		return;

	down(&mfd->dma->mutex);
	/* no need to power on cmd block since it's lcdc mode */
	bpp = fbi->var.bits_per_pixel / 8;
	buf = (uint8 *) fbi->fix.smem_start;

	buf += calc_fb_offset(mfd, fbi, bpp);

	dma_base = DMA_P_BASE;

#ifdef CONFIG_FB_MSM_MDP40
	if (mfd->panel.type == HDMI_PANEL) {
		intr = INTR_DMA_E_DONE;
		irq_block = MDP_DMA_E_TERM;
		dma_base = DMA_E_BASE;
	}
#endif

	/* starting address */
	MDP_OUTP(MDP_BASE + dma_base + 0x8, (uint32) buf);

	/* enable LCDC irq */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	mdp_enable_irq(irq_block);
	INIT_COMPLETION(mfd->dma->comp);
	mfd->dma->waiting = TRUE;
#ifdef CONFIG_FB_MSM_MDP40
	outp32(MDP_INTR_CLEAR, intr);
	mdp_intr_mask |= intr;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
#else
	outp32(MDP_INTR_CLEAR, LCDC_FRAME_START);
	mdp_intr_mask |= LCDC_FRAME_START;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
#endif
	spin_unlock_irqrestore(&mdp_spin_lock, flag);
#ifdef CONFIG_HW_ESD_DETECT
	/* Qualcomm patch to resolve the ESD issue */
	if (wait_for_completion_killable_timeout(&mfd->dma->comp, HZ/10) <= 0)
		pr_err("DMA_P timed out: %s %i", __func__, __LINE__);
#else
	wait_for_completion_killable(&mfd->dma->comp);
#endif
	mdp_disable_irq(irq_block);
	up(&mfd->dma->mutex);
}
Example #5
static int __fw_state_wait_common(struct fw_state *fw_st, long timeout)
{
	long ret;

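	/* >0: completed, 0: timed out, <0: killed; a load aborted via
	 * fw_st->status is reported as -ENOENT */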
	ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout);
	if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
		return -ENOENT;
	if (!ret)
		return -ETIMEDOUT;

	return ret < 0 ? ret : 0;
}
int mdss_dsi_bta_status_check(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
{
	int ret = 0;
	unsigned long flag;

	if (ctrl_pdata == NULL) {
		pr_err("%s: Invalid input data\n", __func__);

		/*
		 * This should not return error otherwise
		 * BTA status thread will treat it as dead panel scenario
		 * and request for blank/unblank
		 */
		return 0;
	}

	pr_debug("%s: Checking BTA status\n", __func__);
#ifdef CONFIG_HUAWEI_KERNEL
	mutex_lock(&ctrl_pdata->cmd_mutex);
#endif
	mdss_dsi_clk_ctrl(ctrl_pdata, 1);
	spin_lock_irqsave(&ctrl_pdata->mdp_lock, flag);
	INIT_COMPLETION(ctrl_pdata->bta_comp);
	mdss_dsi_enable_irq(ctrl_pdata, DSI_BTA_TERM);
	spin_unlock_irqrestore(&ctrl_pdata->mdp_lock, flag);
	MIPI_OUTP(ctrl_pdata->ctrl_base + 0x098, 0x01); /* trigger  */
	wmb();

	ret = wait_for_completion_killable_timeout(&ctrl_pdata->bta_comp,
						DSI_BTA_EVENT_TIMEOUT);
	if (ret <= 0) {
		mdss_dsi_disable_irq(ctrl_pdata, DSI_BTA_TERM);
		pr_err("%s: DSI BTA error: %i\n", __func__, ret);
	}
	mdss_dsi_clk_ctrl(ctrl_pdata, 0);
#ifdef CONFIG_HUAWEI_KERNEL
	mutex_unlock(&ctrl_pdata->cmd_mutex);
#endif
	pr_debug("%s: BTA done with ret: %d\n", __func__, ret);
#ifdef CONFIG_HUAWEI_LCD
	if (ret > 0) {
		/*
		 * If the panel check failed and the ESD check bit is
		 * enabled in the dtsi, report the event to the HAL layer.
		 */
		if (ctrl_pdata->esd_check_enable)
			ret = panel_check_live_status(ctrl_pdata);
	}
#endif
	return ret;
}
static void mdp4_overlay_dtv_wait4_ov_done(struct msm_fb_data_type *mfd,
	struct mdp4_overlay_pipe *pipe)
{
	unsigned long flag;
	u32 data = inpdw(MDP_BASE + DTV_BASE);

	mfd->ov_start = false;

	if (!(data & 0x1) || (pipe == NULL))
		return;
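	/* Wait up to 100 ms (HZ/10) for the overlay-done completion */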
	wait_for_completion_killable_timeout(&dtv_pipe->comp, HZ/10);
	spin_lock_irqsave(&mdp_done_lock, flag);
	mdp_disable_irq(MDP_OVERLAY1_TERM);
	spin_unlock_irqrestore(&mdp_done_lock, flag);
}
void mdp_dma2_update(struct msm_fb_data_type *mfd)
{
	int ret = 0;
	unsigned long flag;

	if (!mfd) {
		printk(KERN_ERR "%s: mfd is NULL\n", __func__);
		return;
	}
	down(&mfd->dma->mutex);
	if ((mfd) && (!mfd->dma->busy) && (mfd->panel_power_on)) {
		down(&mfd->sem);
		mfd->ibuf_flushed = TRUE;
		mdp_dma2_update_lcd(mfd);

		spin_lock_irqsave(&mdp_spin_lock, flag);
		mdp_enable_irq(MDP_DMA2_TERM);
		mfd->dma->busy = TRUE;
		INIT_COMPLETION(mfd->dma->comp);

		spin_unlock_irqrestore(&mdp_spin_lock, flag);
		/* schedule DMA to start */
		mdp_dma_schedule(mfd, MDP_DMA2_TERM);
		up(&mfd->sem);

		/* wait until DMA finishes the current job */
		ret = wait_for_completion_killable_timeout(&mfd->dma->comp, msecs_to_jiffies(500));
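		/* Timed out or killed: clear busy, power the block off, and
		 * complete manually so any other waiter is released */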
		if (ret <= 0) {
			mfd->dma->busy = FALSE;
			mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
			complete(&mfd->dma->comp);
		}

		mdp_disable_irq(MDP_DMA2_TERM);

	/* signal if pan function is waiting for the update completion */
		if (mfd->pan_waiting) {
			mfd->pan_waiting = FALSE;
			complete(&mfd->pan_comp);
		}
	}
	up(&mfd->dma->mutex);
}
Example #9
static int mdp3_wait4vsync(struct mdp3_dma *dma, bool killable,
	unsigned long timeout)
{
	int rc;
	int timeouts = 0;
	static int timeout_occurred;
	int prev_vsync_cnt = dma->vsync_cnt;
	unsigned long tout = timeout ? timeout : KOFF_TIMEOUT;

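	/* With no caller timeout, re-wait in KOFF_TIMEOUT slices until vsync */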
	do {
		if (killable) {
			rc = wait_for_completion_killable_timeout(
				&dma->vsync_comp, tout);
		} else {
			rc = wait_for_completion_timeout(
				&dma->vsync_comp, tout);
		}

		if (rc == -ERESTARTSYS)
			return rc;

		if (rc == 0) {
			pr_err("%s: TIMEOUT (vsync_cnt: prev: %u cur: %u)\n",
				__func__, prev_vsync_cnt, dma->vsync_cnt);
			timeout_occurred = 1;
			if (timeouts == 0 && dma->vsync_cnt > 0)
				mdss_timeout_dump(__func__);
			timeouts++;
		} else {
			if (timeout_occurred)
				pr_info("%s: recovered from previous timeout\n",
					__func__);
			timeout_occurred = 0;
			break;
		}
	} while (timeout == 0);

	if (timeout == 0 && timeouts)
		pr_err("%s: wait of %u ms timed out %d times!\n", __func__,
			jiffies_to_msecs(tout), timeouts);

	return rc;
}
/**
 * mdss_dsi_bta_status_check() - Check DSI panel status through a BTA check
 * @ctrl_pdata: pointer to the DSI controller structure
 *
 * This function checks the status of the panel by performing a BTA
 * (bus turnaround) check on the DSI link.
 *
 * Return: positive value if the panel is in good state, negative value or
 * zero otherwise.
 */
int mdss_dsi_bta_status_check(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
{
	int ret = 0;
	unsigned long flag;

	if (ctrl_pdata == NULL) {
		pr_err("%s: Invalid input data\n", __func__);

		/*
		 * This should not return error otherwise
		 * BTA status thread will treat it as dead panel scenario
		 * and request for blank/unblank
		 */
		return 0;
	}

	pr_debug("%s: Checking BTA status\n", __func__);

	mdss_dsi_clk_ctrl(ctrl_pdata, DSI_ALL_CLKS, 1);
	spin_lock_irqsave(&ctrl_pdata->mdp_lock, flag);
	INIT_COMPLETION(ctrl_pdata->bta_comp);
	mdss_dsi_enable_irq(ctrl_pdata, DSI_BTA_TERM);
	spin_unlock_irqrestore(&ctrl_pdata->mdp_lock, flag);
	MIPI_OUTP(ctrl_pdata->ctrl_base + 0x098, 0x01); /* trigger  */
	wmb();

	ret = wait_for_completion_killable_timeout(&ctrl_pdata->bta_comp,
						DSI_BTA_EVENT_TIMEOUT);
	if (ret <= 0) {
		mdss_dsi_disable_irq(ctrl_pdata, DSI_BTA_TERM);
		pr_err("%s: DSI BTA error: %i\n", __func__, ret);
	}

	mdss_dsi_clk_ctrl(ctrl_pdata, DSI_ALL_CLKS, 0);
	pr_debug("%s: BTA done with ret: %d\n", __func__, ret);

	return ret;
}
void mdp_dma2_update(struct msm_fb_data_type *mfd)
{
	unsigned long flag;
	static int first_vsync;
	int need_wait = 0;

	down(&mfd->dma->mutex);
	if ((mfd) && (mfd->panel_power_on)) {
		down(&mfd->sem);
		spin_lock_irqsave(&mdp_spin_lock, flag);
		if (mfd->dma->busy == TRUE)
			need_wait++;
		spin_unlock_irqrestore(&mdp_spin_lock, flag);

		if (need_wait)
			wait_for_completion_killable(&mfd->dma->comp);
#if defined (CONFIG_MACH_KYLEPLUS_CTC)
		/* wait until Vsync finishes the current job */
		if (first_vsync) {
			if (!wait_for_completion_killable_timeout
				(&vsync_cntrl.vsync_comp, HZ/10))
				pr_err("Timedout DMA %s %d", __func__,
									__LINE__);
		} else {
			first_vsync = 1;
		}
#endif
		/* schedule DMA to start */
		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mfd->ibuf_flushed = TRUE;
		mdp_dma2_update_lcd(mfd);

		spin_lock_irqsave(&mdp_spin_lock, flag);
		mdp_enable_irq(MDP_DMA2_TERM);
		mfd->dma->busy = TRUE;
		INIT_COMPLETION(mfd->dma->comp);
		INIT_COMPLETION(vsync_cntrl.vsync_comp);
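		/* Arm the primary read-pointer (vsync) interrupt if it is
		 * off and its clocks were previously disabled */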
		if (!vsync_cntrl.vsync_irq_enabled &&
				vsync_cntrl.disabled_clocks) {
			MDP_OUTP(MDP_BASE + 0x021c, 0x10); /* read pointer */
			outp32(MDP_INTR_CLEAR, MDP_PRIM_RDPTR);
			mdp_intr_mask |= MDP_PRIM_RDPTR;
			outp32(MDP_INTR_ENABLE, mdp_intr_mask);
			mdp_enable_irq(MDP_VSYNC_TERM);
			vsync_cntrl.vsync_dma_enabled = 1;
		}
		spin_unlock_irqrestore(&mdp_spin_lock, flag);
		/* schedule DMA to start */
		mdp_dma_schedule(mfd, MDP_DMA2_TERM);
		up(&mfd->sem);
		
#ifndef CONFIG_MACH_KYLEPLUS_CTC
		/* wait until Vsync finishes the current job */
		if (first_vsync) {
			if (!wait_for_completion_killable_timeout
					(&vsync_cntrl.vsync_comp, HZ/10))
				pr_err("Timedout DMA %s %d", __func__,
								__LINE__);
		} else {
			first_vsync = 1;
		}
#endif
		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	/* signal if pan function is waiting for the update completion */
		if (mfd->pan_waiting) {
			mfd->pan_waiting = FALSE;
			complete(&mfd->pan_comp);
		}
	}
	up(&mfd->dma->mutex);
}
Example #12
static int mdp3_dmap_histo_get(struct mdp3_dma *dma)
{
	int i, state, timeout, ret;
	u32 addr;
	unsigned long flag;

	spin_lock_irqsave(&dma->histo_lock, flag);
	state = dma->histo_state;
	spin_unlock_irqrestore(&dma->histo_lock, flag);

	if (state != MDP3_DMA_HISTO_STATE_START &&
		state != MDP3_DMA_HISTO_STATE_READY) {
		pr_err("mdp3_dmap_histo_get invalid state %d\n", state);
		return -EINVAL;
	}

	timeout = HIST_WAIT_TIMEOUT(dma->histogram_config.frame_count);
	ret = wait_for_completion_killable_timeout(&dma->histo_comp, timeout);

	if (ret == 0) {
		pr_debug("mdp3_dmap_histo_get time out\n");
		ret = -ETIMEDOUT;
	} else if (ret < 0) {
		pr_err("mdp3_dmap_histo_get interrupted\n");
	}

	if (ret < 0)
		return ret;

	if (dma->histo_state != MDP3_DMA_HISTO_STATE_READY) {
		pr_debug("mdp3_dmap_histo_get after dma shut down\n");
		return -EPERM;
	}

	addr = MDP3_REG_DMA_P_HIST_R_DATA;
	for (i = 0; i < MDP_HISTOGRAM_BIN_NUM; i++) {
		dma->histo_data.r_data[i] = MDP3_REG_READ(addr);
		addr += 4;
	}

	addr = MDP3_REG_DMA_P_HIST_G_DATA;
	for (i = 0; i < MDP_HISTOGRAM_BIN_NUM; i++) {
		dma->histo_data.g_data[i] = MDP3_REG_READ(addr);
		addr += 4;
	}

	addr = MDP3_REG_DMA_P_HIST_B_DATA;
	for (i = 0; i < MDP_HISTOGRAM_BIN_NUM; i++) {
		dma->histo_data.b_data[i] = MDP3_REG_READ(addr);
		addr += 4;
	}

	dma->histo_data.extra[0] =
			MDP3_REG_READ(MDP3_REG_DMA_P_HIST_EXTRA_INFO_0);
	dma->histo_data.extra[1] =
			MDP3_REG_READ(MDP3_REG_DMA_P_HIST_EXTRA_INFO_1);

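	/* Re-arm the completion and start the next histogram collection */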
	spin_lock_irqsave(&dma->histo_lock, flag);
	init_completion(&dma->histo_comp);
	MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_START, 1);
	wmb();
	dma->histo_state = MDP3_DMA_HISTO_STATE_START;
	spin_unlock_irqrestore(&dma->histo_lock, flag);

	return 0;
}
Example #13
static int dht11_read_raw(struct iio_dev *iio_dev,
			  const struct iio_chan_spec *chan,
			  int *val, int *val2, long m)
{
	struct dht11 *dht11 = iio_priv(iio_dev);
	int ret, timeres;

	mutex_lock(&dht11->lock);
	if (dht11->timestamp + DHT11_DATA_VALID_TIME < ktime_get_real_ns()) {
		timeres = ktime_get_resolution_ns();
		if (DHT11_DATA_BIT_HIGH < 2 * timeres) {
			dev_err(dht11->dev, "timeresolution %dns too low\n",
				timeres);
			/* In theory a better clock could become available
			 * at some point ... and there is no error code
			 * that really fits better.
			 */
			ret = -EAGAIN;
			goto err;
		}

		reinit_completion(&dht11->completion);

		dht11->num_edges = 0;
		ret = gpio_direction_output(dht11->gpio, 0);
		if (ret)
			goto err;
		msleep(DHT11_START_TRANSMISSION);
		ret = gpio_direction_input(dht11->gpio);
		if (ret)
			goto err;

		ret = request_irq(dht11->irq, dht11_handle_irq,
				  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				  iio_dev->name, iio_dev);
		if (ret)
			goto err;

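		/* Give the edge ISR up to one second (HZ jiffies) to signal
		 * completion */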
		ret = wait_for_completion_killable_timeout(&dht11->completion,
							   HZ);

		free_irq(dht11->irq, iio_dev);

		if (ret == 0 && dht11->num_edges < DHT11_EDGES_PER_READ - 1) {
			dev_err(&iio_dev->dev,
				"Only %d signal edges detected\n",
					dht11->num_edges);
			ret = -ETIMEDOUT;
		}
		if (ret < 0)
			goto err;

		ret = dht11_decode(dht11,
				   dht11->num_edges == DHT11_EDGES_PER_READ ?
					DHT11_EDGES_PREAMBLE :
					DHT11_EDGES_PREAMBLE - 2,
				timeres);
		if (ret)
			goto err;
	}

	ret = IIO_VAL_INT;
	if (chan->type == IIO_TEMP)
		*val = dht11->temperature;
	else if (chan->type == IIO_HUMIDITYRELATIVE)
		*val = dht11->humidity;
	else
		ret = -EINVAL;
err:
	dht11->num_edges = -1;
	mutex_unlock(&dht11->lock);
	return ret;
}
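
The common calling pattern across these examples: re-initialize the completion (under the lock the interrupt handler also takes, where one exists), trigger the hardware, wait with a timeout, then map the three-way return value. A minimal sketch of that pattern follows; struct my_dev, my_dev_trigger_hw() and the field names are hypothetical placeholders, not a real kernel API.

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct my_dev {
	struct completion event_comp;
};

/* Hypothetical: kicks the hardware; its ISR calls complete(&event_comp) */
static void my_dev_trigger_hw(struct my_dev *dev);

static int my_dev_wait_for_event(struct my_dev *dev, unsigned int timeout_ms)
{
	long ret;

	reinit_completion(&dev->event_comp);	/* must precede the trigger */
	my_dev_trigger_hw(dev);

	ret = wait_for_completion_killable_timeout(&dev->event_comp,
					msecs_to_jiffies(timeout_ms));
	if (ret == 0)
		return -ETIMEDOUT;	/* timer expired before complete() */
	if (ret < 0)
		return ret;		/* -ERESTARTSYS: fatal signal */
	return 0;			/* done; ret was the remaining jiffies */
}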