Example #1
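/*
 * Open handler for the S5P TVOUT CEC character device: takes the CEC lock,
 * enables the CEC clock, rejects concurrent opens, then resets the CEC
 * block, programs its divider and threshold, unmasks TX/RX interrupts and
 * arms the receiver.
 */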
static int s5p_cec_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	mutex_lock(&cec_lock);
	clk_enable(hdmi_cec_clk);

	if (atomic_read(&hdmi_on)) {
		tvout_dbg("do not allow multiple open for tvout cec\n");
		ret = -EBUSY;
		/* balance the clk_enable() above before failing the open */
		clk_disable(hdmi_cec_clk);
		goto err_multi_open;
	} else {
		atomic_inc(&hdmi_on);
	}

	s5p_cec_reset();

	s5p_cec_set_divider();

	s5p_cec_threshold();

	s5p_cec_unmask_tx_interrupts();

	s5p_cec_set_rx_state(STATE_RX);
	s5p_cec_unmask_rx_interrupts();
	s5p_cec_enable_rx();

err_multi_open:
	mutex_unlock(&cec_lock);

	return ret;
}
Example #2
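/*
 * read() handler with a bounded wait: sleeps until the interrupt handler
 * marks a received CEC message as complete (or TVOUT_TIMEOUT expires), then
 * copies the message to userspace under the RX spinlock and re-arms the
 * receiver state.
 */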
static ssize_t s5p_cec_read(struct file *file, char __user *buffer,
			size_t count, loff_t *ppos)
{
	ssize_t retval;
	unsigned long spin_flags;

	/*
	 * Wait on the RX wait queue: the IRQ handler wakes cec_rx_struct.waitq
	 * (not cec_tx_struct.waitq) once a complete message has been received.
	 * A zero return means the wait timed out.
	 */
	if (wait_event_interruptible_timeout(cec_rx_struct.waitq,
		atomic_read(&cec_rx_struct.state) == STATE_DONE,
		msecs_to_jiffies(TVOUT_TIMEOUT)) == 0) {
		printk(KERN_ERR "error: timed out waiting for CEC RX interrupt\n");
		return -ETIMEDOUT;
	}
	spin_lock_irqsave(&cec_rx_struct.lock, spin_flags);

	if (cec_rx_struct.size > count) {
		spin_unlock_irqrestore(&cec_rx_struct.lock, spin_flags);

		/* user buffer is too small for the received message */
		return -EINVAL;
	}

	if (copy_to_user(buffer, cec_rx_struct.buffer, cec_rx_struct.size)) {
		spin_unlock_irqrestore(&cec_rx_struct.lock, spin_flags);
		printk(KERN_ERR " copy_to_user() failed!\n");

		return -EFAULT;
	}

	retval = cec_rx_struct.size;

	s5p_cec_set_rx_state(STATE_RX);
	spin_unlock_irqrestore(&cec_rx_struct.lock, spin_flags);

	return retval;
}
Example #3
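/*
 * Variant of the read() handler without a timeout: waits interruptibly for
 * a complete RX message, then copies it to userspace while holding the RX
 * lock via spin_lock_irq()/spin_unlock_irq().
 */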
static ssize_t s5p_cec_read(struct file *file, char __user *buffer,
			size_t count, loff_t *ppos)
{
	ssize_t retval;

	if (wait_event_interruptible(cec_rx_struct.waitq,
			atomic_read(&cec_rx_struct.state) == STATE_DONE)) {
		return -ERESTARTSYS;
	}

	spin_lock_irq(&cec_rx_struct.lock);

	if (cec_rx_struct.size > count) {
		spin_unlock_irq(&cec_rx_struct.lock);

		/* user buffer is too small for the received message */
		return -EINVAL;
	}

	if (copy_to_user(buffer, cec_rx_struct.buffer, cec_rx_struct.size)) {
		spin_unlock_irq(&cec_rx_struct.lock);
		printk(KERN_ERR " copy_to_user() failed!\n");

		return -EFAULT;
	}

	retval = cec_rx_struct.size;

	s5p_cec_set_rx_state(STATE_RX);
	spin_unlock_irq(&cec_rx_struct.lock);

	return retval;
}
Example #4
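/*
 * CEC interrupt handler: on TX completion it records success or error and
 * wakes the transmit waiter; on RX completion it copies the message from
 * the hardware buffer into cec_rx_struct under the RX spinlock, marks it
 * done and wakes any reader blocked in s5p_cec_read().
 */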
static irqreturn_t s5p_cec_irq_handler(int irq, void *dev_id)
{

	u32 status = s5p_cec_get_status();

	if (status & CEC_STATUS_TX_DONE) {
		if (status & CEC_STATUS_TX_ERROR) {
			tvout_dbg(" CEC_STATUS_TX_ERROR!\n");
			s5p_cec_set_tx_state(STATE_ERROR);
		} else {
			tvout_dbg(" CEC_STATUS_TX_DONE!\n");
			s5p_cec_set_tx_state(STATE_DONE);
		}

		s5p_clr_pending_tx();

		wake_up_interruptible(&cec_tx_struct.waitq);
	}

	if (status & CEC_STATUS_RX_DONE) {
		if (status & CEC_STATUS_RX_ERROR) {
			tvout_dbg(" CEC_STATUS_RX_ERROR!\n");
			s5p_cec_rx_reset();

		} else {
			u32 size;

			tvout_dbg(" CEC_STATUS_RX_DONE!\n");

			/* copy data from internal buffer */
			size = status >> 24;

			spin_lock(&cec_rx_struct.lock);

			s5p_cec_get_rx_buf(size, cec_rx_struct.buffer);

			cec_rx_struct.size = size;

			s5p_cec_set_rx_state(STATE_DONE);

			spin_unlock(&cec_rx_struct.lock);

			s5p_cec_enable_rx();
		}

		/* clear interrupt pending bit */
		s5p_clr_pending_rx();

		wake_up_interruptible(&cec_rx_struct.waitq);
	}

	return IRQ_HANDLED;
}
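/*
 * Simplified open handler variant: flags HDMI/CEC as active and runs the
 * same reset/divider/threshold/interrupt-unmask sequence, but without the
 * clock, mutex and open-count handling used in the first open handler.
 */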
static int s5p_cec_open(struct inode *inode, struct file *file)
{
	g_hdmi_on = true;

	s5p_cec_reset();

	s5p_cec_set_divider();

	s5p_cec_threshold();

	s5p_cec_unmask_tx_interrupts();

	s5p_cec_set_rx_state(STATE_RX);
	s5p_cec_unmask_rx_interrupts();
	s5p_cec_enable_rx();

	return 0;
}