Code example #1
File: stm32-ipcc.c  Project: avagin/linux
static irqreturn_t stm32_ipcc_rx_irq(int irq, void *data)
{
	struct stm32_ipcc *ipcc = data;
	struct device *dev = ipcc->controller.dev;
	u32 status, mr, tosr, chan;
	irqreturn_t ret = IRQ_NONE;
	int proc_offset;

	/* read 'channel occupied' status from other proc */
	proc_offset = ipcc->proc_id ? -IPCC_PROC_OFFST : IPCC_PROC_OFFST;
	tosr = readl_relaxed(ipcc->reg_proc + proc_offset + IPCC_XTOYSR);
	mr = readl_relaxed(ipcc->reg_proc + IPCC_XMR);

	/* search for unmasked 'channel occupied' */
	status = tosr & FIELD_GET(RX_BIT_MASK, ~mr);

	for (chan = 0; chan < ipcc->n_chans; chan++) {
		if (!(status & (1 << chan)))
			continue;

		dev_dbg(dev, "%s: chan:%d rx\n", __func__, chan);

		mbox_chan_received_data(&ipcc->controller.chans[chan], NULL);

		stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XSCR,
				    RX_BIT_CHAN(chan));

		ret = IRQ_HANDLED;
	}

	return ret;
}
Code example #2
File: bcm-flexrm-mailbox.c  Project: asmalldev/linux
static void flexrm_shutdown(struct mbox_chan *chan)
{
	u32 reqid;
	unsigned int timeout;
	struct brcm_message *msg;
	struct flexrm_ring *ring = chan->con_priv;

	/* Disable/inactivate ring */
	writel_relaxed(0x0, ring->regs + RING_CONTROL);

	/* Flush ring with timeout of 1s */
	timeout = 1000;
	writel_relaxed(BIT(CONTROL_FLUSH_SHIFT),
			ring->regs + RING_CONTROL);
	do {
		if (readl_relaxed(ring->regs + RING_FLUSH_DONE) &
		    FLUSH_DONE_MASK)
			break;
		mdelay(1);
	} while (timeout--);

	/* Abort all in-flight requests */
	for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) {
		msg = ring->requests[reqid];
		if (!msg)
			continue;

		/* Release reqid for recycling */
		ring->requests[reqid] = NULL;
		ida_simple_remove(&ring->requests_ida, reqid);

		/* Unmap DMA mappings */
		flexrm_dma_unmap(ring->mbox->dev, msg);

		/* Give-back message to mailbox client */
		msg->error = -EIO;
		mbox_chan_received_data(chan, msg);
	}

	/* Release IRQ */
	if (ring->irq_requested) {
		free_irq(ring->irq, ring);
		ring->irq_requested = false;
	}

	/* Free-up completion descriptor ring */
	if (ring->cmpl_base) {
		dma_pool_free(ring->mbox->cmpl_pool,
			      ring->cmpl_base, ring->cmpl_dma_base);
		ring->cmpl_base = NULL;
	}

	/* Free-up BD descriptor ring */
	if (ring->bd_base) {
		dma_pool_free(ring->mbox->bd_pool,
			      ring->bd_base, ring->bd_dma_base);
		ring->bd_base = NULL;
	}
}
Code example #3
static irqreturn_t slimpro_mbox_irq(int irq, void *id)
{
	struct slimpro_mbox_chan *mb_chan = id;

	if (mb_chan_status_ack(mb_chan))
		mbox_chan_txdone(mb_chan->chan, 0);

	if (mb_chan_status_avail(mb_chan))
		mbox_chan_received_data(mb_chan->chan, mb_chan->rx_msg);

	return IRQ_HANDLED;
}
Code example #4
File: mailbox-altera.c  Project: 168519/linux
static void altera_mbox_rx_data(struct mbox_chan *chan)
{
	struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
	u32 data[2];

	if (altera_mbox_pending(mbox)) {
		data[MBOX_PTR] =
			readl_relaxed(mbox->mbox_base + MAILBOX_PTR_REG);
		data[MBOX_CMD] =
			readl_relaxed(mbox->mbox_base + MAILBOX_CMD_REG);
		mbox_chan_received_data(chan, (void *)data);
	}
}
Code example #5
File: mailbox-sti.c  Project: AshishNamdev/linux
static irqreturn_t sti_mbox_thread_handler(int irq, void *data)
{
	struct sti_mbox_device *mdev = data;
	struct sti_mbox_pdata *pdata = dev_get_platdata(mdev->dev);
	struct mbox_chan *chan;
	unsigned int instance;

	for (instance = 0; instance < pdata->num_inst; instance++) {
keep_looking:
		chan = sti_mbox_irq_to_channel(mdev, instance);
		if (!chan)
			continue;

		mbox_chan_received_data(chan, NULL);
		sti_mbox_clear_irq(chan);
		sti_mbox_enable_channel(chan);
		goto keep_looking;
	}

	return IRQ_HANDLED;
}
Code example #6
File: bcm-flexrm-mailbox.c  Project: Lyude/linux
static void flexrm_shutdown(struct mbox_chan *chan)
{
	u32 reqid;
	unsigned int timeout;
	struct brcm_message *msg;
	struct flexrm_ring *ring = chan->con_priv;

	/* Disable/inactivate ring */
	writel_relaxed(0x0, ring->regs + RING_CONTROL);

	/* Set ring flush state */
	timeout = 1000; /* timeout of 1s */
	writel_relaxed(BIT(CONTROL_FLUSH_SHIFT),
			ring->regs + RING_CONTROL);
	do {
		if (readl_relaxed(ring->regs + RING_FLUSH_DONE) &
		    FLUSH_DONE_MASK)
			break;
		mdelay(1);
	} while (--timeout);
	if (!timeout)
		dev_err(ring->mbox->dev,
			"setting ring%d flush state timedout\n", ring->num);

	/* Clear ring flush state */
	timeout = 1000; /* timeout of 1s */
	writel_relaxed(0x0, ring->regs + RING_CONTROL);
	do {
		if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) &
		      FLUSH_DONE_MASK))
			break;
		mdelay(1);
	} while (--timeout);
	if (!timeout)
		dev_err(ring->mbox->dev,
			"clearing ring%d flush state timedout\n", ring->num);

	/* Abort all in-flight requests */
	for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) {
		msg = ring->requests[reqid];
		if (!msg)
			continue;

		/* Release reqid for recycling */
		ring->requests[reqid] = NULL;

		/* Unmap DMA mappings */
		flexrm_dma_unmap(ring->mbox->dev, msg);

		/* Give-back message to mailbox client */
		msg->error = -EIO;
		mbox_chan_received_data(chan, msg);
	}

	/* Clear requests bitmap */
	bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT);

	/* Release IRQ */
	if (ring->irq_requested) {
		irq_set_affinity_hint(ring->irq, NULL);
		free_irq(ring->irq, ring);
		ring->irq_requested = false;
	}

	/* Free-up completion descriptor ring */
	if (ring->cmpl_base) {
		dma_pool_free(ring->mbox->cmpl_pool,
			      ring->cmpl_base, ring->cmpl_dma_base);
		ring->cmpl_base = NULL;
	}

	/* Free-up BD descriptor ring */
	if (ring->bd_base) {
		dma_pool_free(ring->mbox->bd_pool,
			      ring->bd_base, ring->bd_dma_base);
		ring->bd_base = NULL;
	}
}
Code example #7
File: bcm-flexrm-mailbox.c  Project: Lyude/linux
static int flexrm_process_completions(struct flexrm_ring *ring)
{
	u64 desc;
	int err, count = 0;
	unsigned long flags;
	struct brcm_message *msg = NULL;
	u32 reqid, cmpl_read_offset, cmpl_write_offset;
	struct mbox_chan *chan = &ring->mbox->controller.chans[ring->num];

	spin_lock_irqsave(&ring->lock, flags);

	/*
	 * Get current completion read and write offset
	 *
	 * Note: We should read the completion write pointer at least once
	 * after we get an MSI interrupt because HW maintains internal
	 * MSI status which will allow next MSI interrupt only after
	 * completion write pointer is read.
	 */
	cmpl_write_offset = readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
	cmpl_write_offset *= RING_DESC_SIZE;
	cmpl_read_offset = ring->cmpl_read_offset;
	ring->cmpl_read_offset = cmpl_write_offset;

	spin_unlock_irqrestore(&ring->lock, flags);

	/* For each completed request notify mailbox clients */
	reqid = 0;
	while (cmpl_read_offset != cmpl_write_offset) {
		/* Dequeue next completion descriptor */
		desc = *((u64 *)(ring->cmpl_base + cmpl_read_offset));

		/* Next read offset */
		cmpl_read_offset += RING_DESC_SIZE;
		if (cmpl_read_offset == RING_CMPL_SIZE)
			cmpl_read_offset = 0;

		/* Decode error from completion descriptor */
		err = flexrm_cmpl_desc_to_error(desc);
		if (err < 0) {
			dev_warn(ring->mbox->dev,
			"ring%d got completion desc=0x%lx with error %d\n",
			ring->num, (unsigned long)desc, err);
		}

		/* Determine request id from completion descriptor */
		reqid = flexrm_cmpl_desc_to_reqid(desc);

		/* Determine message pointer based on reqid */
		msg = ring->requests[reqid];
		if (!msg) {
			dev_warn(ring->mbox->dev,
			"ring%d null msg pointer for completion desc=0x%lx\n",
			ring->num, (unsigned long)desc);
			continue;
		}

		/* Release reqid for recycling */
		ring->requests[reqid] = NULL;
		spin_lock_irqsave(&ring->lock, flags);
		bitmap_release_region(ring->requests_bmap, reqid, 0);
		spin_unlock_irqrestore(&ring->lock, flags);

		/* Unmap DMA mappings */
		flexrm_dma_unmap(ring->mbox->dev, msg);

		/* Give-back message to mailbox client */
		msg->error = err;
		mbox_chan_received_data(chan, msg);

		/* Increment number of completions processed */
		atomic_inc_return(&ring->msg_cmpl_count);
		count++;
	}

	return count;
}
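
Every example above sits on the controller side of the Linux mailbox framework: the driver decodes its hardware-specific status and hands the payload (or NULL) to mbox_chan_received_data(). On the client side, the framework forwards that same pointer to the rx_callback registered in struct mbox_client. The sketch below is a minimal, hypothetical client showing where that data arrives; the driver name, callback body, and timeout values are illustrative assumptions, not taken from any of the drivers above.

/*
 * Minimal, hypothetical mailbox client sketch (not from any driver above).
 * When a controller driver calls mbox_chan_received_data(), the mailbox
 * core invokes this client's rx_callback with the same message pointer.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mailbox_client.h>
#include <linux/err.h>
#include <linux/slab.h>

struct example_client {
	struct mbox_client cl;	/* embedded mailbox client */
	struct mbox_chan *chan;	/* channel obtained from the controller */
};

/* Called by the core when the controller calls mbox_chan_received_data() */
static void example_rx_callback(struct mbox_client *cl, void *mssg)
{
	struct example_client *ec = container_of(cl, struct example_client, cl);

	/* 'mssg' is whatever the controller driver passed in (may be NULL) */
	dev_info(cl->dev, "received mailbox data %p on chan %p\n",
		 mssg, ec->chan);
}

static int example_client_probe(struct platform_device *pdev)
{
	struct example_client *ec;

	ec = devm_kzalloc(&pdev->dev, sizeof(*ec), GFP_KERNEL);
	if (!ec)
		return -ENOMEM;

	ec->cl.dev = &pdev->dev;
	ec->cl.rx_callback = example_rx_callback;
	ec->cl.tx_block = true;		/* block in mbox_send_message() */
	ec->cl.tx_tout = 500;		/* ms, arbitrary for this sketch */

	/* Request channel 0 described by the device's "mboxes" DT property */
	ec->chan = mbox_request_channel(&ec->cl, 0);
	if (IS_ERR(ec->chan))
		return PTR_ERR(ec->chan);

	platform_set_drvdata(pdev, ec);
	return 0;
}

static int example_client_remove(struct platform_device *pdev)
{
	struct example_client *ec = platform_get_drvdata(pdev);

	mbox_free_channel(ec->chan);
	return 0;
}

static struct platform_driver example_client_driver = {
	.driver = {
		.name = "example-mbox-client",	/* hypothetical name */
	},
	.probe = example_client_probe,
	.remove = example_client_remove,
};
module_platform_driver(example_client_driver);
MODULE_LICENSE("GPL");

Note that several controllers above pass a pointer to short-lived data (the on-stack data[] array in the Altera example, or a request message that is about to be recycled in the FlexRM examples), so a real client should consume or copy the message inside rx_callback rather than keeping the pointer around.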