Example #1
/* If the hardware is busy, don't restart the async read.
 * If the status register is 0, meaning the initial state, restart the
 * async read, probably for the first time when populating a receive
 * buffer. If the read status indicates not busy and a status is set,
 * restart the async DMA read.
 */
static int sgdma_async_read(struct altera_tse_private *priv)
{
	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
	struct sgdma_descrip *descbase =
		(struct sgdma_descrip *)priv->rx_dma_desc;

	struct sgdma_descrip *cdesc = &descbase[0];
	struct sgdma_descrip *ndesc = &descbase[1];

	struct tse_buffer *rxbuffer = NULL;

	if (!sgdma_rxbusy(priv)) {
		rxbuffer = queue_rx_peekhead(priv);
		if (rxbuffer == NULL) {
			netdev_err(priv->dev, "no rx buffers available\n");
			return 0;
		}

		sgdma_setup_descrip(cdesc,		/* current descriptor */
				    ndesc,		/* next descriptor */
				    sgdma_rxphysaddr(priv, ndesc),
				    0,			/* read addr 0 for rx dma */
				    rxbuffer->dma_addr, /* write addr for rx dma */
				    0,			/* read 'til EOP */
				    0,			/* EOP: NA for rx dma */
				    0,			/* read fixed: NA for rx dma */
				    0);			/* SOP: NA for rx DMA */

		dma_sync_single_for_device(priv->device,
					   priv->rxdescphys,
					   priv->sgdmadesclen,
					   DMA_TO_DEVICE);

		iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
			  &csr->next_descrip);

		iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START),
			  &csr->control);

		return 1;
	}

	return 0;
}
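All of the listings below follow the same streaming-DMA ownership rule: the device may only touch a mapped buffer after dma_sync_single_for_device(), and the CPU may only read or write it again after dma_sync_single_for_cpu(). A minimal sketch of that bracketing, assuming a hypothetical device and buffer (none of the names below come from the drivers in this listing):

#include <linux/dma-mapping.h>

/* Hypothetical round trip: fill a buffer, hand it to the device, read it back. */
static int demo_dma_roundtrip(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* The CPU has written buf; give ownership to the device before DMA starts. */
	dma_sync_single_for_device(dev, handle, len, DMA_BIDIRECTIONAL);

	/* ... program the hardware and wait for completion here ... */

	/* Take ownership back before the CPU looks at the result. */
	dma_sync_single_for_cpu(dev, handle, len, DMA_BIDIRECTIONAL);

	dma_unmap_single(dev, handle, len, DMA_BIDIRECTIONAL);
	return 0;
}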
Example #2
/**
 * lsdma_rxqbuf - Queue a read buffer
 * @dma: DMA information structure
 * @bufnum: buffer number
 *
 * Do everything which normally follows a copy from a driver buffer
 * to a user buffer.
 **/
static ssize_t
lsdma_rxqbuf (struct master_dma *dma, size_t bufnum)
{
	unsigned int i;
	struct lsdma_desc *desc;

	if (bufnum != dma->cpu_buffer) {
		return -EINVAL;
	}
	for (i = 0; i < dma->pointers_per_buf; i++) {
		desc = dma->desc[dma->cpu_buffer * dma->pointers_per_buf + i];
		dma_sync_single_for_device (dma->dev,
			mdma_desc_to_dma (desc->dest_addr, desc->dest_addr_h),
			(desc->csr & LSDMA_DESC_CSR_TOTALXFERSIZE),
			DMA_FROM_DEVICE);
	}
	dma->cpu_buffer = (dma->cpu_buffer + 1) % dma->buffers;
	dma->cpu_offset = 0;
	return dma->bufsize;
}
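The function above re-arms a buffer for the device after userspace has drained it. As a hedged sketch (not taken from the driver), the dequeue side would do the mirror-image sync before the data is copied out to the user buffer:

/* Hypothetical dequeue path: make the DMA-filled buffer visible to the CPU
 * before it is copied out to userspace. */
static void demo_lsdma_rxdqbuf_sync(struct master_dma *dma, size_t bufnum)
{
	unsigned int i;
	struct lsdma_desc *desc;

	for (i = 0; i < dma->pointers_per_buf; i++) {
		desc = dma->desc[bufnum * dma->pointers_per_buf + i];
		dma_sync_single_for_cpu(dma->dev,
			mdma_desc_to_dma(desc->dest_addr, desc->dest_addr_h),
			(desc->csr & LSDMA_DESC_CSR_TOTALXFERSIZE),
			DMA_FROM_DEVICE);
	}
}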
Example #3
static void tegra_start_dma_tx(struct tegra_uart_port *t, unsigned long bytes)
{
	struct circ_buf *xmit;
	xmit = &t->uport.state->xmit;

	dma_sync_single_for_device(t->uport.dev, t->xmit_dma_addr,
		UART_XMIT_SIZE, DMA_TO_DEVICE);

	t->fcr_shadow &= ~UART_FCR_T_TRIG_11;
	t->fcr_shadow |= TEGRA_UART_TX_TRIG_4B;
	uart_writeb(t, t->fcr_shadow, UART_FCR);

	t->tx_bytes = bytes & ~(sizeof(u32)-1);
	t->tx_dma_req.source_addr = t->xmit_dma_addr + xmit->tail;
	t->tx_dma_req.size = t->tx_bytes;

	t->tx_in_progress = TEGRA_TX_DMA;

	tegra_dma_enqueue_req(t->tx_dma, &t->tx_dma_req);
}
Example #4
static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
		struct tty_port *tty, int count)
{
	int copied;

	tup->uport.icount.rx += count;
	if (!tty) {
		dev_err(tup->uport.dev, "No tty port\n");
		return;
	}
	dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys,
				TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_FROM_DEVICE);

	copied = tty_insert_flip_string_lock(tty,
			((unsigned char *)(tup->rx_dma_buf_virt)), count);
	if (copied != count)
		dev_err(tup->uport.dev, "RxData DMA copy to tty layer failed\n");
	dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
				TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_TO_DEVICE);
}
Example #5
static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
{
	unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE;

	tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan,
				tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT);
	if (!tup->rx_dma_desc) {
		dev_err(tup->uport.dev, "Not able to get desc for Rx\n");
		return -EIO;
	}

	tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete;
	tup->rx_dma_desc->callback_param = tup;
	dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
				count, DMA_TO_DEVICE);
	tup->rx_bytes_requested = count;
	tup->rx_cookie = dmaengine_submit(tup->rx_dma_desc);
	dma_async_issue_pending(tup->rx_dma_chan);
	return 0;
}
Example #6
static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
{
	struct page *page = alloc_pages(pool->gfp_mask, pool->order);

	if (!page)
		return NULL;
	/* this is only being used to flush the page for dma,
	   this api is not really suitable for calling from a driver
	   but no better way to flush a page for dma exist at this time */
#ifdef CONFIG_64BIT
	dma_sync_single_for_device(NULL, (dma_addr_t)page_to_phys(page),
		PAGE_SIZE << pool->order,
		DMA_BIDIRECTIONAL);
#else
	arm_dma_ops.sync_single_for_device(NULL,
		pfn_to_dma(NULL, page_to_pfn(page)),
		PAGE_SIZE << pool->order,
		DMA_BIDIRECTIONAL);
#endif

	return page;
}
Example #7
static void s3c64xx_start_rx_dma(struct s3c24xx_uart_port *ourport)
{
	struct s3c24xx_uart_dma *dma = ourport->dma;

	dma_sync_single_for_device(ourport->port.dev, dma->rx_addr,
				dma->rx_size, DMA_FROM_DEVICE);

	dma->rx_desc = dmaengine_prep_slave_single(dma->rx_chan,
				dma->rx_addr, dma->rx_size, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT);
	if (!dma->rx_desc) {
		dev_err(ourport->port.dev, "Unable to get desc for Rx\n");
		return;
	}

	dma->rx_desc->callback = s3c24xx_serial_rx_dma_complete;
	dma->rx_desc->callback_param = ourport;
	dma->rx_bytes_requested = dma->rx_size;

	dma->rx_cookie = dmaengine_submit(dma->rx_desc);
	dma_async_issue_pending(dma->rx_chan);
}
Example #8
int serial8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
{
	struct uart_8250_dma		*dma = p->dma;
	struct dma_async_tx_descriptor	*desc;
	struct dma_tx_state		state;
	int				dma_status;

	/*
	 * If RCVR FIFO trigger level was not reached, complete the transfer and
	 * let 8250.c copy the remaining data.
	 */
	if ((iir & 0x3f) == UART_IIR_RX_TIMEOUT) {
		dma_status = dmaengine_tx_status(dma->rxchan, dma->rx_cookie,
						&state);
		if (dma_status == DMA_IN_PROGRESS) {
			dmaengine_pause(dma->rxchan);
			__dma_rx_complete(p);
		}
		return -ETIMEDOUT;
	}

	desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
					   dma->rx_size, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	desc->callback = __dma_rx_complete;
	desc->callback_param = p;

	dma->rx_cookie = dmaengine_submit(desc);

	dma_sync_single_for_device(dma->rxchan->device->dev, dma->rx_addr,
				   dma->rx_size, DMA_FROM_DEVICE);

	dma_async_issue_pending(dma->rxchan);

	return 0;
}
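The completion handler __dma_rx_complete() is not part of this listing. A rough sketch of what such a handler typically does with this kind of state (an illustration only; rx_buf and the tty handling are assumptions, not the actual 8250 implementation):

/* Hypothetical Rx completion: work out how much the DMA engine wrote,
 * hand the buffer back to the CPU, then push the bytes into the tty layer. */
static void demo_dma_rx_complete(void *param)
{
	struct uart_8250_port	*p = param;
	struct uart_8250_dma	*dma = p->dma;
	struct tty_port		*tty_port = &p->port.state->port;
	struct dma_tx_state	state;
	int			count;

	dma_sync_single_for_cpu(dma->rxchan->device->dev, dma->rx_addr,
				dma->rx_size, DMA_FROM_DEVICE);

	dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
	count = dma->rx_size - state.residue;

	tty_insert_flip_string(tty_port, dma->rx_buf, count);
	p->port.icount.rx += count;
	tty_flip_buffer_push(tty_port);
}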
Example #9
/*
 * It is expected that the callers take the UART lock when this API is called.
 *
 * There are 2 contexts when this function is called:
 *
 * 1. DMA ISR - the DMA ISR triggers the threshold complete callback, which
 * calls the dequeue API, which in turn calls this callback. The UART lock is
 * taken during the call to the threshold callback.
 *
 * 2. UART ISR - the UART ISR calls the dequeue API, which in turn calls this
 * API. In this case, the UART ISR takes the UART lock.
 */
static void tegra_rx_dma_complete_callback(struct tegra_dma_req *req)
{
	struct tegra_uart_port *t = req->dev;
	struct uart_port *u = &t->uport;
	struct tty_struct *tty = u->state->port.tty;
	int copied;

	/* If we are here, DMA is stopped */

	dev_dbg(t->uport.dev, "%s: %d %d\n", __func__, req->bytes_transferred,
		req->status);
	if (req->bytes_transferred) {
		t->uport.icount.rx += req->bytes_transferred;
		dma_sync_single_for_cpu(t->uport.dev, req->dest_addr,
				req->size, DMA_FROM_DEVICE);
		copied = tty_insert_flip_string(tty,
			((unsigned char *)(req->virt_addr)),
			req->bytes_transferred);
		if (copied != req->bytes_transferred) {
			WARN_ON(1);
			dev_err(t->uport.dev, "Not able to copy uart data "
				"to tty layer Req %d and copied %d\n",
				req->bytes_transferred, copied);
		}
		dma_sync_single_for_device(t->uport.dev, req->dest_addr,
				req->size, DMA_TO_DEVICE);
	}

	do_handle_rx_pio(t);

	/* Push the read data later in caller place. */
	if (req->status == -TEGRA_DMA_REQ_ERROR_ABORTED)
		return;

	spin_unlock(&u->lock);
	tty_flip_buffer_push(u->state->port.tty);
	spin_lock(&u->lock);
}
Example #10
static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	dma_addr_t addr = denali->buf.dma_buf;
	size_t size = denali->mtd.writesize + denali->mtd.oobsize;

	uint32_t irq_status = 0;
	uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP;

	if (page != denali->page) {
		dev_err(denali->dev, "IN %s: page %d is not"
				" equal to denali->page %d, investigate!!",
				__func__, page, denali->page);
		BUG();
	}

	setup_ecc_for_xfer(denali, false, true);
	denali_enable_dma(denali, true);

	dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);

	clear_interrupts(denali);
	denali_setup_dma(denali, DENALI_READ);

	/* wait for operation to complete */
	irq_status = wait_for_irq(denali, irq_mask);

	dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);

	denali_enable_dma(denali, false);

	memcpy(buf, denali->buf.buf, mtd->writesize);
	memcpy(chip->oob_poi, denali->buf.buf + mtd->writesize, mtd->oobsize);

	return 0;
}
Example #11
static int sgdma_async_write(struct altera_tse_private *priv,
			     struct sgdma_descrip *desc)
{
	struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;

	if (sgdma_txbusy(priv))
		return 0;

	/* clear control and status */
	iowrite32(0, &csr->control);
	iowrite32(0x1f, &csr->status);

	dma_sync_single_for_device(priv->device, priv->txdescphys,
				   priv->sgdmadesclen, DMA_TO_DEVICE);

	iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
		  &csr->next_descrip);

	iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START),
		  &csr->control);

	return 1;
}
Example #12
static int mvneta_send(struct eth_device *edev, void *data, int len)
{
	struct mvneta_port *priv = edev->priv;
	struct txdesc *txdesc = priv->txdesc;
	int ret, error, last_desc;

	/* Flush transmit data */
	dma_sync_single_for_device((unsigned long)data, len, DMA_TO_DEVICE);

	memset(txdesc, 0, sizeof(*txdesc));
	/* Fill the Tx descriptor */
	txdesc->cmd_sts = MVNETA_TX_L4_CSUM_NOT | MVNETA_TXD_FLZ_DESC;
	txdesc->buf_ptr = (u32)data;
	txdesc->byte_cnt = len;

	/* Increase the number of prepared descriptors (one), by writing
	 * to the 'NoOfWrittenDescriptors' field in the PTXSU register.
	 */
	writel(1, priv->reg + MVNETA_TXQ_UPDATE_REG(0));

	/* The controller updates the number of transmitted descriptors in
	 * the Tx port status register (PTXS).
	 */
	ret = wait_on_timeout(TRANSFER_TIMEOUT, !mvneta_pending_tx(priv));
	dma_sync_single_for_cpu((unsigned long)data, len, DMA_TO_DEVICE);
	if (ret) {
		dev_err(&edev->dev, "transmit timeout\n");
		return ret;
	}

	last_desc = readl(&txdesc->cmd_sts) & MVNETA_TXD_L_DESC;
	error = readl(&txdesc->error);
	if (last_desc && error & MVNETA_TXD_ERROR) {
		dev_err(&edev->dev, "transmit error %d\n",
			(error & TXD_ERROR_MASK) >> TXD_ERROR_SHIFT);
		return -EIO;
	}

	return 0;
}
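Note that this example (and examples #21, #26 and #28 further down) appears to come from barebox rather than the Linux kernel: there the streaming-sync helpers take the buffer's DMA address directly and no struct device argument, so the bracketing looks like this hedged sketch rather than the five-argument Linux form:

/* Hypothetical barebox-style Tx flush: the sync helpers are keyed only by
 * the buffer address and length, with no struct device. */
static void demo_barebox_tx_flush(void *buf, size_t len)
{
	dma_sync_single_for_device((unsigned long)buf, len, DMA_TO_DEVICE);
	/* ... start the transfer and wait for it to finish ... */
	dma_sync_single_for_cpu((unsigned long)buf, len, DMA_TO_DEVICE);
}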
Example #13
static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;

	while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
		unsigned int size;
		union mgmt_port_ring_entry re;
		struct sk_buff *skb;

		/* CN56XX pass 1 needs 8 bytes of padding.  */
		size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;

		skb = netdev_alloc_skb(netdev, size);
		if (!skb)
			break;
		skb_reserve(skb, NET_IP_ALIGN);
		__skb_queue_tail(&p->rx_list, skb);

		re.d64 = 0;
		re.s.len = size;
		re.s.addr = dma_map_single(p->dev, skb->data,
					   size,
					   DMA_FROM_DEVICE);

		/* Put it in the ring.  */
		p->rx_ring[p->rx_next_fill] = re.d64;
		dma_sync_single_for_device(p->dev, p->rx_ring_handle,
					   ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
					   DMA_BIDIRECTIONAL);
		p->rx_next_fill =
			(p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
		p->rx_current_fill++;
		/* Ring the bell.  */
		cvmx_write_csr(CVMX_MIXX_IRING2(port), 1);
	}
}
Example #14
/**
 * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
 * -EBUSY if the queue is full, -EIO if it cannot map the caller's
 * descriptor.
 * @dev:  device of the job ring to be used. This device should have
 *        been assigned prior by caam_jr_register().
 * @desc: points to a job descriptor that executes our request. All
 *        descriptors (and all referenced data) must be in a DMAable
 *        region, and all data references must be physical addresses
 *        accessible to CAAM (i.e. within a PAMU window granted
 *        to it).
 * @cbk:  pointer to a callback function to be invoked upon completion
 *        of this request. This has the form:
 *        callback(struct device *dev, u32 *desc, u32 stat, void *arg)
 *        where:
 *        @dev:    contains the job ring device that processed this
 *                 response.
 *        @desc:   descriptor that initiated the request, same as the
 *                 "desc" argument passed to caam_jr_enqueue().
 *        @status: untranslated status received from CAAM. See the
 *                 reference manual for a detailed description of
 *                 error meaning, or see the JRSTA definitions in the
 *                 register header file
 *        @areq:   optional pointer to an argument passed with the
 *                 original request
 * @areq: optional pointer to a user argument for use at callback
 *        time.
 **/
int caam_jr_enqueue(struct device *dev, u32 *desc,
		    void (*cbk)(struct device *dev, u32 *desc,
				u32 status, void *areq),
		    void *areq)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_jrentry_info *head_entry;
	unsigned long flags;
	int head, tail, desc_size;
	dma_addr_t desc_dma, inpbusaddr;

	desc_size = (*desc & HDR_JD_LENGTH_MASK) * sizeof(u32);
	desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, desc_dma)) {
		dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n");
		return -EIO;
	}

	dma_sync_single_for_device(dev, desc_dma, desc_size, DMA_TO_DEVICE);

	inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
	dma_sync_single_for_device(dev, inpbusaddr,
					sizeof(dma_addr_t) * JOBR_DEPTH,
					DMA_TO_DEVICE);
	spin_lock_irqsave(&jrp->inplock, flags);

	head = jrp->head;
	tail = ACCESS_ONCE(jrp->tail);

	if (!rd_reg32(&jrp->rregs->inpring_avail) ||
	    CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
		spin_unlock_irqrestore(&jrp->inplock, flags);
		dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
		return -EBUSY;
	}

	head_entry = &jrp->entinfo[head];
	head_entry->desc_addr_virt = desc;
	head_entry->desc_size = desc_size;
	head_entry->callbk = (void *)cbk;
	head_entry->cbkarg = areq;
	head_entry->desc_addr_dma = desc_dma;

	jrp->inpring[jrp->inp_ring_write_index] = desc_dma;

	dma_sync_single_for_device(dev, inpbusaddr,
					sizeof(dma_addr_t) * JOBR_DEPTH,
					DMA_TO_DEVICE);

	smp_wmb();

	jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
				    (JOBR_DEPTH - 1);
	jrp->head = (head + 1) & (JOBR_DEPTH - 1);

	wmb();

	wr_reg32(&jrp->rregs->inpring_jobadd, 1);

	spin_unlock_irqrestore(&jrp->inplock, flags);

	return 0;
}
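Given the callback contract spelled out in the kernel-doc above, a caller might look like the hedged sketch below; the completion-based wait (from <linux/completion.h>) and the demo_* names are assumptions made for illustration:

/* Hypothetical caller: submit a job descriptor and block until it completes. */
struct demo_jr_result {
	struct completion comp;
	u32 status;
};

static void demo_jr_done(struct device *dev, u32 *desc, u32 status, void *areq)
{
	struct demo_jr_result *res = areq;

	res->status = status;
	complete(&res->comp);
}

static int demo_jr_run(struct device *jrdev, u32 *desc)
{
	struct demo_jr_result res;
	int ret;

	init_completion(&res.comp);
	ret = caam_jr_enqueue(jrdev, desc, demo_jr_done, &res);
	if (ret)
		return ret;

	wait_for_completion(&res.comp);
	return res.status ? -EIO : 0;
}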
Example #15
static void kbase_mmu_sync_pgd(struct device *dev,
		dma_addr_t handle, size_t size)
{
	dma_sync_single_for_device(dev, handle, size, DMA_TO_DEVICE);
}
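A plausible call site for this helper, assuming the page-directory page was mapped with the streaming API at allocation time (the field and function names here are illustrative, not taken from the driver):

/* Hypothetical: after the CPU updates a page-table entry, push the change
 * out so the GPU's MMU walker sees it. */
static void demo_mmu_update_pte(struct device *dev, u64 *pgd_page,
				dma_addr_t pgd_dma, unsigned int idx, u64 pte)
{
	pgd_page[idx] = pte;
	kbase_mmu_sync_pgd(dev, pgd_dma + idx * sizeof(u64), sizeof(u64));
}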
Example #16
/* This is always called in spinlock protected mode, so
 * modifying the timeout timer is safe here */
void hsu_dma_rx(struct uart_hsu_port *up, u32 int_sts)
{
	struct hsu_dma_buffer *dbuf = &up->rxbuf;
	struct hsu_dma_chan *chan = up->rxc;
	struct uart_port *port = &up->port;
	struct tty_struct *tty = port->state->port.tty;
	int count;

	if (!tty)
		return;

	/*
	 * First we need to know how much has already been transferred,
	 * then check whether it is a timeout DMA irq; if so, read the
	 * trailing bytes out, push them up and re-enable the channel.
	 */

	/* Timeout IRQ, need wait some time, see Errata 2 */
	if (int_sts & 0xf00)
		udelay(2);

	/* Stop the channel */
	chan_writel(chan, HSU_CH_CR, 0x0);

	count = chan_readl(chan, HSU_CH_D0SAR) - dbuf->dma_addr;
	if (!count) {
		/* Restart the channel before we leave */
		chan_writel(chan, HSU_CH_CR, 0x3);
		return;
	}
	del_timer(&chan->rx_timer);

	dma_sync_single_for_cpu(port->dev, dbuf->dma_addr,
			dbuf->dma_size, DMA_FROM_DEVICE);

	/*
	 * Head will only wrap around when we recycle
	 * the DMA buffer, and when that happens, we
	 * explicitly set tail to 0. So head will
	 * always be greater than tail.
	 */
	tty_insert_flip_string(tty, dbuf->buf, count);
	port->icount.rx += count;

	dma_sync_single_for_device(up->port.dev, dbuf->dma_addr,
			dbuf->dma_size, DMA_FROM_DEVICE);

	/* Reprogram the channel */
	chan_writel(chan, HSU_CH_D0SAR, dbuf->dma_addr);
	chan_writel(chan, HSU_CH_D0TSR, dbuf->dma_size);
	chan_writel(chan, HSU_CH_DCR, 0x1
					 | (0x1 << 8)
					 | (0x1 << 16)
					 | (0x1 << 24)	/* timeout bit, see HSU Errata 1 */
					 );
	tty_flip_buffer_push(tty);

	chan_writel(chan, HSU_CH_CR, 0x3);
	chan->rx_timer.expires = jiffies + HSU_DMA_TIMEOUT_CHECK_FREQ;
	add_timer(&chan->rx_timer);

}
Example #17
static void myri_rx(struct myri_eth *mp, struct net_device *dev)
{
	struct recvq __iomem *rq = mp->rq;
	struct recvq __iomem *rqa = mp->rqack;
	int entry		= sbus_readl(&rqa->head);
	int limit		= sbus_readl(&rqa->tail);
	int drops;

	DRX(("entry[%d] limit[%d] ", entry, limit));
	if (entry == limit)
		return;
	drops = 0;
	DRX(("\n"));
	while (entry != limit) {
		struct myri_rxd __iomem *rxdack = &rqa->myri_rxd[entry];
		u32 csum		= sbus_readl(&rxdack->csum);
		int len			= sbus_readl(&rxdack->myri_scatters[0].len);
		int index		= sbus_readl(&rxdack->ctx);
		struct myri_rxd __iomem *rxd = &rq->myri_rxd[sbus_readl(&rq->tail)];
		struct sk_buff *skb	= mp->rx_skbs[index];

		/* Ack it. */
		sbus_writel(NEXT_RX(entry), &rqa->head);

		/* Check for errors. */
		DRX(("rxd[%d]: %p len[%d] csum[%08x] ", entry, rxd, len, csum));
		dma_sync_single_for_cpu(&mp->myri_op->dev,
					sbus_readl(&rxd->myri_scatters[0].addr),
					RX_ALLOC_SIZE, DMA_FROM_DEVICE);
		if (len < (ETH_HLEN + MYRI_PAD_LEN) || (skb->data[0] != MYRI_PAD_LEN)) {
			DRX(("ERROR["));
			dev->stats.rx_errors++;
			if (len < (ETH_HLEN + MYRI_PAD_LEN)) {
				DRX(("BAD_LENGTH] "));
				dev->stats.rx_length_errors++;
			} else {
				DRX(("NO_PADDING] "));
				dev->stats.rx_frame_errors++;
			}

			/* Return it to the LANAI. */
	drop_it:
			drops++;
			DRX(("DROP "));
			dev->stats.rx_dropped++;
			dma_sync_single_for_device(&mp->myri_op->dev,
						   sbus_readl(&rxd->myri_scatters[0].addr),
						   RX_ALLOC_SIZE,
						   DMA_FROM_DEVICE);
			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
			sbus_writel(index, &rxd->ctx);
			sbus_writel(1, &rxd->num_sg);
			sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);
			goto next;
		}

		DRX(("len[%d] ", len));
		if (len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;
			u32 dma_addr;

			DRX(("BIGBUFF "));
			new_skb = myri_alloc_skb(RX_ALLOC_SIZE, GFP_ATOMIC);
			if (new_skb == NULL) {
				DRX(("skb_alloc(FAILED) "));
				goto drop_it;
			}
			dma_unmap_single(&mp->myri_op->dev,
					 sbus_readl(&rxd->myri_scatters[0].addr),
					 RX_ALLOC_SIZE,
					 DMA_FROM_DEVICE);
			mp->rx_skbs[index] = new_skb;
			new_skb->dev = dev;
			skb_put(new_skb, RX_ALLOC_SIZE);
			dma_addr = dma_map_single(&mp->myri_op->dev,
						  new_skb->data,
						  RX_ALLOC_SIZE,
						  DMA_FROM_DEVICE);
			sbus_writel(dma_addr, &rxd->myri_scatters[0].addr);
			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
			sbus_writel(index, &rxd->ctx);
			sbus_writel(1, &rxd->num_sg);
			sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);

			/* Trim the original skb for the netif. */
			DRX(("trim(%d) ", len));
			skb_trim(skb, len);
		} else {
			struct sk_buff *copy_skb = dev_alloc_skb(len);

			DRX(("SMALLBUFF "));
			if (copy_skb == NULL) {
				DRX(("dev_alloc_skb(FAILED) "));
				goto drop_it;
			}
			/* DMA sync already done above. */
			copy_skb->dev = dev;
			DRX(("resv_and_put "));
			skb_put(copy_skb, len);
			skb_copy_from_linear_data(skb, copy_skb->data, len);

			/* Reuse original ring buffer. */
			DRX(("reuse "));
			dma_sync_single_for_device(&mp->myri_op->dev,
						   sbus_readl(&rxd->myri_scatters[0].addr),
						   RX_ALLOC_SIZE,
						   DMA_FROM_DEVICE);
			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
			sbus_writel(index, &rxd->ctx);
			sbus_writel(1, &rxd->num_sg);
			sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);

			skb = copy_skb;
		}

		/* Just like the happy meal we get checksums from this card. */
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_UNNECESSARY; /* XXX */

		skb->protocol = myri_type_trans(skb, dev);
		DRX(("prot[%04x] netif_rx ", skb->protocol));
		netif_rx(skb);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
	next:
		DRX(("NEXT\n"));
		entry = NEXT_RX(entry);
	}
}
Example #18
static int greth_rx(struct net_device *dev, int limit)
{
	struct greth_private *greth;
	struct greth_bd *bdp;
	struct sk_buff *skb;
	int pkt_len;
	int bad, count;
	u32 status, dma_addr;
	unsigned long flags;

	greth = netdev_priv(dev);

	for (count = 0; count < limit; ++count) {

		bdp = greth->rx_bd_base + greth->rx_cur;
		GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
		mb();
		status = greth_read_bd(&bdp->stat);

		if (unlikely(status & GRETH_BD_EN)) {
			break;
		}

		dma_addr = greth_read_bd(&bdp->addr);
		bad = 0;

		/* Check status for errors. */
		if (unlikely(status & GRETH_RXBD_STATUS)) {
			if (status & GRETH_RXBD_ERR_FT) {
				dev->stats.rx_length_errors++;
				bad = 1;
			}
			if (status & (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE)) {
				dev->stats.rx_frame_errors++;
				bad = 1;
			}
			if (status & GRETH_RXBD_ERR_CRC) {
				dev->stats.rx_crc_errors++;
				bad = 1;
			}
		}
		if (unlikely(bad)) {
			dev->stats.rx_errors++;

		} else {

			pkt_len = status & GRETH_BD_LEN;

			skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);

			if (unlikely(skb == NULL)) {

				if (net_ratelimit())
					dev_warn(&dev->dev, "low on memory - " "packet dropped\n");

				dev->stats.rx_dropped++;

			} else {
				skb_reserve(skb, NET_IP_ALIGN);

				dma_sync_single_for_cpu(greth->dev,
							dma_addr,
							pkt_len,
							DMA_FROM_DEVICE);

				if (netif_msg_pktdata(greth))
					greth_print_rx_packet(phys_to_virt(dma_addr), pkt_len);

				memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len);

				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_bytes += pkt_len;
				dev->stats.rx_packets++;
				netif_receive_skb(skb);
			}
		}

		status = GRETH_BD_EN | GRETH_BD_IE;
		if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
			status |= GRETH_BD_WR;
		}

		wmb();
		greth_write_bd(&bdp->stat, status);

		dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);

		spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */
		greth_enable_rx(greth);
		spin_unlock_irqrestore(&greth->devlock, flags);

		greth->rx_cur = NEXT_RX(greth->rx_cur);
	}

	return count;
}
Example #19
static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_desc *rx_desc,
				      struct skb_frag_struct *skb_frags,
				      struct mlx4_en_rx_alloc *page_alloc,
				      unsigned int length)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sk_buff *skb;
	void *va;
	int used_frags;
	dma_addr_t dma;

	skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN);
	if (!skb) {
		en_dbg(RX_ERR, priv, "Failed allocating skb\n");
		return NULL;
	}
	skb->dev = priv->dev;
	skb_reserve(skb, NET_IP_ALIGN);
	skb->len = length;
	skb->truesize = length + sizeof(struct sk_buff);

	/* Get pointer to first fragment so we could copy the headers into the
	 * (linear part of the) skb */
	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;

	if (length <= SMALL_PACKET_SIZE) {
		/* We are copying all relevant data to the skb - temporarily
		 * synch buffers for the copy */
		dma = be64_to_cpu(rx_desc->data[0].addr);
		dma_sync_single_for_cpu(&mdev->pdev->dev, dma, length,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, length);
		dma_sync_single_for_device(&mdev->pdev->dev, dma, length,
					   DMA_FROM_DEVICE);
		skb->tail += length;
	} else {

		/* Move relevant fragments to skb */
		used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
						      skb_shinfo(skb)->frags,
						      page_alloc, length);
		if (unlikely(!used_frags)) {
			kfree_skb(skb);
			return NULL;
		}
		skb_shinfo(skb)->nr_frags = used_frags;

		/* Copy headers into the skb linear buffer */
		memcpy(skb->data, va, HEADER_COPY_SIZE);
		skb->tail += HEADER_COPY_SIZE;

		/* Skip headers in first fragment */
		skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE;

		/* Adjust size of first fragment */
		skb_shinfo(skb)->frags[0].size -= HEADER_COPY_SIZE;
		skb->data_len = length - HEADER_COPY_SIZE;
	}
	return skb;
}
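The small-packet branch above illustrates a common receive optimisation: the buffer stays mapped and device-owned, and the CPU only borrows it around the copy. Stripped down to a hedged sketch (the device, buffer and skb handling are assumptions):

/* Hypothetical copybreak: borrow a device-owned Rx buffer just long enough
 * to copy a small frame out, then hand it straight back for reuse. */
static struct sk_buff *demo_rx_copybreak(struct device *dev,
					 struct net_device *netdev,
					 dma_addr_t buf_dma, void *buf_cpu,
					 unsigned int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(netdev, len + NET_IP_ALIGN);
	if (!skb)
		return NULL;
	skb_reserve(skb, NET_IP_ALIGN);

	dma_sync_single_for_cpu(dev, buf_dma, len, DMA_FROM_DEVICE);
	memcpy(skb_put(skb, len), buf_cpu, len);
	dma_sync_single_for_device(dev, buf_dma, len, DMA_FROM_DEVICE);

	return skb;
}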
Example #20
static int omap_8250_tx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma		*dma = p->dma;
	struct omap8250_priv		*priv = p->port.private_data;
	struct circ_buf			*xmit = &p->port.state->xmit;
	struct dma_async_tx_descriptor	*desc;
	unsigned int	skip_byte = 0;
	int ret;

	if (dma->tx_running)
		return 0;
	if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {

		/*
		 * Even if no data, we need to return an error for the two cases
		 * below so serial8250_tx_chars() is invoked and properly clears
		 * THRI and/or runtime suspend.
		 */
		if (dma->tx_err || p->capabilities & UART_CAP_RPM) {
			ret = -EBUSY;
			goto err;
		}
		if (p->ier & UART_IER_THRI) {
			p->ier &= ~UART_IER_THRI;
			serial_out(p, UART_IER, p->ier);
		}
		return 0;
	}

	dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
	if (priv->habit & OMAP_DMA_TX_KICK) {
		u8 tx_lvl;

		/*
		 * We need to put the first byte into the FIFO in order to start
		 * the DMA transfer. For transfers smaller than four bytes we
		 * don't bother doing DMA at all. It seems not to matter whether
		 * there are still bytes in the FIFO from the last transfer (in
		 * case we got here directly from omap_8250_dma_tx_complete()):
		 * bytes leaving the FIFO seem not to trigger the DMA transfer;
		 * it is really the byte that we put into the FIFO.
		 * If the FIFO is already full then we most likely got here from
		 * omap_8250_dma_tx_complete(), which means the DMA engine has
		 * just completed its work. We don't have to wait the full
		 * 86us at 115200,8n1, only around 60us (not to mention lower
		 * baud rates). So in that case we take the interrupt and try
		 * again with an empty FIFO.
		 */
		tx_lvl = serial_in(p, UART_OMAP_TX_LVL);
		if (tx_lvl == p->tx_loadsz) {
			ret = -EBUSY;
			goto err;
		}
		if (dma->tx_size < 4) {
			ret = -EINVAL;
			goto err;
		}
		skip_byte = 1;
	}

	desc = dmaengine_prep_slave_single(dma->txchan,
			dma->tx_addr + xmit->tail + skip_byte,
			dma->tx_size - skip_byte, DMA_MEM_TO_DEV,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EBUSY;
		goto err;
	}

	dma->tx_running = 1;

	desc->callback = omap_8250_dma_tx_complete;
	desc->callback_param = p;

	dma->tx_cookie = dmaengine_submit(desc);

	dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
				   UART_XMIT_SIZE, DMA_TO_DEVICE);

	dma_async_issue_pending(dma->txchan);
	if (dma->tx_err)
		dma->tx_err = 0;

	if (p->ier & UART_IER_THRI) {
		p->ier &= ~UART_IER_THRI;
		serial_out(p, UART_IER, p->ier);
	}
	if (skip_byte)
		serial_out(p, UART_TX, xmit->buf[xmit->tail]);
	return 0;
err:
	dma->tx_err = 1;
	return ret;
}
Example #21
static int
dwmci_cmd(struct mci_host *mci, struct mci_cmd *cmd, struct mci_data *data)
{
	struct dwmci_host *host = to_dwmci_host(mci);
	int flags = 0;
	uint32_t mask;
	uint32_t ctrl;
	uint64_t start;
	int ret;
	unsigned int num_bytes = 0;

	start = get_time_ns();
	while (1) {
		if (!(dwmci_readl(host, DWMCI_STATUS) & DWMCI_STATUS_BUSY))
			break;

		if (is_timeout(start, 100 * MSECOND)) {
			dev_dbg(host->dev, "Timeout on data busy\n");
			return -ETIMEDOUT;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);

	if (data) {
		num_bytes = data->blocks * data->blocksize;

		if (data->flags & MMC_DATA_WRITE)
			dma_sync_single_for_device((unsigned long)data->src,
						   num_bytes, DMA_TO_DEVICE);
		else
			dma_sync_single_for_device((unsigned long)data->dest,
						   num_bytes, DMA_FROM_DEVICE);

		ret = dwmci_prepare_data(host, data);
		if (ret)
			return ret;
	}

	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);

	if (data)
		flags = dwmci_set_transfer_mode(host, data);

	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
		return -EINVAL;

	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		flags |= DWMCI_CMD_ABORT_STOP;
	else
		flags |= DWMCI_CMD_PRV_DAT_WAIT;

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		flags |= DWMCI_CMD_RESP_EXP;
		if (cmd->resp_type & MMC_RSP_136)
			flags |= DWMCI_CMD_RESP_LENGTH;
	}

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= DWMCI_CMD_CHECK_CRC;

	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);

	dev_dbg(host->dev, "Sending CMD%d\n", cmd->cmdidx);

	dwmci_writel(host, DWMCI_CMD, flags);

	start = get_time_ns();
	while (1) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		if (mask & DWMCI_INTMSK_CDONE) {
			if (!data)
				dwmci_writel(host, DWMCI_RINTSTS, mask);
			break;
		}
		if (is_timeout(start, 100 * MSECOND)) {
			dev_dbg(host->dev, "Send command timeout..\n");
			return -ETIMEDOUT;
		}
	}

	if (mask & DWMCI_INTMSK_RTO) {
		dev_dbg(host->dev, "Response Timeout..\n");
		return -ETIMEDOUT;
	} else if (mask & DWMCI_INTMSK_RE) {
		dev_dbg(host->dev, "Response Error..\n");
		return -EIO;
	}

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		if (cmd->resp_type & MMC_RSP_136) {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
		} else {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
		}
	}

	if (data) {
		start = get_time_ns();
		do {
			mask = dwmci_readl(host, DWMCI_RINTSTS);

			if (mask & (DWMCI_DATA_ERR)) {
				dev_dbg(host->dev, "DATA ERROR!\n");
				return -EIO;
			}

			if (!dwmci_use_pio(host) && (mask & DWMCI_DATA_TOUT)) {
				dev_dbg(host->dev, "DATA TIMEOUT!\n");
				return -EIO;
			}

			if (is_timeout(start, SECOND * 10)) {
				dev_dbg(host->dev, "Data timeout\n");
				return -ETIMEDOUT;
			}

			if (dwmci_use_pio(host) && (mask & DWMCI_INTMSK_RXDR)) {
				dwmci_read_data_pio(host, data);
				mask = dwmci_readl(host, DWMCI_RINTSTS);
			}
			if (dwmci_use_pio(host) && (mask & DWMCI_INTMSK_TXDR)) {
				dwmci_write_data_pio(host, data);
				mask = dwmci_readl(host, DWMCI_RINTSTS);
			}
		} while (!(mask & DWMCI_INTMSK_DTO));

		dwmci_writel(host, DWMCI_RINTSTS, mask);

		if (!dwmci_use_pio(host)) {
			ctrl = dwmci_readl(host, DWMCI_CTRL);
			ctrl &= ~(DWMCI_DMA_EN);
			dwmci_writel(host, DWMCI_CTRL, ctrl);

			if (data->flags & MMC_DATA_WRITE)
				dma_sync_single_for_cpu((unsigned long)data->src,
							num_bytes, DMA_TO_DEVICE);
			else
				dma_sync_single_for_cpu((unsigned long)data->dest,
							num_bytes, DMA_FROM_DEVICE);
		}
	}

	udelay(100);

	return 0;
}
Example #22
/*-----------------------------------------------------------------------------
 * Function: hdcp_lib_step2
 *-----------------------------------------------------------------------------
 */
int hdcp_lib_step2(void)
{
	/* HDCP authentication steps:
	 *   1) Disable auto Ri check
	 *   2) DDC: read BStatus (nb of devices, MAX_DEVS/MAX_CASC flags)
	 */

	u8 bstatus[2];
	int status = HDCP_OK;

	DBG("hdcp_lib_step2() %u", jiffies_to_msecs(jiffies));

#ifdef _9032_AUTO_RI_
	/* Disable Auto Ri */
	hdcp_lib_auto_ri_check(false);
#endif

	/* DDC: Read Bstatus (1st byte) from Rx */
	if (hdcp_ddc_read(DDC_BSTATUS_LEN, DDC_BSTATUS_ADDR, bstatus))
		return -HDCP_DDC_ERROR;

	/* Get KSV list size */
	DBG("KSV list size: %d", bstatus[0] & DDC_BSTATUS0_DEV_COUNT);
	sha_input.byte_counter = (bstatus[0] & DDC_BSTATUS0_DEV_COUNT) * 5;

	/* Check BStatus topology errors */
	if (bstatus[0] & DDC_BSTATUS0_MAX_DEVS) {
		DBG("MAX_DEV_EXCEEDED set");
		return -HDCP_AUTH_FAILURE;
	}

	if (bstatus[1] & DDC_BSTATUS1_MAX_CASC) {
		DBG("MAX_CASCADE_EXCEEDED set");
		return -HDCP_AUTH_FAILURE;
	}

	DBG("Retrieving KSV list...");

	/* Clear all SHA input data */
	/* TODO: should be done earlier at HDCP init */
	memset(sha_input.data, 0, MAX_SHA_DATA_SIZE);

	if (hdcp.pending_disable)
		return -HDCP_CANCELLED_AUTH;

	/* DDC: read KSV list */
	if (sha_input.byte_counter) {
		if (hdcp_ddc_read(sha_input.byte_counter, DDC_KSV_FIFO_ADDR,
				  (u8 *)&sha_input.data))
			return -HDCP_DDC_ERROR;
	}

	/* Read and add Bstatus */
	if (hdcp_lib_sha_bstatus(&sha_input))
		return -HDCP_DDC_ERROR;

	if (hdcp.pending_disable)
		return -HDCP_CANCELLED_AUTH;

	/* Read V' */
	if (hdcp_ddc_read(DDC_V_LEN, DDC_V_ADDR, sha_input.vprime))
		return -HDCP_DDC_ERROR;

	if (hdcp.pending_disable)
		return -HDCP_CANCELLED_AUTH;

	/* clear sha_input values in cache*/
	dma_sync_single_for_device(NULL,
				   __pa((u32)(&sha_input)),
				   sizeof(struct hdcp_sha_in),
				   DMA_TO_DEVICE);

	status = omap4_secure_dispatcher(PPA_SERVICE_HDCP_CHECK_V,
					FLAG_START_CRITICAL,
					1, __pa((u32)&sha_input), 0, 0, 0);
	/* Wait for user space */
	if (status) {
		printk(KERN_ERR "HDCP: omap4_secure_dispatcher CHECK_V error "
				"%d\n", status);
		return -HDCP_AUTH_FAILURE;
	}

	if (status == HDCP_OK) {
		/* Re-enable Ri check */
#ifdef _9032_AUTO_RI_
		hdcp_lib_auto_ri_check(true);
#endif
	}

	return status;
}
Example #23
static int aes_dma_start(struct aes_hwa_ctx *ctx)
{
	int err, fast = 0, in, out;
	size_t count;
	dma_addr_t addr_in, addr_out;
	struct omap_dma_channel_params dma_params;
	struct tf_crypto_aes_operation_state *state =
		crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(ctx->req));
	static size_t last_count;
	unsigned long flags;

	in = IS_ALIGNED((u32)ctx->in_sg->offset, sizeof(u32));
	out = IS_ALIGNED((u32)ctx->out_sg->offset, sizeof(u32));

	fast = in && out;

	if (fast) {
		count = min(ctx->total, sg_dma_len(ctx->in_sg));
		count = min(count, sg_dma_len(ctx->out_sg));

		if (count != ctx->total)
			return -EINVAL;

		/* Only call dma_map_sg if it has not yet been done */
		if (!(ctx->req->base.flags & CRYPTO_TFM_REQ_DMA_VISIBLE)) {
			err = dma_map_sg(NULL, ctx->in_sg, 1, DMA_TO_DEVICE);
			if (!err)
				return -EINVAL;

			err = dma_map_sg(NULL, ctx->out_sg, 1, DMA_FROM_DEVICE);
			if (!err) {
				dma_unmap_sg(
					NULL, ctx->in_sg, 1, DMA_TO_DEVICE);
				return -EINVAL;
			}
		}
		ctx->req->base.flags &= ~CRYPTO_TFM_REQ_DMA_VISIBLE;

		addr_in = sg_dma_address(ctx->in_sg);
		addr_out = sg_dma_address(ctx->out_sg);

		ctx->flags |= FLAGS_FAST;
	} else {
		count = sg_copy(&ctx->in_sg, &ctx->in_offset, ctx->buf_in,
			ctx->buflen, ctx->total, 0);
		addr_in = ctx->dma_addr_in;
		addr_out = ctx->dma_addr_out;

		ctx->flags &= ~FLAGS_FAST;
	}

	ctx->total -= count;

	/* Configure HWA */
	tf_crypto_enable_clock(PUBLIC_CRYPTO_AES1_CLOCK_REG);

	tf_aes_restore_registers(state, ctx->flags & FLAGS_ENCRYPT ? 1 : 0);

	OUTREG32(&paes_reg->AES_SYSCONFIG, INREG32(&paes_reg->AES_SYSCONFIG)
		| AES_SYSCONFIG_DMA_REQ_OUT_EN_BIT
		| AES_SYSCONFIG_DMA_REQ_IN_EN_BIT);

	ctx->dma_size = count;
	if (!fast)
		dma_sync_single_for_device(NULL, addr_in, count,
			DMA_TO_DEVICE);

	dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
	dma_params.frame_count = count / AES_BLOCK_SIZE;
	dma_params.elem_count = DMA_CEN_Elts_per_Frame_AES;
	dma_params.src_ei = 0;
	dma_params.src_fi = 0;
	dma_params.dst_ei = 0;
	dma_params.dst_fi = 0;
	dma_params.sync_mode = OMAP_DMA_SYNC_FRAME;
	dma_params.read_prio = 0;
	dma_params.write_prio = 0;

	/* IN */
	dma_params.trigger = ctx->dma_in;
	dma_params.src_or_dst_synch = OMAP_DMA_DST_SYNC;
	dma_params.dst_start = AES1_REGS_HW_ADDR + 0x60;
	dma_params.dst_amode = OMAP_DMA_AMODE_CONSTANT;
	dma_params.src_start = addr_in;
	dma_params.src_amode = OMAP_DMA_AMODE_POST_INC;

	if (reconfigure_dma) {
		omap_set_dma_params(ctx->dma_lch_in, &dma_params);
		omap_set_dma_dest_burst_mode(ctx->dma_lch_in,
			OMAP_DMA_DATA_BURST_8);
		omap_set_dma_src_burst_mode(ctx->dma_lch_in,
			OMAP_DMA_DATA_BURST_8);
		omap_set_dma_src_data_pack(ctx->dma_lch_in, 1);
	} else {
		if (last_count != count)
			omap_set_dma_transfer_params(ctx->dma_lch_in,
				dma_params.data_type,
				dma_params.elem_count, dma_params.frame_count,
				dma_params.sync_mode, dma_params.trigger,
				dma_params.src_or_dst_synch);

		/* Configure input start address */
		__raw_writel(dma_params.src_start,
			omap_dma_base + (0x60 * (ctx->dma_lch_in) + 0x9c));
	}

	/* OUT */
	dma_params.trigger = ctx->dma_out;
	dma_params.src_or_dst_synch = OMAP_DMA_SRC_SYNC;
	dma_params.src_start = AES1_REGS_HW_ADDR + 0x60;
	dma_params.src_amode = OMAP_DMA_AMODE_CONSTANT;
	dma_params.dst_start = addr_out;
	dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC;

	if (reconfigure_dma) {
		omap_set_dma_params(ctx->dma_lch_out, &dma_params);
		omap_set_dma_dest_burst_mode(ctx->dma_lch_out,
			OMAP_DMA_DATA_BURST_8);
		omap_set_dma_src_burst_mode(ctx->dma_lch_out,
			OMAP_DMA_DATA_BURST_8);
		omap_set_dma_dest_data_pack(ctx->dma_lch_out, 1);
		reconfigure_dma = false;
	} else {
		if (last_count != count) {
			omap_set_dma_transfer_params(ctx->dma_lch_out,
				dma_params.data_type,
				dma_params.elem_count, dma_params.frame_count,
				dma_params.sync_mode, dma_params.trigger,
				dma_params.src_or_dst_synch);
			last_count = count;
		}
		/* Configure output start address */
		__raw_writel(dma_params.dst_start,
			omap_dma_base + (0x60 * (ctx->dma_lch_out) + 0xa0));
	}

	/* Is this really needed? */
	omap_enable_dma_irq(ctx->dma_lch_in, OMAP_DMA_BLOCK_IRQ);
	omap_enable_dma_irq(ctx->dma_lch_out, OMAP_DMA_BLOCK_IRQ);

	wmb();

	omap_start_dma(ctx->dma_lch_in);
	omap_start_dma(ctx->dma_lch_out);

	spin_lock_irqsave(&ctx->lock, flags);
	if (ctx->next_req) {
		struct ablkcipher_request *req =
			ablkcipher_request_cast(ctx->next_req);

		if (!(ctx->next_req->flags & CRYPTO_TFM_REQ_DMA_VISIBLE)) {
			err = dma_map_sg(NULL, req->src, 1, DMA_TO_DEVICE);
			if (!err) {
				/* Silently fail for now... */
				spin_unlock_irqrestore(&ctx->lock, flags);
				return 0;
			}

			err = dma_map_sg(NULL, req->dst, 1, DMA_FROM_DEVICE);
			if (!err) {
				dma_unmap_sg(NULL, req->src, 1, DMA_TO_DEVICE);
				/* Silently fail for now... */
				spin_unlock_irqrestore(&ctx->lock, flags);
				return 0;
			}

			ctx->next_req->flags |= CRYPTO_TFM_REQ_DMA_VISIBLE;
			ctx->next_req = NULL;
		}
	}

	if (ctx->backlog) {
		ctx->backlog->complete(ctx->backlog, -EINPROGRESS);
		ctx->backlog = NULL;
	}
	spin_unlock_irqrestore(&ctx->lock, flags);

	return 0;
}
Example #24
/*
 * extract packet from rx queue
 */
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int processed;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;
	processed = 0;

	/* don't scan ring further than number of refilled
	 * descriptors */
	if (budget > priv->rx_desc_count)
		budget = priv->rx_desc_count;

	do {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;
		int desc_idx;
		u32 len_stat;
		unsigned int len;

		desc_idx = priv->rx_curr_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		/* make sure we actually read the descriptor status at
		 * each loop */
		rmb();

		len_stat = desc->len_stat;

		/* break if dma ownership belongs to hw */
		if (len_stat & DMADESC_OWNER_MASK)
			break;

		processed++;
		priv->rx_curr_desc++;
		if (priv->rx_curr_desc == priv->rx_ring_size)
			priv->rx_curr_desc = 0;
		priv->rx_desc_count--;

		/* if the packet does not have start of packet _and_
		 * end of packet flag set, then just recycle it */
		if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
			priv->stats.rx_dropped++;
			continue;
		}

		/* recycle packet if it's marked as bad */
		if (unlikely(len_stat & DMADESC_ERR_MASK)) {
			priv->stats.rx_errors++;

			if (len_stat & DMADESC_OVSIZE_MASK)
				priv->stats.rx_length_errors++;
			if (len_stat & DMADESC_CRC_MASK)
				priv->stats.rx_crc_errors++;
			if (len_stat & DMADESC_UNDER_MASK)
				priv->stats.rx_frame_errors++;
			if (len_stat & DMADESC_OV_MASK)
				priv->stats.rx_fifo_errors++;
			continue;
		}

		/* valid packet */
		skb = priv->rx_skb[desc_idx];
		len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
		/* don't include FCS */
		len -= 4;

		if (len < copybreak) {
			struct sk_buff *nskb;

			nskb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
			if (!nskb) {
				/* forget packet, just rearm desc */
				priv->stats.rx_dropped++;
				continue;
			}

			/* since we're copying the data, we can align
			 * them properly */
			skb_reserve(nskb, NET_IP_ALIGN);
			dma_sync_single_for_cpu(kdev, desc->address,
						len, DMA_FROM_DEVICE);
			memcpy(nskb->data, skb->data, len);
			dma_sync_single_for_device(kdev, desc->address,
						   len, DMA_FROM_DEVICE);
			skb = nskb;
		} else {
			dma_unmap_single(&priv->pdev->dev, desc->address,
					 priv->rx_skb_size, DMA_FROM_DEVICE);
			priv->rx_skb[desc_idx] = NULL;
		}

		skb_put(skb, len);
		skb->dev = dev;
		skb->protocol = eth_type_trans(skb, dev);
		priv->stats.rx_packets++;
		priv->stats.rx_bytes += len;
		dev->last_rx = jiffies;
		netif_receive_skb(skb);

	} while (--budget > 0);

	if (processed || !priv->rx_desc_count) {
		bcm_enet_refill_rx(dev);

		/* kick rx dma */
		enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
				ENETDMA_CHANCFG_REG(priv->rx_chan));
	}

	return processed;
}
Example #25
int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
{
	int ret;
	u32 num_words;
	struct qman_fq *new_fq, *old_fq;
	struct device *qidev = drv_ctx->qidev;

	num_words = desc_len(sh_desc);
	if (num_words > MAX_SDLEN) {
		dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
		return -EINVAL;
	}

	/* Note down older req FQ */
	old_fq = drv_ctx->req_fq;

	/* Create a new req FQ in parked state */
	new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
				    drv_ctx->context_a, 0);
	if (unlikely(IS_ERR_OR_NULL(new_fq))) {
		dev_err(qidev, "FQ allocation for shdesc update failed\n");
		return PTR_ERR(new_fq);
	}

	/* Hook up new FQ to context so that new requests keep queuing */
	drv_ctx->req_fq = new_fq;

	/* Empty and remove the older FQ */
	ret = empty_caam_fq(old_fq);
	if (ret) {
		dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);

		/* We can revert to older FQ */
		drv_ctx->req_fq = old_fq;

		if (kill_fq(qidev, new_fq))
			dev_warn(qidev, "New CAAM FQ: %u kill failed\n",
				 new_fq->fqid);

		return ret;
	}

	/*
	 * Re-initialise pre-header. Set RSLS and SDLEN.
	 * Update the shared descriptor for driver context.
	 */
	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
					   num_words);
	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
	dma_sync_single_for_device(qidev, drv_ctx->context_a,
				   sizeof(drv_ctx->sh_desc) +
				   sizeof(drv_ctx->prehdr),
				   DMA_BIDIRECTIONAL);

	/* Put the new FQ in scheduled state */
	ret = qman_schedule_fq(new_fq);
	if (ret) {
		dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);

		/*
		 * We can kill new FQ and revert to old FQ.
		 * Since the desc is already modified, it is success case
		 */

		drv_ctx->req_fq = old_fq;

		if (kill_fq(qidev, new_fq))
			dev_warn(qidev, "New CAAM FQ: %u kill failed\n",
				 new_fq->fqid);
	} else if (kill_fq(qidev, old_fq)) {
		dev_warn(qidev, "Old CAAM FQ: %u kill failed\n", old_fq->fqid);
	}

	return 0;
}
Example #26
/*
 * This function handles receipt of a packet from the network
 */
static int davinci_emac_recv(struct eth_device *edev)
{
	struct davinci_emac_priv *priv = edev->priv;
	void __iomem *rx_curr_desc, *curr_desc, *tail_desc;
	unsigned char *pkt;
	int status, len, ret = -1;

	dev_dbg(priv->dev, "+ emac_recv\n");

	rx_curr_desc = priv->emac_rx_active_head;
	status = readl(rx_curr_desc + EMAC_DESC_PKT_FLAG_LEN);
	if (status & EMAC_CPPI_OWNERSHIP_BIT) {
		ret = 0;
		goto out;
	}

	if (status & EMAC_CPPI_RX_ERROR_FRAME) {
		/* Error in packet - discard it and requeue desc */
		dev_warn(priv->dev, "WARN: emac_rcv_pkt: Error in packet\n");
	} else {
		pkt = (unsigned char *)readl(rx_curr_desc + EMAC_DESC_BUFFER);
		len = readl(rx_curr_desc + EMAC_DESC_BUFF_OFF_LEN) & 0xffff;
		dev_dbg(priv->dev, "| emac_recv got packet (length %i)\n", len);
		dma_sync_single_for_cpu((unsigned long)pkt, len, DMA_FROM_DEVICE);
		net_receive(edev, pkt, len);
		dma_sync_single_for_device((unsigned long)pkt, len, DMA_FROM_DEVICE);
		ret = len;
	}

	/* Ack received packet descriptor */
	writel(BD_TO_HW(rx_curr_desc), priv->adap_emac + EMAC_RX0CP);
	curr_desc = rx_curr_desc;
	priv->emac_rx_active_head = HW_TO_BD(readl(rx_curr_desc + EMAC_DESC_NEXT));

	if (status & EMAC_CPPI_EOQ_BIT) {
		if (priv->emac_rx_active_head) {
			writel(BD_TO_HW(priv->emac_rx_active_head),
				priv->adap_emac + EMAC_RX0HDP);
		} else {
			priv->emac_rx_queue_active = 0;
			dev_info(priv->dev, "INFO:emac_rcv_packet: RX Queue not active\n");
		}
	}

	/* Recycle RX descriptor */
	writel(EMAC_MAX_ETHERNET_PKT_SIZE, rx_curr_desc + EMAC_DESC_BUFF_OFF_LEN);
	writel(EMAC_CPPI_OWNERSHIP_BIT, rx_curr_desc + EMAC_DESC_PKT_FLAG_LEN);
	writel(0, rx_curr_desc + EMAC_DESC_NEXT);

	if (priv->emac_rx_active_head == 0) {
		dev_info(priv->dev, "INFO: emac_rcv_pkt: active queue head = 0\n");
		priv->emac_rx_active_head = curr_desc;
		priv->emac_rx_active_tail = curr_desc;
		if (priv->emac_rx_queue_active != 0) {
			writel(BD_TO_HW(priv->emac_rx_active_head), priv->adap_emac + EMAC_RX0HDP);
			dev_info(priv->dev, "INFO: emac_rcv_pkt: active queue head = 0, HDP fired\n");
			priv->emac_rx_queue_active = 1;
		}
	} else {
		tail_desc = priv->emac_rx_active_tail;
		priv->emac_rx_active_tail = curr_desc;
		writel(BD_TO_HW(curr_desc), tail_desc + EMAC_DESC_NEXT);
		status = readl(tail_desc + EMAC_DESC_PKT_FLAG_LEN);
		if (status & EMAC_CPPI_EOQ_BIT) {
			writel(BD_TO_HW(curr_desc), priv->adap_emac + EMAC_RX0HDP);
			status &= ~EMAC_CPPI_EOQ_BIT;
			writel(status, tail_desc + EMAC_DESC_PKT_FLAG_LEN);
		}
	}

out:
	dev_dbg(priv->dev, "- emac_recv\n");

	return ret;
}
Example #27
/* Issue command and read its response.
 * Returns zero on success, negative for error.
 *
 * On error, caller must cope with mmc core retry mechanism.  That
 * means immediate low-level resubmit, which affects the bus lock...
 */
static int
mmc_spi_command_send(struct mmc_spi_host *host,
		struct mmc_request *mrq,
		struct mmc_command *cmd, int cs_on)
{
	struct scratch		*data = host->data;
	u8			*cp = data->status;
	u32			arg = cmd->arg;
	int			status;
	struct spi_transfer	*t;

	/* We can handle most commands (except block reads) in one full
	 * duplex I/O operation before either starting the next transfer
	 * (data block or command) or else deselecting the card.
	 *
	 * First, write 7 bytes:
	 *  - an all-ones byte to ensure the card is ready
	 *  - opcode byte (plus start and transmission bits)
	 *  - four bytes of big-endian argument
	 *  - crc7 (plus end bit) ... always computed, it's cheap
	 *
	 * We init the whole buffer to all-ones, which is what we need
	 * to write while we're reading (later) response data.
	 */
	memset(cp++, 0xff, sizeof(data->status));

	*cp++ = 0x40 | cmd->opcode;
	*cp++ = (u8)(arg >> 24);
	*cp++ = (u8)(arg >> 16);
	*cp++ = (u8)(arg >> 8);
	*cp++ = (u8)arg;
	*cp++ = (crc7(0, &data->status[1], 5) << 1) | 0x01;

	/* Then, read up to 13 bytes (while writing all-ones):
	 *  - N(CR) (== 1..8) bytes of all-ones
	 *  - status byte (for all response types)
	 *  - the rest of the response, either:
	 *      + nothing, for R1 or R1B responses
	 *	+ second status byte, for R2 responses
	 *	+ four data bytes, for R3 and R7 responses
	 *
	 * Finally, read some more bytes ... in the nice cases we know in
	 * advance how many, and reading 1 more is always OK:
	 *  - N(EC) (== 0..N) bytes of all-ones, before deselect/finish
	 *  - N(RC) (== 1..N) bytes of all-ones, before next command
	 *  - N(WR) (== 1..N) bytes of all-ones, before data write
	 *
	 * So in those cases one full duplex I/O of at most 21 bytes will
	 * handle the whole command, leaving the card ready to receive a
	 * data block or new command.  We do that whenever we can, shaving
	 * CPU and IRQ costs (especially when using DMA or FIFOs).
	 *
	 * There are two other cases, where it's not generally practical
	 * to rely on a single I/O:
	 *
	 *  - R1B responses need at least N(EC) bytes of all-zeroes.
	 *
	 *    In this case we can *try* to fit it into one I/O, then
	 *    maybe read more data later.
	 *
	 *  - Data block reads are more troublesome, since a variable
	 *    number of padding bytes precede the token and data.
	 *      + N(CX) (== 0..8) bytes of all-ones, before CSD or CID
	 *      + N(AC) (== 1..many) bytes of all-ones
	 *
	 *    In this case we currently only have minimal speedups here:
	 *    when N(CR) == 1 we can avoid I/O in response_get().
	 */
	if (cs_on && (mrq->data->flags & MMC_DATA_READ)) {
		cp += 2;	/* min(N(CR)) + status */
		/* R1 */
	} else {
		cp += 10;	/* max(N(CR)) + status + min(N(RC),N(WR)) */
		if (cmd->flags & MMC_RSP_SPI_S2)	/* R2/R5 */
			cp++;
		else if (cmd->flags & MMC_RSP_SPI_B4)	/* R3/R4/R7 */
			cp += 4;
		else if (cmd->flags & MMC_RSP_BUSY)	/* R1B */
			cp = data->status + sizeof(data->status);
		/* else:  R1 (most commands) */
	}

	dev_dbg(&host->spi->dev, "  mmc_spi: CMD%d, resp %s\n",
		cmd->opcode, maptype(cmd));

	/* send command, leaving chipselect active */
	spi_message_init(&host->m);

	t = &host->t;
	memset(t, 0, sizeof(*t));
	t->tx_buf = t->rx_buf = data->status;
	t->tx_dma = t->rx_dma = host->data_dma;
	t->len = cp - data->status;
	t->cs_change = 1;
	spi_message_add_tail(t, &host->m);

	if (host->dma_dev) {
		host->m.is_dma_mapped = 1;
		dma_sync_single_for_device(host->dma_dev,
				host->data_dma, sizeof(*host->data),
				DMA_BIDIRECTIONAL);
	}
	status = spi_sync(host->spi, &host->m);

	if (host->dma_dev)
		dma_sync_single_for_cpu(host->dma_dev,
				host->data_dma, sizeof(*host->data),
				DMA_BIDIRECTIONAL);
	if (status < 0) {
		dev_dbg(&host->spi->dev, "  ... write returned %d\n", status);
		cmd->error = status;
		return status;
	}

	/* after no-data commands and STOP_TRANSMISSION, chipselect off */
	return mmc_spi_response_get(host, cmd, cs_on);
}
Example #28
static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			    uint8_t *buf, int oob_required, int page)
{
	unsigned int max_bitflips = 0;
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	dma_addr_t addr = (unsigned long)denali->buf.buf;
	size_t size = denali->mtd.writesize + denali->mtd.oobsize;

	uint32_t irq_status;
	uint32_t irq_mask = denali->have_hw_ecc_fixup ?
		(INTR_STATUS__DMA_CMD_COMP) :
		(INTR_STATUS__ECC_TRANSACTION_DONE | INTR_STATUS__ECC_ERR);
	bool check_erased_page = false;

	if (page != denali->page) {
		dev_err(denali->dev,
			"IN %s: page %d is not equal to denali->page %d",
			__func__, page, denali->page);
		BUG();
	}

	setup_ecc_for_xfer(denali, true, false);

	denali_enable_dma(denali, true);
	dma_sync_single_for_device(addr, size, DMA_FROM_DEVICE);

	clear_interrupts(denali);
	denali_setup_dma(denali, DENALI_READ);

	/* wait for operation to complete */
	irq_status = wait_for_irq(denali, irq_mask);

	dma_sync_single_for_cpu(addr, size, DMA_FROM_DEVICE);

	memcpy(buf, denali->buf.buf, mtd->writesize);

	check_erased_page = handle_ecc(denali, buf, irq_status, &max_bitflips);
	denali_enable_dma(denali, false);

	if (check_erased_page) {
		if (denali->have_hw_ecc_fixup) {
			/* When we have hw ecc fixup, don't check oob.
			 * That code below looks jacked up anyway.  I mean,
			 * look at it, wtf? */
			if (!is_erased(buf, denali->mtd.writesize))
				denali->mtd.ecc_stats.failed++;
		} else {
			read_oob_data(&denali->mtd, chip->oob_poi,
				denali->page);

			/* check ECC failures that may have occurred on
			 * erased pages */
			if (check_erased_page) {
				if (!is_erased(buf, denali->mtd.writesize))
					denali->mtd.ecc_stats.failed++;
				if (!is_erased(buf, denali->mtd.oobsize))
					denali->mtd.ecc_stats.failed++;
			}
		}
	}
	return max_bitflips;
}
Example #29
/**
 * This function is used to submit an I/O Request to an EP.
 *
 *	- When the request completes the request's completion callback
 *	  is called to return the request to the driver.
 *	- An EP, except control EPs, may have multiple requests
 *	  pending.
 *	- Once submitted the request cannot be examined or modified.
 *	- Each request is turned into one or more packets.
 *	- A BULK EP can queue any amount of data; the transfer is
 *	  packetized.
 *	- Zero length Packets are specified with the request 'zero'
 *	  flag.
 */
static int dwc_otg_pcd_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
				gfp_t _gfp_flags)
{
	int prevented = 0;
	dwc_otg_pcd_request_t * req;
	dwc_otg_pcd_ep_t * ep;
	dwc_otg_pcd_t * pcd;
	unsigned long flags = 0;

	DWC_DEBUGPL(DBG_PCDV, "%s(%p,%p,%d)\n", __func__, _ep, _req,
		      _gfp_flags);
	req = container_of(_req, dwc_otg_pcd_request_t, req);
	if (!_req || !_req->complete || !_req->buf
	     || !list_empty(&req->queue)) {
		DWC_WARN("%s, bad params\n", __func__);
		return -EINVAL;
	}
	ep = container_of(_ep, dwc_otg_pcd_ep_t, ep);
	if (!_ep || (!ep->desc && ep->dwc_ep.num != 0)) {
		DWC_WARN("%s, bad ep\n", __func__);
		return -EINVAL;
	}
	pcd = ep->pcd;
	if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) {
		DWC_DEBUGPL(DBG_PCDV, "gadget.speed=%d\n", pcd->gadget.speed);
		DWC_WARN("%s, bogus device state\n", __func__);
		return -ESHUTDOWN;
	}
	DWC_DEBUGPL(DBG_PCD, "%s queue req %p, len %d buf %p\n", _ep->name,
		       _req, _req->length, _req->buf);
	if (!GET_CORE_IF(pcd)->core_params->opt) {
		if (ep->dwc_ep.num != 0) {
			DWC_ERROR("%s queue req %p, len %d buf %p\n",
				   _ep->name, _req, _req->length, _req->buf);
		}
	}
	SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags);

#if defined(DEBUG) && defined(VERBOSE)
	dump_msg(_req->buf, _req->length);
#endif
	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/*
	 * For EP0 IN without premature status, zlp is required?
	 */
	if (ep->dwc_ep.num == 0 && ep->dwc_ep.is_in) {
		DWC_DEBUGPL(DBG_PCDV, "%s-OUT ZLP\n", _ep->name);
	    //_req->zero = 1;
	}

	/* map virtual address to hardware */
	if (_req->dma == DMA_ADDR_INVALID) {
		_req->dma = dma_map_single(ep->pcd->gadget.dev.parent,
					  _req->buf,
					  _req->length,
					  ep->dwc_ep.is_in
					  ? DMA_TO_DEVICE :
					  DMA_FROM_DEVICE);
		req->mapped = 1;
	} else {
		dma_sync_single_for_device(ep->pcd->gadget.dev.parent,
					   _req->dma, _req->length,
					   ep->dwc_ep.is_in
					   ? DMA_TO_DEVICE :
					   DMA_FROM_DEVICE);
		req->mapped = 0;
	}

	/* Start the transfer */
	if (list_empty(&ep->queue) && !ep->stopped) {
		/* EP0 Transfer? */
		if (ep->dwc_ep.num == 0) {
			switch (pcd->ep0state) {
			case EP0_IN_DATA_PHASE:
				DWC_DEBUGPL(DBG_PCD, "%s ep0: EP0_IN_DATA_PHASE\n",
					     __func__);
				break;
			case EP0_OUT_DATA_PHASE:
				DWC_DEBUGPL(DBG_PCD, "%s ep0: EP0_OUT_DATA_PHASE\n",
					     __func__);
				if (pcd->request_config) {
					/* Complete STATUS PHASE */
					ep->dwc_ep.is_in = 1;
					pcd->ep0state = EP0_STATUS;
				}
				break;
			default:
				DWC_DEBUGPL(DBG_ANY, "ep0: odd state %d\n",
					     pcd->ep0state);
				SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags);
				return -EL2HLT;
			}
			ep->dwc_ep.dma_addr = _req->dma;
			ep->dwc_ep.start_xfer_buff = _req->buf;
			ep->dwc_ep.xfer_buff = _req->buf;
			ep->dwc_ep.xfer_len = _req->length;
			ep->dwc_ep.xfer_count = 0;
			ep->dwc_ep.sent_zlp = 0;
			ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;
			dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd),
						    &ep->dwc_ep);
		} else {
			/* Setup and start the Transfer */
			ep->dwc_ep.dma_addr = _req->dma;
			ep->dwc_ep.start_xfer_buff = _req->buf;
			ep->dwc_ep.xfer_buff = _req->buf;
			ep->dwc_ep.xfer_len = _req->length;
			ep->dwc_ep.xfer_count = 0;
			ep->dwc_ep.sent_zlp = 0;
			ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;
			dwc_otg_ep_start_transfer(GET_CORE_IF(pcd),
						   &ep->dwc_ep);
		}
	}
	if ((req != 0) || prevented) {
		++pcd->request_pending;
		list_add_tail(&req->queue, &ep->queue);
		if (ep->dwc_ep.is_in && ep->stopped
		     && !(GET_CORE_IF(pcd)->dma_enable)) {
			/** @todo NGS Create a function for this. */
			diepmsk_data_t diepmsk = {.d32 = 0};
			diepmsk.b.intktxfemp = 1;
			dwc_modify_reg32(&GET_CORE_IF(pcd)->dev_if->dev_global_regs->diepmsk, 0,
					  diepmsk.d32);
		}
	}
	SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags);
	return 0;
}
Example #30
File: bgmac.c Project: 7799/linux
static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
			     int weight)
{
	u32 end_slot;
	int handled = 0;

	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot -= ring->index_base;
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot /= sizeof(struct bgmac_dma_desc);

	ring->end = end_slot;

	while (ring->start != ring->end) {
		struct device *dma_dev = bgmac->core->dma_dev;
		struct bgmac_slot_info *slot = &ring->slots[ring->start];
		struct sk_buff *skb = slot->skb;
		struct bgmac_rx_header *rx;
		u16 len, flags;

		/* Sync buffer to make it accessible to the CPU */
		dma_sync_single_for_cpu(dma_dev, slot->dma_addr,
					BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

		/* Get info from the header */
		rx = (struct bgmac_rx_header *)skb->data;
		len = le16_to_cpu(rx->len);
		flags = le16_to_cpu(rx->flags);

		do {
			dma_addr_t old_dma_addr = slot->dma_addr;
			int err;

			/* Check for poison and drop or pass the packet */
			if (len == 0xdead && flags == 0xbeef) {
				bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
					  ring->start);
				dma_sync_single_for_device(dma_dev,
							   slot->dma_addr,
							   BGMAC_RX_BUF_SIZE,
							   DMA_FROM_DEVICE);
				break;
			}

			/* Omit CRC. */
			len -= ETH_FCS_LEN;

			/* Prepare new skb as replacement */
			err = bgmac_dma_rx_skb_for_slot(bgmac, slot);
			if (err) {
				/* Poison the old skb */
				rx->len = cpu_to_le16(0xdead);
				rx->flags = cpu_to_le16(0xbeef);

				dma_sync_single_for_device(dma_dev,
							   slot->dma_addr,
							   BGMAC_RX_BUF_SIZE,
							   DMA_FROM_DEVICE);
				break;
			}
			bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);

			/* Unmap old skb, we'll pass it to the netif */
			dma_unmap_single(dma_dev, old_dma_addr,
					 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

			skb_put(skb, BGMAC_RX_FRAME_OFFSET + len);
			skb_pull(skb, BGMAC_RX_FRAME_OFFSET);

			skb_checksum_none_assert(skb);
			skb->protocol = eth_type_trans(skb, bgmac->net_dev);
			netif_receive_skb(skb);
			handled++;
		} while (0);

		if (++ring->start >= BGMAC_RX_RING_SLOTS)
			ring->start = 0;

		if (handled >= weight) /* Should never be greater */
			break;
	}

	return handled;
}