Example #1
File: macb.c  Project: chendian/uboot
static inline void macb_invalidate_ring_desc(struct macb_device *macb, bool rx)
{
	if (rx)
		invalidate_dcache_range(macb->rx_ring_dma, macb->rx_ring_dma +
			MACB_RX_DMA_DESC_SIZE);
	else
		invalidate_dcache_range(macb->tx_ring_dma, macb->tx_ring_dma +
			MACB_TX_DMA_DESC_SIZE);
}
Example #2
/*
 * Routines to handle (flush/invalidate) the dcache for the QH and qTD
 * structures and data buffers. This is needed on platforms using this
 * EHCI support with dcache enabled.
 */
static void flush_invalidate(u32 addr, int size, int flush)
{
	if (flush)
		flush_dcache_range(addr, addr + size);
	else
		invalidate_dcache_range(addr, addr + size);
}
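For illustration, a minimal sketch of how such a helper is usually wrapped per structure; the wrapper names are hypothetical, and the qTD is assumed to be padded to a multiple of ARCH_DMA_MINALIGN so the range is cache-line aligned:

static void flush_qtd(struct qTD *qtd)
{
	/* write back the qTD before the controller reads it */
	flush_invalidate((u32)(uintptr_t)qtd, sizeof(*qtd), 1);
}

static void inval_qtd(struct qTD *qtd)
{
	/* discard stale cached copies before the CPU inspects it again */
	flush_invalidate((u32)(uintptr_t)qtd, sizeof(*qtd), 0);
}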
Example #3
File: rtl8169.c  Project: OpenNoah/u-boot
static void rtl_inval_buffer(void *buf, size_t size)
{
	unsigned long start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN(start + size, ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}
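The two-step rounding is needed because invalidate_dcache_range() generally requires cache-line-aligned bounds. A worked example with illustrative values, assuming ARCH_DMA_MINALIGN is 64:

	/* buf = 0x1004, size = 0x30 */
	start = 0x1004 & ~(64 - 1);       /* 0x1000: rounded down to a line boundary */
	end   = ALIGN(0x1000 + 0x30, 64); /* 0x1040: rounded up to the next boundary */
	/* the window [0x1000, 0x1040) covers the whole 0x30-byte buffer */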
Example #4
static int _dw_eth_send(struct dw_eth_dev *priv, void *packet, int length)
{
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	u32 desc_num = priv->tx_currdescnum;
	struct dmamacdescr *desc_p = &priv->tx_mac_descrtable[desc_num];
	ulong desc_start = (ulong)desc_p;
	ulong desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);
	ulong data_start = desc_p->dmamac_addr;
	ulong data_end = data_start + roundup(length, ARCH_DMA_MINALIGN);
	/*
	 * Strictly we only need to invalidate the "txrx_status" field
	 * for the following check, but on some platforms we cannot
	 * invalidate only 4 bytes, so we flush the entire descriptor,
	 * which is 16 bytes in total. This is safe because the
	 * individual descriptors in the array are each aligned to
	 * ARCH_DMA_MINALIGN and padded appropriately.
	 */
	invalidate_dcache_range(desc_start, desc_end);

	/* Check if the descriptor is owned by CPU */
	if (desc_p->txrx_status & DESC_TXSTS_OWNBYDMA) {
		printf("CPU not owner of tx frame\n");
		return -EPERM;
	}

	memcpy((void *)data_start, packet, length);

	/* Flush data to be sent */
	flush_dcache_range(data_start, data_end);

#if defined(CONFIG_DW_ALTDESCRIPTOR)
	desc_p->txrx_status |= DESC_TXSTS_TXFIRST | DESC_TXSTS_TXLAST;
	desc_p->dmamac_cntl |= (length << DESC_TXCTRL_SIZE1SHFT) &
			       DESC_TXCTRL_SIZE1MASK;

	desc_p->txrx_status &= ~(DESC_TXSTS_MSK);
	desc_p->txrx_status |= DESC_TXSTS_OWNBYDMA;
#else
	desc_p->dmamac_cntl |= ((length << DESC_TXCTRL_SIZE1SHFT) &
			       DESC_TXCTRL_SIZE1MASK) | DESC_TXCTRL_TXLAST |
			       DESC_TXCTRL_TXFIRST;

	desc_p->txrx_status = DESC_TXSTS_OWNBYDMA;
#endif

	/* Flush modified buffer descriptor */
	flush_dcache_range(desc_start, desc_end);

	/* Test the wrap-around condition. */
	if (++desc_num >= CONFIG_TX_DESCR_NUM)
		desc_num = 0;

	priv->tx_currdescnum = desc_num;

	/* Start the transmission */
	writel(POLL_DATA, &dma_p->txpolldemand);

	return 0;
}
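The comment's safety argument only holds if each descriptor occupies a whole number of cache lines. A hypothetical declaration that guarantees this (in C, aligning the element type also rounds its sizeof up to a multiple of that alignment):

/* Padding each element to ARCH_DMA_MINALIGN means invalidating one
 * descriptor can never discard a neighbour's dirty cache line. */
struct padded_desc {
	struct dmamacdescr d;
} __attribute__((aligned(ARCH_DMA_MINALIGN)));

static struct padded_desc tx_ring[CONFIG_TX_DESCR_NUM];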
Example #5
/*
 * make an area consistent.
 */
void consistent_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)vaddr;

	/* Convert start address back down to unshadowed memory region */
#ifdef CONFIG_XILINX_UNCACHED_SHADOW
	start &= ~UNCACHED_SHADOW_MASK;
#endif
	end = start + size;

	switch (direction) {
	case PCI_DMA_NONE:
		BUG();
	case PCI_DMA_FROMDEVICE:	/* invalidate only */
		invalidate_dcache_range(start, end);
		break;
	case PCI_DMA_TODEVICE:		/* writeback only */
		flush_dcache_range(start, end);
		break;
	case PCI_DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}
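A hypothetical call sequence for a driver using this routine (start_dma_rx and start_dma_tx are placeholders): sync FROMDEVICE before the device writes into a buffer, and TODEVICE before the device reads one:

	/* receive: drop cached lines so the CPU later rereads DRAM */
	consistent_sync(rx_buf, rx_len, PCI_DMA_FROMDEVICE);
	start_dma_rx(rx_buf, rx_len);

	/* transmit: write dirty lines back so the device sees them */
	consistent_sync(tx_buf, tx_len, PCI_DMA_TODEVICE);
	start_dma_tx(tx_buf, tx_len);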
Example #6
static int altera_tse_free_pkt(struct udevice *dev, uchar *packet,
			       int length)
{
	struct altera_tse_priv *priv = dev_get_priv(dev);
	struct alt_sgdma_descriptor *rx_desc = priv->rx_desc;
	unsigned long rx_buf = (unsigned long)priv->rx_buf;

	alt_sgdma_wait_transfer(priv->sgdma_rx);
	invalidate_dcache_range(rx_buf, rx_buf + PKTSIZE_ALIGN);
	alt_sgdma_construct_descriptor(
		rx_desc,
		rx_desc + 1,
		NULL,	/* read addr */
		priv->rx_buf, /* write addr */
		0,	/* length or EOP */
		0,	/* gen eop */
		0,	/* read fixed */
		0	/* write fixed or sop */
		);

	/* setup the sgdma */
	alt_sgdma_start_transfer(priv->sgdma_rx, rx_desc);
	debug("recv setup\n");

	return 0;
}
Example #7
static void mxc_udc_read_setup_pkt(struct usb_device_request *s)
{
	u32 temp;
	temp = readl(USB_ENDPTSETUPSTAT);
	writel(temp, USB_ENDPTSETUPSTAT);
	DBG("setup stat %x\n", temp);
	do {
		temp = readl(USB_USBCMD);
		temp |= USB_CMD_SUTW;
		writel(temp, USB_USBCMD);

		invalidate_dcache_range((unsigned long)(mxc_udc.mxc_ep[0].ep_qh),
			CACHE_ALIGNED_END((mxc_udc.mxc_ep[0].ep_qh),
				sizeof(struct ep_queue_head)));

		memcpy((void *)s,
			(void *)mxc_udc.mxc_ep[0].ep_qh->setup_data, 8);
	} while (!(readl(USB_USBCMD) & USB_CMD_SUTW));

	DBG("handle_setup s.type=%x req=%x len=%x\n",
		s->bmRequestType, s->bRequest, s->wLength);
	temp = readl(USB_USBCMD);
	temp &= ~USB_CMD_SUTW;
	writel(temp, USB_USBCMD);

	DBG("[SETUP] type=%x req=%x val=%x index=%x len=%x\n",
		s->bmRequestType, s->bRequest,
		s->wValue, s->wIndex,
		s->wLength);
}
Example #8
/**
 * xhci_inval_cache() - invalidate the dcache over a memory region
 *
 * @param addr	start address of the memory region to be invalidated
 * @param len	length in bytes of the region to be invalidated
 * @return none
 */
void xhci_inval_cache(uint32_t addr, u32 len)
{
	BUG_ON((void *)addr == NULL || len == 0);

	invalidate_dcache_range(addr & ~(CACHELINE_SIZE - 1),
				ALIGN(addr + len, CACHELINE_SIZE));
}
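A hypothetical call site, for illustration: after the controller DMAs an event TRB into the ring, the covering lines are dropped so the CPU parses the DRAM contents rather than a stale cached copy:

	/* event_trb points into the event ring (placeholder variable) */
	xhci_inval_cache((uint32_t)(uintptr_t)event_trb, sizeof(union xhci_trb));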
Example #9
static void rtl_inval_tx_desc(struct TxDesc *desc)
{
	unsigned long start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN(start + sizeof(*desc), ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}
Example #10
static inline void s3c_ep0_complete_out(void)
{
	u32 ep_ctrl;

	debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DIEPCTL0 = 0x%x\n",
		__func__, readl(&reg->in_endp[EP0_CON].diepctl));
	debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DOEPCTL0 = 0x%x\n",
		__func__, readl(&reg->out_endp[EP0_CON].doepctl));

	debug_cond(DEBUG_IN_EP,
		"%s : Prepare Complete Out packet.\n", __func__);

	invalidate_dcache_range((unsigned long) usb_ctrl_dma_addr,
				(unsigned long) usb_ctrl_dma_addr
				+ DMA_BUFFER_SIZE);

	writel(DOEPT_SIZ_PKT_CNT(1) | sizeof(struct usb_ctrlrequest),
	       &reg->out_endp[EP0_CON].doeptsiz);
	writel(usb_ctrl_dma_addr, &reg->out_endp[EP0_CON].doepdma);

	ep_ctrl = readl(&reg->out_endp[EP0_CON].doepctl);
	writel(ep_ctrl|DEPCTL_EPENA|DEPCTL_CNAK,
	       &reg->out_endp[EP0_CON].doepctl);

	debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DIEPCTL0 = 0x%x\n",
		__func__, readl(&reg->in_endp[EP0_CON].diepctl));
	debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DOEPCTL0 = 0x%x\n",
		__func__, readl(&reg->out_endp[EP0_CON].doepctl));
}
Example #11
static int dma_init(void) {
    int ret;

    /* Request DMA channel */
    ret = request_dma(CH_PPI, DRIVER_NAME);
    if(ret < 0) {
        printk(KERN_WARNING DRIVER_NAME ": Could not allocate DMA channel\n");
        return ret;
    }

    /* Disable channel while it is being configured */
    disable_dma(CH_PPI);

    /* Allocate buffer space for the DMA engine to use */
    dma_buffer = __get_dma_pages(GFP_KERNEL, page_alloc_order(BUFFER_SIZE * BUFFER_COUNT));
    if(dma_buffer == 0) {
        printk(KERN_WARNING DRIVER_NAME ": Could not allocate dma_pages\n");
        free_dma(CH_PPI);
        return -ENOMEM;
    }

    /* Invalidate cached lines covering the DMA buffer */
    invalidate_dcache_range(dma_buffer, dma_buffer + (BUFFER_SIZE * BUFFER_COUNT));

    /* Set DMA configuration */
    set_dma_start_addr(CH_PPI, dma_buffer);
    set_dma_config(CH_PPI, (DMAFLOW_AUTO | WNR | RESTART | DI_EN | WDSIZE_16 | DMA2D | DI_SEL));
    set_dma_x_count(CH_PPI, SAMPLES_PER_BUFFER * CHANNELS);
    set_dma_x_modify(CH_PPI, SAMPLE_SIZE);
    set_dma_y_count(CH_PPI, BUFFER_COUNT);
    set_dma_y_modify(CH_PPI, SAMPLE_SIZE);
    set_dma_callback(CH_PPI, &buffer_full_handler, NULL);

    return 0;
}
Example #12
static int mxsmmc_send_cmd_dma(struct mxsmmc_priv *priv, struct mmc_data *data)
{
	uint32_t data_count = data->blocksize * data->blocks;
	uint32_t cache_data_count;
	int dmach;
	struct mxs_dma_desc *desc = priv->desc;

	memset(desc, 0, sizeof(struct mxs_dma_desc));
	desc->address = (dma_addr_t)desc;

	if (data_count % ARCH_DMA_MINALIGN)
		cache_data_count = roundup(data_count, ARCH_DMA_MINALIGN);
	else
		cache_data_count = data_count;

	if (data->flags & MMC_DATA_READ) {
		priv->desc->cmd.data = MXS_DMA_DESC_COMMAND_DMA_WRITE;
		priv->desc->cmd.address = (dma_addr_t)data->dest;
	} else {
		priv->desc->cmd.data = MXS_DMA_DESC_COMMAND_DMA_READ;
		priv->desc->cmd.address = (dma_addr_t)data->src;

		/* Flush data to DRAM so DMA can pick them up */
		flush_dcache_range((uint32_t)priv->desc->cmd.address,
			(uint32_t)(priv->desc->cmd.address + cache_data_count));
	}

	/* Invalidate the area, so no writeback into the RAM races with DMA */
	invalidate_dcache_range((uint32_t)priv->desc->cmd.address,
			(uint32_t)(priv->desc->cmd.address + cache_data_count));

	priv->desc->cmd.data |= MXS_DMA_DESC_IRQ | MXS_DMA_DESC_DEC_SEM |
				(data_count << MXS_DMA_DESC_BYTES_OFFSET);

	dmach = MXS_DMA_CHANNEL_AHB_APBH_SSP0 + priv->id;
	mxs_dma_desc_append(dmach, priv->desc);
	if (mxs_dma_go(dmach))
		return COMM_ERR;

	/* The data arrived into DRAM, invalidate cache over them */
	if (data->flags & MMC_DATA_READ) {
		invalidate_dcache_range((uint32_t)priv->desc->cmd.address,
			(uint32_t)(priv->desc->cmd.address + cache_data_count));
	}

	return 0;
}
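The read path above invalidates the buffer both before and after the transfer. A condensed sketch of that pattern, with hypothetical start_dma_read()/wait_dma_done() placeholders: the first invalidate evicts dirty lines whose eventual writeback would corrupt incoming DMA data, and the second evicts anything the CPU speculatively refetched while the transfer was in flight:

static void dma_read_buffer(void *buf, size_t len)
{
	ulong start = (ulong)buf;
	ulong end = start + roundup(len, ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);	/* no dirty lines left to write back */
	start_dma_read(buf, len);		/* device fills buf */
	wait_dma_done();
	invalidate_dcache_range(start, end);	/* drop speculatively fetched lines */
}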
Example #13
static void complete_rx(struct s3c_udc *dev, u8 ep_num)
{
	struct s3c_ep *ep = &dev->ep[ep_num];
	struct s3c_request *req = NULL;
	u32 ep_tsr = 0, xfer_size = 0, is_short = 0;
	u32 *p = the_controller->dma_buf[ep_index(ep)+1];

	if (list_empty(&ep->queue)) {
		debug_cond(DEBUG_OUT_EP != 0,
			   "%s: RX DMA done : NULL REQ on OUT EP-%d\n",
			   __func__, ep_num);
		return;
	}

	req = list_entry(ep->queue.next, struct s3c_request, queue);
	ep_tsr = readl(&reg->out_endp[ep_num].doeptsiz);

	if (ep_num == EP0_CON)
		xfer_size = (ep_tsr & DOEPT_SIZ_XFER_SIZE_MAX_EP0);
	else
		xfer_size = (ep_tsr & DOEPT_SIZ_XFER_SIZE_MAX_EP);

	xfer_size = ep->len - xfer_size;

	invalidate_dcache_range((unsigned long) p,
				(unsigned long) p + DMA_BUFFER_SIZE);

	memcpy(ep->dma_buf, p, ep->len);

	req->req.actual += min(xfer_size, req->req.length - req->req.actual);
	is_short = (xfer_size < ep->ep.maxpacket);

	debug_cond(DEBUG_OUT_EP != 0,
		   "%s: RX DMA done : ep = %d, rx bytes = %d/%d, "
		   "is_short = %d, DOEPTSIZ = 0x%x, remained bytes = %d\n",
		   __func__, ep_num, req->req.actual, req->req.length,
		   is_short, ep_tsr, xfer_size);

	if (is_short || req->req.actual == req->req.length) {
		if (ep_num == EP0_CON && dev->ep0state == DATA_STATE_RECV) {
			debug_cond(DEBUG_OUT_EP != 0, "	=> Send ZLP\n");
			s3c_udc_ep0_zlp(dev);
			/* packet will be completed in complete_tx() */
			dev->ep0state = WAIT_FOR_IN_COMPLETE;
		} else {
			done(ep, req, 0);

			if (!list_empty(&ep->queue)) {
				req = list_entry(ep->queue.next,
					struct s3c_request, queue);
				debug_cond(DEBUG_OUT_EP != 0,
					   "%s: Next Rx request start...\n",
					   __func__);
				setdma_rx(ep, req);
			}
		}
	} else {
		/* not all data received yet: re-arm DMA for the rest */
		setdma_rx(ep, req);
	}
}
Example #14
static int pic32_eth_send(struct udevice *dev, void *packet, int length)
{
	struct pic32eth_dev *priv = dev_get_priv(dev);
	struct pic32_ectl_regs *ectl_p = priv->ectl_regs;
	struct eth_dma_desc *txd;
	u64 deadline;

	txd = &priv->txd_ring[0];

	/* set proper flags & length in descriptor header */
	txd->hdr = EDH_SOP | EDH_EOP | EDH_EOWN | EDH_BCOUNT(length);

	/* pass buffer address to hardware */
	txd->data_buff = virt_to_phys(packet);

	debug("%s: %d / .hdr %x, .data_buff %x, .stat %x, .nexted %x\n",
	      __func__, __LINE__, txd->hdr, txd->data_buff, txd->stat2,
	      txd->next_ed);

	/* cache flush (packet) */
	flush_dcache_range((ulong)packet, (ulong)packet + length);

	/* cache flush (txd) */
	flush_dcache_range((ulong)txd, (ulong)txd + sizeof(*txd));

	/* pass descriptor table base to h/w */
	writel(virt_to_phys(txd), &ectl_p->txst.raw);

	/* ready to send enabled, hardware can now send the packet(s) */
	writel(ETHCON_TXRTS | ETHCON_ON, &ectl_p->con1.set);

	/* wait until tx has completed and h/w has released ownership
	 * of the tx descriptor or timeout elapsed.
	 */
	deadline = get_ticks() + get_tbclk();
	for (;;) {
		/* check timeout */
		if (get_ticks() > deadline)
			return -ETIMEDOUT;

		if (ctrlc())
			return -EINTR;

		/* tx completed ? */
		if (readl(&ectl_p->con1.raw) & ETHCON_TXRTS) {
			udelay(1);
			continue;
		}

		/* h/w not released ownership yet? */
		invalidate_dcache_range((ulong)txd, (ulong)txd + sizeof(*txd));
		if (!(txd->hdr & EDH_EOWN))
			break;
	}

	return 0;
}
Example #15
File: rtl8169.c  Project: OpenNoah/u-boot
static void rtl_inval_tx_desc(struct TxDesc *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	unsigned long start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN(start + sizeof(*desc), ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
#endif
}
Example #16
static int mxc_tqi_is_busy(struct ep_queue_item *tqi)
{
	/* bit 7 is set by software when send, clear by controller
	   when finish */
	/*Invalidate cache to gain dtd content from physical memory*/
	invalidate_dcache_range((unsigned long)tqi,
		CACHE_ALIGNED_END(tqi, sizeof(struct ep_queue_item)));
	return tqi->info & (1 << 7);
}
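Because each call re-invalidates the descriptor from physical memory, the helper can be polled directly; a minimal sketch with illustrative timeout handling:

	int timeout = 1000;

	/* spin until the controller clears the active bit (bit 7) */
	while (mxc_tqi_is_busy(tqi) && --timeout)
		udelay(1);
	if (!timeout)
		printf("ep_queue_item still busy\n");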
Example #17
static int pic32_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct pic32eth_dev *priv = dev_get_priv(dev);
	struct eth_dma_desc *rxd;
	u32 idx = priv->rxd_idx;
	u32 rx_count;

	/* find the next ready to receive */
	rxd = &priv->rxd_ring[idx];

	invalidate_dcache_range((ulong)rxd, (ulong)rxd + sizeof(*rxd));
	/* check if owned by MAC */
	if (rxd->hdr & EDH_EOWN)
		return -EAGAIN;

	/* Sanity check on header: SOP and EOP  */
	if ((rxd->hdr & (EDH_SOP | EDH_EOP)) != (EDH_SOP | EDH_EOP)) {
		printf("%s: %s, rx pkt across multiple descr\n",
		       __FILE__, __func__);
		return 0;
	}

	debug("%s: %d /idx %i, hdr=%x, data_buff %x, stat %x, nexted %x\n",
	      __func__, __LINE__, idx, rxd->hdr,
	      rxd->data_buff, rxd->stat2, rxd->next_ed);

	/* Sanity check on rx_stat: OK, CRC */
	if (!RSV_RX_OK(rxd->stat2) || RSV_CRC_ERR(rxd->stat2)) {
		debug("%s: %s: Error, rx problem detected\n",
		      __FILE__, __func__);
		return 0;
	}

	/* invalidate dcache */
	rx_count = RSV_RX_COUNT(rxd->stat2);
	invalidate_dcache_range((ulong)net_rx_packets[idx],
				(ulong)net_rx_packets[idx] + rx_count);

	/* Pass the packet to protocol layer */
	*packetp = net_rx_packets[idx];

	/* increment number of bytes rcvd (ignore CRC) */
	return rx_count - 4;
}
Example #18
/*
 * Send a data block via Ethernet
 */
static int ftgmac100_send(struct udevice *dev, void *packet, int length)
{
	struct ftgmac100_data *priv = dev_get_priv(dev);
	struct ftgmac100 *ftgmac100 = priv->iobase;
	struct ftgmac100_txdes *curr_des = &priv->txdes[priv->tx_index];
	ulong des_start = (ulong)curr_des;
	ulong des_end = des_start +
		roundup(sizeof(*curr_des), ARCH_DMA_MINALIGN);
	ulong data_start;
	ulong data_end;
	int rc;

	invalidate_dcache_range(des_start, des_end);

	if (curr_des->txdes0 & FTGMAC100_TXDES0_TXDMA_OWN) {
		dev_err(dev, "no TX descriptor available\n");
		return -EPERM;
	}

	debug("%s(%x, %x)\n", __func__, (int)packet, length);

	length = (length < ETH_ZLEN) ? ETH_ZLEN : length;

	curr_des->txdes3 = (unsigned int)packet;

	/* Flush data to be sent */
	data_start = curr_des->txdes3;
	data_end = data_start + roundup(length, ARCH_DMA_MINALIGN);
	flush_dcache_range(data_start, data_end);

	/* Only one segment on TXBUF */
	curr_des->txdes0 &= priv->txdes0_edotr_mask;
	curr_des->txdes0 |= FTGMAC100_TXDES0_FTS |
			    FTGMAC100_TXDES0_LTS |
			    FTGMAC100_TXDES0_TXBUF_SIZE(length) |
			    FTGMAC100_TXDES0_TXDMA_OWN ;

	/* Flush modified buffer descriptor */
	flush_dcache_range(des_start, des_end);

	/* Start transmit */
	writel(1, &ftgmac100->txpd);

	rc = wait_for_bit_ftgmac100_txdone(curr_des,
					   FTGMAC100_TXDES0_TXDMA_OWN, false,
					   FTGMAC100_TX_TIMEOUT_MS, true);
	if (rc)
		return rc;

	debug("%s(): packet sent\n", __func__);

	/* Move to next descriptor */
	priv->tx_index = (priv->tx_index + 1) % PKTBUFSTX;

	return 0;
}
Example #19
static void ipi_flush_icache(void *info)
{
    struct blackfin_flush_data *fdata = info;

    /* Invalidate the memory holding the bounds of the flushed region. */
    invalidate_dcache_range((unsigned long)fdata,
                            (unsigned long)fdata + sizeof(*fdata));

    flush_icache_range(fdata->start, fdata->end);
}
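The counterpart on the initiating CPU must publish fdata before raising the IPI; a hedged sketch of that side (request_icache_flush is hypothetical, smp_call_function is the standard kernel primitive):

static void request_icache_flush(unsigned long start, unsigned long end)
{
    struct blackfin_flush_data fdata = { .start = start, .end = end };

    /* Write the bounds back to memory so the receiving CPU's
       invalidate in ipi_flush_icache() sees fresh values. */
    flush_dcache_range((unsigned long)&fdata,
                       (unsigned long)&fdata + sizeof(fdata));
    smp_call_function(ipi_flush_icache, &fdata, 1);
}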
Example #20
static u32 ftgmac100_read_txdesc(const void *desc)
{
	const struct ftgmac100_txdes *txdes = desc;
	ulong des_start = (ulong)txdes;
	ulong des_end = des_start + roundup(sizeof(*txdes), ARCH_DMA_MINALIGN);

	invalidate_dcache_range(des_start, des_end);

	return txdes->txdes0;
}
Example #21
static int altera_tse_free_pkt(struct udevice *dev, uchar *packet,
			       int length)
{
	struct altera_tse_priv *priv = dev_get_priv(dev);
	unsigned long rx_buf = (unsigned long)priv->rx_buf;

	invalidate_dcache_range(rx_buf, rx_buf + PKTSIZE_ALIGN);

	return priv->ops->free_pkt(dev, packet, length);
}
Example #22
static int mxc_udc_tqi_empty(struct ep_queue_item *tqi)
{
	int ret;

	invalidate_dcache_range((unsigned long)tqi,
		CACHE_ALIGNED_END(tqi, sizeof(struct ep_queue_item)));

	ret = tqi->info & (1 << 7);
	return ret;
}
Example #23
static int dw_eth_recv(struct eth_device *dev)
{
	struct dw_eth_dev *priv = dev->priv;
	u32 status, desc_num = priv->rx_currdescnum;
	struct dmamacdescr *desc_p = &priv->rx_mac_descrtable[desc_num];
	int length = 0;
	uint32_t desc_start = (uint32_t)desc_p;
	uint32_t desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);
	uint32_t data_start = (uint32_t)desc_p->dmamac_addr;
	uint32_t data_end;

	/* Invalidate entire buffer descriptor */
	invalidate_dcache_range(desc_start, desc_end);

	status = desc_p->txrx_status;

	/* Check if the owner is the CPU */
	if (!(status & DESC_RXSTS_OWNBYDMA)) {

		length = (status & DESC_RXSTS_FRMLENMSK) >> \
			 DESC_RXSTS_FRMLENSHFT;

		/* Invalidate received data */
		data_end = data_start + roundup(length, ARCH_DMA_MINALIGN);
		invalidate_dcache_range(data_start, data_end);

		NetReceive(desc_p->dmamac_addr, length);

		/*
		 * Make the current descriptor valid again and go to
		 * the next one
		 */
		desc_p->txrx_status |= DESC_RXSTS_OWNBYDMA;

		/* Flush only status field - others weren't changed */
		flush_dcache_range(desc_start, desc_end);

		/* Test the wrap-around condition. */
		if (++desc_num >= CONFIG_RX_DESCR_NUM)
			desc_num = 0;
	}

	priv->rx_currdescnum = desc_num;

	return length;
}
Example #24
/*
 * Send a data block via Ethernet
 */
static int owl_mac_send(struct eth_device *dev, void *pkt, int len)
{
	struct buffer_descriptor *bdp;
	unsigned long status;
	struct owl_mac_info *owl_info = dev->priv;
	int i;
	u32 reg_val = 0;

	if (len <= 0 || len > ETH_PKG_MAX) {
		printf("owl_mac : bad tx pkt len (%d)\n", len);
		return 0;
	}

	owl_prepare_tx_bds(dev);

	bdp = &owl_info->tx_bd_base[owl_info->tx_cur_idx];
	status = bdp->status;
	if (status & TXBD_STAT_OWN) {
		printf("owl_mac tx error: tx is full\n");
		return 0;
	}
	bdp->buf_addr = dma_map_single(pkt, len, DMA_TO_DEVICE);
	bdp->status = 0;
	bdp->control &= TXBD_CTRL_IC | TXBD_CTRL_TER; /* clear others */
	bdp->control |= TXBD_CTRL_TBS1(len);
	bdp->control |= TXBD_CTRL_FS | TXBD_CTRL_LS;
	bdp->status = TXBD_STAT_OWN;
	flush_dcache_all();

	writel(readl(MAC_CSR6) | EC_OPMODE_ST, MAC_CSR6);
	writel(EC_TXPOLL_ST, MAC_CSR1);

	/* wait for finish then return */
	for (i = 0; i < OWL_MAC_TX_TIMEOUT; i++) {
		reg_val = readl(MAC_CSR5);
		if (reg_val & EC_STATUS_TI) {
			reg_val &= ~EC_STATUS_TI;
			writel(reg_val, MAC_CSR5);
			break;
		}
		udelay(10);
	}
	dma_unmap_single(pkt, len, bdp->buf_addr);
	if (i >= OWL_MAC_TX_TIMEOUT) {
		invalidate_dcache_range((uint32_t)bdp, (uint32_t)bdp +
			roundup(sizeof(*bdp) * TX_RING_SIZE, ARCH_DMA_MINALIGN));
		printf("owl_mac : Tx timeout 0x%lx\n", bdp->status);
	}

	owl_info->tx_cur_idx = (owl_info->tx_cur_idx + 1) % TX_RING_SIZE;
	return len;
}
Example #25
dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	invalidate_dcache_range((unsigned long)ptr,
			(unsigned long)ptr + size);

	return (dma_addr_t) ptr;
}
Example #26
static dma_addr_t __dma_map_single(void *ptr, size_t size,
				   enum dma_data_direction dir)
{
	unsigned long addr = (unsigned long)ptr;

	if (dir == DMA_FROM_DEVICE)
		invalidate_dcache_range(addr, addr + size);
	else
		flush_dcache_range(addr, addr + size);

	return addr;
}
Example #27
/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */
static inline void __dma_sync_page(unsigned long paddr, unsigned long offset,
                                   size_t size, enum dma_data_direction direction)
{
    switch (direction) {
    case DMA_TO_DEVICE:
        flush_dcache_range(paddr + offset, paddr + offset + size);
        break;
    case DMA_FROM_DEVICE:
        invalidate_dcache_range(paddr + offset, paddr + offset + size);
        break;
    default:
        BUG();
    }
}
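For illustration, a hypothetical map routine built on this helper; per the comment above, archdata.dma_data carries the per-device offset (my_map_page is not part of the source):

static dma_addr_t my_map_page(struct device *dev, struct page *page,
                              unsigned long offset, size_t size,
                              enum dma_data_direction direction)
{
    /* sync the CPU-visible physical range... */
    __dma_sync_page(page_to_phys(page), offset, size, direction);

    /* ...then translate to the device-visible bus address */
    return page_to_phys(page) + offset +
           (unsigned long)dev->archdata.dma_data;
}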
Example #28
/*
 * Get a data block via Ethernet
 */
static int owl_mac_recv (struct eth_device *dev)
{
	struct owl_mac_info *owl_info = dev->priv;
	u32 status, rx_cur_idx = owl_info->rx_cur_idx;
	struct buffer_descriptor *bdp = &owl_info->rx_bd_base[rx_cur_idx];
	int pkt_len = 0;
	uint32_t desc_start = (uint32_t)bdp;
	uint32_t desc_end = desc_start + roundup(sizeof(*bdp), ARCH_DMA_MINALIGN);
	uint32_t data_start = (uint32_t)bdp->buf_addr;
	uint32_t data_end;

	writel(readl(MAC_CSR6) | EC_OPMODE_RA | EC_OPMODE_SR, MAC_CSR6);

	invalidate_dcache_range(desc_start, desc_end);

	status = bdp->status;
	if (!(status & RXBD_STAT_OWN)) {
		pkt_len = RXBD_STAT_FL(status);

		data_end = data_start + roundup(pkt_len, ARCH_DMA_MINALIGN);
		invalidate_dcache_range(data_start, data_end);

		NetReceive((uchar *)bdp->buf_addr, pkt_len);
		bdp->status = RXBD_STAT_OWN;

		flush_dcache_range(desc_start, desc_end);

		if (++rx_cur_idx >= RX_RING_SIZE)
			rx_cur_idx = 0;
	}

	owl_info->rx_cur_idx = rx_cur_idx;

	return pkt_len;
}
Example #29
int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++)
		invalidate_dcache_range(sg_dma_address(&sg[i]),
					sg_dma_address(&sg[i]) +
					sg_dma_len(&sg[i]));

	return nents;
}
Example #30
static inline void __dma_sync_for_cpu(void *vaddr, size_t size,
			      enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_BIDIRECTIONAL:
	case DMA_FROM_DEVICE:
		invalidate_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	case DMA_TO_DEVICE:
		break;
	default:
		BUG();
	}
}