Example #1
/**
 * ipath_map_single - a safety wrapper around pci_map_single()
 *
 * Same idea as ipath_map_page().
 */
dma_addr_t ipath_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
	int direction)
{
	dma_addr_t phys;

	phys = pci_map_single(hwdev, ptr, size, direction);

	if (phys == 0) {
		pci_unmap_single(hwdev, phys, size, direction);
		phys = pci_map_single(hwdev, ptr, size, direction);
		/*
		 * FIXME: If we get 0 again, we should keep this page,
		 * map another, then free the 0 page.
		 */
	}

	return phys;
}
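Note: comparing the returned handle against 0 (as above) is not portable, since 0 can be a valid bus address on some platforms. A minimal hedged sketch of the same wrapper using the explicit error check instead (the function name is hypothetical):

/* Sketch: portable failure detection for a streaming mapping. */
dma_addr_t ipath_map_single_checked(struct pci_dev *hwdev, void *ptr,
				    size_t size, int direction)
{
	dma_addr_t phys = pci_map_single(hwdev, ptr, size, direction);

	if (pci_dma_mapping_error(hwdev, phys))
		return 0;	/* caller treats 0 as "not mapped" */
	return phys;
}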
Example #2
uint BCMFASTPATH
osl_dma_map(osl_t *osh, void *va, uint size, int direction)
{
	int dir;

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
	return (pci_map_single(osh->pdev, va, size, dir));
}
Example #3
/* User DMA Buffers */
void ivtv_udma_alloc(struct ivtv *itv)
{
	if (itv->udma.SG_handle == 0) {
		/* Map DMA Page Array Buffer */
		itv->udma.SG_handle = pci_map_single(itv->pdev, itv->udma.SGarray,
			   sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE);
		ivtv_udma_sync_for_cpu(itv);
	}
}
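The ivtv_udma_sync_for_cpu() call above hands the mapped array back to the CPU; before the device may read it again, ownership has to be passed back. A hedged sketch of what such helpers typically wrap, assuming the itv->pdev and itv->udma fields shown above (helper names hypothetical):

/* Sketch: CPU/device ownership hand-off for the mapping made above. */
static void example_sync_for_cpu(struct ivtv *itv)
{
	pci_dma_sync_single_for_cpu(itv->pdev, itv->udma.SG_handle,
				    sizeof(itv->udma.SGarray),
				    PCI_DMA_TODEVICE);
}

static void example_sync_for_device(struct ivtv *itv)
{
	pci_dma_sync_single_for_device(itv->pdev, itv->udma.SG_handle,
				       sizeof(itv->udma.SGarray),
				       PCI_DMA_TODEVICE);
}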
Example #4
File: trx.c Project: mkrufky/linux
void rtl8822be_tx_fill_special_desc(struct ieee80211_hw *hw, u8 *pdesc,
				    u8 *pbd_desc, struct sk_buff *skb,
				    u8 hw_queue)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	u8 fw_queue;
	u8 txdesc_len = 48;

	dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
					    PCI_DMA_TODEVICE);

	if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
		RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "DMA mapping error");
		return;
	}

	rtl8822be_pre_fill_tx_bd_desc(hw, pbd_desc, pdesc, hw_queue, skb,
				      mapping);

	/* it should be BEACON_QUEUE or H2C_QUEUE,
	 * so skb=NULL is safe to assert
	 */
	fw_queue = _rtl8822be_map_hwqueue_to_fwqueue(NULL, hw_queue);

	CLEAR_PCI_TX_DESC_CONTENT(pdesc, txdesc_len);

	/* common part for BEACON and H2C */
	SET_TX_DESC_TXPKTSIZE((u8 *)pdesc, (u16)(skb->len));

	SET_TX_DESC_QSEL(pdesc, fw_queue);

	if (hw_queue == H2C_QUEUE) {
		/* fill H2C */
		SET_TX_DESC_OFFSET(pdesc, 0);

	} else {
		/* fill beacon */
		SET_TX_DESC_OFFSET(pdesc, txdesc_len);

		SET_TX_DESC_DATARATE(pdesc, DESC_RATE1M);

		SET_TX_DESC_SW_SEQ(pdesc, 0);

		SET_TX_DESC_RATE_ID(pdesc, 7);
		SET_TX_DESC_MACID(pdesc, 0);

		SET_TX_DESC_LS(pdesc, 1);

		SET_TX_DESC_OFFSET(pdesc, 48);

		SET_TX_DESC_USE_RATE(pdesc, 1);
	}

	RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, "H2C Tx Cmd Content\n",
		      pdesc, txdesc_len);
}
Example #5
 /* allocate and initialize Tx and Rx descriptors */
static void
alloc_list (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->cur_tx = 0;
	np->old_rx = np->old_tx = 0;
	np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);

	/* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = cpu_to_le64 (TFDDone);
		np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma +
					      ((i+1)%TX_RING_SIZE) *
					      sizeof (struct netdev_desc));
	}

	/* Initialize Rx descriptors */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma +
						((i + 1) % RX_RING_SIZE) *
						sizeof (struct netdev_desc));
		np->rx_ring[i].status = 0;
		np->rx_ring[i].fraginfo = 0;
		np->rx_skbuff[i] = NULL;
	}

	/* Allocate the rx buffers */
	for (i = 0; i < RX_RING_SIZE; i++) {
		/* Allocated fixed size of skbuff */
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL) {
			printk (KERN_ERR
				"%s: alloc_list: allocate Rx buffer error! ",
				dev->name);
			break;
		}
		/* Rubicon now supports 40 bits of addressing space. */
		np->rx_ring[i].fraginfo =
		    cpu_to_le64 ( pci_map_single (
			 	  np->pdev, skb->data, np->rx_buf_sz,
				  PCI_DMA_FROMDEVICE));
		np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
	}

	/* Set RFDListPtr */
	writel (np->rx_ring_dma, dev->base_addr + RFDListPtr0);
	writel (0, dev->base_addr + RFDListPtr1);

	return;
}
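Every buffer mapped in alloc_list() must eventually be unmapped before its skb is freed. A hedged sketch of the matching teardown (the helper name is hypothetical; the low 48 bits of fraginfo hold the bus address, per the packing above):

/* Sketch: release the rx mappings created in alloc_list(). */
static void example_free_list(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = np->rx_skbuff[i];

		if (!skb)
			continue;
		pci_unmap_single(np->pdev,
				 le64_to_cpu(np->rx_ring[i].fraginfo) & DMA_BIT_MASK(48),
				 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
		dev_kfree_skb(skb);
		np->rx_skbuff[i] = NULL;
	}
}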
Example #6
void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
	bool firstseg, bool lastseg, struct sk_buff *skb)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_tcb_desc *tcb_desc = (struct rtl_tcb_desc *)(skb->cb);

	dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
			PCI_DMA_TODEVICE);

	if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
		RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
			 "DMA mapping error\n");
		return;
	}
	/* Clear all status	*/
	CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_CMDDESC_SIZE_RTL8192S);

	/* This bit indicate this packet is used for FW download. */
	if (tcb_desc->cmd_or_init == DESC_PACKET_TYPE_INIT) {
		/* For firmware download we only need to set LINIP */
		SET_TX_DESC_LINIP(pdesc, tcb_desc->last_inipkt);

		/* 92SE must set this to 1 to work around a HW DMA error
		 * during firmware download */
		SET_TX_DESC_FIRST_SEG(pdesc, 1);
		SET_TX_DESC_LAST_SEG(pdesc, 1);

		/* 92SE does not need the TX packet size set during firmware download */
		SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
		SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
		SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);

		wmb();
		SET_TX_DESC_OWN(pdesc, 1);
	} else { /* H2C Command Desc format (Host TXCMD) */
		/* 92SE must set this to 1 to work around a HW DMA error
		 * during firmware download */
		SET_TX_DESC_FIRST_SEG(pdesc, 1);
		SET_TX_DESC_LAST_SEG(pdesc, 1);

		SET_TX_DESC_OFFSET(pdesc, 0x20);

		/* Buffer size + command header */
		SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
		/* Fixed queue of H2C command */
		SET_TX_DESC_QUEUE_SEL(pdesc, 0x13);

		SET_BITS_TO_LE_4BYTE(skb->data, 24, 7, rtlhal->h2c_txcmd_seq);

		SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
		SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);

		wmb();
		SET_TX_DESC_OWN(pdesc, 1);
	}
}
Example #7
dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size,
		enum dma_data_direction direction)
{
	if (dev->bus == &pci_bus_type)
		return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
	if (dev->bus == &vio_bus_type)
		return vio_map_single(to_vio_dev(dev), cpu_addr, size, direction);
	BUG();
	return (dma_addr_t)0;
}
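Later kernels dropped this per-bus dispatch; the generic DMA API takes the struct device directly and reports failures through dma_mapping_error(). A hedged sketch of the equivalent modern call (the function name is hypothetical):

#include <linux/dma-mapping.h>

/* Sketch: generic replacement for the per-bus dispatch above. */
static dma_addr_t example_map(struct device *dev, void *cpu_addr, size_t size,
			      enum dma_data_direction direction)
{
	dma_addr_t handle = dma_map_single(dev, cpu_addr, size, direction);

	if (dma_mapping_error(dev, handle))
		return 0;	/* mapping failed */
	return handle;
}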
Example #8
uint BCMFASTPATH
osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
{
	int dir;

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;

#if defined(__ARM_ARCH_7A__) && defined(BCMDMASGLISTOSL)
	if (dmah != NULL) {
		int32 nsegs, i, totsegs = 0, totlen = 0;
		struct scatterlist *sg, _sg[16];
		struct sk_buff *skb;
		for (skb = (struct sk_buff *)p; skb != NULL; skb = PKTNEXT(osh, skb)) {
			sg = &_sg[totsegs];
			if (skb_is_nonlinear(skb)) {
				nsegs = skb_to_sgvec(skb, sg, 0, PKTLEN(osh, skb));
				ASSERT((nsegs > 0) && (nsegs <= 16));
				pci_map_sg(osh->pdev, sg, nsegs, dir);
			} else {
				nsegs = 1;
				sg->page_link = 0;
				sg_set_buf(sg, PKTDATA(osh, skb), PKTLEN(osh, skb));

				pci_map_single(osh->pdev, PKTDATA(osh, skb),
				    PKTISCTF(osh, skb) ? CTFMAPSZ : PKTLEN(osh, skb), dir);
			}
			totsegs += nsegs;
			totlen += PKTLEN(osh, skb);
		}
		dmah->nsegs = totsegs;
		dmah->origsize = totlen;
		for (i = 0, sg = _sg; i < totsegs; i++, sg++) {
			dmah->segs[i].addr = sg_phys(sg);
			dmah->segs[i].length = sg->length;
		}
		return dmah->segs[0].addr;
	}
#endif

	return (pci_map_single(osh->pdev, va, size, dir));
}
Example #9
static void
alloc_list (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->cur_tx = 0;
	np->old_rx = np->old_tx = 0;
	np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);

	/* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = cpu_to_le64 (TFDDone);
		np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma +
					      ((i+1)%TX_RING_SIZE) *
					      sizeof (struct netdev_desc));
	}

	/* Initialize Rx descriptors */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma +
						((i + 1) % RX_RING_SIZE) *
						sizeof (struct netdev_desc));
		np->rx_ring[i].status = 0;
		np->rx_ring[i].fraginfo = 0;
		np->rx_skbuff[i] = NULL;
	}

	/* Allocate the rx buffers */
	for (i = 0; i < RX_RING_SIZE; i++) {
		/* Allocated fixed size of skbuff */
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL) {
			printk (KERN_ERR
				"%s: alloc_list: allocate Rx buffer error! ",
				dev->name);
			break;
		}
		/* Rubicon now supports 40 bits of addressing space. */
		np->rx_ring[i].fraginfo =
		    cpu_to_le64 ( pci_map_single (
			 	  np->pdev, skb->data, np->rx_buf_sz,
				  PCI_DMA_FROMDEVICE));
		np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
	}

	/* Set RFDListPtr */
	writel (np->rx_ring_dma, dev->base_addr + RFDListPtr0);
	writel (0, dev->base_addr + RFDListPtr1);
}
Example #10
static int alloc_buffer(struct vino_device *v, int size)
{
	int count, i, j, err;

	err = i = 0;
	count = (size / PAGE_SIZE + 4) & ~3;
	v->desc = (unsigned long *) kmalloc(count * sizeof(unsigned long),
					    GFP_KERNEL);
	if (!v->desc)
		return -ENOMEM;

	v->dma_desc.cpu = pci_alloc_consistent(NULL, PAGE_RATIO * (count+4) *
					       sizeof(dma_addr_t),
					       &v->dma_desc.dma);
	if (!v->dma_desc.cpu) {
		err = -ENOMEM;
		goto out_free_desc;
	}
	while (i < count) {
		dma_addr_t dma;

		v->desc[i] = get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!v->desc[i])
			break;
		dma = pci_map_single(NULL, (void *)v->desc[i], PAGE_SIZE,
				     PCI_DMA_FROMDEVICE);
		for (j = 0; j < PAGE_RATIO; j++)
			v->dma_desc.cpu[PAGE_RATIO * i + j ] = 
				dma + VINO_PAGE_SIZE * j;
		mem_map_reserve(virt_to_page(v->desc[i]));
		i++;
	}
	v->dma_desc.cpu[PAGE_RATIO * count] = VINO_DESC_STOP;
	if (i-- < count) {
		while (i >= 0) {
			mem_map_unreserve(virt_to_page(v->desc[i]));
			pci_unmap_single(NULL, v->dma_desc.cpu[PAGE_RATIO * i],
					 PAGE_SIZE, PCI_DMA_FROMDEVICE);
			free_page(v->desc[i]);
			i--;
		}
		pci_free_consistent(NULL,
				    PAGE_RATIO * (count+4) * sizeof(dma_addr_t),
				    (void *)v->dma_desc.cpu, v->dma_desc.dma);
		err = -ENOBUFS;
		goto out_free_desc;
	}
	v->page_count = count;
	return 0;

out_free_desc:
	kfree(v->desc);
	return err;
}
Example #11
static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct epic_private *ep = dev->priv;
	int entry, free_count;
	u32 ctrl_word;
	unsigned long flags;

	if (skb_padto(skb, ETH_ZLEN))
		return 0;

	/* Caution: the write order is important here, set the field with the
	   "ownership" bit last. */

	/* Calculate the next Tx descriptor entry. */
	spin_lock_irqsave(&ep->lock, flags);
	free_count = ep->cur_tx - ep->dirty_tx;
	entry = ep->cur_tx % TX_RING_SIZE;

	ep->tx_skbuff[entry] = skb;
	ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
		 			            skb->len, PCI_DMA_TODEVICE);
	if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
		ctrl_word = cpu_to_le32(0x100000); /* No interrupt */
	} else if (free_count == TX_QUEUE_LEN/2) {
		ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
	} else if (free_count < TX_QUEUE_LEN - 1) {
		ctrl_word = cpu_to_le32(0x100000); /* No Tx-done intr. */
	} else {
		/* Leave room for an additional entry. */
		ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
		ep->tx_full = 1;
	}
	ep->tx_ring[entry].buflength = ctrl_word | cpu_to_le32(skb->len);
	ep->tx_ring[entry].txstatus =
		((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
		| cpu_to_le32(DescOwn);

	ep->cur_tx++;
	if (ep->tx_full)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&ep->lock, flags);
	/* Trigger an immediate transmit demand. */
	outl(TxQueued, dev->base_addr + COMMAND);

	dev->trans_start = jiffies;
	if (debug > 4)
		printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
			   "flag %2.2x Tx status %8.8x.\n",
			   dev->name, (int)skb->len, entry, ctrl_word,
			   (int)inl(dev->base_addr + TxSTAT));

	return 0;
}
Example #12
static netdev_tx_t pci_eth_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct pci_eth_private *priv = netdev_priv(dev);
	struct pci_eth_descriptor *descptr;
	void __iomem *ioaddr = dev->base_addr;
	unsigned long flags;

	/* Critical Section */
	spin_lock_irqsave(&priv->lock, flags);

	/* TX resource check */
	if (!priv->tx_free_desc) {
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_stop_queue(dev);
		netdev_err(dev, ": no tx descriptor\n");
		return NETDEV_TX_BUSY;
	}

	/* Statistic Counter */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* Decrement free descriptors counter */
	priv->tx_free_desc--;

	/* Set TX descriptor & Transmit it */
	descptr = priv->tx_insert_ptr;
	if (skb->len < ETH_ZLEN)
		descptr->len = ETH_ZLEN;
	else
		descptr->len = skb->len;

	descptr->skb_ptr = skb;
	descptr->buf = cpu_to_le32(pci_map_single(priv->pdev,
		skb->data, skb->len, PCI_DMA_TODEVICE));
	descptr->status = DSC_OWNER_MAC;

	skb_tx_timestamp(skb);

	/* TODO: Trigger the MAC to check the TX descriptor - start DMA
	 * transaction.
	 */

	/* After DMA transaction perform the following check */
	/* If no tx resource, stop */
	if (!priv->tx_free_desc)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&priv->lock, flags);

	return NETDEV_TX_OK;
}
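A transmit mapping like the one above lives until the MAC signals completion; a hedged sketch of the matching tx-done path (function and field names follow this skeleton and are assumptions):

/* Sketch: release the tx mapping once the MAC has sent the frame. */
static void example_tx_done(struct pci_eth_private *priv,
			    struct pci_eth_descriptor *descptr)
{
	struct sk_buff *skb = descptr->skb_ptr;

	pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
			 skb->len, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(skb);	/* safe to call from the tx interrupt handler */
	descptr->skb_ptr = NULL;
	priv->tx_free_desc++;
}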
Example #13
static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, ctrl;

	len = skb->len;
	spin_lock_irq(&bp->lock);

	/* This is a hard error, log it. */
	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
		netif_stop_queue(dev);
		spin_unlock_irq(&bp->lock);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		return 1;
	}

	entry = bp->tx_prod;
	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	bp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);

	ctrl  = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);

	entry = NEXT_TX(entry);

	bp->tx_prod = entry;

	wmb();

	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_REORDER_BUG)
		br32(bp, B44_DMATX_PTR);

	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

	spin_unlock_irq(&bp->lock);

	dev->trans_start = jiffies;

	return 0;
}
Example #14
static void ad1889_start_wav(ad1889_state_t *state)
{
	unsigned long flags;
	struct dmabuf *dmabuf = &state->dmabuf;
	int cnt;
	u16 tmp;

	spin_lock_irqsave(&state->card->lock, flags);

	if (dmabuf->dma_len)	/* DMA already in flight */
		goto skip_dma;

	/* setup dma */
	cnt = dmabuf->wr_ptr - dmabuf->rd_ptr;
	if (cnt == 0)		/* done - don't need to do anything */
		goto skip_dma;

	/* If the wr_ptr has wrapped, only map to the end */
	if (cnt < 0)
		cnt = DMA_SIZE - dmabuf->rd_ptr;

	dmabuf->dma_handle = pci_map_single(ad1889_dev->pci,
					dmabuf->rawbuf + dmabuf->rd_ptr,
					cnt, PCI_DMA_TODEVICE);
	dmabuf->dma_len = cnt;
	dmabuf->ready = 1;

	DBG("Starting playback at 0x%p for %ld bytes\n", dmabuf->rawbuf +
	    dmabuf->rd_ptr, dmabuf->dma_len);

        /* load up the current register set */
	AD1889_WRITEL(ad1889_dev, AD_DMAWAVCC, cnt);
	AD1889_WRITEL(ad1889_dev, AD_DMAWAVICC, cnt);
	AD1889_WRITEL(ad1889_dev, AD_DMAWAVCA, dmabuf->dma_handle);

	/* TODO: for now we load the base registers with the same thing */
	AD1889_WRITEL(ad1889_dev, AD_DMAWAVBC, cnt);
	AD1889_WRITEL(ad1889_dev, AD_DMAWAVIBC, cnt);
	AD1889_WRITEL(ad1889_dev, AD_DMAWAVBA, dmabuf->dma_handle);

	/* and we're off to the races... */
	AD1889_WRITEL(ad1889_dev, AD_DMACHSS, 0x8);
	tmp = AD1889_READW(ad1889_dev, AD_DSWSMC);
	tmp |= 0x0400; /* set WAEN */
	AD1889_WRITEW(ad1889_dev, AD_DSWSMC, tmp);
	(void) AD1889_READW(ad1889_dev, AD_DSWSMC); /* flush posted PCI write */

	dmabuf->enable |= DAC_RUNNING;

skip_dma:
	spin_unlock_irqrestore(&state->card->lock, flags);
}
Example #15
File: r8169.c Project: wxlong/Test
static int
rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct rtl8169_private *tp = dev->priv;
    void *ioaddr = tp->mmio_addr;
    int entry = tp->cur_tx % NUM_TX_DESC;
    u32 len = skb->len;

    if (unlikely(skb->len < ETH_ZLEN)) {
        skb = skb_padto(skb, ETH_ZLEN);
        if (!skb)
            goto err_update_stats;
        len = ETH_ZLEN;
    }

    spin_lock_irq(&tp->lock);

    if (!(le32_to_cpu(tp->TxDescArray[entry].status) & OWNbit)) {
        dma_addr_t mapping;

        mapping = pci_map_single(tp->pci_dev, skb->data, len,
                                 PCI_DMA_TODEVICE);

        tp->Tx_skbuff[entry] = skb;
        tp->TxDescArray[entry].addr = cpu_to_le64(mapping);

        tp->TxDescArray[entry].status = cpu_to_le32(OWNbit | FSbit |
                                        LSbit | len | (EORbit * !((entry + 1) % NUM_TX_DESC)));

        RTL_W8(TxPoll, 0x40);	//set polling bit

        dev->trans_start = jiffies;

        tp->cur_tx++;
    } else
        goto err_drop;


    if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx) {
        netif_stop_queue(dev);
    }
out:
    spin_unlock_irq(&tp->lock);

    return 0;

err_drop:
    dev_kfree_skb(skb);
err_update_stats:
    tp->stats.tx_dropped++;
    goto out;
}
Example #16
/*
 * Fill the receive queue for management frames with fresh buffers.
 */
int
islpci_mgmt_rx_fill(struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb =	/* volatile not needed */
	    (isl38xx_control_block *) priv->control_block;
	u32 curr = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ]);

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgmt_rx_fill\n");
#endif

	while (curr - priv->index_mgmt_rx < ISL38XX_CB_MGMT_QSIZE) {
		u32 index = curr % ISL38XX_CB_MGMT_QSIZE;
		struct islpci_membuf *buf = &priv->mgmt_rx[index];
		isl38xx_fragment *frag = &cb->rx_data_mgmt[index];

		if (buf->mem == NULL) {
			buf->mem = kmalloc(MGMT_FRAME_SIZE, GFP_ATOMIC);
			if (!buf->mem) {
				printk(KERN_WARNING
				       "Error allocating management frame.\n");
				return -ENOMEM;
			}
			buf->size = MGMT_FRAME_SIZE;
		}
		if (buf->pci_addr == 0) {
			buf->pci_addr = pci_map_single(priv->pdev, buf->mem,
						       MGMT_FRAME_SIZE,
						       PCI_DMA_FROMDEVICE);
			if (!buf->pci_addr) {
				printk(KERN_WARNING
				       "Failed to make memory DMA'able.\n");
				return -ENOMEM;
			}
		}

		/* be safe: always reset control block information */
		frag->size = cpu_to_le16(MGMT_FRAME_SIZE);
		frag->flags = 0;
		frag->address = cpu_to_le32(buf->pci_addr);
		curr++;

		/* The fragment address in the control block must have
		 * been written before announcing the frame buffer to
		 * device */
		wmb();
		cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ] = cpu_to_le32(curr);
	}
	return 0;
}
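The !buf->pci_addr test above assumes 0 is never a valid bus address. A hedged variant of the same step using the explicit error check:

		/* Sketch: explicit failure check instead of testing for a zero handle. */
		buf->pci_addr = pci_map_single(priv->pdev, buf->mem,
					       MGMT_FRAME_SIZE,
					       PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(priv->pdev, buf->pci_addr)) {
			buf->pci_addr = 0;
			printk(KERN_WARNING
			       "Failed to make memory DMA'able.\n");
			return -ENOMEM;
		}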
Example #17
int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	void *base_struct_unaligned;
	struct sis_base_struct *base_struct;
	struct sis_sync_cmd_params params;
	unsigned long error_buffer_paddr;
	dma_addr_t bus_address;

	base_struct_unaligned = kzalloc(sizeof(*base_struct)
		+ SIS_BASE_STRUCT_ALIGNMENT - 1, GFP_KERNEL);
	if (!base_struct_unaligned)
		return -ENOMEM;

	base_struct = PTR_ALIGN(base_struct_unaligned,
		SIS_BASE_STRUCT_ALIGNMENT);
	error_buffer_paddr = (unsigned long)ctrl_info->error_buffer_dma_handle;

	put_unaligned_le32(SIS_BASE_STRUCT_REVISION, &base_struct->revision);
	put_unaligned_le32(lower_32_bits(error_buffer_paddr),
		&base_struct->error_buffer_paddr_low);
	put_unaligned_le32(upper_32_bits(error_buffer_paddr),
		&base_struct->error_buffer_paddr_high);
	put_unaligned_le32(PQI_ERROR_BUFFER_ELEMENT_LENGTH,
		&base_struct->error_buffer_element_length);
	put_unaligned_le32(ctrl_info->max_io_slots,
		&base_struct->error_buffer_num_elements);

	bus_address = pci_map_single(ctrl_info->pci_dev, base_struct,
		sizeof(*base_struct), PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(ctrl_info->pci_dev, bus_address)) {
		rc = -ENOMEM;
		goto out;
	}

	memset(&params, 0, sizeof(params));
	params.mailbox[1] = lower_32_bits((u64)bus_address);
	params.mailbox[2] = upper_32_bits((u64)bus_address);
	params.mailbox[3] = sizeof(*base_struct);

	rc = sis_send_sync_cmd(ctrl_info, SIS_CMD_INIT_BASE_STRUCT_ADDRESS,
		&params);

	pci_unmap_single(ctrl_info->pci_dev, bus_address, sizeof(*base_struct),
		PCI_DMA_TODEVICE);

out:
	kfree(base_struct_unaligned);

	return rc;
}
Example #18
static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
	int ring_index, struct p54p_desc *ring, u32 ring_limit,
	struct sk_buff **rx_buf, u32 index)
{
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	u32 limit, idx, i;

	idx = le32_to_cpu(ring_control->host_idx[ring_index]);
	limit = idx;
	limit -= index;
	limit = ring_limit - limit;

	i = idx % ring_limit;
	while (limit-- > 1) {
		struct p54p_desc *desc = &ring[i];

		if (!desc->host_addr) {
			struct sk_buff *skb;
			dma_addr_t mapping;
			skb = dev_alloc_skb(priv->common.rx_mtu + 32);
			if (!skb)
				break;

			mapping = pci_map_single(priv->pdev,
						 skb_tail_pointer(skb),
						 priv->common.rx_mtu + 32,
						 PCI_DMA_FROMDEVICE);

			if (pci_dma_mapping_error(priv->pdev, mapping)) {
				dev_kfree_skb_any(skb);
				dev_err(&priv->pdev->dev,
					"RX DMA Mapping error\n");
				break;
			}

			desc->host_addr = cpu_to_le32(mapping);
			desc->device_addr = 0;	// FIXME: necessary?
			desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
			desc->flags = 0;
			rx_buf[i] = skb;
		}

		i++;
		idx++;
		i %= ring_limit;
	}

	wmb();
	ring_control->host_idx[ring_index] = cpu_to_le32(idx);
}
Example #19
static void map_dma(unsigned int i, unsigned int j) {
    unsigned int data_len = 0;
    unsigned int k, pci_dir;
    int count;
    struct scatterlist *sg;
    struct mscp *cpp;
    struct scsi_cmnd *SCpnt;

    cpp = &HD(j)->cp[i];
    SCpnt = cpp->SCpnt;
    pci_dir = SCpnt->sc_data_direction;

    if (SCpnt->sense_buffer)
        cpp->sense_addr = H2DEV(pci_map_single(HD(j)->pdev, SCpnt->sense_buffer,
                                               SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE));

    cpp->sense_len = SCSI_SENSE_BUFFERSIZE;

    if (scsi_bufflen(SCpnt)) {
        count = scsi_dma_map(SCpnt);
        BUG_ON(count < 0);

        scsi_for_each_sg(SCpnt, sg, count, k) {
            cpp->sglist[k].address = H2DEV(sg_dma_address(sg));
            cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(sg));
            data_len += sg->length;
        }

        cpp->sg = TRUE;
        cpp->use_sg = scsi_sg_count(SCpnt);
        cpp->data_address =
            H2DEV(pci_map_single(HD(j)->pdev, cpp->sglist,
                                 cpp->use_sg * sizeof(struct sg_list),
                                 pci_dir));
        cpp->data_len = H2DEV(data_len);

    } else {
        /* Assumed completion (the snippet was truncated here): with no
         * scatter list there is no data buffer to describe. */
        cpp->data_address = 0;
        cpp->data_len = 0;
    }
}
Example #20
static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	struct r6040_descriptor *descptr;
	void __iomem *ioaddr = lp->base;
	unsigned long flags;

	/* Critical Section */
	spin_lock_irqsave(&lp->lock, flags);

	/* TX resource check */
	if (!lp->tx_free_desc) {
		spin_unlock_irqrestore(&lp->lock, flags);
		netif_stop_queue(dev);
		netdev_err(dev, ": no tx descriptor\n");
		return NETDEV_TX_BUSY;
	}

	/* Statistic Counter */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	/* Set TX descriptor & Transmit it */
	lp->tx_free_desc--;
	descptr = lp->tx_insert_ptr;
	if (skb->len < MISR)
		descptr->len = MISR;
	else
		descptr->len = skb->len;

	descptr->skb_ptr = skb;
	descptr->buf = cpu_to_le32(pci_map_single(lp->pdev,
		skb->data, skb->len, PCI_DMA_TODEVICE));
	descptr->status = DSC_OWNER_MAC;

	skb_tx_timestamp(skb);

	/* Trigger the MAC to check the TX descriptor */
	iowrite16(TM2TX, ioaddr + MTPR);
	lp->tx_insert_ptr = descptr->vndescp;

	/* If no tx resource, stop */
	if (!lp->tx_free_desc)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&lp->lock, flags);

	return NETDEV_TX_OK;
}
Example #21
void mac_drv_fill_rxd(struct s_smc *smc)
{
	int MaxFrameSize;
	unsigned char *v_addr;
	unsigned long b_addr;
	struct sk_buff *skb;
	volatile struct s_smt_fp_rxd *rxd;

	pr_debug(KERN_INFO "entering mac_drv_fill_rxd\n");

	MaxFrameSize = smc->os.MaxFrameSize;

	while (HWM_GET_RX_FREE(smc) > 0) {
		pr_debug(KERN_INFO ".\n");

		rxd = HWM_GET_CURR_RXD(smc);
		skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
		if (skb) {
			skb_reserve(skb, 3);
			skb_put(skb, MaxFrameSize);
			v_addr = skb->data;
			b_addr = pci_map_single(&smc->os.pdev,
						v_addr,
						MaxFrameSize,
						PCI_DMA_FROMDEVICE);
			rxd->rxd_os.dma_addr = b_addr;
		} else {
			pr_debug("Queueing invalid buffer!\n");
			v_addr = smc->os.LocalRxBuffer;
			b_addr = smc->os.LocalRxBufferDMA;
		}

		rxd->rxd_os.skb = skb;

		hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
			    FIRST_FRAG | LAST_FRAG);
	}
	pr_debug(KERN_INFO "leaving mac_drv_fill_rxd\n");
}				
Example #22
int
islpci_mgmt_rx_fill(struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb =	/* volatile not needed */
	    (isl38xx_control_block *) priv->control_block;
	u32 curr = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ]);

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgmt_rx_fill\n");
#endif

	while (curr - priv->index_mgmt_rx < ISL38XX_CB_MGMT_QSIZE) {
		u32 index = curr % ISL38XX_CB_MGMT_QSIZE;
		struct islpci_membuf *buf = &priv->mgmt_rx[index];
		isl38xx_fragment *frag = &cb->rx_data_mgmt[index];

		if (buf->mem == NULL) {
			buf->mem = kmalloc(MGMT_FRAME_SIZE, GFP_ATOMIC);
			if (!buf->mem) {
				printk(KERN_WARNING
				       "Error allocating management frame.\n");
				return -ENOMEM;
			}
			buf->size = MGMT_FRAME_SIZE;
		}
		if (buf->pci_addr == 0) {
			buf->pci_addr = pci_map_single(priv->pdev, buf->mem,
						       MGMT_FRAME_SIZE,
						       PCI_DMA_FROMDEVICE);
			if (!buf->pci_addr) {
				printk(KERN_WARNING
				       "Failed to make memory DMA'able.\n");
				return -ENOMEM;
			}
		}

		/* be safe: always reset control block information */
		frag->size = cpu_to_le16(MGMT_FRAME_SIZE);
		frag->flags = 0;
		frag->address = cpu_to_le32(buf->pci_addr);
		curr++;

		/* The fragment address in the control block must have
		 * been written before announcing the frame buffer to
		 * device */
		wmb();
		cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ] = cpu_to_le32(curr);
	}
	return 0;
}
Example #23
/*
 * Invalidate or write back the cache,
 * and convert the virtual address to a physical address.
 */
dma_addr_t linux_pci_map_single(void *handle, void *ptr, size_t size, int sd_idx, int direction)
{
	PRTMP_ADAPTER pAd;
	POS_COOKIE pObj;
	
	/* 
		------ Porting Information ------
		> For Tx Alloc:
			mgmt packets => sd_idx = 0
			SwIdx: pAd->MgmtRing.TxCpuIdx
			pTxD : pAd->MgmtRing.Cell[SwIdx].AllocVa;
	 
			data packets => sd_idx = 1
	 		TxIdx : pAd->TxRing[pTxBlk->QueIdx].TxCpuIdx 
	 		QueIdx: pTxBlk->QueIdx 
	 		pTxD  : pAd->TxRing[pTxBlk->QueIdx].Cell[TxIdx].AllocVa;

	 	> For Rx Alloc:
	 		sd_idx = -1
	*/

	pAd = (PRTMP_ADAPTER)handle;
	pObj = (POS_COOKIE)pAd->OS_Cookie;
	
	if (sd_idx == 1)
	{
		PTX_BLK		pTxBlk;
		pTxBlk = (PTX_BLK)ptr;
		return pci_map_single(pObj->pci_dev, pTxBlk->pSrcBufData, pTxBlk->SrcBufLen, direction);
	}
	else
	{
		return pci_map_single(pObj->pci_dev, ptr, size, direction);
	}

}
Example #24
static void pvscsi_map_buffers(struct pvscsi_adapter *adapter,
			       struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd,
			       struct PVSCSIRingReqDesc *e)
{
	unsigned count;
	unsigned bufflen = scsi_bufflen(cmd);
	struct scatterlist *sg;

	e->dataLen = bufflen;
	e->dataAddr = 0;
	if (bufflen == 0)
		return;

	sg = scsi_sglist(cmd);
	count = scsi_sg_count(cmd);
	if (count != 0) {
		int segs = scsi_dma_map(cmd);
		if (segs > 1) {
			pvscsi_create_sg(ctx, sg, segs);

			e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
			ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl,
						    SGL_SIZE, PCI_DMA_TODEVICE);
			e->dataAddr = ctx->sglPA;
		} else
			e->dataAddr = sg_dma_address(sg);
	} else {
		/*
		 * In case there is no S/G list, scsi_sglist points
		 * directly to the buffer.
		 */
		ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen,
					     cmd->sc_data_direction);
		e->dataAddr = ctx->dataPA;
	}
}
Example #25
static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	struct netdev_desc *txdesc;
	unsigned entry;

	/* Note: Ordering is important here, set the field with the
	   "ownership" bit last, and only then increment cur_tx. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;
	np->tx_skbuff[entry] = skb;
	txdesc = &np->tx_ring[entry];

	txdesc->next_desc = 0;
	/* Note: disable the interrupt generation here before releasing. */
	txdesc->status =
		cpu_to_le32((entry<<2) | DescIntrOnDMADone | DescIntrOnTx | DisableAlign);
	txdesc->frag[0].addr = cpu_to_le32(pci_map_single(np->pci_dev, 
		skb->data, skb->len, PCI_DMA_TODEVICE));
	txdesc->frag[0].length = cpu_to_le32(skb->len | LastFrag);
	if (np->last_tx)
		np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
			entry*sizeof(struct netdev_desc));
	np->last_tx = txdesc;
	np->cur_tx++;

	/* On some architectures: explicitly flush cache lines here. */

	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1) {
		/* do nothing */
	} else {
		np->tx_full = 1;
		netif_stop_queue(dev);
	}
	/* Side effect: The read wakes the potentially-idle transmit channel. */
	if (readl(dev->base_addr + TxListPtr) == 0)
		writel(np->tx_ring_dma + entry*sizeof(*np->tx_ring),
			dev->base_addr + TxListPtr);

	dev->trans_start = jiffies;

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
			   dev->name, np->cur_tx, entry);
	}
	return 0;
}
Example #26
File: pci.c Project: Lyude/linux
static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
				  int index, char *frag_data, size_t frag_len,
				  int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	dma_addr_t mapaddr;

	mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
	if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
		dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
		return -EIO;
	}
	mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
	mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
	return 0;
}
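In the driver this map helper is paired with an unmap step once the WQE completes. A hedged sketch of what that counterpart looks like (the address getter is assumed to exist alongside the setter used above; the function name is hypothetical):

/* Sketch: assumed counterpart that releases a fragment mapping. */
static void example_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
				   int index, size_t frag_len, int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);

	pci_unmap_single(pdev, mapaddr, frag_len, direction);
}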
Example #27
static void init_rxtx_rings(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	int i;

	np->rx_head_desc = &np->rx_ring[0];
	np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE];

	/* Initial all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].length = cpu_to_le32(np->rx_buf_sz);
		np->rx_ring[i].status = 0;
		np->rx_skbuff[i] = 0;
	}
	/* Mark the last entry as wrapping the ring. */
	np->rx_ring[i-1].length |= cpu_to_le32(DescEndRing);

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;			/* Mark as being used by this device. */
		np->rx_addr[i] = pci_map_single(np->pdev,skb->tail,
					np->rx_buf_sz,PCI_DMA_FROMDEVICE);

		np->rx_ring[i].buffer1 = cpu_to_le32(np->rx_addr[i]);
		np->rx_ring[i].status = cpu_to_le32(DescOwn);
	}

	np->cur_rx = 0;
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* Initialize the Tx descriptors */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = 0;
		np->tx_ring[i].status = 0;
	}
	np->tx_full = 0;
	np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;

	writel(np->ring_dma_addr, dev->base_addr + RxRingPtr);
	writel(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
		dev->base_addr + TxRingPtr);

}
Example #28
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void tulip_init_ring(/*RTnet*/struct rtnet_device *rtdev)
{
	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
	int i;

	tp->susp_rx = 0;
	tp->ttimer = 0;
	tp->nir = 0;

	for (i = 0; i < RX_RING_SIZE; i++) {
		tp->rx_ring[i].status = 0x00000000;
		tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
		tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
		tp->rx_buffers[i].skb = NULL;
		tp->rx_buffers[i].mapping = 0;
	}
	/* Mark the last entry as wrapping the ring. */
	tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
	tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);

	for (i = 0; i < RX_RING_SIZE; i++) {
		dma_addr_t mapping;

		/* Note the receive buffer must be longword aligned.
		   dev_alloc_skb() provides 16 byte alignment.  But do *not*
		   use skb_reserve() to align the IP header! */
		struct /*RTnet*/rtskb *skb = /*RTnet*/rtnetdev_alloc_rtskb(rtdev, PKT_BUF_SZ);
		tp->rx_buffers[i].skb = skb;
		if (skb == NULL)
			break;
		mapping = pci_map_single(tp->pdev, skb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
		tp->rx_buffers[i].mapping = mapping;
		tp->rx_ring[i].status = cpu_to_le32(DescOwned);	/* Owned by Tulip chip */
		tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
	}
	tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* The Tx buffer descriptor is filled in as needed, but we
	   do need to clear the ownership bit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		tp->tx_buffers[i].skb = NULL;
		tp->tx_buffers[i].mapping = 0;
		tp->tx_ring[i].status = 0x00000000;
		tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
	}
	tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
}
Example #29
ssize_t XPCIe_ReadMem(char *buf, size_t count)
{

    int ret = 0;
    dma_addr_t dma_addr;
                                                                                
    //make sure passed in buffer is large enough
    if ( count < BUF_SIZE )  {
      printk("%s: XPCIe_Read: passed in buffer too small.\n", gDrvrName);
      ret = -1;
      goto exit;
    }

    down(&gSem[SEM_DMA]);

    // pci_map_single returns the bus address corresponding to
    // the virtual address passed to it as the 2nd parameter

    dma_addr = pci_map_single(gDev, gReadBuffer, BUF_SIZE, PCI_DMA_FROMDEVICE);
    if ( 0 == dma_addr )  {
        printk("%s: XPCIe_Read: Map error.\n",gDrvrName);
        ret = -1;
        up(&gSem[SEM_DMA]);   // release the DMA semaphore on the error path
        goto exit;
    }

    // Now pass the physical address to the device hardware. This is now
    // the destination physical address for the DMA and hence the to be
    // put on Memory Transactions

    // Do DMA transfer here....

    printk("%s: XPCIe_Read: ReadBuf Virt Addr = %x Phy Addr = %x.\n", gDrvrName, (unsigned int)gReadBuffer, (unsigned int)dma_addr);
                                                                   
    // Unmap the DMA buffer so it is safe for normal access again.
    pci_unmap_single(gDev, dma_addr, BUF_SIZE, PCI_DMA_FROMDEVICE);
                                                                                
    up(&gSem[SEM_DMA]);

    // Now it is safe to copy the data to user space.
    if ( copy_to_user(buf, gReadBuffer, BUF_SIZE) )  {
        ret = -1;
        printk("%s: XPCIe_Read: Failed copy to user.\n",gDrvrName);
        goto exit;
    }
    exit:
      return ret;
}
Example #30
ssize_t XPCIe_WriteMem(const char *buf, size_t count)
{
    int ret = 0;
    dma_addr_t dma_addr;

    if ( (count % 4) != 0 )  {
       printk("%s: XPCIe_Write: Buffer length not dword aligned.\n",gDrvrName);
       ret = -1;
       goto exit;
    }

    // Now it is safe to copy the data from user space.
    if ( copy_from_user(gWriteBuffer, buf, count) )  {
        ret = -1;
        printk("%s: XPCIe_Write: Failed copy to user.\n",gDrvrName);
        goto exit;
    }

    //set DMA semaphore if in loopback
    down(&gSem[SEM_DMA]);

    // pci_map_single returns the bus address corresponding to
    // the virtual address passed to it as the 2nd parameter

    dma_addr = pci_map_single(gDev, gWriteBuffer, BUF_SIZE, PCI_DMA_TODEVICE);
    if ( 0 == dma_addr )  {
        printk("%s: XPCIe_Write: Map error.\n",gDrvrName);
        ret = -1;
        up(&gSem[SEM_DMA]);   // release the DMA semaphore on the error path
        goto exit;
    }

    // Now pass the physical address to the device hardware. This is now
    // the source physical address for the DMA and hence the to be
    // put on Memory Transactions

    // Do DMA transfer here....

    printk("%s: XPCIe_Write: WriteBuf Virt Addr = %x Phy Addr = %x.\n", gDrvrName, (unsigned int)gReadBuffer, (unsigned int)dma_addr);
                                                                   
    // Unmap the DMA buffer so it is safe for normal access again.
    pci_unmap_single(gDev, dma_addr, BUF_SIZE, PCI_DMA_TODEVICE);
                                                                                
    up(&gSem[SEM_DMA]);

    exit:
      return (ret);
}
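Taken together, the examples above all follow the same streaming-DMA lifecycle: map, check for failure, let the device transfer, unmap. A hedged end-to-end sketch with the device programming elided (the function name is hypothetical):

/* Sketch: one complete streaming-DMA round trip to a device. */
static int example_dma_write(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t handle = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);

	if (pci_dma_mapping_error(pdev, handle))
		return -ENOMEM;

	/* ... program the device with 'handle' and wait for completion ... */

	pci_unmap_single(pdev, handle, len, PCI_DMA_TODEVICE);
	return 0;
}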