Example #1
/**
 * efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation
 *
 * @rx_queue:		Efx RX queue
 * @rx_buf:		RX buffer structure to populate
 *
 * This allocates memory for a new receive buffer, maps it for DMA,
 * and populates a struct efx_rx_buffer with the relevant
 * information.  Return a negative error code or 0 on success.
 */
static int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
				  struct efx_rx_buffer *rx_buf)
{
	struct efx_nic *efx = rx_queue->efx;
	struct net_device *net_dev = efx->net_dev;
	int skb_len = efx->rx_buffer_len;

	rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
	if (unlikely(!rx_buf->skb))
		return -ENOMEM;

	/* Adjust the SKB for padding and checksum */
	skb_reserve(rx_buf->skb, NET_IP_ALIGN);
	rx_buf->len = skb_len - NET_IP_ALIGN;
	rx_buf->data = (char *)rx_buf->skb->data;
	rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;

	rx_buf->dma_addr = pci_map_single(efx->pci_dev,
					  rx_buf->data, rx_buf->len,
					  PCI_DMA_FROMDEVICE);

	if (unlikely(pci_dma_mapping_error(efx->pci_dev, rx_buf->dma_addr))) {
		dev_kfree_skb_any(rx_buf->skb);
		rx_buf->skb = NULL;
		return -EIO;
	}

	return 0;
}
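The matching release path (not shown in the source) simply reverses these steps: unmap with the same length and direction, then free the skb. A minimal sketch, assuming the struct efx_rx_buffer fields used above; the helper name efx_fini_rx_buffer_skb is invented here for illustration:

static void efx_fini_rx_buffer_skb(struct efx_nic *efx,
				   struct efx_rx_buffer *rx_buf)
{
	/* Undo the pci_map_single() performed at init time */
	pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
			 rx_buf->len, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(rx_buf->skb);
	rx_buf->skb = NULL;
}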
Example #2
static int
nvc0_fb_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct nvc0_fb_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	pfb->priv = priv;

	priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!priv->r100c10_page) {
		nvc0_fb_destroy(dev);
		return -ENOMEM;
	}

	priv->r100c10 = pci_map_page(dev->pdev, priv->r100c10_page, 0,
				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, priv->r100c10)) {
		nvc0_fb_destroy(dev);
		return -EFAULT;
	}

	nouveau_irq_register(dev, 25, nvc0_mfb_isr);
	return 0;
}
Example #3
static dma_addr_t xilly_map_single_pci(struct xilly_cleanup *mem,
				       struct xilly_endpoint *ep,
				       void *ptr,
				       size_t size,
				       int direction
	)
{

	dma_addr_t addr = 0;
	struct xilly_dma *this;
	int pci_direction;

	this = kmalloc(sizeof(struct xilly_dma), GFP_KERNEL);
	if (!this)
		return 0;

	pci_direction = xilly_pci_direction(direction);
	addr = pci_map_single(ep->pdev, ptr, size, pci_direction);
	this->direction = pci_direction;

	if (pci_dma_mapping_error(ep->pdev, addr)) {
		kfree(this);
		return 0;
	}

	this->dma_addr = addr;
	this->pdev = ep->pdev;
	this->size = size;

	list_add_tail(&this->node, &mem->to_unmap);

	return addr;
}
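The entries queued on mem->to_unmap are presumably unmapped in bulk at cleanup time. A minimal sketch of such a walk, using only the struct xilly_dma fields populated above (the helper name is hypothetical):

static void xilly_unmap_all_pci(struct xilly_cleanup *mem)
{
	struct xilly_dma *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &mem->to_unmap, node) {
		pci_unmap_single(entry->pdev, entry->dma_addr,
				 entry->size, entry->direction);
		list_del(&entry->node);
		kfree(entry);
	}
}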
Example #4
File: trx.c Project: mkrufky/linux
void rtl8822be_tx_fill_special_desc(struct ieee80211_hw *hw, u8 *pdesc,
				    u8 *pbd_desc, struct sk_buff *skb,
				    u8 hw_queue)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	u8 fw_queue;
	u8 txdesc_len = 48;

	dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
					    PCI_DMA_TODEVICE);

	if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
		RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "DMA mapping error");
		return;
	}

	rtl8822be_pre_fill_tx_bd_desc(hw, pbd_desc, pdesc, hw_queue, skb,
				      mapping);

	/* it should be BEACON_QUEUE or H2C_QUEUE,
	 * so skb=NULL is safe to assert
	 */
	fw_queue = _rtl8822be_map_hwqueue_to_fwqueue(NULL, hw_queue);

	CLEAR_PCI_TX_DESC_CONTENT(pdesc, txdesc_len);

	/* common part for BEACON and H2C */
	SET_TX_DESC_TXPKTSIZE((u8 *)pdesc, (u16)(skb->len));

	SET_TX_DESC_QSEL(pdesc, fw_queue);

	if (hw_queue == H2C_QUEUE) {
		/* fill H2C */
		SET_TX_DESC_OFFSET(pdesc, 0);

	} else {
		/* fill beacon */
		SET_TX_DESC_OFFSET(pdesc, txdesc_len);

		SET_TX_DESC_DATARATE(pdesc, DESC_RATE1M);

		SET_TX_DESC_SW_SEQ(pdesc, 0);

		SET_TX_DESC_RATE_ID(pdesc, 7);
		SET_TX_DESC_MACID(pdesc, 0);

		SET_TX_DESC_LS(pdesc, 1);

		SET_TX_DESC_OFFSET(pdesc, 48);

		SET_TX_DESC_USE_RATE(pdesc, 1);
	}

	RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, "H2C Tx Cmd Content\n",
		      pdesc, txdesc_len);
}
Example #5
/**
 * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
 * and populates struct efx_rx_buffers for each one. Return a negative error
 * code or 0 on success. If a single page can be split between two buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	void *page_addr;
	unsigned int page_offset;
	struct efx_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	/* We can split a page between two buffers */
	BUILD_BUG_ON(EFX_RX_BATCH & 1);

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
				   efx->rx_buffer_order);
		if (unlikely(page == NULL))
			return -ENOMEM;
		dma_addr = pci_map_page(efx->pci_dev, page, 0,
					efx_rx_buf_size(efx),
					PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
			__free_pages(page, efx->rx_buffer_order);
			return -EIO;
		}
		page_addr = page_address(page);
		state = page_addr;
		state->refcnt = 0;
		state->dma_addr = dma_addr;

		page_addr += sizeof(struct efx_rx_page_state);
		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

	split:
		index = rx_queue->added_count & rx_queue->ptr_mask;
		rx_buf = efx_rx_buffer(rx_queue, index);
		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
		rx_buf->u.page = page;
		rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
		rx_buf->is_page = true;
		++rx_queue->added_count;
		++rx_queue->alloc_page_count;
		++state->refcnt;

		if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
			/* Use the second half of the page */
			get_page(page);
			dma_addr += (PAGE_SIZE >> 1);
			page_addr += (PAGE_SIZE >> 1);
			page_offset += (PAGE_SIZE >> 1);
			++count;
			goto split;
		}
	}

	return 0;
}
Example #6
void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
	bool firstseg, bool lastseg, struct sk_buff *skb)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_tcb_desc *tcb_desc = (struct rtl_tcb_desc *)(skb->cb);

	dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
			PCI_DMA_TODEVICE);

	if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
		RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
			 "DMA mapping error\n");
		return;
	}
	/* Clear all status	*/
	CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_CMDDESC_SIZE_RTL8192S);

	/* This bit indicates this packet is used for FW download. */
	if (tcb_desc->cmd_or_init == DESC_PACKET_TYPE_INIT) {
		/* For firmware download we only need to set LINIP */
		SET_TX_DESC_LINIP(pdesc, tcb_desc->last_inipkt);

		/* 92SE must set as 1 for firmware download HW DMA error */
		SET_TX_DESC_FIRST_SEG(pdesc, 1);
		SET_TX_DESC_LAST_SEG(pdesc, 1);

		/* 92SE does not need to set TX packet size for firmware download */
		SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
		SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
		SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);

		wmb();
		SET_TX_DESC_OWN(pdesc, 1);
	} else { /* H2C Command Desc format (Host TXCMD) */
		/* 92SE must set as 1 for firmware download HW DMA error */
		SET_TX_DESC_FIRST_SEG(pdesc, 1);
		SET_TX_DESC_LAST_SEG(pdesc, 1);

		SET_TX_DESC_OFFSET(pdesc, 0x20);

		/* Buffer size + command header */
		SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
		/* Fixed queue of H2C command */
		SET_TX_DESC_QUEUE_SEL(pdesc, 0x13);

		SET_BITS_TO_LE_4BYTE(skb->data, 24, 7, rtlhal->h2c_txcmd_seq);

		SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
		SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);

		wmb();
		SET_TX_DESC_OWN(pdesc, 1);
	}
}
Example #7
static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, rdev->dev);
	}
#endif
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
			while (i--) {
				pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				gtt->ttm.dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}
Example #8
int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	void *base_struct_unaligned;
	struct sis_base_struct *base_struct;
	struct sis_sync_cmd_params params;
	unsigned long error_buffer_paddr;
	dma_addr_t bus_address;

	base_struct_unaligned = kzalloc(sizeof(*base_struct)
		+ SIS_BASE_STRUCT_ALIGNMENT - 1, GFP_KERNEL);
	if (!base_struct_unaligned)
		return -ENOMEM;

	base_struct = PTR_ALIGN(base_struct_unaligned,
		SIS_BASE_STRUCT_ALIGNMENT);
	error_buffer_paddr = (unsigned long)ctrl_info->error_buffer_dma_handle;

	put_unaligned_le32(SIS_BASE_STRUCT_REVISION, &base_struct->revision);
	put_unaligned_le32(lower_32_bits(error_buffer_paddr),
		&base_struct->error_buffer_paddr_low);
	put_unaligned_le32(upper_32_bits(error_buffer_paddr),
		&base_struct->error_buffer_paddr_high);
	put_unaligned_le32(PQI_ERROR_BUFFER_ELEMENT_LENGTH,
		&base_struct->error_buffer_element_length);
	put_unaligned_le32(ctrl_info->max_io_slots,
		&base_struct->error_buffer_num_elements);

	bus_address = pci_map_single(ctrl_info->pci_dev, base_struct,
		sizeof(*base_struct), PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(ctrl_info->pci_dev, bus_address)) {
		rc = -ENOMEM;
		goto out;
	}

	memset(&params, 0, sizeof(params));
	params.mailbox[1] = lower_32_bits((u64)bus_address);
	params.mailbox[2] = upper_32_bits((u64)bus_address);
	params.mailbox[3] = sizeof(*base_struct);

	rc = sis_send_sync_cmd(ctrl_info, SIS_CMD_INIT_BASE_STRUCT_ADDRESS,
		&params);

	pci_unmap_single(ctrl_info->pci_dev, bus_address, sizeof(*base_struct),
		PCI_DMA_TODEVICE);

out:
	kfree(base_struct_unaligned);

	return rc;
}
Example #9
static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
	int ring_index, struct p54p_desc *ring, u32 ring_limit,
	struct sk_buff **rx_buf, u32 index)
{
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	u32 limit, idx, i;

	idx = le32_to_cpu(ring_control->host_idx[ring_index]);
	limit = idx;
	limit -= index;
	limit = ring_limit - limit;

	i = idx % ring_limit;
	while (limit-- > 1) {
		struct p54p_desc *desc = &ring[i];

		if (!desc->host_addr) {
			struct sk_buff *skb;
			dma_addr_t mapping;
			skb = dev_alloc_skb(priv->common.rx_mtu + 32);
			if (!skb)
				break;

			mapping = pci_map_single(priv->pdev,
						 skb_tail_pointer(skb),
						 priv->common.rx_mtu + 32,
						 PCI_DMA_FROMDEVICE);

			if (pci_dma_mapping_error(priv->pdev, mapping)) {
				dev_kfree_skb_any(skb);
				dev_err(&priv->pdev->dev,
					"RX DMA Mapping error\n");
				break;
			}

			desc->host_addr = cpu_to_le32(mapping);
			desc->device_addr = 0;	// FIXME: necessary?
			desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
			desc->flags = 0;
			rx_buf[i] = skb;
		}

		i++;
		idx++;
		i %= ring_limit;
	}

	wmb();
	ring_control->host_idx[ring_index] = cpu_to_le32(idx);
}
Example #10
static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
			    skb_frag_t *frag)
{
	st->unmap_addr = pci_map_page(efx->pci_dev, frag->page,
				      frag->page_offset, frag->size,
				      PCI_DMA_TODEVICE);
	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
		st->unmap_single = false;
		st->unmap_len = frag->size;
		st->in_len = frag->size;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}
Example #11
/**
 * amdgpu_gart_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART
 * Returns 0 on success, -ENOMEM on failure.
 */
static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
{
	struct page *dummy_page = adev->mman.bdev.glob->dummy_read_page;

	if (adev->dummy_page_addr)
		return 0;
	adev->dummy_page_addr = pci_map_page(adev->pdev, dummy_page, 0,
					     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(adev->pdev, adev->dummy_page_addr)) {
		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		adev->dummy_page_addr = 0;
		return -ENOMEM;
	}
	return 0;
}
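The teardown counterpart would unmap the page and clear the cached address; a sketch under those assumptions (not quoted from the driver):

static void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
{
	if (!adev->dummy_page_addr)
		return;
	pci_unmap_page(adev->pdev, adev->dummy_page_addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	adev->dummy_page_addr = 0;
}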
Example #12
File: pci.c Project: Lyude/linux
static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
				  int index, char *frag_data, size_t frag_len,
				  int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	dma_addr_t mapaddr;

	mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
	if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
		dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
		return -EIO;
	}
	mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
	mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
	return 0;
}
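The unmap side would read the address and byte count back out of the WQE before calling pci_unmap_single(). The sketch below assumes mlxsw_pci_wqe_address_get() and mlxsw_pci_wqe_byte_count_get() accessors exist to mirror the setters used above; treat it as an outline, not the driver's verbatim code:

static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
				     int index, int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
	dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);

	if (!frag_len)
		return;
	pci_unmap_single(pdev, mapaddr, frag_len, direction);
}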
Example #13
/**
 * tso_get_fragment - record fragment details and map for DMA
 * @st:			TSO state
 * @efx:		Efx NIC
 * @len:		Length of fragment
 * @page:		Page containing the fragment data
 * @page_off:		Offset of the fragment within the page
 *
 * Record fragment details and map for DMA.  Return 0 on success, or
 * -%ENOMEM if DMA mapping fails.
 */
static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
				   int len, struct page *page, int page_off)
{

	st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off,
					  len, PCI_DMA_TODEVICE);
	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) {
		st->ifc.unmap_len = len;
		st->ifc.len = len;
		st->ifc.dma_addr = st->ifc.unmap_addr;
		st->ifc.page = page;
		st->ifc.page_off = page_off;
		return 0;
	}
	return -ENOMEM;
}
Example #14
static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
				 const struct sk_buff *skb)
{
	int hl = st->header_len;
	int len = skb_headlen(skb) - hl;

	st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
					len, PCI_DMA_TODEVICE);
	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
		st->unmap_single = true;
		st->unmap_len = len;
		st->in_len = len;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}
Example #15
static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev,
				       struct pci_dev *hwdev,
				       dma_addr_t *dma_handle)
{
	struct sk_buff *skb;
	skb = netdev_alloc_skb(dev, RX_BUF_SIZE);
	if (!skb)
		return NULL;
	*dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE,
				     PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(hwdev, *dma_handle)) {
		dev_kfree_skb_any(skb);
		return NULL;
	}
	skb_reserve(skb, 2);	/* make IP header 4-byte aligned */
	return skb;
}
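The corresponding free path unmaps the buffer with the same size and direction before dropping the skb. A minimal sketch (the helper name is assumed):

static void free_rxbuf_skb(struct pci_dev *hwdev, struct sk_buff *skb,
			   dma_addr_t dma_handle)
{
	pci_unmap_single(hwdev, dma_handle, RX_BUF_SIZE,
			 PCI_DMA_FROMDEVICE);
	dev_kfree_skb(skb);
}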
Example #16
static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned i;
	int r;

	if (ttm->state != tt_unpopulated)
		return 0;

	rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, rdev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
			while (i--) {
				pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				gtt->ttm.dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}
Example #17
static int pearl_skb2rbd_attach(struct qtnf_pcie_pearl_state *ps, u16 index)
{
	struct qtnf_pcie_bus_priv *priv = &ps->base;
	struct qtnf_pearl_rx_bd *rxbd;
	struct sk_buff *skb;
	dma_addr_t paddr;

	skb = __netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE, GFP_ATOMIC);
	if (!skb) {
		priv->rx_skb[index] = NULL;
		return -ENOMEM;
	}

	priv->rx_skb[index] = skb;
	rxbd = &ps->rx_bd_vbase[index];

	paddr = pci_map_single(priv->pdev, skb->data,
			       SKB_BUF_SIZE, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(priv->pdev, paddr)) {
		pr_err("skb DMA mapping error: %pad\n", &paddr);
		return -ENOMEM;
	}

	/* keep rx skb paddrs in rx buffer descriptors for cleanup purposes */
	rxbd->addr = cpu_to_le32(QTN_HOST_LO32(paddr));
	rxbd->addr_h = cpu_to_le32(QTN_HOST_HI32(paddr));
	rxbd->info = 0x0;

	priv->rx_bd_w_index = index;

	/* sync up all descriptor updates */
	wmb();

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	writel(QTN_HOST_HI32(paddr),
	       PCIE_HDP_HHBM_BUF_PTR_H(ps->pcie_reg_base));
#endif
	writel(QTN_HOST_LO32(paddr),
	       PCIE_HDP_HHBM_BUF_PTR(ps->pcie_reg_base));

	writel(index, PCIE_HDP_TX_HOST_Q_WR_PTR(ps->pcie_reg_base));
	return 0;
}
Example #18
void mi_mic_dma_chan_set_dstat_wb(struct mic_dma_ctx_t *dma_ctx,
				struct md_mic_dma_chan *chan)
{
#ifndef _MIC_SCIF_
	struct pci_dev *pdev;
#endif
	if (!chan->dstat_wb_phys) {
		chan->dstat_wb_loc = kzalloc(sizeof(uint32_t), GFP_KERNEL);

#ifdef _MIC_SCIF_
		chan->dstat_wb_phys = virt_to_phys(chan->dstat_wb_loc);
#else
		micscif_pci_dev(dma_ctx->device_num, &pdev);
		chan->dstat_wb_phys = mic_map_single(dma_ctx->device_num - 1, pdev, chan->dstat_wb_loc,
			sizeof(uint32_t));
		BUG_ON(pci_dma_mapping_error(pdev, chan->dstat_wb_phys));
#endif
	}
	md_mic_dma_chan_set_dstat_wb(&dma_ctx->dma_dev, chan);
}
Example #19
/**
 * mic_map_single - Maps a virtual address to a MIC physical address.
 *
 * @mdev: pointer to mic_device instance.
 * @va: Kernel direct mapped virtual address.
 * @size: Size of the region to be mapped.
 *
 * This API calls pci_map_single(..) for the direct mapped virtual address
 * and then converts the DMA address provided to a DMA address understood
 * by MIC. Caller should check for errors by calling mic_map_error(..).
 *
 * returns DMA address as required by MIC.
 */
dma_addr_t mic_map_single(struct mic_device *mdev, void *va, size_t size)
{
	dma_addr_t mic_addr = 0;
	struct pci_dev *pdev = container_of(mdev->sdev->parent,
		struct pci_dev, dev);
	dma_addr_t dma_addr =
		pci_map_single(pdev, va, size, PCI_DMA_BIDIRECTIONAL);

	if (!pci_dma_mapping_error(pdev, dma_addr)) {
		mic_addr = mic_map(mdev, dma_addr, size);
		if (!mic_addr) {
			dev_err(mdev->sdev->parent,
				"mic_map failed dma_addr 0x%llx size 0x%lx\n",
				dma_addr, size);
			pci_unmap_single(pdev, dma_addr,
					 size, PCI_DMA_BIDIRECTIONAL);
		}
	}
	return mic_addr;
}
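The reverse operation would translate the MIC address back to the DMA address, release the MIC aperture window, and then unmap. The sketch below assumes helpers named mic_to_dma_addr() and mic_unmap() that invert mic_map(); it is an outline rather than the driver's actual implementation:

void mic_unmap_single(struct mic_device *mdev, dma_addr_t mic_addr, size_t size)
{
	struct pci_dev *pdev = container_of(mdev->sdev->parent,
		struct pci_dev, dev);
	dma_addr_t dma_addr = mic_to_dma_addr(mdev, mic_addr);

	mic_unmap(mdev, mic_addr, size);
	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
}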
Example #20
static int xilly_map_single_pci(struct xilly_endpoint *ep,
				void *ptr,
				size_t size,
				int direction,
				dma_addr_t *ret_dma_handle
	)
{
	int pci_direction;
	dma_addr_t addr;
	struct xilly_mapping *this;
	int rc = 0;

	this = kzalloc(sizeof(*this), GFP_KERNEL);
	if (!this)
		return -ENOMEM;

	pci_direction = xilly_pci_direction(direction);

	addr = pci_map_single(ep->pdev, ptr, size, pci_direction);

	if (pci_dma_mapping_error(ep->pdev, addr)) {
		kfree(this);
		return -ENODEV;
	}

	this->device = ep->pdev;
	this->dma_addr = addr;
	this->size = size;
	this->direction = pci_direction;

	*ret_dma_handle = addr;

	rc = devm_add_action(ep->dev, xilly_pci_unmap, this);

	if (rc) {
		pci_unmap_single(ep->pdev, addr, size, pci_direction);
		kfree(this);
	}

	return rc;
}
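The xilly_pci_unmap() action registered through devm_add_action() would undo the mapping when the device is torn down. Given the struct xilly_mapping fields filled in above, it plausibly looks like the following sketch:

static void xilly_pci_unmap(void *ptr)
{
	struct xilly_mapping *data = ptr;

	pci_unmap_single(data->device, data->dma_addr,
			 data->size, data->direction);
	kfree(ptr);
}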
Example #21
static struct efx_tso_header *
efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
{
	struct efx_tso_header *tsoh;

	tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
	if (unlikely(!tsoh))
		return NULL;

	tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
					TSOH_BUFFER(tsoh), header_len,
					PCI_DMA_TODEVICE);
	if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
					   tsoh->dma_addr))) {
		kfree(tsoh);
		return NULL;
	}

	tsoh->unmap_len = header_len;
	return tsoh;
}
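Once the header is no longer needed, the mapping is undone and the buffer freed. A sketch of the counterpart, assuming the same struct efx_tso_header fields (the name efx_tsoh_heap_free mirrors the allocator but is not quoted from the source):

static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh)
{
	pci_unmap_single(tx_queue->efx->pci_dev, tsoh->dma_addr,
			 tsoh->unmap_len, PCI_DMA_TODEVICE);
	kfree(tsoh);
}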
Example #22
File: rx.c Project: 71eh/open80211s
/**
 * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
 * struct efx_rx_buffer for each one. Return a negative error code or 0
 * on success. May fail after inserting fewer than EFX_RX_BATCH
 * buffers.
 */
static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct net_device *net_dev = efx->net_dev;
	struct efx_rx_buffer *rx_buf;
	struct sk_buff *skb;
	int skb_len = efx->rx_buffer_len;
	unsigned index, count;

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		index = rx_queue->added_count & rx_queue->ptr_mask;
		rx_buf = efx_rx_buffer(rx_queue, index);

		rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
		if (unlikely(!skb))
			return -ENOMEM;

		/* Adjust the SKB for padding and checksum */
		skb_reserve(skb, NET_IP_ALIGN);
		rx_buf->len = skb_len - NET_IP_ALIGN;
		rx_buf->is_page = false;
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		rx_buf->dma_addr = pci_map_single(efx->pci_dev,
						  skb->data, rx_buf->len,
						  PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(efx->pci_dev,
						   rx_buf->dma_addr))) {
			dev_kfree_skb_any(skb);
			rx_buf->u.skb = NULL;
			return -EIO;
		}

		++rx_queue->added_count;
		++rx_queue->alloc_skb_count;
	}

	return 0;
}
Example #23
/* TODO:
 * See if we can use __get_free_pages or something similar
 * get_free_pages expects a power of 2 number of pages
 */
static void
alloc_dma_desc_ring_mem(struct dma_channel *ch, struct mic_dma_ctx_t *dma_ctx)
{
#ifndef _MIC_SCIF_
	struct pci_dev *pdev;
#endif
	/* Is there a kernel allocator which provides an
	 * option to specify the alignment?
	 */
	ch->desc_ring = kzalloc(
			(DMA_DESC_RING_SIZE * sizeof(*ch->desc_ring)) + PAGE_SIZE, GFP_KERNEL);
	ch->desc_ring_bak = ch->desc_ring;
	ch->desc_ring = (union md_mic_dma_desc *)ALIGN(
			(uint64_t)ch->desc_ring, PAGE_SIZE);
#ifdef _MIC_SCIF_
	ch->desc_ring_phys = virt_to_phys(ch->desc_ring);
#else
	micscif_pci_dev(dma_ctx->device_num, &pdev);
	ch->desc_ring_phys = mic_map_single(dma_ctx->device_num - 1, pdev, (void *)ch->desc_ring,
			(DMA_DESC_RING_SIZE * sizeof(*ch->desc_ring)) + PAGE_SIZE);
	BUG_ON(pci_dma_mapping_error(pdev, ch->desc_ring_phys));
#endif
}
Example #24
static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
	unsigned long flags;
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	struct p54p_desc *desc;
	dma_addr_t mapping;
	u32 device_idx, idx, i;

	spin_lock_irqsave(&priv->lock, flags);
	device_idx = le32_to_cpu(ring_control->device_idx[1]);
	idx = le32_to_cpu(ring_control->host_idx[1]);
	i = idx % ARRAY_SIZE(ring_control->tx_data);

	mapping = pci_map_single(priv->pdev, skb->data, skb->len,
				 PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(priv->pdev, mapping)) {
		spin_unlock_irqrestore(&priv->lock, flags);
		p54_free_skb(dev, skb);
		dev_err(&priv->pdev->dev, "TX DMA mapping error\n");
		return;
	}
	priv->tx_buf_data[i] = skb;

	desc = &ring_control->tx_data[i];
	desc->host_addr = cpu_to_le32(mapping);
	desc->device_addr = ((struct p54_hdr *)skb->data)->req_id;
	desc->len = cpu_to_le16(skb->len);
	desc->flags = 0;

	wmb();
	ring_control->host_idx[1] = cpu_to_le32(idx + 1);
	spin_unlock_irqrestore(&priv->lock, flags);

	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
	P54P_READ(dev_int);
}
Example #25
static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
{
	struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
	struct qtnf_pcie_bus_priv *priv = &ps->base;
	dma_addr_t txbd_paddr, skb_paddr;
	struct qtnf_pearl_tx_bd *txbd;
	unsigned long flags;
	int len, i;
	u32 info;
	int ret = 0;

	spin_lock_irqsave(&priv->tx_lock, flags);

	if (!qtnf_tx_queue_ready(ps)) {
		if (skb->dev) {
			netif_tx_stop_all_queues(skb->dev);
			priv->tx_stopped = 1;
		}

		spin_unlock_irqrestore(&priv->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	i = priv->tx_bd_w_index;
	priv->tx_skb[i] = skb;
	len = skb->len;

	skb_paddr = pci_map_single(priv->pdev, skb->data,
				   skb->len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(priv->pdev, skb_paddr)) {
		pr_err("skb DMA mapping error: %pad\n", &skb_paddr);
		ret = -ENOMEM;
		goto tx_done;
	}

	txbd = &ps->tx_bd_vbase[i];
	txbd->addr = cpu_to_le32(QTN_HOST_LO32(skb_paddr));
	txbd->addr_h = cpu_to_le32(QTN_HOST_HI32(skb_paddr));

	info = (len & QTN_PCIE_TX_DESC_LEN_MASK) << QTN_PCIE_TX_DESC_LEN_SHIFT;
	txbd->info = cpu_to_le32(info);

	/* sync up all descriptor updates before passing them to EP */
	dma_wmb();

	/* write new TX descriptor to PCIE_RX_FIFO on EP */
	txbd_paddr = ps->tx_bd_pbase + i * sizeof(struct qtnf_pearl_tx_bd);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	writel(QTN_HOST_HI32(txbd_paddr),
	       PCIE_HDP_HOST_WR_DESC0_H(ps->pcie_reg_base));
#endif
	writel(QTN_HOST_LO32(txbd_paddr),
	       PCIE_HDP_HOST_WR_DESC0(ps->pcie_reg_base));

	if (++i >= priv->tx_bd_num)
		i = 0;

	priv->tx_bd_w_index = i;

tx_done:
	if (ret && skb) {
		pr_err_ratelimited("drop skb\n");
		if (skb->dev)
			skb->dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	priv->tx_done_count++;
	spin_unlock_irqrestore(&priv->tx_lock, flags);

	qtnf_pearl_data_tx_reclaim(ps);

	return NETDEV_TX_OK;
}
Example #26
static inline void mwl_tx_skb(struct mwl_priv *priv, int desc_num,
			      struct sk_buff *tx_skb)
{
	struct ieee80211_tx_info *tx_info;
	struct mwl_tx_ctrl *tx_ctrl;
	struct mwl_tx_hndl *tx_hndl;
	struct mwl_tx_desc *tx_desc;
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct mwl_vif *mwl_vif;
	struct ieee80211_key_conf *k_conf;
	bool ccmp = false;
	struct mwl_dma_data *dma_data;
	struct ieee80211_hdr *wh;
	dma_addr_t dma;

	if (WARN_ON(!tx_skb))
		return;

	tx_info = IEEE80211_SKB_CB(tx_skb);
	tx_ctrl = (struct mwl_tx_ctrl *)&tx_info->status;
	sta = (struct ieee80211_sta *)tx_ctrl->sta;
	vif = (struct ieee80211_vif *)tx_ctrl->vif;
	mwl_vif = mwl_dev_get_vif(vif);
	k_conf = (struct ieee80211_key_conf *)tx_ctrl->k_conf;

	mwl_tx_encapsulate_frame(priv, tx_skb, k_conf, &ccmp);

	dma_data = (struct mwl_dma_data *)tx_skb->data;
	wh = &dma_data->wh;

	if (ieee80211_is_data(wh->frame_control) ||
	    (ieee80211_is_mgmt(wh->frame_control) &&
	    ieee80211_has_protected(wh->frame_control) &&
	    !is_multicast_ether_addr(wh->addr1))) {
		if (is_multicast_ether_addr(wh->addr1)) {
			if (ccmp) {
				mwl_tx_insert_ccmp_hdr(dma_data->data,
						       mwl_vif->keyidx,
						       mwl_vif->iv16,
						       mwl_vif->iv32);
				INCREASE_IV(mwl_vif->iv16, mwl_vif->iv32);
			}
		} else {
			if (ccmp) {
				if (vif->type == NL80211_IFTYPE_STATION) {
					mwl_tx_insert_ccmp_hdr(dma_data->data,
							       mwl_vif->keyidx,
							       mwl_vif->iv16,
							       mwl_vif->iv32);
					INCREASE_IV(mwl_vif->iv16,
						    mwl_vif->iv32);
				} else {
					struct mwl_sta *sta_info;

					sta_info = mwl_dev_get_sta(sta);

					mwl_tx_insert_ccmp_hdr(dma_data->data,
							       0,
							       sta_info->iv16,
							       sta_info->iv32);
					INCREASE_IV(sta_info->iv16,
						    sta_info->iv32);
				}
			}
		}
	}

	tx_hndl = priv->desc_data[desc_num].pnext_tx_hndl;
	tx_hndl->psk_buff = tx_skb;
	tx_desc = tx_hndl->pdesc;
	tx_desc->tx_priority = tx_ctrl->tx_priority;
	tx_desc->qos_ctrl = cpu_to_le16(tx_ctrl->qos_ctrl);
	tx_desc->pkt_len = cpu_to_le16(tx_skb->len);
	tx_desc->packet_info = 0;
	tx_desc->data_rate = 0;
	tx_desc->type = tx_ctrl->type;
	tx_desc->xmit_control = tx_ctrl->xmit_control;
	tx_desc->sap_pkt_info = 0;
	dma = pci_map_single(priv->pdev, tx_skb->data,
			     tx_skb->len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(priv->pdev, dma)) {
		dev_kfree_skb_any(tx_skb);
		wiphy_err(priv->hw->wiphy,
			  "failed to map pci memory!\n");
		return;
	}
	tx_desc->pkt_ptr = cpu_to_le32(dma);
	tx_desc->status = cpu_to_le32(EAGLE_TXD_STATUS_FW_OWNED);
	/* make sure all the memory transactions done by cpu were completed */
	wmb();	/*Data Memory Barrier*/
	writel(MACREG_H2ARIC_BIT_PPA_READY,
	       priv->iobase1 + MACREG_REG_H2A_INTERRUPT_EVENTS);
	priv->desc_data[desc_num].pnext_tx_hndl = tx_hndl->pnext;
	priv->fw_desc_cnt[desc_num]++;
}
Example #27
static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned i;
	int r;
#ifdef DUMBBELL_WIP
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
#endif /* DUMBBELL_WIP */

	if (ttm->state != tt_unpopulated)
		return 0;

#ifdef DUMBBELL_WIP
	/*
	 * Maybe unneeded on FreeBSD.
	 *   -- dumbbell@
	 */
	if (slave && ttm->sg) {
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}
#endif /* DUMBBELL_WIP */

	rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
#ifdef DUMBBELL_WIP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_populate(ttm);
	}
#endif /* DUMBBELL_WIP */
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, rdev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		gtt->ttm.dma_address[i] = VM_PAGE_TO_PHYS(ttm->pages[i]);
#ifdef DUMBBELL_WIP
		gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
			while (i--) {
				pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				gtt->ttm.dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
#endif /* DUMBBELL_WIP */
	}
	return 0;
}
Example #28
/**
 * efx_init_rx_buffer_page - create new RX buffer using page-based allocation
 *
 * @rx_queue:		Efx RX queue
 * @rx_buf:		RX buffer structure to populate
 *
 * This allocates memory for a new receive buffer, maps it for DMA,
 * and populates a struct efx_rx_buffer with the relevant
 * information.  Return a negative error code or 0 on success.
 */
static int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
				   struct efx_rx_buffer *rx_buf)
{
	struct efx_nic *efx = rx_queue->efx;
	int bytes, space, offset;

	bytes = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;

	/* If there is space left in the previously allocated page,
	 * then use it. Otherwise allocate a new one */
	rx_buf->page = rx_queue->buf_page;
	if (rx_buf->page == NULL) {
		dma_addr_t dma_addr;

		rx_buf->page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
					   efx->rx_buffer_order);
		if (unlikely(rx_buf->page == NULL))
			return -ENOMEM;

		dma_addr = pci_map_page(efx->pci_dev, rx_buf->page,
					0, efx_rx_buf_size(efx),
					PCI_DMA_FROMDEVICE);

		if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
			__free_pages(rx_buf->page, efx->rx_buffer_order);
			rx_buf->page = NULL;
			return -EIO;
		}

		rx_queue->buf_page = rx_buf->page;
		rx_queue->buf_dma_addr = dma_addr;
		rx_queue->buf_data = (page_address(rx_buf->page) +
				      EFX_PAGE_IP_ALIGN);
	}

	rx_buf->len = bytes;
	rx_buf->data = rx_queue->buf_data;
	offset = efx_rx_buf_offset(rx_buf);
	rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;

	/* Try to pack multiple buffers per page */
	if (efx->rx_buffer_order == 0) {
		/* The next buffer starts on the next 512 byte boundary */
		rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
		offset += ((bytes + 0x1ff) & ~0x1ff);

		space = efx_rx_buf_size(efx) - offset;
		if (space >= bytes) {
			/* Refs dropped on kernel releasing each skb */
			get_page(rx_queue->buf_page);
			goto out;
		}
	}

	/* This is the final RX buffer for this page, so mark it for
	 * unmapping */
	rx_queue->buf_page = NULL;
	rx_buf->unmap_addr = rx_queue->buf_dma_addr;

 out:
	return 0;
}
Example #29
static int mic_psmi_alloc_buffer(mic_ctx_t *mic_ctx)
{
	int i, j, ret;
	void *va;
	dma_addr_t dma_hndl;
	struct mic_psmi_ctx *psmi_ctx = &mic_ctx->bi_psmi;

	/* allocate psmi page tables */
	psmi_ctx->nr_dma_pages =
		ALIGN(psmi_ctx->dma_mem_size, 
				MIC_PSMI_PAGE_SIZE) / MIC_PSMI_PAGE_SIZE;
	if ((psmi_ctx->va_tbl =
		kmalloc(psmi_ctx->nr_dma_pages *
				sizeof(struct mic_psmi_pte), GFP_KERNEL)) == NULL) {
		printk("mic: psmi va table alloc failed\n");
		return -ENOMEM;
	}
	psmi_ctx->dma_tbl_size =
		(psmi_ctx->nr_dma_pages + 2) * sizeof(struct mic_psmi_pte);
	if ((psmi_ctx->dma_tbl =
			kmalloc(psmi_ctx->dma_tbl_size, GFP_KERNEL)) == NULL) {
		printk("mic: psmi dma table alloc failed\n");
		ret = -ENOMEM;
		goto free_va_tbl;
	}
	psmi_ctx->dma_tbl_hndl =
		pci_map_single(mic_ctx->bi_pdev, 
			psmi_ctx->dma_tbl, psmi_ctx->dma_tbl_size, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(mic_ctx->bi_pdev, 
						psmi_ctx->dma_tbl_hndl)) {
		printk("mic: psmi dma table mapping failed\n");
		ret = -ENOMEM;
		goto free_dma_tbl;
	}

	/* allocate psmi pages */
	for (i = 0; i < psmi_ctx->nr_dma_pages; i++) {
		if ((va = (void *)__get_free_pages(
				GFP_KERNEL | __GFP_HIGHMEM,
					MIC_PSMI_PAGE_ORDER)) == NULL) {
			printk("mic: psmi page alloc failed: %d\n", i);
			ret = -ENOMEM;
			goto free_ptes;
		}
		memset(va, 0, MIC_PSMI_PAGE_SIZE);
		dma_hndl = pci_map_single(mic_ctx->bi_pdev, va, 
						MIC_PSMI_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(mic_ctx->bi_pdev, dma_hndl)) {
			printk("mic: psmi page mapping failed: %d\n", i);
			free_pages((unsigned long)va, MIC_PSMI_PAGE_ORDER);
			ret = -ENOMEM;
			goto free_ptes;
		}
		psmi_ctx->dma_tbl[i + 1].pa = dma_hndl;
		psmi_ctx->va_tbl[i].pa = (uint64_t)va;
	}
	psmi_ctx->dma_tbl[0].pa = MIC_PSMI_SIGNATURE;
	psmi_ctx->dma_tbl[psmi_ctx->nr_dma_pages + 1].pa = MIC_PSMI_SIGNATURE;
	printk("mic: psmi #%d, %ld bytes, "
			"dma_tbl va=0x%lx hndl=0x%lx\n", mic_ctx->bi_id + 1, 
			(unsigned long)psmi_ctx->dma_mem_size, 
			(unsigned long)psmi_ctx->dma_tbl, 
			(unsigned long)psmi_ctx->dma_tbl_hndl);
	return 0;
free_ptes:
	for (j = 1; j < i; j++)
		mic_psmi_free_pte(mic_ctx, j);
	pci_unmap_single(mic_ctx->bi_pdev, 
		psmi_ctx->dma_tbl_hndl, psmi_ctx->dma_tbl_size, PCI_DMA_BIDIRECTIONAL);
free_dma_tbl:
	kfree(psmi_ctx->dma_tbl);
	psmi_ctx->dma_tbl = NULL;
free_va_tbl:
	kfree(psmi_ctx->va_tbl);
	psmi_ctx->va_tbl = NULL;
	return ret;
}
Example #30
/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped,
 * and the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct pci_dev *pci_dev = efx->pci_dev;
	struct efx_tx_buffer *buffer;
	skb_frag_t *fragment;
	struct page *page;
	int page_offset;
	unsigned int len, unmap_len = 0, fill_level, insert_ptr;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	bool unmap_single;
	int q_space, i = 0;
	netdev_tx_t rc = NETDEV_TX_OK;

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	q_space = efx->txq_entries - 1 - fill_level;

	/* Map for DMA.  Use pci_map_single rather than pci_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	unmap_single = true;
	dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	/* Process all fragments */
	while (1) {
		if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
			goto pci_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			if (unlikely(q_space-- <= 0)) {
				/* It might be that completions have
				 * happened since the xmit path last
				 * checked.  Update the xmit path's
				 * copy of read_count.
				 */
				netif_tx_stop_queue(tx_queue->core_txq);
				/* This memory barrier protects the
				 * change of queue state from the access
				 * of read_count. */
				smp_mb();
				tx_queue->old_read_count =
					ACCESS_ONCE(tx_queue->read_count);
				fill_level = (tx_queue->insert_count
					      - tx_queue->old_read_count);
				q_space = efx->txq_entries - 1 - fill_level;
				if (unlikely(q_space-- <= 0)) {
					rc = NETDEV_TX_BUSY;
					goto unwind;
				}
				smp_mb();
				netif_tx_start_queue(tx_queue->core_txq);
			}

			insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
			buffer = &tx_queue->buffer[insert_ptr];
			efx_tsoh_free(tx_queue, buffer);
			EFX_BUG_ON_PARANOID(buffer->tsoh);
			EFX_BUG_ON_PARANOID(buffer->skb);
			EFX_BUG_ON_PARANOID(buffer->len);
			EFX_BUG_ON_PARANOID(!buffer->continuation);
			EFX_BUG_ON_PARANOID(buffer->unmap_len);

			dma_len = efx_max_tx_len(efx, dma_addr);
			if (likely(dma_len >= len))
				dma_len = len;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);

		/* Transfer ownership of the unmapping to the final buffer */
		buffer->unmap_single = unmap_single;
		buffer->unmap_len = unmap_len;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = fragment->size;
		page = fragment->page;
		page_offset = fragment->page_offset;
		i++;
		/* Map for DMA */
		unmap_single = false;
		dma_addr = pci_map_page(pci_dev, page, page_offset, len,
					PCI_DMA_TODEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
	buffer->skb = skb;
	buffer->continuation = false;

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	return NETDEV_TX_OK;

 pci_err:
	netif_err(efx, tx_err, efx->net_dev,
		  " TX queue %d could not map skb with %d bytes %d "
		  "fragments for DMA\n", tx_queue->queue, skb->len,
		  skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any(skb);

 unwind:
	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->len = 0;
	}

	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, unmap_len,
				       PCI_DMA_TODEVICE);
	}

	return rc;
}
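All of the examples above use the legacy pci_map_*()/pci_dma_mapping_error() wrappers. On kernels where those wrappers are unavailable, the same check is expressed with the generic DMA API. A minimal sketch of the equivalent pattern; the function and parameter names are placeholders:

#include <linux/dma-mapping.h>

static int example_map_buffer(struct pci_dev *pdev, void *buf, size_t len,
			      dma_addr_t *dma_out)
{
	dma_addr_t dma = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);

	/* On error nothing is mapped, so there is nothing to unmap */
	if (dma_mapping_error(&pdev->dev, dma))
		return -ENOMEM;

	*dma_out = dma;
	return 0;
}

static void example_unmap_buffer(struct pci_dev *pdev, dma_addr_t dma,
				 size_t len)
{
	dma_unmap_single(&pdev->dev, dma, len, DMA_TO_DEVICE);
}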