Code Example #1
File: cppi_dma.c Project: ARMP/android_kernel_lge_x3
static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
{
	int	j;

	/* initialize channel fields */
	c->head = NULL;
	c->tail = NULL;
	c->last_processed = NULL;
	c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
	c->controller = cppi;
	c->is_rndis = 0;
	c->freelist = NULL;

	/* build the BD Free list for the channel */
	for (j = 0; j < NUM_TXCHAN_BD + 1; j++) {
		struct cppi_descriptor	*bd;
		dma_addr_t		dma;

		bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma);
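		/* init-time GFP_KERNEL allocation; the original driver
		 * assumes it succeeds and does not NULL-check bd */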
		bd->dma = dma;
		cppi_bd_free(c, bd);
	}
}
Code Example #2
void *hcd_buffer_alloc(
	struct usb_bus 	*bus,
	size_t			size,
	gfp_t			mem_flags,
	dma_addr_t		*dma
)
{
	struct usb_hcd		*hcd = bus_to_hcd(bus);
	int 			i;

	/* some USB hosts just use PIO */
	if (!bus->controller->dma_mask &&
	    !(hcd->driver->flags & HCD_LOCAL_MEM)) {
		*dma = ~(dma_addr_t) 0;
		return kmalloc(size, mem_flags);
	}

	for (i = 0; i < HCD_BUFFER_POOLS; i++) {
		if (size <= pool_max[i])
			return dma_pool_alloc(hcd->pool[i], mem_flags, dma);
	}
	return dma_alloc_coherent(hcd->self.controller, size, dma, mem_flags);
}
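
The allocator above picks one of three strategies: kmalloc() for PIO-only hosts, a per-size dma_pool for small buffers, and dma_alloc_coherent() for everything else. The free path in the same file (drivers/usb/core/buffer.c) has to undo the same choice. A minimal sketch of the matching hcd_buffer_free(), paraphrased from memory of the same-era source, so treat it as an approximation rather than verbatim upstream code:

void hcd_buffer_free(
	struct usb_bus		*bus,
	size_t			size,
	void			*addr,
	dma_addr_t		dma
)
{
	struct usb_hcd		*hcd = bus_to_hcd(bus);
	int			i;

	if (!addr)
		return;

	/* PIO-only hosts got their memory from kmalloc() */
	if (!bus->controller->dma_mask &&
	    !(hcd->driver->flags & HCD_LOCAL_MEM)) {
		kfree(addr);
		return;
	}

	/* small buffers came from one of the per-size pools */
	for (i = 0; i < HCD_BUFFER_POOLS; i++) {
		if (size <= pool_max[i]) {
			dma_pool_free(hcd->pool[i], addr, dma);
			return;
		}
	}

	/* anything larger was a one-off coherent allocation */
	dma_free_coherent(hcd->self.controller, size, addr, dma);
}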
Code Example #3
File: lab1_dma.c Project: starius/mylinuxprog
static int __init my_init( void ) {
    char *kbuf;
    dma_addr_t handle;
    size_t size = ( 10 * PAGE_SIZE );
    struct dma_pool *mypool;
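    /* direction, pool_size, pool_align and output() are defined elsewhere
     * in the original lab file; this excerpt omits them */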
    /* dma_alloc_coherent method */
    kbuf = dma_alloc_coherent( NULL, size, &handle, GFP_KERNEL );
    output( kbuf, handle, size, "This is the dma_alloc_coherent() string" );
    dma_free_coherent( NULL, size, kbuf, handle );
    /* dma_map/unmap_single */
    kbuf = kmalloc( size, GFP_KERNEL );
    handle = dma_map_single( NULL, kbuf, size, direction );
    output( kbuf, handle, size, "This is the dma_map_single() string" );
    dma_unmap_single( NULL, handle, size, direction );
    kfree( kbuf );
    /* dma_pool method */
    mypool = dma_pool_create( "mypool", NULL, pool_size, pool_align, 0 );
    kbuf = dma_pool_alloc( mypool, GFP_KERNEL, &handle );
    output( kbuf, handle, size, "This is the dma_pool_alloc() string" );
    dma_pool_free( mypool, kbuf, handle );
    dma_pool_destroy( mypool );
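    /* a nonzero return makes insmod fail, so the module runs its demo and
     * is never left resident; presumably intentional in this lab */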
    return -1;
}
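
The excerpt compiles only with several definitions that the page cut off. A hypothetical, self-contained set is shown below; the names come from the excerpt, while the values and the output() body are assumptions, not the original lab code:

#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>

static int direction = DMA_BIDIRECTIONAL;   /* assumed */
static size_t pool_size = 1024;             /* assumed */
static size_t pool_align = 32;              /* assumed */

/* assumed helper: write a test string into the buffer, then dump both the
 * virtual address and the DMA handle */
static void output( char *buf, dma_addr_t handle, size_t size, const char *msg )
{
    strcpy( buf, msg );
    pr_info( "buf=%p handle=0x%llx size=%zu: %s\n",
             buf, (unsigned long long)handle, size, buf );
}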
Code Example #4
File: lab1_dma.c Project: KimHyoJin/driver-samples
static int __init my_init(void)
{
	dev_set_name(&dev, "my0");
	device_register(&dev);
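	/* kbuf, handle, size, direction, mypool, pool_size, pool_align and
	 * dev are file-scope variables in the original lab source */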

	/* dma_alloc_coherent method */
	/*

	printk(KERN_INFO "Loading DMA allocation test module\n");
	printk(KERN_INFO "\nTesting dma_alloc_coherent()..........\n\n");
	kbuf = dma_alloc_coherent(&dev, size, &handle, GFP_KERNEL);
	output(kbuf, handle, size, "This is the dma_alloc_coherent() string");
	dma_free_coherent(&dev, size, kbuf, handle);
	*/

	/* dma_map/unmap_single */

	printk(KERN_INFO "\nTesting dma_map_single()................\n\n");
	kbuf = kmalloc(size, GFP_KERNEL);
	handle = dma_map_single(&dev, kbuf, size, direction);
	output(kbuf, handle, size, "This is the dma_map_single() string");
	dma_unmap_single(&dev, handle, size, direction);
	kfree(kbuf);

	/* dma_pool method */

	printk(KERN_INFO "\nTesting dma_pool_alloc()..........\n\n");
	mypool = dma_pool_create("mypool", &dev, pool_size, pool_align, 0);
	kbuf = dma_pool_alloc(mypool, GFP_KERNEL, &handle);
	output(kbuf, handle, size, "This is the dma_pool_alloc() string");
	dma_pool_free(mypool, kbuf, handle);
	dma_pool_destroy(mypool);

	device_unregister(&dev);

	return 0;
}
Code Example #5
File: xhci-mem.c Project: mikebyrne/linux-2.6
/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	struct xhci_segment *seg;
	dma_addr_t	dma;

	seg = kzalloc(sizeof *seg, flags);
	if (!seg)
		return NULL;
	xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);

	seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}
	xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
			seg->trbs, (unsigned long long)dma);

	memset(seg->trbs, 0, SEGMENT_SIZE);
	seg->dma = dma;
	seg->next = NULL;

	return seg;
}
Code Example #6
File: tdma.c Project: 020gzh/linux
static struct mv_cesa_tdma_desc *
mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags)
{
	struct mv_cesa_tdma_desc *new_tdma = NULL;
	dma_addr_t dma_handle;

	new_tdma = dma_pool_alloc(cesa_dev->dma->tdma_desc_pool, flags,
				  &dma_handle);
	if (!new_tdma)
		return ERR_PTR(-ENOMEM);

	memset(new_tdma, 0, sizeof(*new_tdma));
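	/* dma_pool_alloc() returns uninitialized memory, hence the memset
	 * above; newer kernels provide dma_pool_zalloc() for this pattern */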
	new_tdma->cur_dma = dma_handle;
	if (chain->last) {
		chain->last->next_dma = cpu_to_le32(dma_handle);
		chain->last->next = new_tdma;
	} else {
		chain->first = new_tdma;
	}

	chain->last = new_tdma;

	return new_tdma;
}
Code Example #7
File: dma.c Project: AppEngine/linux-2.6
int s3c2410_dma_enqueue(unsigned int channel, void *id,
			dma_addr_t data, int size)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
	struct s3c64xx_dma_buff *next;
	struct s3c64xx_dma_buff *buff;
	struct pl080s_lli *lli;
	int ret;

	WARN_ON(!chan);
	if (!chan)
		return -EINVAL;

	buff = kzalloc(sizeof(struct s3c64xx_dma_buff), GFP_KERNEL);
	if (!buff) {
		printk(KERN_ERR "%s: no memory for buffer\n", __func__);
		return -ENOMEM;
	}

	lli = dma_pool_alloc(dma_pool, GFP_KERNEL, &buff->lli_dma);
	if (!lli) {
		printk(KERN_ERR "%s: no memory for lli\n", __func__);
		ret = -ENOMEM;
		goto err_buff;
	}

	pr_debug("%s: buff %p, dp %08x lli (%p, %08x) %d\n",
		 __func__, buff, data, lli, (u32)buff->lli_dma, size);

	buff->lli = lli;
	buff->pw = id;

	s3c64xx_dma_fill_lli(chan, lli, data, size);

	if ((next = chan->next) != NULL) {
		struct s3c64xx_dma_buff *end = chan->end;
		struct pl080s_lli *endlli = end->lli;

		pr_debug("enquing onto channel\n");

		end->next = buff;
		endlli->next_lli = buff->lli_dma;

		if (chan->flags & S3C2410_DMAF_CIRCULAR) {
			struct s3c64xx_dma_buff *curr = chan->curr;
			lli->next_lli = curr->lli_dma;
		}

		if (next == chan->curr) {
			writel(buff->lli_dma, chan->regs + PL080_CH_LLI);
			chan->next = buff;
		}

		show_lli(endlli);
		chan->end = buff;
	} else {
		pr_debug("enquing onto empty channel\n");

		chan->curr = buff;
		chan->next = buff;
		chan->end = buff;

		s3c64xx_lli_to_regs(chan, lli);
	}

	show_lli(lli);

	dbg_showchan(chan);
	dbg_showbuffs(chan);
	return 0;

err_buff:
	kfree(buff);
	return ret;
}
Code Example #8
File: htt_tx.c Project: 383530895/linux
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct htt_data_tx_desc_frag *frags;
	u8 vdev_id = skb_cb->vdev_id;
	u8 tid = skb_cb->htt.tid;
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	dma_addr_t paddr;
	u32 frags_paddr;
	bool use_frags;

	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		goto err;

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt);
	if (res < 0) {
		spin_unlock_bh(&htt->tx_lock);
		goto err_tx_dec;
	}
	msdu_id = res;
	htt->pending_tx[msdu_id] = msdu;
	spin_unlock_bh(&htt->tx_lock);

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	/* Since HTT 3.0 there is no separate mgmt tx command. However, in the
	 * case of mgmt tx using TX_FRM there is no tx fragment list. Instead
	 * of a fragment list, the host driver specifies the frame pointer
	 * directly. */
	use_frags = htt->target_version_major < 3 ||
		    !ieee80211_is_mgmt(hdr->frame_control);

	skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
					   &paddr);
	if (!skb_cb->htt.txbuf) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}
	skb_cb->htt.txbuf_paddr = paddr;

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res)
		goto err_free_txbuf;

	if (likely(use_frags)) {
		frags = skb_cb->htt.txbuf->frags;

		frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
		frags[0].len = __cpu_to_le32(msdu->len);
		frags[1].paddr = 0;
		frags[1].len = 0;

		flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

		frags_paddr = skb_cb->htt.txbuf_paddr;
	} else {
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

		frags_paddr = skb_cb->paddr;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why HTC tx completion handler itself is ignored by
	 * setting NULL to transfer_context for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
	 * as it's a waste of resources. By bypassing HTC it is possible to
	 * avoid extra memory allocations, compress data structures and thus
	 * improve performance. */

	skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
	skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
			sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			sizeof(skb_cb->htt.txbuf->cmd_tx) +
			prefetch_len);
	skb_cb->htt.txbuf->htc_hdr.flags = 0;

	if (!ieee80211_has_protected(hdr->frame_control))
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely on a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
	skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID);
	skb_cb->htt.txbuf->cmd_tx.freq = __cpu_to_le16(skb_cb->htt.freq);

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
		   flags0, flags1, msdu->len, msdu_id, frags_paddr,
		   (u32)skb_cb->paddr, vdev_id, tid, skb_cb->htt.freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
	sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
			    sizeof(skb_cb->htt.txbuf->frags);
	sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txbuf:
	dma_pool_free(htt->tx_pool,
		      skb_cb->htt.txbuf,
		      skb_cb->htt.txbuf_paddr);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	htt->pending_tx[msdu_id] = NULL;
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt);
err:
	return res;
}
Code Example #9
static int buffer_mgr_build_mlli(struct device *dev,
				 struct sg_data_array *sg_data,
				 struct mlli_params *mlli_params)
{
	struct scatterlist *cur_sg_entry;
	uint32_t *cur_mlli_entry;
	uint32_t data_len;
	uint32_t curr_nents = 0;
	uint32_t i;

	DX_LOG_DEBUG("sg_params: sg_data->num_of_sg = 0x%08X \n",
		   (uint32_t)sg_data->num_of_sg);
	/* Allocate memory from the pointed pool */
	mlli_params->mlli_virt_addr =
		dma_pool_alloc(mlli_params->curr_pool, GFP_KERNEL,
			       &mlli_params->mlli_dma_addr);
	if (unlikely(mlli_params->mlli_virt_addr == NULL)) {
		DX_LOG_ERR("mlli table dma_pool_alloc() failed\n");
		return -ENOMEM;
	}
	cur_mlli_entry = (unsigned int *)mlli_params->mlli_virt_addr;

	/* go over all SG and link it to one MLLI table */
	for (i = 0; i < sg_data->num_of_sg; i++) {

		data_len = sg_data->sg_data_len_array[i];

		for (cur_sg_entry = sg_data->sg_array[i];
		     (cur_sg_entry != NULL) && (data_len != 0);
		     cur_sg_entry = sg_next(cur_sg_entry), cur_mlli_entry += 2) {
			uint32_t entry_len =
				((data_len > sg_dma_len(cur_sg_entry))) ?
				sg_dma_len(cur_sg_entry) : data_len;
			data_len -= entry_len;
			cur_mlli_entry[SEP_LLI_ADDR_WORD_OFFSET] =
					sg_dma_address(cur_sg_entry);
			cur_mlli_entry[SEP_LLI_SIZE_WORD_OFFSET] =
					entry_len;
			DX_LOG_DEBUG("entry[%d] : mlli addr = 0x%08lX "
				     "mlli_len =0x%08lX\n",
				   curr_nents,
				   (unsigned long)cur_mlli_entry[SEP_LLI_ADDR_WORD_OFFSET],
				   (unsigned long)cur_mlli_entry[SEP_LLI_SIZE_WORD_OFFSET]);
			curr_nents++;
		} /* for */
		/* set last bit in the current table*/
		SEP_LLI_SET(&mlli_params->mlli_virt_addr[
			SEP_LLI_ENTRY_BYTE_SIZE*(curr_nents-1)],
			LAST, sg_data->is_last[i]);
	} /* for */
	mlli_params->mlli_len =
		curr_nents * SEP_LLI_ENTRY_BYTE_SIZE;

	SEP_LLI_SET(&mlli_params->mlli_virt_addr[
		SEP_LLI_ENTRY_BYTE_SIZE*(curr_nents-1)],
		LAST, 1);

	DX_LOG_DEBUG("MLLI params: virt addr = 0x%08lX "
		     "dma_address=0x%08lX, mlli_len =0x%08X\n",
		   (unsigned long)mlli_params->mlli_virt_addr,
		   (unsigned long)mlli_params->mlli_dma_addr,
		   (unsigned)mlli_params->mlli_len);
	return 0;
}
Code Example #10
File: lightnvm.c Project: acton393/linux
static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
				    gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return dma_pool_alloc(pool, mem_flags, dma_handler);
}
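
This wrapper exists only to adapt dma_pool_alloc() to the nvm_dev_ops callback signature. In the same file it is wired into the ops table roughly like this (abridged and reconstructed from memory, so neighboring fields may differ):

static struct nvm_dev_ops nvme_nvm_dev_ops = {
	.identity		= nvme_nvm_identity,
	.get_bb_tbl		= nvme_nvm_get_bb_tbl,
	.set_bb_tbl		= nvme_nvm_set_bb_tbl,
	.submit_io		= nvme_nvm_submit_io,
	.create_dma_pool	= nvme_nvm_create_dma_pool,
	.destroy_dma_pool	= nvme_nvm_destroy_dma_pool,
	.dev_dma_alloc		= nvme_nvm_dev_dma_alloc,
	.dev_dma_free		= nvme_nvm_dev_dma_free,
};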
Code Example #11
File: udc.c Project: AmesianX/netlink-mmap
/**
 * _hardware_queue: configures a request at hardware level
 * @gadget: gadget
 * @mEp:    endpoint
 *
 * This function returns an error code
 */
static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
{
	struct ci13xxx *ci = mEp->ci;
	unsigned i;
	int ret = 0;
	unsigned length = mReq->req.length;

	/* don't queue twice */
	if (mReq->req.status == -EALREADY)
		return -EALREADY;

	mReq->req.status = -EALREADY;

	if (mReq->req.zero && length && (length % mEp->ep.maxpacket == 0)) {
		mReq->zptr = dma_pool_alloc(mEp->td_pool, GFP_ATOMIC,
					   &mReq->zdma);
		if (mReq->zptr == NULL)
			return -ENOMEM;

		memset(mReq->zptr, 0, sizeof(*mReq->zptr));
		mReq->zptr->next    = TD_TERMINATE;
		mReq->zptr->token   = TD_STATUS_ACTIVE;
		if (!mReq->req.no_interrupt)
			mReq->zptr->token   |= TD_IOC;
	}
	ret = usb_gadget_map_request(&ci->gadget, &mReq->req, mEp->dir);
	if (ret)
		return ret;

	/*
	 * TD configuration
	 * TODO - handle requests which spawns into several TDs
	 */
	memset(mReq->ptr, 0, sizeof(*mReq->ptr));
	mReq->ptr->token    = length << ffs_nr(TD_TOTAL_BYTES);
	mReq->ptr->token   &= TD_TOTAL_BYTES;
	mReq->ptr->token   |= TD_STATUS_ACTIVE;
	if (mReq->zptr) {
		mReq->ptr->next    = mReq->zdma;
	} else {
		mReq->ptr->next    = TD_TERMINATE;
		if (!mReq->req.no_interrupt)
			mReq->ptr->token  |= TD_IOC;
	}
	mReq->ptr->page[0]  = mReq->req.dma;
	for (i = 1; i < 5; i++)
		mReq->ptr->page[i] =
			(mReq->req.dma + i * CI13XXX_PAGE_SIZE) & ~TD_RESERVED_MASK;

	if (!list_empty(&mEp->qh.queue)) {
		struct ci13xxx_req *mReqPrev;
		int n = hw_ep_bit(mEp->num, mEp->dir);
		int tmp_stat;

		mReqPrev = list_entry(mEp->qh.queue.prev,
				struct ci13xxx_req, queue);
		if (mReqPrev->zptr)
			mReqPrev->zptr->next = mReq->dma & TD_ADDR_MASK;
		else
			mReqPrev->ptr->next = mReq->dma & TD_ADDR_MASK;
		wmb();
		if (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
			goto done;
		do {
			hw_write(ci, OP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
			tmp_stat = hw_read(ci, OP_ENDPTSTAT, BIT(n));
		} while (!hw_read(ci, OP_USBCMD, USBCMD_ATDTW));
		hw_write(ci, OP_USBCMD, USBCMD_ATDTW, 0);
		if (tmp_stat)
			goto done;
	}

	/*  QH configuration */
	mEp->qh.ptr->td.next   = mReq->dma;    /* TERMINATE = 0 */
	mEp->qh.ptr->td.token &= ~TD_STATUS;   /* clear status */
	mEp->qh.ptr->cap |=  QH_ZLT;

	wmb();   /* synchronize before ep prime */

	ret = hw_ep_prime(ci, mEp->num, mEp->dir,
			   mEp->type == USB_ENDPOINT_XFER_CONTROL);
done:
	return ret;
}
Code Example #12
File: hwcs.c Project: jhbsz/102
int
do_frag_hw(ath_hwcs_node_info_t *rx_entries, ath_hwcs_node_info_t *tx_entry, int num)
{
	int i;
	volatile ath_hwcs_desc_t *desc = NULL, *firstDesc = NULL;
	volatile ath_hwcs_desc_t *tx_desc = NULL;

	dma_addr_t tx_handle;
	dma_addr_t rx_handles[512];

	tx_desc = (ath_hwcs_desc_t *)dma_pool_alloc(dmapool,
			GFP_KERNEL, &tx_handle);
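	/* the original code assumes this pool allocation succeeds; a NULL
	 * check would be needed in more defensive code */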

	tx_desc->buf = tx_entry->buf;
	if (num != 0) {
		tx_desc->info.status = (0x0 | ATH_HWCS_TX_SOF_MASK | ATH_HWCS_TX_EOF_MASK |
				ATH_HWCS_TYPE_WITH_CP);
	} else {
#ifdef CONFIG_ATH_HWCS_INT
		tx_desc->info.status = (0x0 | ATH_HWCS_TX_SOF_MASK | ATH_HWCS_TX_EOF_MASK |
				ATH_HWCS_TYPE_CSUM_ONLY |
				ATH_HWCS_INTR_ENABLE);
		ath_reg_rmw_set(ATH_HWCS_IMASK, ATH_HWCS_TX_INTR_MASK);
#else
		tx_desc->info.status = (0x0 | ATH_HWCS_TX_SOF_MASK | ATH_HWCS_TX_EOF_MASK |
				ATH_HWCS_TYPE_CSUM_ONLY);
#endif
	}

	tx_desc->info.control.pktSize = tx_entry->len;
	tx_desc->next = (ath_hwcs_desc_t *)tx_desc;

	for (i = 0; i < num; i++) {
		if (desc == NULL) {
			desc = (ath_hwcs_desc_t *)dma_pool_alloc(dmapool,
					GFP_KERNEL, &rx_handles[i]);
			firstDesc = desc;
		} else {
			desc->next = (ath_hwcs_desc_t *)dma_pool_alloc(dmapool,
					GFP_KERNEL, &rx_handles[i]);
			desc = desc->next;
		}
		desc->buf = rx_entries[i].buf;
		desc->info.status = (0x0 | ATH_HWCS_PKT_VOID);
		desc->info.control.pktSize = rx_entries[i].len;
	}
	if (num != 0) {
		desc->next = (ath_hwcs_desc_t *)firstDesc;
#ifdef CONFIG_ATH_HWCS_INT
		ath_reg_rmw_set(ATH_HWCS_IMASK, ATH_HWCS_RX_INTR_MASK);
		desc->info.status |= ATH_HWCS_INTR_ENABLE;
#endif
		ath_reg_wr(ATH_HWCS_DMARX_DESC, (unsigned int)rx_handles[0]);
		ath_reg_wr(ATH_HWCS_DMARX_CONTROL, ATH_HWCS_DMARX_ENABLE);
	}
	ath_reg_wr(ATH_HWCS_DMATX_DESC0, (unsigned int)tx_handle);
	ath_reg_wr(ATH_HWCS_DMATX_CONTROL0, ATH_HWCS_DMATX_ENABLE);
#ifdef CONFIG_ATH_HWCS_INT
	if (num == 0)
		wait_event_interruptible(wq, (check_pkt_void(tx_desc) == 1));
#else
	while ((check_pkt_void(tx_desc)) == 0);
#endif
	if (num != 0) {

#ifdef CONFIG_ATH_HWCS_INT
		wait_event_interruptible(wq, (check_pkt_void(desc) == 0));
#endif
		desc = firstDesc;
		for (i = 0; i < num; i++) {
#ifndef CONFIG_ATH_HWCS_INT
			while ((check_pkt_void(desc)) != 0);
#endif
			rx_entries[i].checksum = (desc->info.control.pktSize & 0xffff);
			rx_entries[i].actual = (desc->status_only & 0x7ffff);
#if 0
			if(rx_entries[i].actual < rx_entries[i].len) {
				retval = -1;
				break;
			}
#endif
			firstDesc = desc;
			desc = desc->next;
			dma_pool_free(dmapool, (void *)firstDesc, rx_handles[i]);
		}
	}
	tx_entry->checksum = (tx_desc->info.control.pktSize & 0xffff);
	tx_entry->actual = (tx_desc->status_only & 0x7ffff);
	dma_pool_free(dmapool, (void *)tx_desc, tx_handle);
	return 0;
}
Code Example #13
File: img-mdc-dma.c Project: 168519/linux
static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir,
	unsigned long flags)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;
	struct mdc_tx_desc *mdesc;
	struct mdc_hw_list_desc *curr, *prev = NULL;
	dma_addr_t curr_phys, prev_phys;

	if (!buf_len && !period_len)
		return NULL;

	if (!is_slave_direction(dir))
		return NULL;

	if (mdc_check_slave_width(mchan, dir) < 0)
		return NULL;

	mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
	if (!mdesc)
		return NULL;
	mdesc->chan = mchan;
	mdesc->cyclic = true;
	mdesc->list_xfer_size = buf_len;
	mdesc->list_period_len = DIV_ROUND_UP(period_len,
					      mdma->max_xfer_size);

	while (buf_len > 0) {
		size_t remainder = min(period_len, buf_len);

		while (remainder > 0) {
			size_t xfer_size;

			curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
					      &curr_phys);
			if (!curr)
				goto free_desc;

			if (!prev) {
				mdesc->list_phys = curr_phys;
				mdesc->list = curr;
			} else {
				prev->node_addr = curr_phys;
				prev->next_desc = curr;
			}

			xfer_size = min_t(size_t, mdma->max_xfer_size,
					  remainder);

			if (dir == DMA_MEM_TO_DEV) {
				mdc_list_desc_config(mchan, curr, dir,
						     buf_addr,
						     mchan->config.dst_addr,
						     xfer_size);
			} else {
				mdc_list_desc_config(mchan, curr, dir,
						     mchan->config.src_addr,
						     buf_addr,
						     xfer_size);
			}

			prev = curr;
			prev_phys = curr_phys;

			mdesc->list_len++;
			buf_addr += xfer_size;
			buf_len -= xfer_size;
			remainder -= xfer_size;
		}
	}
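	/* cyclic transfer: link the last descriptor back to the first so
	 * the hardware loops over the buffer */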
	prev->node_addr = mdesc->list_phys;

	return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
	mdc_desc_free(&mdesc->vd);

	return NULL;
}
Code Example #14
File: dma.c Project: Ekylypse/linux-kernel-ican-tab
int imapx200_dma_enqueue(unsigned int channel, void *id,
                         dma_addr_t data, int size)
{
    struct imapx200_dma_chan *chan = imap_dma_lookup_channel(channel);
    struct imapx200_dma_buff *next;
    struct imapx200_dma_buff *buff;
    struct dw_lli *lli;
    int ret;

    WARN_ON(!chan);
    if (!chan)
        return -EINVAL;

    buff = kzalloc(sizeof(struct imapx200_dma_buff), GFP_KERNEL);
    if (!buff) {
        printk(KERN_ERR "%s: no memory for buffer\n", __func__);
        return -ENOMEM;
    }

    lli = dma_pool_alloc(dma_pool, GFP_KERNEL, &buff->lli_dma);
    if (!lli) {
        printk(KERN_ERR "%s: no memory for lli\n", __func__);
        ret = -ENOMEM;
        goto err_buff;
    }

    pr_debug("%s: buff %p, dp %08x lli (%p, %08x) %d\n",
             __func__, buff, data, lli, (u32)buff->lli_dma, size);

    buff->lli = lli;
    buff->pw = id;

    imapx200_dma_fill_lli(chan, lli, data, size);

    if ((next = chan->next) != NULL) {
        struct imapx200_dma_buff *end = chan->end;
        struct dw_lli *endlli = end->lli;

        pr_debug("enquing onto channel\n");

        end->next = buff;
        endlli->llp = buff->lli_dma;

        if (chan->flags) {
            struct imapx200_dma_buff *curr = chan->curr;
            lli->llp = curr->lli_dma;
        }

        if (next == chan->curr) {
            chan_writel(chan,LLP,buff->lli_dma);
            chan->next = buff;
        }

        chan->end = buff;
    } else {
        pr_debug("enquing onto empty channel\n");

        chan->curr = buff;
        chan->next = buff;
        chan->end = buff;

        imapx200_lli_to_regs(chan, lli);
    }

    dbg_showchan(chan);
    return 0;

err_buff:
    kfree(buff);

    return ret;
}
Code Example #15
/**
 *  mallocInit
 *     allocate DMA buffers
 *
 *  @return
 */
static int mallocInit( void )
{
   int i;
   int j;
   int k;
   MALLOC_CBLK *cblkp;
   MALLOC_DMA *dmap;

   cblkp = &mallocCblk;

   /* Initialize memory control */
   memset( cblkp, 0, sizeof(*cblkp) );

   /* Create memory pool for halAudio */
   cblkp->poolp = dma_pool_create( "halAudio HSS memory pool", NULL,
                                   MALLOC_POOL_SIZE, CACHE_LINE_SIZE, 0 );
   if( !cblkp->poolp )
   {
      /* Unable to allocate memory */
      printk( KERN_ERR "Unable to allocate APM memory pool\n" );
      return( -ENOMEM );
   }

   /* Allocate memory for DMA descriptors */
   for( i=0; i<HSS_NUM_CHANNEL; i++ )
   {
      for( j=0; j<DMA_DIR_NUM; j++ )
      {
         dmap = &cblkp->dma[i][j];
         dmap->bufp = dma_pool_alloc( cblkp->poolp, GFP_KERNEL,
                                      &dmap->handle );
         if( !dmap->bufp )
         {
            /* Unable to allocate memory */
            printk( KERN_ERR "Unable to allocate DMA descriptor memory\n" );
            mallocExit();
            return( -ENOMEM );
         }
      }
   }

   /* Allocate memory for sample buffers */
   for( i=0; i<HSS_NUM_CHANNEL; i++ )
   {
      for( j=0; j<DMA_DIR_NUM; j++ )
      {
         for( k=0; k<HSS_NUM_BUF; k++ )
         {
            dmap = &cblkp->buf[i][j][k];
            dmap->bufp = dma_pool_alloc( cblkp->poolp, GFP_KERNEL,
                                         &dmap->handle );
            if( !dmap->bufp )
            {
               /* Unable to allocate memory */
               printk( KERN_ERR "Unable to allocate sample buffer memory \n" );
               mallocExit();
               return( -ENOMEM );
            }
         }
      }
   }

   /* Allocate memory for zero buffer */
   dmap = &cblkp->bufZero;
   dmap->bufp = dma_pool_alloc( cblkp->poolp, GFP_KERNEL, &dmap->handle );
   if( !dmap->bufp )
   {
      /* Unable to allocate memory */
      printk( KERN_ERR "Unable to allocate zero buffer memory\n" );
      mallocExit();
      return( -ENOMEM );
   }
   memset( dmap->bufp, 0, MALLOC_POOL_SIZE );

   /* Allocate memory for scratch buffer */
   dmap = &cblkp->bufScratch;
   dmap->bufp = dma_pool_alloc( cblkp->poolp, GFP_KERNEL, &dmap->handle );
   if( !dmap->bufp )
   {
      /* Unable to allocate memory */
      printk( KERN_ERR "Unable to allocate scratch buffer memory\n" );
      mallocExit();
      return( -ENOMEM );
   }

   return( 0 );
}
Code Example #16
File: htt_tx.c Project: coralreef2046/net-next-nuse
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct htt_data_tx_desc_frag *frags;
	u8 vdev_id = skb_cb->vdev_id;
	u8 tid = skb_cb->htt.tid;
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	dma_addr_t paddr = 0;
	u32 frags_paddr = 0;
	struct htt_msdu_ext_desc *ext_desc = NULL;
	bool limit_mgmt_desc = false;
	bool is_probe_resp = false;

	if (unlikely(ieee80211_is_mgmt(hdr->frame_control)) &&
	    ar->hw_params.max_probe_resp_desc_thres) {
		limit_mgmt_desc = true;

		if (ieee80211_is_probe_resp(hdr->frame_control))
			is_probe_resp = true;
	}

	res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
	if (res)
		goto err;

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	spin_unlock_bh(&htt->tx_lock);
	if (res < 0) {
		goto err_tx_dec;
	}
	msdu_id = res;

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
					   &paddr);
	if (!skb_cb->htt.txbuf) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}
	skb_cb->htt.txbuf_paddr = paddr;

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	} else if (!skb_cb->htt.nohwcrypt &&
		   skb_cb->txmode == ATH10K_HW_TXRX_RAW &&
		   ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_txbuf;
	}

	switch (skb_cb->txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		/* pass through */
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			memset(&htt->frag_desc.vaddr[msdu_id], 0,
			       sizeof(struct htt_msdu_ext_desc));
			frags = (struct htt_data_tx_desc_frag *)
				&htt->frag_desc.vaddr[msdu_id].frags;
			ext_desc = &htt->frag_desc.vaddr[msdu_id];
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi = 0;
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

			frags_paddr =  htt->frag_desc.paddr +
				(sizeof(struct htt_msdu_ext_desc) * msdu_id);
		} else {
			frags = skb_cb->htt.txbuf->frags;
			frags[0].dword_addr.paddr =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
			frags[1].dword_addr.paddr = 0;
			frags[1].dword_addr.len = 0;

			frags_paddr = skb_cb->htt.txbuf_paddr;
		}
		flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why HTC tx completion handler itself is ignored by
	 * setting NULL to transfer_context for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
	 * as it's a waste of resources. By bypassing HTC it is possible to
	 * avoid extra memory allocations, compress data structures and thus
	 * improve performance. */

	skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
	skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
			sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			sizeof(skb_cb->htt.txbuf->cmd_tx) +
			prefetch_len);
	skb_cb->htt.txbuf->htc_hdr.flags = 0;

	if (skb_cb->htt.nohwcrypt)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	if (!skb_cb->is_protected)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		if (ar->hw_params.continuous_frag_desc)
			ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely on a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
	skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID);
	skb_cb->htt.txbuf->cmd_tx.freq = __cpu_to_le16(skb_cb->htt.freq);

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
		   flags0, flags1, msdu->len, msdu_id, frags_paddr,
		   (u32)skb_cb->paddr, vdev_id, tid, skb_cb->htt.freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
	sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
			    sizeof(skb_cb->htt.txbuf->frags);
	sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txbuf:
	dma_pool_free(htt->tx_pool,
		      skb_cb->htt.txbuf,
		      skb_cb->htt.txbuf_paddr);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
err:
	return res;
}
Code Example #17
File: bcm-flexrm-mailbox.c Project: Lyude/linux
static int flexrm_startup(struct mbox_chan *chan)
{
	u64 d;
	u32 val, off;
	int ret = 0;
	dma_addr_t next_addr;
	struct flexrm_ring *ring = chan->con_priv;

	/* Allocate BD memory */
	ring->bd_base = dma_pool_alloc(ring->mbox->bd_pool,
				       GFP_KERNEL, &ring->bd_dma_base);
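	/* plain dma_pool_alloc() (not dma_pool_zalloc()) is enough here:
	 * the loop below initializes every descriptor in the BD memory */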
	if (!ring->bd_base) {
		dev_err(ring->mbox->dev,
			"can't allocate BD memory for ring%d\n",
			ring->num);
		ret = -ENOMEM;
		goto fail;
	}

	/* Configure next table pointer entries in BD memory */
	for (off = 0; off < RING_BD_SIZE; off += RING_DESC_SIZE) {
		next_addr = off + RING_DESC_SIZE;
		if (next_addr == RING_BD_SIZE)
			next_addr = 0;
		next_addr += ring->bd_dma_base;
		if (RING_BD_ALIGN_CHECK(next_addr))
			d = flexrm_next_table_desc(RING_BD_TOGGLE_VALID(off),
						    next_addr);
		else
			d = flexrm_null_desc(RING_BD_TOGGLE_INVALID(off));
		flexrm_write_desc(ring->bd_base + off, d);
	}

	/* Allocate completion memory */
	ring->cmpl_base = dma_pool_zalloc(ring->mbox->cmpl_pool,
					 GFP_KERNEL, &ring->cmpl_dma_base);
	if (!ring->cmpl_base) {
		dev_err(ring->mbox->dev,
			"can't allocate completion memory for ring%d\n",
			ring->num);
		ret = -ENOMEM;
		goto fail_free_bd_memory;
	}

	/* Request IRQ */
	if (ring->irq == UINT_MAX) {
		dev_err(ring->mbox->dev,
			"ring%d IRQ not available\n", ring->num);
		ret = -ENODEV;
		goto fail_free_cmpl_memory;
	}
	ret = request_threaded_irq(ring->irq,
				   flexrm_irq_event,
				   flexrm_irq_thread,
				   0, dev_name(ring->mbox->dev), ring);
	if (ret) {
		dev_err(ring->mbox->dev,
			"failed to request ring%d IRQ\n", ring->num);
		goto fail_free_cmpl_memory;
	}
	ring->irq_requested = true;

	/* Set IRQ affinity hint */
	ring->irq_aff_hint = CPU_MASK_NONE;
	val = ring->mbox->num_rings;
	val = (num_online_cpus() < val) ? val / num_online_cpus() : 1;
	cpumask_set_cpu((ring->num / val) % num_online_cpus(),
			&ring->irq_aff_hint);
	ret = irq_set_affinity_hint(ring->irq, &ring->irq_aff_hint);
	if (ret) {
		dev_err(ring->mbox->dev,
			"failed to set IRQ affinity hint for ring%d\n",
			ring->num);
		goto fail_free_irq;
	}

	/* Disable/inactivate ring */
	writel_relaxed(0x0, ring->regs + RING_CONTROL);

	/* Program BD start address */
	val = BD_START_ADDR_VALUE(ring->bd_dma_base);
	writel_relaxed(val, ring->regs + RING_BD_START_ADDR);

	/* BD write pointer will be same as HW write pointer */
	ring->bd_write_offset =
			readl_relaxed(ring->regs + RING_BD_WRITE_PTR);
	ring->bd_write_offset *= RING_DESC_SIZE;

	/* Program completion start address */
	val = CMPL_START_ADDR_VALUE(ring->cmpl_dma_base);
	writel_relaxed(val, ring->regs + RING_CMPL_START_ADDR);

	/* Completion read pointer will be same as HW write pointer */
	ring->cmpl_read_offset =
			readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
	ring->cmpl_read_offset *= RING_DESC_SIZE;

	/* Read ring Tx, Rx, and Outstanding counts to clear */
	readl_relaxed(ring->regs + RING_NUM_REQ_RECV_LS);
	readl_relaxed(ring->regs + RING_NUM_REQ_RECV_MS);
	readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_LS);
	readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_MS);
	readl_relaxed(ring->regs + RING_NUM_REQ_OUTSTAND);

	/* Configure RING_MSI_CONTROL */
	val = 0;
	val |= (ring->msi_timer_val << MSI_TIMER_VAL_SHIFT);
	val |= BIT(MSI_ENABLE_SHIFT);
	val |= (ring->msi_count_threshold & MSI_COUNT_MASK) << MSI_COUNT_SHIFT;
	writel_relaxed(val, ring->regs + RING_MSI_CONTROL);

	/* Enable/activate ring */
	val = BIT(CONTROL_ACTIVE_SHIFT);
	writel_relaxed(val, ring->regs + RING_CONTROL);

	/* Reset stats to zero */
	atomic_set(&ring->msg_send_count, 0);
	atomic_set(&ring->msg_cmpl_count, 0);

	return 0;

fail_free_irq:
	free_irq(ring->irq, ring);
	ring->irq_requested = false;
fail_free_cmpl_memory:
	dma_pool_free(ring->mbox->cmpl_pool,
		      ring->cmpl_base, ring->cmpl_dma_base);
	ring->cmpl_base = NULL;
fail_free_bd_memory:
	dma_pool_free(ring->mbox->bd_pool,
		      ring->bd_base, ring->bd_dma_base);
	ring->bd_base = NULL;
fail:
	return ret;
}
Code Example #18
File: ixgbe_fcoe.c Project: insop/linux
/**
 * ixgbe_fcoe_ddp_setup - called to set up ddp context
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * Returns: 1 for success and 0 for no ddp
 */
static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
				struct scatterlist *sgl, unsigned int sgc,
				int target_mode)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct ixgbe_fcoe_ddp_pool *ddp_pool;
	struct scatterlist *sg;
	unsigned int i, j, dmacount;
	unsigned int len;
	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
	unsigned int firstoff = 0;
	unsigned int lastsize;
	unsigned int thisoff = 0;
	unsigned int thislen = 0;
	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
	dma_addr_t addr = 0;

	if (!netdev || !sgl)
		return 0;

	adapter = netdev_priv(netdev);
	if (xid >= IXGBE_FCOE_DDP_MAX) {
		e_warn(drv, "xid=0x%x out-of-range\n", xid);
		return 0;
	}

	/* no DDP if we are already down or resetting */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return 0;

	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (ddp->sgl) {
		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
		      xid, ddp->sgl, ddp->sgc);
		return 0;
	}
	ixgbe_fcoe_clear_ddp(ddp);

	if (!fcoe->ddp_pool) {
		e_warn(drv, "No ddp_pool resources allocated\n");
		return 0;
	}

	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
	if (!ddp_pool->pool) {
		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
		goto out_noddp;
	}

	/* setup dma from scsi command sgl */
	dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
	if (dmacount == 0) {
		e_err(drv, "xid 0x%x DMA map error\n", xid);
		goto out_noddp;
	}

	/* alloc the udl from per cpu ddp pool */
	ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
	if (!ddp->udl) {
		e_err(drv, "failed allocated ddp context\n");
		goto out_noddp_unmap;
	}
	ddp->pool = ddp_pool->pool;
	ddp->sgl = sgl;
	ddp->sgc = sgc;

	j = 0;
	for_each_sg(sgl, sg, dmacount, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		while (len) {
			/* max number of buffers allowed in one DDP context */
			if (j >= IXGBE_BUFFCNT_MAX) {
				ddp_pool->noddp++;
				goto out_noddp_free;
			}

			/* get the offset of length of current buffer */
			thisoff = addr & ((dma_addr_t)bufflen - 1);
			thislen = min((bufflen - thisoff), len);
			/*
			 * all but the 1st buffer (j == 0)
			 * must be aligned on bufflen
			 */
			if ((j != 0) && (thisoff))
				goto out_noddp_free;
			/*
			 * all but the last buffer
			 * ((i == (dmacount - 1)) && (thislen == len))
			 * must end at bufflen
			 */
			if (((i != (dmacount - 1)) || (thislen != len))
			    && ((thislen + thisoff) != bufflen))
				goto out_noddp_free;

			ddp->udl[j] = (u64)(addr - thisoff);
			/* only the first buffer may have none-zero offset */
			if (j == 0)
				firstoff = thisoff;
			len -= thislen;
			addr += thislen;
			j++;
		}
	}
Code Example #19
File: lsdma.c Project: MattHung/sage-graphics
/**
 * lsdma_alloc_descriptors - allocate DMA descriptors
 * @dma: DMA information structure
 *
 * Returns a negative error code on failure and 0 on success.
 **/
static int
lsdma_alloc_descriptors (struct master_dma *dma)
{
	const unsigned int total_pointers =
		dma->pointers_per_buf * dma->buffers;
	dma_addr_t dma_addr, first_dma_addr;
	struct lsdma_desc *desc;
	unsigned int i;

	/* Allocate an array of pointers to descriptors */
	if ((dma->desc = (void **)kmalloc (
		total_pointers * sizeof (*dma->desc),
		GFP_KERNEL)) == NULL) {
		goto NO_DESC_PTR;
	}

	/* Allocate the DMA descriptors */
	if ((dma->desc_pool = dma_pool_create ("lsdma",
		dma->dev,
		sizeof (struct lsdma_desc),
		32,
		0)) == NULL) {
		goto NO_PCI_POOL;
	}
	if ((desc = dma->desc[0] = dma_pool_alloc (dma->desc_pool,
		GFP_KERNEL, &first_dma_addr)) == NULL) {
		goto NO_DESC;
	}

	for (i = 1; i < total_pointers; i++) {
		if ((dma->desc[i] = dma_pool_alloc (dma->desc_pool,
			GFP_KERNEL,
			&dma_addr)) == NULL) {
			unsigned int j;

			for (j = i - 1; j > 0; j--) {
				desc = dma->desc[j - 1];
				dma_addr = mdma_desc_to_dma (desc->next_desc,
					desc->next_desc_h);
				dma_pool_free (dma->desc_pool,
					dma->desc[j],
					dma_addr);
			}
			dma_pool_free (dma->desc_pool,
				dma->desc[0],
				first_dma_addr);
			goto NO_DESC;
		}

		desc->next_desc = mdma_dma_to_desc_low (dma_addr);
		desc->next_desc_h = mdma_dma_to_desc_high (dma_addr);

		desc = dma->desc[i];
	}

	desc->next_desc = mdma_dma_to_desc_low (first_dma_addr);
	desc->next_desc_h = mdma_dma_to_desc_high (first_dma_addr);

	return 0;

NO_DESC:
	dma_pool_destroy (dma->desc_pool);
NO_PCI_POOL:
	kfree (dma->desc);
NO_DESC_PTR:
	return -ENOMEM;
}
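
Freeing the ring built above has to recover each descriptor's bus address from the next_desc/next_desc_h fields of its predecessor, and the first descriptor's address from the last one, since the list is circular. A hypothetical teardown sketch following the allocator's conventions; lsdma_free_descriptors() is assumed here, not shown on this page:

static void
lsdma_free_descriptors (struct master_dma *dma)
{
	const unsigned int total_pointers =
		dma->pointers_per_buf * dma->buffers;
	struct lsdma_desc *last = dma->desc[total_pointers - 1];
	dma_addr_t dma_addr, first_dma_addr;
	unsigned int i;

	/* the ring is circular, so the last descriptor holds the bus
	 * address of the first; save it before freeing anything */
	first_dma_addr = mdma_desc_to_dma (last->next_desc,
		last->next_desc_h);

	/* every other descriptor's bus address lives in its predecessor */
	for (i = total_pointers - 1; i > 0; i--) {
		struct lsdma_desc *prev = dma->desc[i - 1];

		dma_addr = mdma_desc_to_dma (prev->next_desc,
			prev->next_desc_h);
		dma_pool_free (dma->desc_pool, dma->desc[i], dma_addr);
	}
	dma_pool_free (dma->desc_pool, dma->desc[0], first_dma_addr);

	dma_pool_destroy (dma->desc_pool);
	kfree (dma->desc);
}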
Code Example #20
File: if_athp_htt_tx.c Project: erikarn/otus
int ath10k_htt_tx(struct ath10k_htt *htt, struct athp_buf *msdu)
{
#if 0
	struct ath10k *ar = htt->ar;
	//struct device *dev = ar->sc_dev;
	//struct ieee80211_frame *hdr = (struct ieee80211_frame *)mbuf_skb_data(msdu->m);
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct htt_data_tx_desc_frag *frags;
	u8 vdev_id = skb_cb->vdev_id;
	u8 tid = skb_cb->htt.tid;
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	dma_addr_t paddr = 0;
	u32 frags_paddr = 0;
	struct htt_msdu_ext_desc *ext_desc = NULL;

	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		goto err;

	ATHP_HTT_TX_LOCK(htt);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	ATHP_HTT_TX_UNLOCK(htt);
	if (res < 0) {
		goto err_tx_dec;
	}
	msdu_id = res;

	prefetch_len = min(htt->prefetch_len, mbuf_skb_len(msdu->m));
	prefetch_len = roundup(prefetch_len, 4);

	skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
					   &paddr);
	if (!skb_cb->htt.txbuf) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}
	skb_cb->htt.txbuf_paddr = paddr;

	if ((IEEE80211_IS_ACTION(hdr) ||
	     IEEE80211_IS_DEAUTH(hdr) ||
	     IEEE80211_IS_DISASSOC(hdr)) &&
	     IEEE80211_HAS_PROT(hdr)) {
		mbuf_skb_put(msdu->m, IEEE80211_CCMP_MIC_LEN);
	} else if (!skb_cb->htt.nohwcrypt &&
		   skb_cb->txmode == ATH10K_HW_TXRX_RAW) {
		mbuf_skb_put(msdu->m, IEEE80211_CCMP_MIC_LEN);
	}

	/* Do the initial load/sync */

	/* XXX TODO: ADRIAN: figure out what I'm missing! */
	res = athp_dma_mbuf_load(ar, &ar->buf_tx.dh, &msdu->mb, msdu->m);
	if (res) {
		res = -EIO;
		goto err_free_txbuf;
	}
	/* Ok, we're not modifying the msdu further, so sync here */
	athp_dma_mbuf_pre_xmit(ar, &ar->buf_tx.dh, &msdu->mb);

	switch (skb_cb->txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		/* pass through */
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			memset(&htt->frag_desc.vaddr[msdu_id], 0,
			       sizeof(struct htt_msdu_ext_desc));
			frags = (struct htt_data_tx_desc_frag *)
				&htt->frag_desc.vaddr[msdu_id].frags;
			ext_desc = &htt->frag_desc.vaddr[msdu_id];
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(msdu->mb.paddr);
			frags[0].tword_addr.paddr_hi = 0;
			frags[0].tword_addr.len_16 = __cpu_to_le16(mbuf_skb_len(msdu->m));

			frags_paddr =  htt->frag_desc.paddr +
				(sizeof(struct htt_msdu_ext_desc) * msdu_id);
		} else {
			frags = skb_cb->htt.txbuf->frags;
			frags[0].dword_addr.paddr =
				__cpu_to_le32(msdu->mb.paddr);
			frags[0].dword_addr.len = __cpu_to_le32(mbuf_skb_len(msdu->m));
			frags[1].dword_addr.paddr = 0;
			frags[1].dword_addr.len = 0;

			frags_paddr = skb_cb->htt.txbuf_paddr;
		}
		flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = msdu->mb.paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why HTC tx completion handler itself is ignored by
	 * setting NULL to transfer_context for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
	 * as it's a waste of resources. By bypassing HTC it is possible to
	 * avoid extra memory allocations, compress data structures and thus
	 * improve performance. */

	skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
	skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
			sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			sizeof(skb_cb->htt.txbuf->cmd_tx) +
			prefetch_len);
	skb_cb->htt.txbuf->htc_hdr.flags = 0;

	if (skb_cb->htt.nohwcrypt)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	if (!skb_cb->is_protected)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);

	/* XXX TODO: ADRIAN: L3/L4 offload */
#if 0
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		if (ar->hw_params.continuous_frag_desc)
			ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
	}
#endif

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely on a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
	skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(mbuf_skb_len(msdu->m));
	skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID);
	skb_cb->htt.txbuf->cmd_tx.freq = __cpu_to_le16(skb_cb->htt.freq);

#ifdef	ATHP_TRACE_DIAG
	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
#endif
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %u flags1 %u len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
		   (unsigned) flags0, (unsigned) flags1, mbuf_skb_len(msdu->m), msdu_id, frags_paddr,
		   (u32)msdu->mb.paddr, vdev_id, tid, skb_cb->htt.freq);
	athp_debug_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			mbuf_skb_data(msdu->m), mbuf_skb_len(msdu->m));
#ifdef	ATHP_TRACE_DIAG
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);
#endif
	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
	sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
			    sizeof(skb_cb->htt.txbuf->frags);
	sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = mbuf_skb_data(msdu->m);
	sg_items[1].paddr = msdu->mb.paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	athp_dma_mbuf_unload(ar, &ar->buf_tx.dh, &msdu->mb);
err_free_txbuf:
	dma_pool_free(htt->tx_pool,
		      skb_cb->htt.txbuf,
		      skb_cb->htt.txbuf_paddr);
err_free_msdu_id:
	ATHP_HTT_TX_LOCK(htt);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	ATHP_HTT_TX_UNLOCK(htt);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt);
err:
	return res;
#else
	device_printf(htt->ar->sc_dev, "%s; TODO implement!\n", __func__);
	return (-EINVAL);
#endif
}
Code Example #21
File: dmabounce.c Project: AdiPat/i9003_Kernel
/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
		  struct page *page, unsigned long offset, size_t size,
		  enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	struct dmabounce_pool *pool;
	struct device *dev = device_info->dev;
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
		__func__, ptr, size, dir);

	if (size <= device_info->small.size) {
		pool = &device_info->small;
	} else if (size <= device_info->large.size) {
		pool = &device_info->large;
	} else {
		pool = NULL;
	}

	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
	if (buf == NULL) {
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
		return NULL;
	}

	buf->ptr = ptr;
	buf->page = page;
	buf->offset = offset;
	buf->size = size;
	buf->direction = dir;
	buf->pool = pool;

	if (pool) {
		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
					   &buf->safe_dma_addr);
	} else {
		buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
					       GFP_ATOMIC);
	}

	if (buf->safe == NULL) {
		dev_warn(dev,
			 "%s: could not alloc dma memory (size=%d)\n",
			 __func__, size);
		kfree(buf);
		return NULL;
	}

#ifdef STATS
	if (pool)
		pool->allocs++;
	device_info->total_allocs++;
#endif

	write_lock_irqsave(&device_info->lock, flags);
	list_add(&buf->node, &device_info->safe_buffers);
	write_unlock_irqrestore(&device_info->lock, flags);

	return buf;
}
Code Example #22
File: img-mdc-dma.c Project: 168519/linux
static struct dma_async_tx_descriptor *mdc_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction dir,
	unsigned long flags, void *context)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;
	struct mdc_tx_desc *mdesc;
	struct scatterlist *sg;
	struct mdc_hw_list_desc *curr, *prev = NULL;
	dma_addr_t curr_phys, prev_phys;
	unsigned int i;

	if (!sgl)
		return NULL;

	if (!is_slave_direction(dir))
		return NULL;

	if (mdc_check_slave_width(mchan, dir) < 0)
		return NULL;

	mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
	if (!mdesc)
		return NULL;
	mdesc->chan = mchan;

	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t buf = sg_dma_address(sg);
		size_t buf_len = sg_dma_len(sg);

		while (buf_len > 0) {
			size_t xfer_size;

			curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
					      &curr_phys);
			if (!curr)
				goto free_desc;

			if (!prev) {
				mdesc->list_phys = curr_phys;
				mdesc->list = curr;
			} else {
				prev->node_addr = curr_phys;
				prev->next_desc = curr;
			}

			xfer_size = min_t(size_t, mdma->max_xfer_size,
					  buf_len);

			if (dir == DMA_MEM_TO_DEV) {
				mdc_list_desc_config(mchan, curr, dir, buf,
						     mchan->config.dst_addr,
						     xfer_size);
			} else {
				mdc_list_desc_config(mchan, curr, dir,
						     mchan->config.src_addr,
						     buf, xfer_size);
			}

			prev = curr;
			prev_phys = curr_phys;

			mdesc->list_len++;
			mdesc->list_xfer_size += xfer_size;
			buf += xfer_size;
			buf_len -= xfer_size;
		}
	}

	/* the source excerpt is truncated here; the function plausibly ends
	 * the same way as its cyclic sibling in Code Example #13: */
	return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
	mdc_desc_free(&mdesc->vd);

	return NULL;
}
Code Example #23
File: lightnvm.c Project: ReneNyffenegger/linux
static int nvme_nvm_submit_user_cmd(struct request_queue *q,
				struct nvme_ns *ns,
				struct nvme_nvm_command *vcmd,
				void __user *ubuf, unsigned int bufflen,
				void __user *meta_buf, unsigned int meta_len,
				void __user *ppa_buf, unsigned int ppa_len,
				u32 *result, u64 *status, unsigned int timeout)
{
	bool write = nvme_is_write((struct nvme_command *)vcmd);
	struct nvm_dev *dev = ns->ndev;
	struct gendisk *disk = ns->disk;
	struct request *rq;
	struct bio *bio = NULL;
	__le64 *ppa_list = NULL;
	dma_addr_t ppa_dma;
	__le64 *metadata = NULL;
	dma_addr_t metadata_dma;
	DECLARE_COMPLETION_ONSTACK(wait);
	int ret = 0;

	rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0,
			NVME_QID_ANY);
	if (IS_ERR(rq)) {
		ret = -ENOMEM;
		goto err_cmd;
	}

	rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (ppa_buf && ppa_len) {
		ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
		if (!ppa_list) {
			ret = -ENOMEM;
			goto err_rq;
		}
		if (copy_from_user(ppa_list, (void __user *)ppa_buf,
						sizeof(u64) * (ppa_len + 1))) {
			ret = -EFAULT;
			goto err_ppa;
		}
		vcmd->ph_rw.spba = cpu_to_le64(ppa_dma);
	} else {
		vcmd->ph_rw.spba = cpu_to_le64((uintptr_t)ppa_buf);
	}

	if (ubuf && bufflen) {
		ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL);
		if (ret)
			goto err_ppa;
		bio = rq->bio;

		if (meta_buf && meta_len) {
			metadata = dma_pool_alloc(dev->dma_pool, GFP_KERNEL,
								&metadata_dma);
			if (!metadata) {
				ret = -ENOMEM;
				goto err_map;
			}

			if (write) {
				if (copy_from_user(metadata,
						(void __user *)meta_buf,
						meta_len)) {
					ret = -EFAULT;
					goto err_meta;
				}
			}
			vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
		}

		bio->bi_disk = disk;
	}

	blk_execute_rq(q, NULL, rq, 0);

	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else if (nvme_req(rq)->status & 0x7ff)
		ret = -EIO;
	if (result)
		*result = nvme_req(rq)->status & 0x7ff;
	if (status)
		*status = le64_to_cpu(nvme_req(rq)->result.u64);

	if (metadata && !ret && !write) {
		if (copy_to_user(meta_buf, (void *)metadata, meta_len))
			ret = -EFAULT;
	}
err_meta:
	if (meta_buf && meta_len)
		dma_pool_free(dev->dma_pool, metadata, metadata_dma);
err_map:
	if (bio)
		blk_rq_unmap_user(bio);
err_ppa:
	if (ppa_buf && ppa_len)
		dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
err_rq:
	blk_mq_free_request(rq);
err_cmd:
	return ret;
}