Example #1
static int rmnet_mhi_init_inbound(struct rmnet_mhi_private *rmnet_mhi_ptr)
{
	u32 i;
	enum MHI_STATUS res;
	rmnet_log(MSG_INFO, "Entered\n");
	rmnet_mhi_ptr->tx_buffers_max =
		mhi_get_max_desc(
			rmnet_mhi_ptr->tx_client_handle);
	rmnet_mhi_ptr->rx_buffers_max =
		mhi_get_max_desc(
			rmnet_mhi_ptr->rx_client_handle);

	/* Pre-allocate one RX buffer per available descriptor and map it
	 * for DMA up front. */
	for (i = 0; i < rmnet_mhi_ptr->rx_buffers_max; i++) {
		struct sk_buff *skb = NULL;
		dma_addr_t dma_addr;
		dma_addr_t *cb_ptr = NULL;

		skb = alloc_skb(rmnet_mhi_ptr->mru,
				rmnet_mhi_ptr->allocation_flags);

		if (!skb) {
			rmnet_log(MSG_CRITICAL,
					"SKB allocation failure during open\n");
			return -ENOMEM;
		}

		skb_reserve(skb, MHI_RX_HEADROOM);
		cb_ptr = (dma_addr_t *)skb->cb;
		dma_addr = dma_map_single(&(rmnet_mhi_ptr->dev->dev), skb->data,
					 (rmnet_mhi_ptr->mru - MHI_RX_HEADROOM),
					 DMA_FROM_DEVICE);
		if (dma_mapping_error(&(rmnet_mhi_ptr->dev->dev), dma_addr)) {
			rmnet_log(MSG_CRITICAL,
				  "DMA mapping for RX buffers has failed\n");
			kfree_skb(skb);
			return -EIO;
		}
		/* Stash the bus address in skb->cb so the submit loop below
		 * can retrieve it. */
		*cb_ptr = dma_addr;
		skb_queue_tail(&(rmnet_mhi_ptr->rx_buffers), skb);
	}

	/* Submit the RX buffers */
	for (i = 0; i < rmnet_mhi_ptr->rx_buffers_max; i++) {
		struct sk_buff *skb = skb_dequeue(&(rmnet_mhi_ptr->rx_buffers));
		res = mhi_queue_xfer(rmnet_mhi_ptr->rx_client_handle,
					*((dma_addr_t *)(skb->cb)),
					rmnet_mhi_ptr->mru - MHI_RX_HEADROOM,
					MHI_EOT);
		if (res != MHI_STATUS_SUCCESS) {
			rmnet_log(MSG_CRITICAL,
					"mhi_queue_xfer failed, error %d\n",
					res);
			/* Unmap and free the dequeued skb so it is not
			 * leaked on this error path. */
			dma_unmap_single(&(rmnet_mhi_ptr->dev->dev),
					 *((dma_addr_t *)(skb->cb)),
					 rmnet_mhi_ptr->mru - MHI_RX_HEADROOM,
					 DMA_FROM_DEVICE);
			kfree_skb(skb);
			return -EIO;
		}
		skb_queue_tail(&(rmnet_mhi_ptr->rx_buffers), skb);
	}
	rmnet_log(MSG_INFO, "Exited\n");
	return 0;
}
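
The revised version below performs the same initialization but defers DMA
mapping: rather than calling dma_map_single() up front and passing the bus
address to mhi_queue_xfer(), it hands over the raw skb->data pointer and
records only the intended transfer size in the skb's private area,
presumably leaving the mapping to the MHI transfer layer. A sketch of the
private-data struct it relies on follows the function.
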
static int rmnet_mhi_init_inbound(struct rmnet_mhi_private *rmnet_mhi_ptr)
{
	u32 i;
	enum MHI_STATUS res;
	struct mhi_skb_priv *rx_priv;
	u32 cur_mru = rmnet_mhi_ptr->mru;
	struct sk_buff *skb;

	rmnet_log(MSG_INFO, "Entered\n");
	rmnet_mhi_ptr->tx_buffers_max = mhi_get_max_desc(
					rmnet_mhi_ptr->tx_client_handle);
	rmnet_mhi_ptr->rx_buffers_max = mhi_get_max_desc(
					rmnet_mhi_ptr->rx_client_handle);

	/* Pre-allocate the RX buffers; unlike the first version, no DMA
	 * mapping is done here, only the intended transfer size is
	 * recorded in each skb's private area. */
	for (i = 0; i < rmnet_mhi_ptr->rx_buffers_max; i++) {
		skb = alloc_skb(cur_mru, rmnet_mhi_ptr->allocation_flags);

		if (!skb) {
			rmnet_log(MSG_CRITICAL,
					"SKB allocation failure during open\n");
			return -ENOMEM;
		}
		rx_priv = (struct mhi_skb_priv *)(skb->cb);

		skb_reserve(skb, MHI_RX_HEADROOM);
		rx_priv->dma_size = cur_mru - MHI_RX_HEADROOM;
		rx_priv->dma_addr = 0;
		skb_queue_tail(&rmnet_mhi_ptr->rx_buffers, skb);
	}

	/* Submit the RX buffers */
	for (i = 0; i < rmnet_mhi_ptr->rx_buffers_max; i++) {
		skb = skb_dequeue(&rmnet_mhi_ptr->rx_buffers);
		rx_priv = (struct mhi_skb_priv *)(skb->cb);
		res = mhi_queue_xfer(rmnet_mhi_ptr->rx_client_handle,
				     skb->data,
				     rx_priv->dma_size,
				     MHI_EOT);
		if (res != MHI_STATUS_SUCCESS) {
			rmnet_log(MSG_CRITICAL,
					"mhi_queue_xfer failed, error %d\n",
					res);
			/* Free the dequeued skb so it is not leaked on this
			 * error path. */
			kfree_skb(skb);
			return -EIO;
		}
		skb_queue_tail(&rmnet_mhi_ptr->rx_buffers, skb);
	}
	rmnet_log(MSG_INFO, "Exited\n");
	return 0;
}
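
The revised function reinterprets skb->cb as a struct mhi_skb_priv, whose
definition is not part of this excerpt. A minimal sketch consistent with
the two fields used above (dma_addr and dma_size) might look like the
following; the actual definition lives in the MHI driver headers and may
differ:

/*
 * Hypothetical sketch of the per-skb private data assumed by the revised
 * rmnet_mhi_init_inbound(). It lives in skb->cb, which is 48 bytes, so it
 * must stay small.
 */
struct mhi_skb_priv {
	dma_addr_t dma_addr;	/* bus address, filled in once the buffer is mapped */
	size_t dma_size;	/* number of bytes to map and transfer */
};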