static int rmnet_mhi_enable_iface(struct rmnet_mhi_private *rmnet_mhi_ptr)
{
	int ret = 0;
	struct rmnet_mhi_private **rmnet_mhi_ctxt = NULL;
	enum MHI_STATUS r = MHI_STATUS_SUCCESS;

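	/* Reset the per-device debug/statistics counters before (re)enabling the interface */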
	memset(tx_interrupts_count, 0, sizeof(tx_interrupts_count));
	memset(rx_interrupts_count, 0, sizeof(rx_interrupts_count));
	memset(rx_interrupts_in_masked_irq, 0,
	       sizeof(rx_interrupts_in_masked_irq));
	memset(rx_napi_skb_burst_min, 0, sizeof(rx_napi_skb_burst_min));
	memset(rx_napi_skb_burst_max, 0, sizeof(rx_napi_skb_burst_max));
	memset(tx_cb_skb_free_burst_min, 0, sizeof(tx_cb_skb_free_burst_min));
	memset(tx_cb_skb_free_burst_max, 0, sizeof(tx_cb_skb_free_burst_max));
	memset(tx_ring_full_count, 0, sizeof(tx_ring_full_count));
	memset(tx_queued_packets_count, 0, sizeof(tx_queued_packets_count));
	memset(rx_napi_budget_overflow, 0, sizeof(rx_napi_budget_overflow));

	rmnet_log(MSG_INFO, "Entered.\n");

	if (rmnet_mhi_ptr == NULL) {
		rmnet_log(MSG_CRITICAL, "Bad input args.\n");
		return -EINVAL;
	}

	rx_napi_skb_burst_min[rmnet_mhi_ptr->dev_index] = UINT_MAX;
	tx_cb_skb_free_burst_min[rmnet_mhi_ptr->dev_index] = UINT_MAX;

	skb_queue_head_init(&(rmnet_mhi_ptr->tx_buffers));
	skb_queue_head_init(&(rmnet_mhi_ptr->rx_buffers));

	if (rmnet_mhi_ptr->tx_client_handle != NULL) {
		rmnet_log(MSG_INFO,
			"Opening TX channel\n");
		r = mhi_open_channel(rmnet_mhi_ptr->tx_client_handle);
		if (r != MHI_STATUS_SUCCESS) {
			rmnet_log(MSG_CRITICAL,
				"Failed to start TX chan ret %d\n", r);
			ret = -EIO;
			goto mhi_tx_chan_start_fail;
		} else {
			rmnet_mhi_ptr->tx_enabled = 1;
		}
	}
	if (rmnet_mhi_ptr->rx_client_handle != NULL) {
		rmnet_log(MSG_INFO,
			"Opening RX channel\n");
		r = mhi_open_channel(rmnet_mhi_ptr->rx_client_handle);
		if (r != MHI_STATUS_SUCCESS) {
			rmnet_log(MSG_CRITICAL,
				"Failed to start RX chan ret %d\n", r);
			ret = -EIO;
			goto mhi_rx_chan_start_fail;
		} else {
			rmnet_mhi_ptr->rx_enabled = 1;
		}
	}
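	/* Allocate the net_device; its private area stores a pointer back to this MHI context (see netdev_priv() below) */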
	rmnet_mhi_ptr->dev =
		alloc_netdev(sizeof(struct rmnet_mhi_private *),
			     RMNET_MHI_DEV_NAME,
			     NET_NAME_PREDICTABLE, rmnet_mhi_setup);
	if (!rmnet_mhi_ptr->dev) {
		rmnet_log(MSG_CRITICAL, "Network device allocation failed\n");
		ret = -ENOMEM;
		goto net_dev_alloc_fail;
	}

	rmnet_mhi_ctxt = netdev_priv(rmnet_mhi_ptr->dev);
	*rmnet_mhi_ctxt = rmnet_mhi_ptr;

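	/*
	 * Pick buffer allocation flags based on whether the device accepts
	 * the MHI DMA mask: fall back to GFP_KERNEL if the mask cannot be
	 * set, otherwise restrict allocations to the DMA zone.
	 */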
	ret = dma_set_mask(&(rmnet_mhi_ptr->dev->dev),
						MHI_DMA_MASK);
	if (ret)
		rmnet_mhi_ptr->allocation_flags = GFP_KERNEL;
	else
		rmnet_mhi_ptr->allocation_flags = GFP_DMA;

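	/* Note: an inbound-init failure is only logged; the interface is still registered below */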
	r = rmnet_mhi_init_inbound(rmnet_mhi_ptr);
	if (r) {
		rmnet_log(MSG_CRITICAL,
			"Failed to init inbound ret %d\n", r);
	}

	netif_napi_add(rmnet_mhi_ptr->dev, &(rmnet_mhi_ptr->napi),
		       rmnet_mhi_poll, MHI_NAPI_WEIGHT_VALUE);

	rmnet_mhi_ptr->mhi_enabled = 1;
	ret = register_netdev(rmnet_mhi_ptr->dev);
	if (ret) {
		rmnet_log(MSG_CRITICAL,
			  "Network device registration failed\n");
		goto net_dev_reg_fail;
	}
	napi_enable(&(rmnet_mhi_ptr->napi));

	rmnet_log(MSG_INFO, "Exited.\n");

	return 0;

net_dev_reg_fail:
	netif_napi_del(&(rmnet_mhi_ptr->napi));
	free_netdev(rmnet_mhi_ptr->dev);
net_dev_alloc_fail:
	mhi_close_channel(rmnet_mhi_ptr->rx_client_handle);
	rmnet_mhi_ptr->dev = NULL;
mhi_rx_chan_start_fail:
	mhi_close_channel(rmnet_mhi_ptr->tx_client_handle);
mhi_tx_chan_start_fail:
	rmnet_log(MSG_INFO, "Exited ret %d.\n", ret);
	return ret;
}
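For context, rmnet_mhi_poll registered above via netif_napi_add() must follow the standard NAPI poll contract. The sketch below shows only that contract; it is not the driver's actual poll routine, and example_rx_one_packet() is a hypothetical placeholder for draining one completed RX transfer from the MHI core.

/*
 * Minimal NAPI poll skeleton (illustrative only). Assumes <linux/netdevice.h>
 * and <linux/types.h>. example_rx_one_packet() is hypothetical; the real
 * driver pulls completed MHI transfers here and hands SKBs up the stack.
 */
static bool example_rx_one_packet(struct napi_struct *napi);	/* hypothetical helper */

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* Process at most 'budget' packets per poll invocation */
	while (work_done < budget) {
		if (!example_rx_one_packet(napi))
			break;	/* no more completed transfers */
		work_done++;
	}

	/* Leave polling mode only once the queue has been drained */
	if (work_done < budget)
		napi_complete(napi);

	return work_done;
}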
Example 2
static int rmnet_mhi_open(struct net_device *dev)
{
	MHI_STATUS res = MHI_STATUS_reserved;
	struct rmnet_mhi_private *rmnet_mhi_ptr = netdev_priv(dev);
	int index = 0;

	if (mhi_rmnet_initialized) {
		napi_enable(&(rmnet_mhi_ptr->napi));
		netif_start_queue(dev);
		return 0;
	}
	pr_info("%s(): First time channel open", __func__);
	mhi_rmnet_initialized = 1;

	res = mhi_open_channel(
		&(rmnet_mhi_ptr->tx_client_handle),
		rmnet_mhi_ptr->tx_channel, 0,
		&tx_cbs, (void *)dev);

	if (MHI_STATUS_SUCCESS != res) {
		rmnet_mhi_ptr->tx_client_handle = 0;
		pr_err("%s: mhi_open_channel failed for TX, error is %d",
		       __func__, res);
		goto cleanup;
	}

	res = mhi_open_channel(
		&(rmnet_mhi_ptr->rx_client_handle),
		rmnet_mhi_ptr->rx_channel, 0,
		&rx_cbs, (void *)dev);

	if (MHI_STATUS_SUCCESS != res) {
		rmnet_mhi_ptr->rx_client_handle = 0;
		pr_err("%s: mhi_open_channel failed for RX, error is %d",
		       __func__, res);
		goto cleanup;
	}

	rmnet_mhi_ptr->tx_buffers_max =
		mhi_get_max_buffers(
			rmnet_mhi_ptr->tx_client_handle);
	rmnet_mhi_ptr->rx_buffers_max =
		mhi_get_max_buffers(
			rmnet_mhi_ptr->rx_client_handle);

	skb_queue_head_init(&(rmnet_mhi_ptr->tx_buffers));

	/* Create RX buffers for MHI core */

	skb_queue_head_init(&(rmnet_mhi_ptr->rx_buffers));

	for (index = 0; index < rmnet_mhi_ptr->rx_buffers_max; index++) {
		struct sk_buff *skb = 0;
		dma_addr_t dma_addr;
		uintptr_t *cb_ptr = 0;

		skb = alloc_skb(rmnet_mhi_ptr->mru,
				rmnet_mhi_ptr->allocation_flags);

		if (0 == skb) {
			pr_err("%s: SKB allocation failure during open",
			       __func__);
			goto cleanup;
		}

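		/* Reserve headroom; only the remaining (mru - MHI_RX_HEADROOM) bytes are mapped for device DMA below */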
		skb_reserve(skb, MHI_RX_HEADROOM);

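		/* Stash the DMA handle at the start of skb->cb so the submit loop below (and presumably the unmap helper in the cleanup path) can retrieve it */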
		cb_ptr = (uintptr_t *)skb->cb;

		dma_addr = dma_map_single(&(dev->dev), skb->data,
					  (rmnet_mhi_ptr->mru - MHI_RX_HEADROOM),
					  DMA_FROM_DEVICE);
		*cb_ptr = (uintptr_t)dma_addr;
		if (dma_mapping_error(&(dev->dev), dma_addr)) {
			pr_err("%s: DMA mapping for RX buffers has failed",
			       __func__);
			kfree_skb(skb);
			goto cleanup;
		}

		skb_queue_tail(&(rmnet_mhi_ptr->rx_buffers), skb);

	}

	/* Create bounce buffers for TX */
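	/* These are pre-mapped within MHI_DMA_MASK; TX packets that map outside that range are presumably copied into one of them at transmit time */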

	skb_queue_head_init(&(rmnet_mhi_ptr->tx_bounce_buffers));

	for (index = 0; index < rmnet_mhi_ptr->tx_buffers_max; index++) {
		struct sk_buff *skb = alloc_skb(dev->mtu,
					rmnet_mhi_ptr->allocation_flags);
		struct tx_buffer_priv *tx_priv = 0;
		if (0 == skb) {
			/* TODO: Handle error */
			pr_err("%s: SKB allocation failure during open",
			       __func__);
			goto cleanup;
		}

		tx_priv = (struct tx_buffer_priv *)(skb->cb);
		tx_priv->is_bounce_buffer = true;

		tx_priv->dma_addr = dma_map_single(&(dev->dev),
						   skb->data,
						   dev->mtu,
						   DMA_TO_DEVICE);
		if (dma_mapping_error(&(dev->dev), tx_priv->dma_addr)) {
			pr_err("%s: DMA mapping for TX bounce buffer failed",
			       __func__);
			kfree_skb(skb);
			goto cleanup;
		}

		/* Need to ensure that the bounce buffers are within range */
		if (DMA_RANGE_CHECK(tx_priv->dma_addr,
				    dev->mtu,
				    MHI_DMA_MASK)) {
			pr_err("%s: Bounce buffer is out of MHI address range",
			       __func__);
			kfree_skb(skb);
			goto cleanup;
		}

		skb_queue_tail(&(rmnet_mhi_ptr->tx_bounce_buffers), skb);
	}

	/* Submit the RX buffers */
	for (index = 0; index < rmnet_mhi_ptr->rx_buffers_max; index++) {
		struct sk_buff *skb = skb_dequeue(&(rmnet_mhi_ptr->rx_buffers));
		/* TODO: Rework the casting here */
		/* Queue only the length that was actually DMA-mapped above */
		res = mhi_queue_xfer(
			rmnet_mhi_ptr->rx_client_handle,
			(uintptr_t)(*(uintptr_t *)(skb->cb)),
			rmnet_mhi_ptr->mru - MHI_RX_HEADROOM, 0, 0);
		if (MHI_STATUS_SUCCESS != res) {
			pr_err("%s: mhi_queue_xfer failed, error %d",
			       __func__, res);
			/* TODO: Handle this error. Do we reset the MHI Core? */
			goto cleanup;
		}

		skb_queue_tail(&(rmnet_mhi_ptr->rx_buffers), skb);
	}

	napi_enable(&(rmnet_mhi_ptr->napi));
	netif_start_queue(dev);
	return 0;

cleanup:
	if (0 != rmnet_mhi_ptr->tx_client_handle)
		mhi_close_channel(rmnet_mhi_ptr->tx_client_handle);

	if (0 != rmnet_mhi_ptr->rx_client_handle)
		mhi_close_channel(rmnet_mhi_ptr->rx_client_handle);

	/* Clean TX bounce buffers */
	rmnet_mhi_internal_clean_unmap_buffers(dev,
					    &(rmnet_mhi_ptr->tx_bounce_buffers),
					    DMA_TO_DEVICE);

	/* Clean RX buffers */
	rmnet_mhi_internal_clean_unmap_buffers(dev,
					       &(rmnet_mhi_ptr->rx_buffers),
					       DMA_FROM_DEVICE);

	return -ENODEV;
}
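The cleanup path above relies on rmnet_mhi_internal_clean_unmap_buffers(), which is not shown in this listing. Below is a minimal sketch of what such a helper plausibly does, assuming the DMA handle sits at the start of skb->cb as the RX path above arranges (the TX bounce path keeps it inside struct tx_buffer_priv instead), and taking the mapped length as an explicit parameter, which the real helper's three-argument signature does not have.

/*
 * Illustrative sketch only; not the driver's implementation. Assumes
 * <linux/skbuff.h> and <linux/dma-mapping.h>, and that the DMA handle was
 * stored at the start of skb->cb when the buffer was mapped.
 */
static void example_clean_unmap_buffers(struct net_device *dev,
					struct sk_buff_head *queue,
					size_t mapped_len,
					enum dma_data_direction direction)
{
	struct sk_buff *skb;

	/* Drain the queue: undo the streaming mapping, then free each SKB */
	while ((skb = skb_dequeue(queue)) != NULL) {
		dma_addr_t dma_addr = (dma_addr_t)(*(uintptr_t *)skb->cb);

		dma_unmap_single(&dev->dev, dma_addr, mapped_len, direction);
		kfree_skb(skb);
	}
}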