Example #1
/*
 * RNDIS filter on send completion callback
 */
static void
hv_rf_on_send_completion(void *context)
{
	rndis_filter_packet *filter_pkt = (rndis_filter_packet *)context;

	/* Pass it back to the original handler */
	netvsc_xmit_completion(filter_pkt->completion_context);
}
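For context, a hedged sketch of the interposition that installs this callback: the filter saves the caller's completion context, then substitutes its own handler. The field names follow Example #6 below; hv_nv_on_send() is an assumed name for the lower netvsc send routine, not taken from this listing.

/*
 * Hedged sketch (not from this listing): how the filter's send path
 * typically arranges for hv_rf_on_send_completion() to run.
 */
static int
hv_rf_on_send(struct hv_device *device, netvsc_packet *pkt)
{
	rndis_filter_packet *filter_pkt = pkt->extension;

	/* Remember where to chain back to on completion ... */
	filter_pkt->completion_context =
	    pkt->compl.send.send_completion_context;

	/* ... then interpose the filter's own handler. */
	pkt->compl.send.on_send_completion = hv_rf_on_send_completion;
	pkt->compl.send.send_completion_context = filter_pkt;

	/* hv_nv_on_send() is an assumed name for the netvsc send routine. */
	return (hv_nv_on_send(device, pkt));
}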
Example #2
int netvsc_send(struct hv_device *device,
		struct hv_netvsc_packet *packet)
{
	struct netvsc_device *net_device;
	int ret = 0, m_ret = 0;
	struct vmbus_channel *out_channel;
	u16 q_idx = packet->q_idx;
	u32 pktlen = packet->total_data_buflen, msd_len = 0;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	unsigned long flag;
	struct multi_send_data *msdp;
	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
	bool try_batch;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;

	out_channel = net_device->chn_table[q_idx];
	if (!out_channel) {
		out_channel = device->channel;
		q_idx = 0;
		packet->q_idx = 0;
	}
	packet->channel = out_channel;
	packet->send_buf_index = NETVSC_INVALID_INDEX;
	packet->cp_partial = false;

	msdp = &net_device->msd[q_idx];

	/* batch packets in send buffer if possible */
	spin_lock_irqsave(&msdp->lock, flag);
	if (msdp->pkt)
		msd_len = msdp->pkt->total_data_buflen;

	try_batch = packet->is_data_pkt && msd_len > 0 && msdp->count <
		    net_device->max_pkt;

	/* Batch case 1: the whole packet also fits in the pending section. */
	if (try_batch && msd_len + pktlen + net_device->pkt_align <
	    net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;

	/* Batch case 2: only the RNDIS message fits; copy it partially. */
	} else if (try_batch && msd_len + packet->rmsg_size <
		   net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;
		packet->cp_partial = true;

	/* No batching: claim a fresh section; any pending packet is
	 * detached here and sent separately below. */
	} else if (packet->is_data_pkt && pktlen + net_device->pkt_align <
		   net_device->send_section_size) {
		section_index = netvsc_get_next_send_section(net_device);
		if (section_index != NETVSC_INVALID_INDEX) {
			msd_send = msdp->pkt;
			msdp->pkt = NULL;
			msdp->count = 0;
			msd_len = 0;
		}
	}

	if (section_index != NETVSC_INVALID_INDEX) {
		netvsc_copy_to_send_buf(net_device,
					section_index, msd_len,
					packet);

		packet->send_buf_index = section_index;

		if (packet->cp_partial) {
			packet->page_buf_cnt -= packet->rmsg_pgcnt;
			packet->total_data_buflen = msd_len + packet->rmsg_size;
		} else {
			packet->page_buf_cnt = 0;
			packet->total_data_buflen += msd_len;
		}

		if (msdp->pkt)
			netvsc_xmit_completion(msdp->pkt);

		if (packet->xmit_more && !packet->cp_partial) {
			msdp->pkt = packet;
			msdp->count++;
		} else {
			cur_send = packet;
			msdp->pkt = NULL;
			msdp->count = 0;
		}
	} else {
		msd_send = msdp->pkt;
		msdp->pkt = NULL;
		msdp->count = 0;
		cur_send = packet;
	}

	spin_unlock_irqrestore(&msdp->lock, flag);

	if (msd_send) {
		m_ret = netvsc_send_pkt(msd_send, net_device);

		if (m_ret != 0) {
			netvsc_free_send_slot(net_device,
					      msd_send->send_buf_index);
			netvsc_xmit_completion(msd_send);
		}
	}

	if (cur_send)
		ret = netvsc_send_pkt(cur_send, net_device);

	if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
		netvsc_free_send_slot(net_device, section_index);

	return ret;
}
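netvsc_get_next_send_section() and netvsc_free_send_slot() are not part of this listing. A minimal sketch of the section-bitmap allocator they imply, assuming net_device carries a send_section_map bitmap and a send_section_cnt count (both names are assumptions):

/*
 * Hedged sketch (not from this listing) of the send-buffer section
 * allocator used above.  One bit per section; sync_test_and_set_bit()
 * keeps concurrent senders from claiming the same section.
 */
static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
	unsigned long *map_addr = net_device->send_section_map;
	unsigned int i;

	for (i = 0; i < net_device->send_section_cnt; i++) {
		if (!sync_test_and_set_bit(i, map_addr))
			return i;	/* section i is now ours */
	}

	return NETVSC_INVALID_INDEX;
}

static void netvsc_free_send_slot(struct netvsc_device *net_device,
				  u32 index)
{
	sync_change_bit(index, net_device->send_section_map);
}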
Example #3
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct driver_context *driver_ctx =
	    driver_to_driver_context(net_device_ctx->device_ctx->device.driver);
	struct netvsc_driver_context *net_drv_ctx =
		(struct netvsc_driver_context *)driver_ctx;
	struct netvsc_driver *net_drv_obj = &net_drv_ctx->drv_obj;
	struct hv_netvsc_packet *packet;
	int ret;
	unsigned int i, num_pages;

	DPRINT_ENTER(NETVSC_DRV);

	DPRINT_DBG(NETVSC_DRV, "xmit packet - len %d data_len %d",
		   skb->len, skb->data_len);

	/* Add 1 for skb->data and additional one for RNDIS */
	num_pages = skb_shinfo(skb)->nr_frags + 1 + 1;
	if (num_pages > net_device_ctx->avail)
		return NETDEV_TX_BUSY;

	/* Allocate a netvsc packet based on # of frags. */
	packet = kzalloc(sizeof(struct hv_netvsc_packet) +
			 (num_pages * sizeof(struct hv_page_buffer)) +
			 net_drv_obj->RequestExtSize, GFP_ATOMIC);
	if (!packet) {
		/* out of memory, silently drop packet */
		DPRINT_ERR(NETVSC_DRV, "unable to allocate hv_netvsc_packet");

		dev_kfree_skb(skb);
		net->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	packet->Extension = (void *)(unsigned long)packet +
				sizeof(struct hv_netvsc_packet) +
				    (num_pages * sizeof(struct hv_page_buffer));

	/* Setup the rndis header */
	packet->PageBufferCount = num_pages;

	/* TODO: Flush all write buffers/ memory fence ??? */
	/* wmb(); */

	/* Initialize it from the skb */
	packet->TotalDataBufferLength	= skb->len;

	/* Start filling in the page buffers starting after RNDIS buffer. */
	packet->PageBuffers[1].Pfn = virt_to_phys(skb->data) >> PAGE_SHIFT;
	packet->PageBuffers[1].Offset
		= (unsigned long)skb->data & (PAGE_SIZE - 1);
	packet->PageBuffers[1].Length = skb_headlen(skb);

	/* Additional fragments are after SKB data */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		packet->PageBuffers[i+2].Pfn = page_to_pfn(f->page);
		packet->PageBuffers[i+2].Offset = f->page_offset;
		packet->PageBuffers[i+2].Length = f->size;
	}

	/* Set the completion routine */
	packet->Completion.Send.OnSendCompletion = netvsc_xmit_completion;
	packet->Completion.Send.SendCompletionContext = packet;
	packet->Completion.Send.SendCompletionTid = (unsigned long)skb;

	ret = net_drv_obj->OnSend(&net_device_ctx->device_ctx->device_obj,
				  packet);
	if (ret == 0) {
		net->stats.tx_bytes += skb->len;
		net->stats.tx_packets++;

		DPRINT_DBG(NETVSC_DRV, "# of xmits %lu total size %lu",
			   net->stats.tx_packets,
			   net->stats.tx_bytes);

		if ((net_device_ctx->avail -= num_pages) < PACKET_PAGES_LOWATER)
			netif_stop_queue(net);
	} else {
		/* we are shutting down or bus overloaded, just drop packet */
		net->stats.tx_dropped++;
		netvsc_xmit_completion(packet);
	}

	DPRINT_EXIT(NETVSC_DRV);
	return NETDEV_TX_OK;
}
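The avail/PACKET_PAGES_LOWATER accounting above implies a matching wake-up on the completion side. A hedged sketch of that counterpart, using this example's field names; PACKET_PAGES_HIWATER is an assumed constant mirroring the LOWATER one:

/*
 * Hedged sketch (not from this listing): returns the pages charged in
 * netvsc_start_xmit() and restarts the queue once above water.
 */
static void netvsc_xmit_completion(void *context)
{
	struct hv_netvsc_packet *packet = context;
	struct sk_buff *skb = (struct sk_buff *)
		(unsigned long)packet->Completion.Send.SendCompletionTid;

	kfree(packet);	/* allocated with kzalloc() in netvsc_start_xmit() */

	if (skb) {
		struct net_device *net = skb->dev;
		struct net_device_context *net_device_ctx = netdev_priv(net);
		/* +2 matches "nr_frags + 1 + 1" on the send side. */
		unsigned int num_pages = skb_shinfo(skb)->nr_frags + 2;

		dev_kfree_skb_any(skb);

		if ((net_device_ctx->avail += num_pages) >=
		    PACKET_PAGES_HIWATER)
			netif_wake_queue(net);
	}
}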
Example #4
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct driver_context *driver_ctx =
	    driver_to_driver_context(net_device_ctx->device_ctx->device.driver);
	struct netvsc_driver_context *net_drv_ctx =
		(struct netvsc_driver_context *)driver_ctx;
	struct netvsc_driver *net_drv_obj = &net_drv_ctx->drv_obj;
	struct hv_netvsc_packet *packet;
	int i;
	int ret;
	int num_frags;
	int retries = 0;

	DPRINT_ENTER(NETVSC_DRV);

	/* Support only 1 chain of frags */
	ASSERT(skb_shinfo(skb)->frag_list == NULL);
	ASSERT(skb->dev == net);

	DPRINT_DBG(NETVSC_DRV, "xmit packet - len %d data_len %d",
		   skb->len, skb->data_len);

	/* Add 1 for skb->data and any additional ones requested */
	num_frags = skb_shinfo(skb)->nr_frags + 1 +
		    net_drv_obj->AdditionalRequestPageBufferCount;

	/* Allocate a netvsc packet based on # of frags. */
	packet = kzalloc(sizeof(struct hv_netvsc_packet) +
			 (num_frags * sizeof(struct hv_page_buffer)) +
			 net_drv_obj->RequestExtSize, GFP_ATOMIC);
	if (!packet) {
		DPRINT_ERR(NETVSC_DRV, "unable to allocate hv_netvsc_packet");
		return -1;
	}

	packet->Extension = (void *)(unsigned long)packet +
				sizeof(struct hv_netvsc_packet) +
				    (num_frags * sizeof(struct hv_page_buffer));

	/* Setup the rndis header */
	packet->PageBufferCount = num_frags;

	/* TODO: Flush all write buffers/ memory fence ??? */
	/* wmb(); */

	/* Initialize it from the skb */
	ASSERT(skb->data);
	packet->TotalDataBufferLength	= skb->len;

	/*
	 * Start filling in the page buffers starting at
	 * AdditionalRequestPageBufferCount offset
	 */
	i = net_drv_obj->AdditionalRequestPageBufferCount;
	packet->PageBuffers[i].Pfn = virt_to_phys(skb->data) >> PAGE_SHIFT;
	packet->PageBuffers[i].Offset =
		(unsigned long)skb->data & (PAGE_SIZE - 1);
	packet->PageBuffers[i].Length = skb->len - skb->data_len;

	ASSERT((skb->len - skb->data_len) <= PAGE_SIZE);

	for (i = net_drv_obj->AdditionalRequestPageBufferCount + 1;
	     i < num_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i -
			(net_drv_obj->AdditionalRequestPageBufferCount + 1)];

		packet->PageBuffers[i].Pfn = page_to_pfn(f->page);
		packet->PageBuffers[i].Offset = f->page_offset;
		packet->PageBuffers[i].Length = f->size;
	}

	/* Set the completion routine */
	packet->Completion.Send.OnSendCompletion = netvsc_xmit_completion;
	packet->Completion.Send.SendCompletionContext = packet;
	packet->Completion.Send.SendCompletionTid = (unsigned long)skb;

retry_send:
	ret = net_drv_obj->OnSend(&net_device_ctx->device_ctx->device_obj,
				  packet);

	if (ret == 0) {
		ret = NETDEV_TX_OK;
		net_device_ctx->stats.tx_bytes += skb->len;
		net_device_ctx->stats.tx_packets++;
	} else {
		retries++;
		if (retries < 4) {
			DPRINT_ERR(NETVSC_DRV, "unable to send..."
					"retrying %d...", retries);
			udelay(100);
			goto retry_send;
		}

		/* no more room or we are shutting down */
		DPRINT_ERR(NETVSC_DRV, "unable to send (%d)..."
			   "marking net device (%p) busy", ret, net);
		DPRINT_INFO(NETVSC_DRV, "net device (%p) stopping", net);

		ret = NETDEV_TX_BUSY;
		net_device_ctx->stats.tx_dropped++;

		netif_stop_queue(net);

		/*
		 * Null it since the caller will free it instead of the
		 * completion routine
		 */
		packet->Completion.Send.SendCompletionTid = 0;

		/*
		 * Release the resources since we will not get any send
		 * completion
		 */
		netvsc_xmit_completion((void *)packet);
	}

	DPRINT_DBG(NETVSC_DRV, "# of xmits %lu total size %lu",
		   net_device_ctx->stats.tx_packets,
		   net_device_ctx->stats.tx_bytes);

	DPRINT_EXIT(NETVSC_DRV);
	return ret;
}
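Unlike Example #3, an allocation failure here returns -1, which is neither NETDEV_TX_OK nor NETDEV_TX_BUSY and so means nothing to the network core, and it also leaks the skb. A hedged sketch of a corrected failure path, mirroring the drop path Example #3 already uses (assumes the stats live in net_device_ctx, as elsewhere in this version):

	/* Sketch only: replacement for the "if (!packet)" branch above. */
	if (!packet) {
		DPRINT_ERR(NETVSC_DRV, "unable to allocate hv_netvsc_packet");
		dev_kfree_skb(skb);
		net_device_ctx->stats.tx_dropped++;
		return NETDEV_TX_OK;	/* packet consumed (dropped) */
	}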
Example #5
static int
hn_start_locked(struct ifnet *ifp)
{
	int ret = 0;
	hn_softc_t *sc = ifp->if_softc;
	NETVSC_DRIVER_OBJECT *net_drv_obj = &g_netvsc_drv.drv_obj;
	struct device_context *device_ctx = vmbus_get_devctx(sc->hn_dev);

	int i = 0;
	unsigned char *buf;

	NETVSC_PACKET* packet;
	int num_frags = 0;
	int retries = 0;
	struct mbuf *m_head, *m;
	int len = 0;
	int xlen = 0;

	DPRINT_ENTER(NETVSC_DRV);

	while (!IFQ_DRV_IS_EMPTY(&sc->hn_ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&sc->hn_ifp->if_snd, m_head);
		if (m_head == NULL) {
			break;
		}

		len = 0;
		num_frags = 0;
		xlen = 0;

		for (m = m_head; m != NULL; m = m->m_next) {
			if (m->m_len != 0) {
				num_frags++;
				len += m->m_len;
			}
		}

		DPRINT_DBG(NETVSC_DRV, "xmit packet - len %d", len);

		/* Reserve any additional page buffers the driver requested */
		num_frags += net_drv_obj->AdditionalRequestPageBufferCount;

		/* Allocate a netvsc packet based on # of frags. */
		buf = malloc(16 + sizeof(NETVSC_PACKET) +
		    (num_frags * sizeof(PAGE_BUFFER)) +
		    net_drv_obj->RequestExtSize,
		    M_DEVBUF, M_ZERO | M_WAITOK);

		if (buf == NULL) {
			DPRINT_ERR(NETVSC_DRV, "unable to allocate NETVSC_PACKET");
			return -1;
		}

		packet = (NETVSC_PACKET *)(buf + 16);
		*(vm_offset_t *)buf = 0;

		packet->Extension = (void *)((unsigned long)packet +
		    sizeof(NETVSC_PACKET) + (num_frags * sizeof(PAGE_BUFFER)));

		/* Setup the rndis header */
		packet->PageBufferCount = num_frags;

		/* TODO: Flush all write buffers/ memory fence ??? */
		/* wmb(); */

		/* Initialize it from the mbuf */
		packet->TotalDataBufferLength = len;

		/*
		 * Start filling in the page buffers starting at
		 * AdditionalRequestPageBufferCount offset
		 */

		i = net_drv_obj->AdditionalRequestPageBufferCount;
		for (m = m_head; m != NULL; m = m->m_next) {
			if (m->m_len) {
				vm_offset_t paddr = vtophys(mtod(m, vm_offset_t));
				packet->PageBuffers[i].Pfn = paddr >> PAGE_SHIFT;
				packet->PageBuffers[i].Offset = paddr & (PAGE_SIZE - 1);
				packet->PageBuffers[i].Length = m->m_len;
				DPRINT_DBG(NETVSC_DRV,
				    "paddr: %p, pfn: %llx, Off: %x, len: %x\n",
				    (void *)paddr, packet->PageBuffers[i].Pfn,
				    packet->PageBuffers[i].Offset,
				    packet->PageBuffers[i].Length);

				i++;
			}
		}

		/*
		 * Set the completion routine.
		 * Fixme: Research the netvsc_xmit_completion() function
		 * and figure out what to do about it.  It is currently too
		 * messed up to port easily.
		 */
		packet->Completion.Send.OnSendCompletion = netvsc_xmit_completion;
		packet->Completion.Send.SendCompletionContext = packet;
		packet->Completion.Send.SendCompletionTid = (ULONG_PTR)m_head;
retry_send:
		critical_enter();
		ret = net_drv_obj->OnSend(&device_ctx->device_obj, packet);
		critical_exit();

		if (ret == 0) {
			ifp->if_opackets++;
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m_head);
		} else {
			retries++;
			if (retries < 4) {
				DPRINT_ERR(NETVSC_DRV,
				    "unable to send...retrying %d...", retries);
				goto retry_send;
			}

			DPRINT_INFO(NETVSC_DRV, "net device (%p) stopping", sc);
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;

			ret = -1;

			/*
			 * Null it since the caller will free it instead of
			 * the completion routine
			 */
			packet->Completion.Send.SendCompletionTid = 0;

			/*
			 * Release the resources since we will not get any
			 * send completion
			 */
			netvsc_xmit_completion((void *)packet);
		}
	}

	DPRINT_EXIT(NETVSC_DRV);
	return (ret);
}
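The "Locked" suffix implies a caller-held lock, which is also why Example #6's comment later switches the allocation to M_NOWAIT: M_WAITOK can sleep, which is not permitted under a spin lock. A hedged sketch of the conventional unlocked if_start wrapper (the hn_lock field name is an assumption, not taken from this listing):

/*
 * Hedged sketch (not from this listing): the if_start handler that
 * takes the softc lock and calls hn_start_locked().
 */
static void
hn_start(struct ifnet *ifp)
{
	hn_softc_t *sc = ifp->if_softc;

	mtx_lock(&sc->hn_lock);
	hn_start_locked(ifp);
	mtx_unlock(&sc->hn_lock);
}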
Example #6
/*
 * Start a transmit of one or more packets
 */
static int
hn_start_locked(struct ifnet *ifp)
{
	hn_softc_t *sc = ifp->if_softc;
	struct hv_device *device_ctx = vmbus_get_devctx(sc->hn_dev);
	uint8_t *buf;
	netvsc_packet *packet;
	struct mbuf *m_head, *m;
	struct mbuf *mc_head = NULL;
	int i;
	int num_frags;
	int len;
	int xlen;
	int rppi_size;
	int retries = 0;
	int ret = 0;

	while (!IFQ_DRV_IS_EMPTY(&sc->hn_ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&sc->hn_ifp->if_snd, m_head);
		if (m_head == NULL) {
			break;
		}

		len = 0;
		num_frags = 0;
		xlen = 0;

		/* Walk the mbuf list computing total length and num frags */
		for (m = m_head; m != NULL; m = m->m_next) {
			if (m->m_len != 0) {
				num_frags++;
				len += m->m_len;
			}
		}

		/*
		 * Reserve the number of pages requested.  Currently,
		 * one page is reserved for the message in the RNDIS
		 * filter packet
		 */
		num_frags += HV_RF_NUM_TX_RESERVED_PAGE_BUFS;

		/* If exceeds # page_buffers in netvsc_packet */
		if (num_frags > NETVSC_PACKET_MAXPAGE) {
			/* m is NULL here after the walk; free the chain */
			m_freem(m_head);

			return (EINVAL);
		}

		rppi_size = 0;
		if (m_head->m_flags & M_VLANTAG) {
			rppi_size = sizeof(rndis_per_packet_info) + 
			    sizeof(ndis_8021q_info);
		}

		/*
		 * Allocate a buffer with space for a netvsc packet plus a
		 * number of reserved areas.  First comes a (currently 16
		 * bytes, currently unused) reserved data area.  Second is
		 * the netvsc_packet, which includes (currently 4) page
		 * buffers.  Third (optional) is a rndis_per_packet_info
		 * struct, but only if a VLAN tag should be inserted into the
		 * Ethernet frame by the Hyper-V infrastructure.  Fourth is
		 * an area reserved for an rndis_filter_packet struct.
		 * Changed malloc to M_NOWAIT to avoid sleep under spin lock.
		 * No longer reserving extra space for page buffers, as they
		 * are already part of the netvsc_packet.
		 */
		buf = malloc(HV_NV_PACKET_OFFSET_IN_BUF +
		    sizeof(netvsc_packet) + rppi_size +
		    sizeof(rndis_filter_packet),
		    M_DEVBUF, M_ZERO | M_NOWAIT);
		if (buf == NULL) {
			/* m is NULL here after the walk; free the chain */
			m_freem(m_head);

			return (ENOMEM);
		}

		packet = (netvsc_packet *)(buf + HV_NV_PACKET_OFFSET_IN_BUF);
		*(vm_offset_t *)buf = HV_NV_SC_PTR_OFFSET_IN_BUF;
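		/*
		 * Illustrative layout of buf, inferred from the comment
		 * and constants above (offsets are assumptions, not from
		 * this listing):
		 *
		 *   +0                            reserved scratch area
		 *   +HV_NV_PACKET_OFFSET_IN_BUF   netvsc_packet
		 *   +... (+ rppi_size, if VLAN)   rndis_per_packet_info
		 *   +...                          rndis_filter_packet
		 */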

		/*
		 * extension points to the area reserved for the
		 * rndis_filter_packet, which is placed just after
		 * the netvsc_packet (and rppi struct, if present;
		 * length is updated later).
		 */
		packet->extension = packet + 1;

		/* Set up the rndis header */
		packet->page_buf_count = num_frags;

		/* Initialize it from the mbuf */
		packet->tot_data_buf_len = len;

		/*
		 * If the Hyper-V infrastructure needs to embed a VLAN tag,
		 * initialize netvsc_packet and rppi struct values as needed.
		 */
		if (rppi_size) {
			/* Lower layers need the VLAN TCI */
			packet->vlan_tci = m_head->m_pkthdr.ether_vtag;
		}

		/*
		 * Fill the page buffers with mbuf info starting at index
		 * HV_RF_NUM_TX_RESERVED_PAGE_BUFS.
		 */
		i = HV_RF_NUM_TX_RESERVED_PAGE_BUFS;
		for (m = m_head; m != NULL; m = m->m_next) {
			if (m->m_len) {
				vm_offset_t paddr =
				    vtophys(mtod(m, vm_offset_t));
				packet->page_buffers[i].pfn =
				    paddr >> PAGE_SHIFT;
				packet->page_buffers[i].offset =
				    paddr & (PAGE_SIZE - 1);
				packet->page_buffers[i].length = m->m_len;
				i++;
			}
		}

		/*
		 * If bpf, copy the mbuf chain.  This is less expensive than
		 * it appears; the mbuf clusters are not copied, only their
		 * reference counts are incremented.
		 * Needed to avoid a race condition where the completion
		 * callback is invoked, freeing the mbuf chain, before the
		 * bpf_mtap code has a chance to run.
		 */
		if (ifp->if_bpf) {
			mc_head = m_copypacket(m_head, M_DONTWAIT);
		}
retry_send:
		/* Set the completion routine */
		packet->compl.send.on_send_completion = netvsc_xmit_completion;
		packet->compl.send.send_completion_context = packet;
		packet->compl.send.send_completion_tid = (uint64_t)m_head;

		/* Removed critical_enter(), does not appear necessary */
		ret = hv_rf_on_send(device_ctx, packet);

		if (ret == 0) {
			ifp->if_opackets++;
			/* if bpf && mc_head, call bpf_mtap code */
			if (mc_head) {
				ETHER_BPF_MTAP(ifp, mc_head);
			}
		} else {
			retries++;
			if (retries < 4) {
				goto retry_send;
			}

			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;

			/*
			 * Null the mbuf pointer so the completion function
			 * does not free the mbuf chain.  We just pushed the
			 * mbuf chain back on the if_snd queue.
			 */
			packet->compl.send.send_completion_tid = 0;

			/*
			 * Release the resources since we will not get any
			 * send completion
			 */
			netvsc_xmit_completion(packet);
		}

		/* if bpf && mc_head, free the mbuf chain copy */
		if (mc_head) {
			m_freem(mc_head);
		}
	}

	return (ret);
}
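Finally, the bookkeeping above (packet placed at buf + HV_NV_PACKET_OFFSET_IN_BUF, the tid holding m_head, the tid nulled before a manual requeue) implies a completion routine shaped roughly like the hedged sketch below; names are patterned on this example, not taken from the listing:

/*
 * Hedged sketch (not from this listing): frees the allocation made in
 * hn_start_locked() and the mbuf chain, unless the sender nulled the
 * tid because it pushed the chain back on if_snd itself.
 */
void
netvsc_xmit_completion(void *context)
{
	netvsc_packet *packet = (netvsc_packet *)context;
	struct mbuf *mb;
	uint8_t *buf;

	mb = (struct mbuf *)(uintptr_t)packet->compl.send.send_completion_tid;
	/* Recover the start of the original allocation. */
	buf = ((uint8_t *)packet) - HV_NV_PACKET_OFFSET_IN_BUF;

	free(buf, M_DEVBUF);

	if (mb != NULL)
		m_freem(mb);
}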