/*
 * Get a packet of any Ethernet type, with our address or
 * the broadcast address.  Save the Ether type in arg 5.
 * NOTE: Caller must leave room for the Ether header.
 */
ssize_t
readether(struct iodesc *d, void *pkt, size_t len, saseconds_t tleft,
	u_int16_t *etype)
{
	ssize_t n;
	struct ether_header *eh;

#ifdef ETHER_DEBUG
	if (debug)
		printf("readether: called\n");
#endif

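	/* Back up to the Ether header in front of the caller's buffer. */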
	eh = (struct ether_header *)pkt - 1;
	len += sizeof(*eh);

	n = netif_get(d, eh, len, tleft);
	if (n == -1 || (size_t)n < sizeof(*eh))
		return -1;

	/* Validate Ethernet address. */
	if (memcmp(d->myea, eh->ether_dhost, ETHER_ADDR_LEN) != 0 &&
	    memcmp(bcea, eh->ether_dhost, ETHER_ADDR_LEN) != 0) {
#ifdef ETHER_DEBUG
		if (debug)
			printf("readether: not ours (ea=%s)\n",
			    ether_sprintf(eh->ether_dhost));
#endif
		return -1;
	}
	*etype = ntohs(eh->ether_type);

	n -= sizeof(*eh);
	return n;
}
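The NOTE above is easy to get wrong: the caller must leave sizeof(struct ether_header) bytes of headroom in front of the pointer it passes in. A minimal caller sketch under that contract (recv_ip_payload and the ETHERTYPE_IP filter are illustrative, not from the source tree):

/*
 * Hypothetical caller (illustrative only): 'buf' must have
 * sizeof(struct ether_header) bytes of headroom in front of it,
 * as readether() above requires.
 */
static ssize_t
recv_ip_payload(struct iodesc *d, void *buf, size_t buflen, saseconds_t tleft)
{
	u_int16_t etype;
	ssize_t n;

	n = readether(d, buf, buflen, tleft, &etype);
	if (n == -1)
		return -1;

	/* Discard anything that is not an IP frame. */
	if (etype != ETHERTYPE_IP)
		return -1;

	return n;
}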
Example #2
int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	netif_t *netif = netdev_priv(dev);

	BUG_ON(skb->dev != dev);

	/* Drop the packet if the target domain has no receive buffers. */
	if (unlikely(!netif_schedulable(dev) || netbk_queue_full(netif)))
		goto drop;

	/*
	 * Copy the packet here if it's destined for a flipping interface
	 * but isn't flippable (e.g. extra references to data).
	 */
	if (!netif->copying_receiver) {
		struct sk_buff *nskb = netbk_copy_skb(skb);
		if (unlikely(nskb == NULL))
			goto drop;
		/* Copy only the header fields we use in this driver. */
		nskb->dev = skb->dev;
		nskb->ip_summed = skb->ip_summed;
		nskb->proto_data_valid = skb->proto_data_valid;
		dev_kfree_skb(skb);
		skb = nskb;
	}

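	/*
	 * Reserve ring slots: one per page fragment, one for the linear
	 * header area, plus one extra slot when GSO metadata is present.
	 */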
	netif->rx_req_cons_peek += skb_shinfo(skb)->nr_frags + 1 +
				   !!skb_shinfo(skb)->gso_size;
	netif_get(netif);

	if (netbk_can_queue(dev) && netbk_queue_full(netif)) {
		netif->rx.sring->req_event = netif->rx_req_cons_peek +
			netbk_max_required_rx_slots(netif);
		mb(); /* request notification /then/ check & stop the queue */
		if (netbk_queue_full(netif)) {
			netif_stop_queue(dev);
			/*
			 * Schedule 500ms timeout to restart the queue, thus
			 * ensuring that an inactive queue will be drained.
			 * Packets will immediately be dropped until more
			 * receive buffers become available (see
			 * netbk_queue_full() check above).
			 */
			netif->tx_queue_timeout.data = (unsigned long)netif;
			netif->tx_queue_timeout.function = tx_queue_callback;
			__mod_timer(&netif->tx_queue_timeout, jiffies + HZ/2);
		}
	}

	skb_queue_tail(&rx_queue, skb);
	tasklet_schedule(&net_rx_tasklet);

	return 0;

 drop:
	netif->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return 0;
}
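The timer armed above fires tx_queue_callback after 500 ms; that function is not part of this snippet. A minimal sketch of what it plausibly does, assuming the netif_schedulable() helper used earlier, is to wake the stopped queue:

/*
 * Sketch of the timer callback referenced in netif_be_start_xmit()
 * (not shown in this snippet): restart the queue that was stopped,
 * assuming netif_schedulable() as used above in this driver.
 */
static void tx_queue_callback(unsigned long data)
{
	netif_t *netif = (netif_t *)data;

	if (netif_schedulable(netif->dev))
		netif_wake_queue(netif->dev);
}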
Example #3
int netif_map(netif_t *netif, unsigned long tx_ring_ref,
	      unsigned long rx_ring_ref, unsigned int evtchn)
{
	int err = -ENOMEM;
	netif_tx_sring_t *txs;
	netif_rx_sring_t *rxs;
	struct evtchn_bind_interdomain bind_interdomain;

	/* Already connected? */
	if (netif->irq)
		return 0;

	netif->tx_comms_area = alloc_vm_area(PAGE_SIZE);
	if (netif->tx_comms_area == NULL)
		return -ENOMEM;
	netif->rx_comms_area = alloc_vm_area(PAGE_SIZE);
	if (netif->rx_comms_area == NULL)
		goto err_rx;

	err = map_frontend_pages(netif, tx_ring_ref, rx_ring_ref);
	if (err)
		goto err_map;

	bind_interdomain.remote_dom = netif->domid;
	bind_interdomain.remote_port = evtchn;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);
	if (err)
		goto err_hypervisor;

	netif->evtchn = bind_interdomain.local_port;

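	/*
	 * Bind the event channel to a local IRQ, but keep it disabled
	 * until the interface is fully initialized below.
	 */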
	netif->irq = bind_evtchn_to_irqhandler(
		netif->evtchn, netif_be_int, 0, netif->dev->name, netif);
	disable_irq(netif->irq);

	txs = (netif_tx_sring_t *)netif->tx_comms_area->addr;
	BACK_RING_INIT(&netif->tx, txs, PAGE_SIZE);

	rxs = (netif_rx_sring_t *)netif->rx_comms_area->addr;
	BACK_RING_INIT(&netif->rx, rxs, PAGE_SIZE);

	netif->rx_req_cons_peek = 0;

	netif_get(netif);
	wmb(); /* Other CPUs see new state before interface is started. */

	rtnl_lock();
	netif->status = CONNECTED;
	wmb(); /* Publish CONNECTED before bringing the interface up. */
	if (netif_running(netif->dev))
		__netif_up(netif);
	rtnl_unlock();

	return 0;
err_hypervisor:
	unmap_frontend_pages(netif);
err_map:
	free_vm_area(netif->rx_comms_area);
err_rx:
	free_vm_area(netif->tx_comms_area);
	return err;
}
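The error ladder at the end releases resources in reverse order of acquisition: err_map frees the rx area and falls through to err_rx for the tx area. A full disconnect would also have to unbind the IRQ and unmap the ring pages; a hedged sketch of that counterpart (netif_unmap is a hypothetical name; unbind_from_irqhandler is the standard Xen event-channel API, the rest reuses helpers from above):

/*
 * Sketch of the matching teardown (not part of this snippet): undo
 * netif_map() in reverse order of setup.
 */
static void netif_unmap(netif_t *netif)
{
	if (netif->irq) {
		unbind_from_irqhandler(netif->irq, netif);
		netif->irq = 0;
	}
	unmap_frontend_pages(netif);
	free_vm_area(netif->tx_comms_area);
	free_vm_area(netif->rx_comms_area);
}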