Example No. 1
static int ti_hecc_rx_pkt(struct ti_hecc_priv *priv, int mbxno)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 data, mbx_mask;
	unsigned long flags;

	skb = alloc_can_skb(priv->ndev, &cf);
	if (!skb) {
		if (printk_ratelimit())
			dev_err(priv->ndev->dev.parent,
				"ti_hecc_rx_pkt: alloc_can_skb() failed\n");
		return -ENOMEM;
	}

	mbx_mask = BIT(mbxno);
	data = hecc_read_mbx(priv, mbxno, HECC_CANMID);
	if (data & HECC_CANMID_IDE)
		cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		cf->can_id = (data >> 18) & CAN_SFF_MASK;
	data = hecc_read_mbx(priv, mbxno, HECC_CANMCF);
	if (data & HECC_CANMCF_RTR)
		cf->can_id |= CAN_RTR_FLAG;
	cf->can_dlc = get_can_dlc(data & 0xF);
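	/* payload is stored big-endian: CANMDL holds bytes 0-3, CANMDH bytes 4-7 */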
	data = hecc_read_mbx(priv, mbxno, HECC_CANMDL);
	*(u32 *)(cf->data) = cpu_to_be32(data);
	if (cf->can_dlc > 4) {
		data = hecc_read_mbx(priv, mbxno, HECC_CANMDH);
		*(u32 *)(cf->data + 4) = cpu_to_be32(data);
	} else {
		*(u32 *)(cf->data + 4) = 0;
	}
	spin_lock_irqsave(&priv->mbx_lock, flags);
	hecc_clear_bit(priv, HECC_CANME, mbx_mask);
	hecc_write(priv, HECC_CANRMP, mbx_mask);
	/* enable mailbox only if it is part of rx buffer mailboxes */
	if (priv->rx_next < HECC_RX_BUFFER_MBOX)
		hecc_set_bit(priv, HECC_CANME, mbx_mask);
	spin_unlock_irqrestore(&priv->mbx_lock, flags);

	stats->rx_bytes += cf->can_dlc;
	netif_receive_skb(skb);
	stats->rx_packets++;

	return 0;
}
Example No. 2
static int qinq_rcv(struct sk_buff *skb, struct net_device *dev,
                    struct packet_type *pt, struct net_device *orig_dev)
{
    struct dsa_switch_tree *dst;
    struct dsa_switch *ds;
    struct vlan_hdr *vhdr;
    int source_port;

    dst = dev->dsa_ptr;
    if (unlikely(dst == NULL))
        goto out_drop;
    ds = dst->ds[0];

    skb = skb_unshare(skb, GFP_ATOMIC);
    if (skb == NULL)
        goto out;

    if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
        goto out_drop;

    vhdr = (struct vlan_hdr *)skb->data;
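    /* the outer VLAN ID encodes the switch port the frame arrived on */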
    source_port = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
    if (source_port >= DSA_MAX_PORTS || ds->ports[source_port] == NULL)
        goto out_drop;

    /* Remove the outermost VLAN tag and update checksum. */
    skb_pull_rcsum(skb, VLAN_HLEN);
    memmove(skb->data - ETH_HLEN,
            skb->data - ETH_HLEN - VLAN_HLEN,
            2 * ETH_ALEN);

    skb->dev = ds->ports[source_port];
    skb_push(skb, ETH_HLEN);
    skb->pkt_type = PACKET_HOST;
    skb->protocol = eth_type_trans(skb, skb->dev);

    skb->dev->stats.rx_packets++;
    skb->dev->stats.rx_bytes += skb->len;

    netif_receive_skb(skb);

    return 0;

out_drop:
    kfree_skb(skb);
out:
    return 0;
}
Example No. 3
static void lro_flush(struct net_lro_mgr *lro_mgr,
		      struct net_lro_desc *lro_desc)
{
	if (lro_desc->pkt_aggr_cnt > 1)
		lro_update_tcp_ip_header(lro_desc);

	skb_shinfo(lro_desc->parent)->gso_size = lro_desc->mss;

	if (lro_mgr->features & LRO_F_NAPI)
		netif_receive_skb(lro_desc->parent);
	else
		netif_rx(lro_desc->parent);

	LRO_INC_STATS(lro_mgr, flushed);
	lro_clear_desc(lro_desc);
}
Example No. 4
void lro_receive_frags(struct net_lro_mgr *lro_mgr,
		       struct skb_frag_struct *frags,
		       int len, int true_size, void *priv, __wsum sum)
{
	struct sk_buff *skb;

	skb = __lro_proc_segment(lro_mgr, frags, len, true_size, NULL, 0,
				 priv, sum);
	if (!skb)
		return;

	if (lro_mgr->features & LRO_F_NAPI)
		netif_receive_skb(skb);
	else
		netif_rx(skb);
}
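Note: lro_flush() and lro_receive_frags() above belong to the old inet_lro helper layer, which aggregates TCP segments and eventually hands the merged super-packet to netif_receive_skb(). A minimal sketch of how a driver's NAPI poll might drive it; struct my_priv and my_get_frame() are hypothetical, while lro_receive_skb() and lro_flush_all() are the layer's real entry points:

#include <linux/inet_lro.h>

struct my_priv {				/* hypothetical driver state */
	struct napi_struct napi;
	struct net_lro_mgr lro_mgr;		/* configured at probe time */
};

extern struct sk_buff *my_get_frame(struct my_priv *priv); /* hypothetical */

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work = 0;

	while (work < budget) {
		struct sk_buff *skb = my_get_frame(priv);

		if (!skb)
			break;
		/* inet_lro aggregates the segment; lro_flush() will call
		 * netif_receive_skb() on the merged packet later */
		lro_receive_skb(&priv->lro_mgr, skb, priv);
		work++;
	}

	/* push any partially aggregated sessions up the stack */
	lro_flush_all(&priv->lro_mgr);

	if (work < budget)
		napi_complete(napi);

	return work;
}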
Example No. 5
static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
{
	u16 flags, data;
	int i;
	unsigned int val;
	struct c_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	struct can_frame *frame;

	skb = alloc_can_skb(dev, &frame);
	if (!skb) {
		stats->rx_dropped++;
		return -ENOMEM;
	}

	frame->can_dlc = get_can_dlc(ctrl & 0x0F);

	flags = priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface));
	val = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface)) |
		(flags << 16);

	if (flags & IF_ARB_MSGXTD)
		frame->can_id = (val & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		frame->can_id = (val >> 18) & CAN_SFF_MASK;

	if (flags & IF_ARB_TRANSMIT)
		frame->can_id |= CAN_RTR_FLAG;
	else {
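		/* each 16-bit data register read yields two payload bytes */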
		for (i = 0; i < frame->can_dlc; i += 2) {
			data = priv->read_reg(priv,
				C_CAN_IFACE(DATA1_REG, iface) + i / 2);
			frame->data[i] = data;
			frame->data[i + 1] = data >> 8;
		}
	}

	netif_receive_skb(skb);

	stats->rx_packets++;
	stats->rx_bytes += frame->can_dlc;

	can_led_event(dev, CAN_LED_EVENT_RX);

	return 0;
}
Example No. 6
a_status_t
__adf_net_indicate_packet(adf_net_handle_t hdl, struct sk_buff *skb,
                          uint32_t len)
{
    struct net_device *netdev   = hdl_to_netdev(hdl);
    __adf_softc_t  *sc          = hdl_to_softc(hdl);
    /*
     * For pseudo devices the IP checksum has to be computed.
     */
    if (adf_os_unlikely(skb->ip_summed == CHECKSUM_UNNECESSARY))
        __adf_net_ip_cksum(skb);

    /*
     * eth_type_trans() also pulls the Ethernet header.
     */
    skb->protocol = eth_type_trans(skb, netdev);
    skb->dev = netdev;
    netdev->last_rx = jiffies;
#ifdef LIMIT_MTU_SIZE

    if (skb->len >=  LIMITED_MTU) {
        skb->h.raw = skb->nh.raw = skb->data;

        skb->dst = (struct dst_entry *)&__fake_rtable;
        skb->pkt_type = PACKET_HOST;
        dst_hold(skb->dst);


        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(LIMITED_MTU - 4));
        dev_kfree_skb_any(skb);
        return A_STATUS_OK;
    }
#endif

    if (sc->vlgrp)
        __vlan_hwaccel_put_tag(skb, sc->vid);

    if (in_irq())
        netif_rx(skb);
    else
        netif_receive_skb(skb);

    return A_STATUS_OK;
}
Example No. 7
/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel,
		     struct efx_rx_buffer *rx_buf, bool checksummed)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len);
		efx_free_rx_buffer(efx, rx_buf);
		return;
	}

	if (rx_buf->skb) {
		prefetch(skb_shinfo(rx_buf->skb));

		skb_put(rx_buf->skb, rx_buf->len);

		/* Move past the ethernet header. rx_buf->data still points
		 * at the ethernet header */
		rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
						       efx->net_dev);

		skb_record_rx_queue(rx_buf->skb, channel->channel);
	}

	if (likely(checksummed || rx_buf->page)) {
		efx_rx_packet_lro(channel, rx_buf, checksummed);
		return;
	}

	/* We now own the SKB */
	skb = rx_buf->skb;
	rx_buf->skb = NULL;
	EFX_BUG_ON_PARANOID(!skb);

	/* Set the SKB flags */
	skb->ip_summed = CHECKSUM_NONE;

	/* Pass the packet up */
	netif_receive_skb(skb);

	/* Update allocation strategy method */
	channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
}
Example No. 8
static int trailer_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	struct dsa_switch_tree *dst = dev->dsa_ptr;
	struct dsa_switch *ds;
	u8 *trailer;
	int source_port;

	if (unlikely(dst == NULL))
		goto out_drop;
	ds = dst->ds[0];

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto out;

	if (skb_linearize(skb))
		goto out_drop;

	trailer = skb_tail_pointer(skb) - 4;
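	/* validate the 4-byte trailer tag the switch appended after the payload */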
	if (trailer[0] != 0x80 || (trailer[1] & 0xf8) != 0x00 ||
	    (trailer[2] & 0xef) != 0x00 || trailer[3] != 0x00)
		goto out_drop;

	source_port = trailer[1] & 7;
	if (source_port >= DSA_MAX_PORTS || ds->ports[source_port] == NULL)
		goto out_drop;

	pskb_trim_rcsum(skb, skb->len - 4);

	skb->dev = ds->ports[source_port];
	skb_push(skb, ETH_HLEN);
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, skb->dev);

	skb->dev->stats.rx_packets++;
	skb->dev->stats.rx_bytes += skb->len;

	netif_receive_skb(skb);

	return 0;

out_drop:
	kfree_skb(skb);
out:
	return 0;
}
Example No. 9
a_status_t 
__adf_net_indicate_vlanpkt(adf_net_handle_t hdl, struct sk_buff *skb, 
                           uint32_t len, adf_net_vid_t *vid)
{
    __adf_softc_t   *sc = hdl_to_softc(hdl);
    struct net_device *netdev = hdl_to_netdev(hdl);

    skb->protocol = eth_type_trans(skb, netdev);
    skb->dev = netdev;
    netdev->last_rx = jiffies;
    if (sc->vlgrp)
        __vlan_hwaccel_put_tag(skb, vid->val);

    if (in_irq())
        netif_rx(skb);
    else
        netif_receive_skb(skb);

    return A_STATUS_OK;
}
Example No. 10
static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
			      struct cp_desc *desc)
{
	skb->protocol = eth_type_trans (skb, cp->dev);

	cp->net_stats.rx_packets++;
	cp->net_stats.rx_bytes += skb->len;
	cp->dev->last_rx = jiffies;

#if CP_VLAN_TAG_USED
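	/* hand tagged frames to the VLAN accel receive path */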
	if (cp->vlgrp && (desc->opts2 & RxVlanTagged)) {
		vlan_hwaccel_receive_skb(skb, cp->vlgrp,
					 be16_to_cpu(desc->opts2 & 0xffff));
	} else
#endif
		netif_receive_skb(skb);
}
Example No. 11
/**
 * gelic_net_pass_skb_up - takes an skb from a descriptor and passes it on
 * @descr: descriptor to process
 * @card: card structure
 *
 * iommu-unmaps the skb, fills out skb structure and passes the data to the
 * stack. The descriptor state is not changed.
 */
static void gelic_net_pass_skb_up(struct gelic_net_descr *descr,
				 struct gelic_net_card *card)
{
	struct sk_buff *skb;
	struct net_device *netdev;
	u32 data_status, data_error;

	data_status = descr->data_status;
	data_error = descr->data_error;
	netdev = card->netdev;
	/* unmap skb buffer */
	skb = descr->skb;
	dma_unmap_single(ctodev(card), descr->buf_addr, GELIC_NET_MAX_MTU,
			 DMA_FROM_DEVICE);

	skb_put(skb, descr->valid_size ? descr->valid_size : descr->result_size);
	if (!descr->valid_size)
		dev_info(ctodev(card), "buffer full %x %x %x\n",
			 descr->result_size, descr->buf_size,
			 descr->dmac_cmd_status);

	descr->skb = NULL;
	/*
	 * the card put 2 bytes vlan tag in front
	 * of the ethernet frame
	 */
	skb_pull(skb, 2);
	skb->protocol = eth_type_trans(skb, netdev);

	/* checksum offload */
	if (card->rx_csum) {
		if ((data_status & GELIC_NET_DATA_STATUS_CHK_MASK) &&
		    (!(data_error & GELIC_NET_DATA_ERROR_CHK_MASK)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;
	} else
		skb->ip_summed = CHECKSUM_NONE;

	/* update netdevice statistics */
	card->netdev->stats.rx_packets++;
	card->netdev->stats.rx_bytes += skb->len;

	/* pass skb up to stack */
	netif_receive_skb(skb);
}
Example No. 12
static int ethoc_rx(struct net_device *dev, int limit)
{
	struct ethoc *priv = netdev_priv(dev);
	int count;

	for (count = 0; count < limit; ++count) {
		unsigned int entry;
		struct ethoc_bd bd;

		entry = priv->num_tx + (priv->cur_rx % priv->num_rx);
		ethoc_read_bd(priv, entry, &bd);
		if (bd.stat & RX_BD_EMPTY)
			break;

		if (ethoc_update_rx_stats(priv, &bd) == 0) {
			int size = bd.stat >> 16;
			struct sk_buff *skb = netdev_alloc_skb(dev, size);

			size -= 4; /* strip the CRC */

			if (likely(skb)) {
				void *src = phys_to_virt(bd.addr);

				skb_reserve(skb, 2); /* align TCP/IP header */
				memcpy_fromio(skb_put(skb, size), src, size);
				skb->protocol = eth_type_trans(skb, dev);
				priv->stats.rx_packets++;
				priv->stats.rx_bytes += size;
				netif_receive_skb(skb);
			} else {
				if (net_ratelimit())
					dev_warn(&dev->dev, "low on memory - "
							"packet dropped\n");

				priv->stats.rx_dropped++;
				break;
			}
		}

		/* clear the buffer descriptor so it can be reused */
		bd.stat &= ~RX_BD_STATS;
		bd.stat |=  RX_BD_EMPTY;
		ethoc_write_bd(priv, entry, &bd);
		priv->cur_rx++;
	}

	return count;
}
Example No. 13
/* Polling function */
static int blaze_poll(struct napi_struct *napi, int budget)
{
	int npackets = 0;
	struct sk_buff *skb;
	struct blaze_priv *priv = container_of(napi, struct blaze_priv,
								napi);
	struct net_device *dev = priv->dev;
	struct blaze_packet *pkt;
    
	while (npackets < budget && priv->rx_queue) {
		pkt = blaze_dequeue_buf(dev);
		skb = dev_alloc_skb(pkt->datalen + 2);
		if (!skb) {
			if (printk_ratelimit())
				printk(KERN_NOTICE "blaze: packet dropped\n");
			priv->stats.rx_dropped++;
			blaze_release_buffer(pkt);
			continue;
		}
		skb_reserve(skb, 2); /* align IP on 16B boundary */  
		memcpy(skb_put(skb, pkt->datalen), pkt->data, pkt->datalen);
		skb->dev = dev;
		skb->protocol = eth_type_trans(skb, dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY; /* don't check it */
		netif_receive_skb(skb);
		
		/* Maintain stats */
		npackets++;
		priv->stats.rx_packets++;
		priv->stats.rx_bytes += pkt->datalen;
		blaze_release_buffer(pkt);
	}
	/* If we processed all packets, tell the kernel and reenable ints */
	if (! priv->rx_queue) {
		napi_complete(napi);
		blaze_rx_ints(dev, 1);
		return 0;
	}

	/* We couldn't process everything. */
	return npackets;
}
Example No. 14
/* return: < 0 - descriptor not available, 0 - received one packet */
static INLINE int mailbox_rx_irq_handler(unsigned int ch)
{
    unsigned int ndev = ch;
    struct sk_buff *skb;
    struct sk_buff *new_skb;
    volatile struct rx_descriptor *desc;
    struct rx_descriptor reg_desc;
    int netif_rx_ret;

    desc = &g_ptm_priv_data.itf[ndev].rx_desc[g_ptm_priv_data.itf[ndev].rx_desc_pos];
    if ( desc->own || !desc->c )    //  if PP32 hold descriptor or descriptor not completed
        return -EAGAIN;
    if ( ++g_ptm_priv_data.itf[ndev].rx_desc_pos == dma_rx_descriptor_length )
        g_ptm_priv_data.itf[ndev].rx_desc_pos = 0;

    reg_desc = *desc;
    skb = get_skb_rx_pointer(reg_desc.dataptr);

    if ( !reg_desc.err ) {
        new_skb = alloc_skb_rx();
        if ( new_skb != NULL ) {
            skb_reserve(skb, reg_desc.byteoff);
            skb_put(skb, reg_desc.datalen);

            dump_skb(skb, DUMP_SKB_LEN, (char *)__func__, ndev, ndev, 0);

            //  parse protocol header
            skb->dev = g_net_dev[ndev];
            skb->protocol = eth_type_trans(skb, skb->dev);

            g_net_dev[ndev]->last_rx = jiffies;

            netif_rx_ret = netif_receive_skb(skb);

            if ( netif_rx_ret != NET_RX_DROP ) {
                g_ptm_priv_data.itf[ndev].stats.rx_packets++;
                g_ptm_priv_data.itf[ndev].stats.rx_bytes += reg_desc.datalen;
            }

            reg_desc.dataptr = ((unsigned int)new_skb->data >> 2) & 0x0FFFFFFF;
            reg_desc.byteoff = RX_HEAD_MAC_ADDR_ALIGNMENT;
        }
Example No. 15
static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
{
	struct net_device_stats *stats = &dev->stats;
	struct c_can_priv *priv = netdev_priv(dev);
	struct can_frame *frame;
	struct sk_buff *skb;
	u32 arb, data;

	skb = alloc_can_skb(dev, &frame);
	if (!skb) {
		stats->rx_dropped++;
		return -ENOMEM;
	}

	frame->can_dlc = get_can_dlc(ctrl & 0x0F);

	arb = priv->read_reg32(priv, C_CAN_IFACE(ARB1_REG, iface));

	if (arb & IF_ARB_MSGXTD)
		frame->can_id = (arb & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		frame->can_id = (arb >> 18) & CAN_SFF_MASK;

	if (arb & IF_ARB_TRANSMIT) {
		frame->can_id |= CAN_RTR_FLAG;
	} else {
		int i, dreg = C_CAN_IFACE(DATA1_REG, iface);
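		/* dreg advances one 16-bit register for every two payload bytes */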

		for (i = 0; i < frame->can_dlc; i += 2, dreg++) {
			data = priv->read_reg(priv, dreg);
			frame->data[i] = data;
			frame->data[i + 1] = data >> 8;
		}
	}

	stats->rx_packets++;
	stats->rx_bytes += frame->can_dlc;

	netif_receive_skb(skb);
	return 0;
}
Example No. 16
static void
roq_eth_pass_skb_up(struct roq_eth_priv *vdev, struct sk_buff *skb, int size)
{
	struct sk_buff *skb_shrinked;
	int err;

	skb_put(skb, size);
	skb->dev = vdev->ndev;
	skb->protocol = eth_type_trans(skb, vdev->ndev);
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	vdev->stats.rx_packets++;
	vdev->stats.rx_bytes += skb->len;

	if (size < ETH_DATA_LEN) {
		/*
		 * this call shrinks data buffer to real data length,
		 * otherwise ping doesn't work
		 */ 
		skb_shrinked = skb_copy_expand(skb, skb_headroom(skb),
					       0, GFP_ATOMIC);
		dev_kfree_skb(skb);
	} else
		skb_shrinked = skb;


	if (likely(skb_shrinked)) {
		/* capture the length first: netif_receive_skb() consumes the
		 * skb, and the original skb may already have been freed */
		unsigned int rx_len = skb_shrinked->len;

		err = netif_receive_skb(skb_shrinked);
		if (unlikely(err)) {
			if (err == NET_RX_DROP)
				pr_warn("netif_receive_skb: dropped: %u : %d\n",
					rx_len, size);
			else
				pr_warn("netif_receive_skb: %d\n", err);
		}
	} else {
		pr_warn("skb_copy_expand: dropped\n");
	}
	return;
}
Example No. 17
/* VLAN rx hw acceleration helper.  This acts like netif_{rx,receive_skb}(). */
int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
		      u16 vlan_tci, int polling)
{
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	if (skb_bond_should_drop(skb))
		goto drop;

	skb->vlan_tci = vlan_tci;
	skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);

	if (!skb->dev)
		goto drop;

	return (polling ? netif_receive_skb(skb) : netif_rx(skb));

drop:
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;
}
Example No. 18
/**
 * Eth_napi_struct_poll - NAPI Rx polling callback
 **/
static int Eth_napi_struct_poll(struct napi_struct *napi, int budget)
{
    int npackets = 0;
    struct sk_buff *skb;
    struct eth_priv *priv = container_of(napi, struct eth_priv, napi);
    struct net_device *dev = priv->dev;
    struct eth_packet *pkt;
 
    while (npackets < budget && priv->rx_queue) {
        pkt = eth_dequeue_buf(dev);
        skb = dev_alloc_skb(pkt->datalen + 2);
        if (!skb) {
            if (printk_ratelimit())
                printk(KERN_NOTICE "Eth: packet dropped\n");
            priv->stats.rx_dropped++;
            eth_release_buffer(pkt);
            continue;
        }
        skb_reserve(skb, 2);  //align IP on 16B boundary
        memcpy(skb_put(skb, pkt->datalen), pkt->data, pkt->datalen);
        skb->dev = dev;
        skb->protocol = eth_type_trans(skb, dev);
        skb->ip_summed = CHECKSUM_UNNECESSARY; // don't check it
        netif_receive_skb(skb);
        /* Maintain stats */
        npackets++;
        priv->stats.rx_packets++;
        priv->stats.rx_bytes += pkt->datalen;
        eth_release_buffer(pkt);
    }
    /* If the budget was not fully consumed we processed everything:
     * exit polling mode, tell the kernel and re-enable interrupts */
    if (npackets < budget) {
        napi_complete(napi);
        /* re-enable the normal RX interrupt */
        Eth_rx_ints(dev, 1);
    }

    return npackets;
}
Example No. 19
/* VLAN rx hw acceleration helper.  This acts like netif_{rx,receive_skb}(). */
int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
		      u16 vlan_tci, int polling)
{
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
		goto drop;

	skb->skb_iif = skb->dev->ifindex;
	__vlan_hwaccel_put_tag(skb, vlan_tci);
	skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);

	if (!skb->dev)
		goto drop;

	return (polling ? netif_receive_skb(skb) : netif_rx(skb));

drop:
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;
}
Example No. 20
s32 mpls_route(struct sk_buff *skb, u16 proto)
{
	/* handle EXPLICIT-NULL pop, 6PE, 6VPE and VRF here */
	nf_reset(skb);
	ns_reset(skb);
	dst_release(skb->dst);
	skb->dst = NULL;
	skb->nh.raw = skb->data;
	skb->vid = 0;
	skb->protocol = htons(proto);

	skb->ff_flag = ff_set_flag(skb, DRV_FF_FLAG_LINUX_FORWARD);

	MPLS_DEBUG_COUNTER_INC(mpls_route);

	MPLS_DEBUG_FORWARD("mpls_route: skb->iif %x, skb->dev %s, proto %d.\n",
			   skb->iif, skb->dev ? skb->dev->name : "Unknown", proto);

	MPLS_DEBUG_SKB(skb, "mpls_route: \n");

	return netif_receive_skb(skb);
}
Example No. 21
void cpsw_rx_handler(void *token, int len, int status)
{
	struct sk_buff		*skb = token;
	struct net_device	*ndev = skb->dev;
	struct cpsw_priv	*priv = netdev_priv(ndev);
	int			ret = 0;

	/* free and bail if we are shutting down */
	if (unlikely(!netif_running(ndev)) ||
			unlikely(!netif_carrier_ok(ndev))) {
		dev_kfree_skb_any(skb);
		return;
	}
	if (likely(status >= 0)) {
		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, ndev);
		netif_receive_skb(skb);
		priv->stats.rx_bytes += len;
		priv->stats.rx_packets++;
		skb = NULL;
	}

	if (unlikely(!netif_running(ndev))) {
		if (skb)
			dev_kfree_skb_any(skb);
		return;
	}

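	/* the received skb was consumed above, so refill this ring slot */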
	if (likely(!skb)) {
		skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max);
		if (WARN_ON(!skb))
			return;

		ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
					skb_tailroom(skb), GFP_KERNEL);
	}
	WARN_ON(ret < 0);
}
Example No. 22
int eth0_pack_rcv(struct sk_buff *skb, struct net_device *dev,
		  struct packet_type *pt, struct net_device *orig_dev)
{
	struct nm_packet *pkt = (struct nm_packet *)skb->data;
	struct net_device *dev_nm = nm_dev[ntohs(pkt->port)];
	struct nm_adapter *adapter = netdev_priv(dev_nm);
	struct sk_buff *new_skb;

	skb_pull(skb, sizeof(pkt->port));

	new_skb = netdev_alloc_skb(dev_nm, NM_PKT_SIZE);
	if (!new_skb) {
		adapter->stats.rx_dropped++;
		dev_kfree_skb(skb);
		return 0;
	}
	skb_reserve(new_skb, 2);
	memcpy(skb_put(new_skb, skb->len), skb->data, skb->len);
	adapter->stats.rx_packets++;
	adapter->stats.rx_bytes += skb->len;
	new_skb->protocol = eth_type_trans(new_skb, dev_nm);
	dev_kfree_skb(skb);
	netif_receive_skb(new_skb);
	return 0;
}
Example No. 23
int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
		     unsigned int vlan_tci, struct sk_buff *skb)
{
	int err = NET_RX_SUCCESS;

	if (netpoll_receive_skb(skb))
		return NET_RX_DROP;

	switch (vlan_gro_common(napi, grp, vlan_tci, skb)) {
	case -1:
		return netif_receive_skb(skb);

	case 2:
		err = NET_RX_DROP;
		/* fall through */

	case 1:
		kfree_skb(skb);
		break;
	}

	return err;
}
Example No. 24
static int sn_poll_action_single(struct sn_queue *rx_queue, int budget)
{
	struct napi_struct *napi = &rx_queue->rx.napi;
	int poll_cnt = 0;

	while (poll_cnt < budget) {
		struct sk_buff *skb;
		struct sn_rx_metadata rx_meta;
		int ret;

		skb = rx_queue->dev->ops->do_rx(rx_queue, &rx_meta);
		if (!skb)
			return poll_cnt;

		rx_queue->rx.stats.packets++;
		rx_queue->rx.stats.bytes += skb->len;

		ret = sn_process_rx_metadata(skb, &rx_meta);
		if (unlikely(ret)) {
			dev_kfree_skb(skb);
			continue;
		}

		skb_record_rx_queue(skb, rx_queue->queue_id);
		skb->protocol = eth_type_trans(skb, napi->dev);
#ifdef CONFIG_NET_RX_BUSY_POLL
		skb_mark_napi_id(skb, napi);
#endif

		netif_receive_skb(skb);

		poll_cnt++;
	}

	return poll_cnt;
}
Example No. 25
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter = container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;

 restart_poll:
	do {
		struct sk_buff *skb;

		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); 
			adapter->rx_invalid_buffer++;
			ibmveth_debug_printk("recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);
			if (csum_good)
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			ibmveth_rxq_harvest_buffer(adapter);

			skb_reserve(skb, offset);
			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			netif_receive_skb(skb);	

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	} while (frames_processed < budget);

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		ibmveth_assert(lpar_rc == H_SUCCESS);

		napi_complete(napi);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}
Example No. 26
static void ri_tasklet(unsigned long dev)
{

	struct net_device *_dev = (struct net_device *)dev;
	struct ifb_private *dp = netdev_priv(_dev);
	struct net_device_stats *stats = &_dev->stats;
	struct netdev_queue *txq;
	struct sk_buff *skb;

	txq = netdev_get_tx_queue(_dev, 0);
	if ((skb = skb_peek(&dp->tq)) == NULL) {
		if (__netif_tx_trylock(txq)) {
			skb_queue_splice_tail_init(&dp->rq, &dp->tq);
			__netif_tx_unlock(txq);
		} else {
			/* reschedule */
			goto resched;
		}
	}

	while ((skb = __skb_dequeue(&dp->tq)) != NULL) {
		u32 from = G_TC_FROM(skb->tc_verd);

		skb->tc_verd = 0;
		skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;

		rcu_read_lock();
		skb->dev = dev_get_by_index_rcu(&init_net, skb->skb_iif);
		if (!skb->dev) {
			rcu_read_unlock();
			dev_kfree_skb(skb);
			stats->tx_dropped++;
			if (skb_queue_len(&dp->tq) != 0)
				goto resched;
			break;
		}
		rcu_read_unlock();
		skb->skb_iif = _dev->ifindex;

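		/* egress-redirected frames are re-transmitted; ingress ones are
		 * re-injected into the receive path
		 */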
		if (from & AT_EGRESS) {
			dev_queue_xmit(skb);
		} else if (from & AT_INGRESS) {
			skb_pull(skb, skb->dev->hard_header_len);
			netif_receive_skb(skb);
		} else
			BUG();
	}

	if (__netif_tx_trylock(txq)) {
		if ((skb = skb_peek(&dp->rq)) == NULL) {
			dp->tasklet_pending = 0;
			if (netif_queue_stopped(_dev))
				netif_wake_queue(_dev);
		} else {
			__netif_tx_unlock(txq);
			goto resched;
		}
		__netif_tx_unlock(txq);
	} else {
resched:
		dp->tasklet_pending = 1;
		tasklet_schedule(&dp->ifb_tasklet);
	}

}
Example No. 27
/* NAPI receive function */
static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
{
	struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
	struct net_device *dev = fep->ndev;
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t __iomem *bdp;
	struct sk_buff *skb, *skbn, *skbt;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;

	if (budget <= 0)
		return received;

	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	/* clear RX status bits for napi*/
	(*fep->ops->napi_clear_rx_event)(dev);

	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			dev_warn(fep->dev, "rcv is not +last\n");

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_crc_errors++;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			skbn = skb;

		} else {
			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;

			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = netdev_alloc_skb(dev, pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					skb_copy_from_linear_data(skb,
						      skbn->data, pkt_len);
					/* swap */
					skbt = skb;
					skb = skbn;
					skbn = skbt;
				}
			} else {
				skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);

				if (skbn)
					skb_align(skbn, ENET_RX_ALIGN);
			}

			if (skbn != NULL) {
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_receive_skb(skb);
			} else {
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
			     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			     DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);

		if (received >= budget)
			break;
	}

	fep->cur_rx = bdp;

	if (received < budget) {
		/* done */
		napi_complete(napi);
		(*fep->ops->napi_enable_rx)(dev);
	}
	return received;
}
Example No. 28
static int c_can_handle_bus_err(struct net_device *dev,
				enum c_can_lec_type lec_type)
{
	struct c_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	/*
	 * early exit if no lec update or no error.
	 * no lec update means that no CAN bus event has been detected
	 * since CPU wrote 0x7 value to status reg.
	 */
	if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR)
		return 0;

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	/*
	 * check for 'last error code' which tells us the
	 * type of the last error to occur on the CAN bus
	 */

	/* common for all type of bus errors */
	priv->can.can_stats.bus_error++;
	stats->rx_errors++;
	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
	cf->data[2] |= CAN_ERR_PROT_UNSPEC;

	switch (lec_type) {
	case LEC_STUFF_ERROR:
		netdev_dbg(dev, "stuff error\n");
		cf->data[2] |= CAN_ERR_PROT_STUFF;
		break;
	case LEC_FORM_ERROR:
		netdev_dbg(dev, "form error\n");
		cf->data[2] |= CAN_ERR_PROT_FORM;
		break;
	case LEC_ACK_ERROR:
		netdev_dbg(dev, "ack error\n");
		cf->data[3] |= (CAN_ERR_PROT_LOC_ACK |
				CAN_ERR_PROT_LOC_ACK_DEL);
		break;
	case LEC_BIT1_ERROR:
		netdev_dbg(dev, "bit1 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT1;
		break;
	case LEC_BIT0_ERROR:
		netdev_dbg(dev, "bit0 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT0;
		break;
	case LEC_CRC_ERROR:
		netdev_dbg(dev, "CRC error\n");
		cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
				CAN_ERR_PROT_LOC_CRC_DEL);
		break;
	default:
		break;
	}

	/* set a `lec` value so that we can check for updates later */
	priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);

	netif_receive_skb(skb);
	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;

	return 1;
}
Example No. 29
static int c_can_handle_state_change(struct net_device *dev,
				enum c_can_bus_error_types error_type)
{
	unsigned int reg_err_counter;
	unsigned int rx_err_passive;
	struct c_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	struct can_berr_counter bec;

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	c_can_get_berr_counter(dev, &bec);
	reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
	rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
				ERR_CNT_RP_SHIFT;

	switch (error_type) {
	case C_CAN_ERROR_WARNING:
		/* error warning state */
		priv->can.can_stats.error_warning++;
		priv->can.state = CAN_STATE_ERROR_WARNING;
		cf->can_id |= CAN_ERR_CRTL;
		cf->data[1] = (bec.txerr > bec.rxerr) ?
			CAN_ERR_CRTL_TX_WARNING :
			CAN_ERR_CRTL_RX_WARNING;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;

		break;
	case C_CAN_ERROR_PASSIVE:
		/* error passive state */
		priv->can.can_stats.error_passive++;
		priv->can.state = CAN_STATE_ERROR_PASSIVE;
		cf->can_id |= CAN_ERR_CRTL;
		if (rx_err_passive)
			cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
		if (bec.txerr > 127)
			cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;

		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
		break;
	case C_CAN_BUS_OFF:
		/* bus-off state */
		priv->can.state = CAN_STATE_BUS_OFF;
		cf->can_id |= CAN_ERR_BUSOFF;
		/*
		 * disable all interrupts in bus-off mode to ensure that
		 * the CPU is not hogged down
		 */
		c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
		can_bus_off(dev);
		break;
	default:
		break;
	}

	netif_receive_skb(skb);
	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;

	return 1;
}
Example No. 30
static int ti_hecc_error(struct net_device *ndev, int int_status,
	int err_status)
{
	struct ti_hecc_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(ndev, &cf);
	if (!skb) {
		if (printk_ratelimit())
			dev_err(priv->ndev->dev.parent,
				"ti_hecc_error: alloc_can_err_skb() failed\n");
		return -ENOMEM;
	}

	if (int_status & HECC_CANGIF_WLIF) { /* warning level int */
		if ((int_status & HECC_CANGIF_BOIF) == 0) {
			priv->can.state = CAN_STATE_ERROR_WARNING;
			++priv->can.can_stats.error_warning;
			cf->can_id |= CAN_ERR_CRTL;
			if (hecc_read(priv, HECC_CANTEC) > 96)
				cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
			if (hecc_read(priv, HECC_CANREC) > 96)
				cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
		}
		hecc_set_bit(priv, HECC_CANES, HECC_CANES_EW);
		dev_dbg(priv->ndev->dev.parent, "Error Warning interrupt\n");
		hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
	}

	if (int_status & HECC_CANGIF_EPIF) { /* error passive int */
		if ((int_status & HECC_CANGIF_BOIF) == 0) {
			priv->can.state = CAN_STATE_ERROR_PASSIVE;
			++priv->can.can_stats.error_passive;
			cf->can_id |= CAN_ERR_CRTL;
			if (hecc_read(priv, HECC_CANTEC) > 127)
				cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
			if (hecc_read(priv, HECC_CANREC) > 127)
				cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
		}
		hecc_set_bit(priv, HECC_CANES, HECC_CANES_EP);
		dev_dbg(priv->ndev->dev.parent, "Error passive interrupt\n");
		hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
	}

	/*
	 * Need to check busoff condition in error status register too to
	 * ensure warning interrupts don't hog the system
	 */
	if ((int_status & HECC_CANGIF_BOIF) || (err_status & HECC_CANES_BO)) {
		priv->can.state = CAN_STATE_BUS_OFF;
		cf->can_id |= CAN_ERR_BUSOFF;
		hecc_set_bit(priv, HECC_CANES, HECC_CANES_BO);
		hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
		/* Disable all interrupts in bus-off to avoid int hog */
		hecc_write(priv, HECC_CANGIM, 0);
		can_bus_off(ndev);
	}

	if (err_status & HECC_BUS_ERROR) {
		++priv->can.can_stats.bus_error;
		cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
		cf->data[2] |= CAN_ERR_PROT_UNSPEC;
		if (err_status & HECC_CANES_FE) {
			hecc_set_bit(priv, HECC_CANES, HECC_CANES_FE);
			cf->data[2] |= CAN_ERR_PROT_FORM;
		}
		if (err_status & HECC_CANES_BE) {
			hecc_set_bit(priv, HECC_CANES, HECC_CANES_BE);
			cf->data[2] |= CAN_ERR_PROT_BIT;
		}
		if (err_status & HECC_CANES_SE) {
			hecc_set_bit(priv, HECC_CANES, HECC_CANES_SE);
			cf->data[2] |= CAN_ERR_PROT_STUFF;
		}
		if (err_status & HECC_CANES_CRCE) {
			hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE);
			cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
					CAN_ERR_PROT_LOC_CRC_DEL;
		}
		if (err_status & HECC_CANES_ACKE) {
			hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE);
			cf->data[2] |= CAN_ERR_PROT_LOC_ACK |
					CAN_ERR_PROT_LOC_ACK_DEL;
		}
	}

	netif_receive_skb(skb);
	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	return 0;
}
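Common to every example above is the same receive contract: fill the skb, set skb->protocol with eth_type_trans(), update the device statistics, and hand the buffer to the stack exactly once, never touching it after netif_receive_skb(). A minimal sketch of that pattern follows; my_hw_rx() and MY_RX_BUF_SIZE are hypothetical stand-ins, not a real driver API:

#define MY_RX_BUF_SIZE 1536			/* hypothetical buffer size */

extern int my_hw_rx(struct net_device *dev, void *buf, int size); /* hypothetical */

static int my_rx_one(struct net_device *dev)
{
	struct sk_buff *skb;
	int len;

	skb = netdev_alloc_skb_ip_align(dev, MY_RX_BUF_SIZE);
	if (!skb) {
		dev->stats.rx_dropped++;
		return -ENOMEM;
	}

	len = my_hw_rx(dev, skb->data, MY_RX_BUF_SIZE);
	if (len <= 0) {
		dev_kfree_skb(skb);
		return len;
	}

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, dev); /* also pulls the Ethernet header */

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;

	/* netif_receive_skb() consumes the skb: do not touch it afterwards */
	return netif_receive_skb(skb);
}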