Example No. 1
/**
 * dn_return_short - Return a short packet to its sender
 * @skb: The packet to return
 *
 */
static int dn_return_short(struct sk_buff *skb)
{
	struct dn_skb_cb *cb;
	unsigned char *ptr;
	__le16 *src;
	__le16 *dst;
	__le16 tmp;

	/* Add back headers */
	skb_push(skb, skb->data - skb->nh.raw);

	if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
		return NET_RX_DROP;

	cb = DN_SKB_CB(skb);
	/* Skip packet length and point to flags */
	ptr = skb->data + 2;
	*ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;

	dst = (__le16 *)ptr;
	ptr += 2;
	src = (__le16 *)ptr;
	ptr += 2;
	*ptr = 0; /* Zero hop count */

	/* Swap source and destination */
	tmp  = *src;
	*src = *dst;
	*dst = tmp;

	skb->pkt_type = PACKET_OUTGOING;
	dn_rt_finish_output(skb, NULL, NULL);
	return NET_RX_SUCCESS;
}
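
For reference, the pointer arithmetic above walks the short-format DECnet routing header: a 2-byte packet length, a flags byte, two little-endian node addresses, and a hop count. A minimal sketch of that layout, with illustrative names inferred from the offsets rather than taken from the source:

#include <stdint.h>

/* Short-format routing header as implied by the offsets above; the
 * 2-byte packet length precedes it, hence ptr = skb->data + 2. */
struct short_route_hdr {
	uint8_t  msgflg;   /* DN_RT_F_* bits: RQR cleared, RTS set */
	uint16_t dstnode;  /* little-endian destination node */
	uint16_t srcnode;  /* little-endian source node, swapped with dst */
	uint8_t  forward;  /* hop count, zeroed before returning */
} __attribute__((packed));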
Example No. 2
/**
 * @brief Protocol driver's receive function
 *
 * @param skb      received socket buffer
 * @param dev      net device the packet arrived on
 * @param type     packet_type entry that matched the frame
 * @param orig_dev original receiving net device
 *
 * @return 0
 */
int
__gmac_recv(struct sk_buff  *skb, struct net_device  *dev,
            struct packet_type  *type, struct net_device   *orig_dev)
{
    athhdr_t           *ath;     
    ethhdr_t           *eth;
    hif_gmac_node_t    *node = NULL;
    a_uint32_t          idx = 0;
    hif_gmac_softc_t   *sc = __gmac_pkt.af_packet_priv;

    if (skb_shared(skb)) {
        skb = skb_unshare(skb, GFP_ATOMIC);
        if (!skb)
            return 0;
    }

    eth = eth_hdr(skb); /* Linux specific */
    ath = ath_hdr(skb);

    /* Query the MAC table for the sending node */
    node = __gmac_qry_tbl(eth->h_source);
    if (!node) {
        kfree_skb(skb);
        return 0;
    }

    idx = ((node->hdr.ath.type.proto == ath->type.proto) &&
           chk_ucast(eth->h_dest));

    sc->recv_fn[idx](skb, node, ath->type.proto);

    return 0;
}
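
The final dispatch uses a boolean as a table index: slot 1 is the fast path (the node's protocol matches and the destination is unicast), slot 0 handles everything else. A hedged sketch of that idiom, with illustrative types:

#include <stdbool.h>

typedef void (*recv_fn_t)(void *pkt);

/* Boolean-indexed dispatch: fn[1] on the fast path, fn[0] otherwise. */
static void dispatch(recv_fn_t fn[2], void *pkt,
                     bool proto_match, bool is_ucast)
{
    fn[proto_match && is_ucast](pkt);
}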
Example No. 3
static int qinq_rcv(struct sk_buff *skb, struct net_device *dev,
                    struct packet_type *pt, struct net_device *orig_dev)
{
    struct dsa_switch_tree *dst;
    struct dsa_switch *ds;
    struct vlan_hdr *vhdr;
    int source_port;

    dst = dev->dsa_ptr;
    if (unlikely(dst == NULL))
        goto out_drop;
    ds = dst->ds[0];

    skb = skb_unshare(skb, GFP_ATOMIC);
    if (skb == NULL)
        goto out;

    if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
        goto out_drop;

    vhdr = (struct vlan_hdr *)skb->data;
    source_port = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
    if (source_port >= DSA_MAX_PORTS || ds->ports[source_port] == NULL)
        goto out_drop;

    /* Remove the outermost VLAN tag and update checksum. */
    skb_pull_rcsum(skb, VLAN_HLEN);
    memmove(skb->data - ETH_HLEN,
            skb->data - ETH_HLEN - VLAN_HLEN,
            2 * ETH_ALEN);

    skb->dev = ds->ports[source_port];
    skb_push(skb, ETH_HLEN);
    skb->pkt_type = PACKET_HOST;
    skb->protocol = eth_type_trans(skb, skb->dev);

    skb->dev->stats.rx_packets++;
    skb->dev->stats.rx_bytes += skb->len;

    netif_receive_skb(skb);

    return 0;

out_drop:
    kfree_skb(skb);
out:
    return 0;
}
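
The untagging above is the standard buffer surgery for removing an 802.1Q tag: slide the two MAC addresses forward by VLAN_HLEN so they end up directly in front of the encapsulated EtherType. The same operation over a flat buffer, as a userspace sketch (function name illustrative):

#include <stdint.h>
#include <string.h>

#define ETH_ALEN  6
#define VLAN_HLEN 4

/* Strip the outer VLAN tag from a frame starting at buf; returns a
 * pointer to the new frame start, VLAN_HLEN bytes into the buffer. */
static uint8_t *strip_vlan_tag(uint8_t *buf)
{
    /* Move dst + src MACs (12 bytes) over the tag's position. */
    memmove(buf + VLAN_HLEN, buf, 2 * ETH_ALEN);
    return buf + VLAN_HLEN;
}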
Example No. 4
static int trailer_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	struct dsa_switch_tree *dst = dev->dsa_ptr;
	struct dsa_switch *ds;
	u8 *trailer;
	int source_port;

	if (unlikely(dst == NULL))
		goto out_drop;
	ds = dst->ds[0];

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto out;

	if (skb_linearize(skb))
		goto out_drop;

	trailer = skb_tail_pointer(skb) - 4;
	if (trailer[0] != 0x80 || (trailer[1] & 0xf8) != 0x00 ||
	    (trailer[2] & 0xef) != 0x00 || trailer[3] != 0x00)
		goto out_drop;

	source_port = trailer[1] & 7;
	if (source_port >= DSA_MAX_PORTS || ds->ports[source_port] == NULL)
		goto out_drop;

	pskb_trim_rcsum(skb, skb->len - 4);

	skb->dev = ds->ports[source_port];
	skb_push(skb, ETH_HLEN);
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, skb->dev);

	skb->dev->stats.rx_packets++;
	skb->dev->stats.rx_bytes += skb->len;

	netif_receive_skb(skb);

	return 0;

out_drop:
	kfree_skb(skb);
out:
	return 0;
}
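
The trailer check matches a fixed 4-byte signature and extracts a 3-bit source port. A compact restatement (helper name illustrative; masks copied from the check above):

#include <stdbool.h>
#include <stdint.h>

/* Byte 0 must be 0x80, byte 1 carries the port in its low 3 bits,
 * byte 2 may only use bit 4, and byte 3 must be zero. */
static bool trailer_valid(const uint8_t t[4], int *port)
{
	if (t[0] != 0x80 || (t[1] & 0xf8) != 0x00 ||
	    (t[2] & 0xef) != 0x00 || t[3] != 0x00)
		return false;
	*port = t[1] & 7;
	return true;
}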
Example No. 5
static int spx_route_skb(struct spx_opt *pdata, struct sk_buff *skb, int type)
{
	struct sk_buff *skb2;
	int err = 0;

	skb = skb_unshare(skb, GFP_ATOMIC);
	if(skb == NULL)
		return (-ENOBUFS);

	switch(type)
	{
		case (CONREQ):
		case (DATA):
			if(!skb_queue_empty(&pdata->retransmit_queue))
			{
				skb_queue_tail(&pdata->transmit_queue, skb);
				return 0;
			}
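
		/* fall through: retransmit queue empty, send immediately */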

		case (TQUEUE):
			pdata->retransmit.expires = jiffies + spx_calc_rtt(0);
			add_timer(&pdata->retransmit);

			skb2 = skb_clone(skb, GFP_BUFFER);
			if(skb2 == NULL)
				return -ENOBUFS;
			skb_queue_tail(&pdata->retransmit_queue, skb2);
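
		/* fall through: transmit the frame via the router below */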

		case (ACK):
		case (CONACK):
		case (WDREQ):
		case (WDACK):
		case (DISCON):
		case (DISACK):
		case (RETRAN):
		default:
			/* Send data */
			err = ipxrtr_route_skb(skb);
			if(err)
				kfree_skb(skb);
	}

	return (err);
}
Example No. 6
static int
usb_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int ret;
	unsigned long flags;

	if (next_tx_skb) {
		printk("%s: called with next_tx_skb != NULL\n", __FUNCTION__);
		return 1;
	}

	if (skb_shared (skb)) {
		struct sk_buff  *skb2 = skb_unshare(skb, GFP_ATOMIC);
		if (!skb2) {
			usbe_info.stats.tx_dropped++;
			dev_kfree_skb(skb);
			return 1;
		}
		skb = skb2;
	}

	if ((skb->len % usb_wsize) == 0) {
		/* Lengthen by one byte so the transfer ends in a short USB
		 * packet; the other side will ignore the padding byte. */
		skb->len++;
	}

	local_irq_save(flags);
	if (cur_tx_skb) {
		next_tx_skb = skb;
		netif_stop_queue(dev);
	} else {
		cur_tx_skb = skb;
		dev->trans_start = jiffies;
		ret = sa1100_usb_send(skb->data, skb->len, usb_send_callback);
		if (ret) {
			/* If the USB core can't accept the packet, we drop it. */
			dev_kfree_skb(skb);
			cur_tx_skb = NULL;
			usbe_info.stats.tx_carrier_errors++;
		}
	}
	local_irq_restore(flags);
	return 0;
}
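
The length bump in the middle prevents a transfer whose size is an exact multiple of the endpoint's packet size, which the receiver could not distinguish from a transfer with more data still to come without a zero-length packet. The rule in isolation, as a hedged sketch:

#include <stddef.h>

/* Pad a bulk transfer so its final USB packet is short; the receiver
 * then detects end-of-transfer without a zero-length packet and
 * ignores the extra byte. */
static size_t pad_for_short_packet(size_t len, size_t max_packet)
{
	return (len % max_packet == 0) ? len + 1 : len;
}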
Example No. 7
/**
 * dn_return_long - Return a long packet to its sender
 * @skb: The long format packet to return
 *
 */
static int dn_return_long(struct sk_buff *skb)
{
	struct dn_skb_cb *cb;
	unsigned char *ptr;
	unsigned char *src_addr, *dst_addr;
	unsigned char tmp[ETH_ALEN];

	/* Add back all headers */
	skb_push(skb, skb->data - skb->nh.raw);

	if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
		return NET_RX_DROP;

	cb = DN_SKB_CB(skb);
	/* Ignore packet length and point to flags */
	ptr = skb->data + 2;

	/* Skip padding */
	if (*ptr & DN_RT_F_PF) {
		char padlen = (*ptr & ~DN_RT_F_PF);
		ptr += padlen;
	}

	*ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;
	ptr += 2;
	dst_addr = ptr;
	ptr += 8;
	src_addr = ptr;
	ptr += 6;
	*ptr = 0; /* Zero hop count */

	/* Swap source and destination */
	memcpy(tmp, src_addr, ETH_ALEN);
	memcpy(src_addr, dst_addr, ETH_ALEN);
	memcpy(dst_addr, tmp, ETH_ALEN);

	skb->pkt_type = PACKET_OUTGOING;
	dn_rt_finish_output(skb, dst_addr, src_addr);
	return NET_RX_SUCCESS;
}
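
As with the short format, the offsets here trace a long-format routing header: flags, a destination area/subarea pair, a 6-byte destination id, a source area/subarea pair, a 6-byte source id, and the byte the function zeroes. A sketch of that layout, with illustrative field names inferred from the offsets:

#include <stdint.h>

/* Long-format routing header; the 2-byte packet length and any
 * DN_RT_F_PF padding precede the flags byte. */
struct long_route_hdr {
	uint8_t msgflg;              /* flags: RQR cleared, RTS set */
	uint8_t d_area, d_subarea;   /* skipped via ptr += 2 */
	uint8_t d_id[6];             /* destination, swapped with s_id */
	uint8_t s_area, s_subarea;
	uint8_t s_id[6];             /* source */
	uint8_t hops;                /* zeroed before returning */
} __attribute__((packed));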
Example No. 8
/* hard_start_xmit function for data interfaces (wlan#, wlan#wds#, wlan#sta)
 * Convert Ethernet header into a suitable IEEE 802.11 header depending on
 * device configuration. */
netdev_tx_t hostap_data_start_xmit(struct sk_buff *skb,
				   struct net_device *dev)
{
	struct hostap_interface *iface;
	local_info_t *local;
	int need_headroom, need_tailroom = 0;
	struct ieee80211_hdr hdr;
	u16 fc, ethertype = 0;
	enum {
		WDS_NO = 0, WDS_OWN_FRAME, WDS_COMPLIANT_FRAME
	} use_wds = WDS_NO;
	u8 *encaps_data;
	int hdr_len, encaps_len, skip_header_bytes;
	int to_assoc_ap = 0;
	struct hostap_skb_tx_data *meta;

	iface = netdev_priv(dev);
	local = iface->local;

	if (skb->len < ETH_HLEN) {
		printk(KERN_DEBUG "%s: hostap_data_start_xmit: short skb "
		       "(len=%d)\n", dev->name, skb->len);
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (local->ddev != dev) {
		use_wds = (local->iw_mode == IW_MODE_MASTER &&
			   !(local->wds_type & HOSTAP_WDS_STANDARD_FRAME)) ?
			WDS_OWN_FRAME : WDS_COMPLIANT_FRAME;
		if (dev == local->stadev) {
			to_assoc_ap = 1;
			use_wds = WDS_NO;
		} else if (dev == local->apdev) {
			printk(KERN_DEBUG "%s: prism2_tx: trying to use "
			       "AP device with Ethernet net dev\n", dev->name);
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	} else {
		if (local->iw_mode == IW_MODE_REPEAT) {
			printk(KERN_DEBUG "%s: prism2_tx: trying to use "
			       "non-WDS link in Repeater mode\n", dev->name);
			kfree_skb(skb);
			return NETDEV_TX_OK;
		} else if (local->iw_mode == IW_MODE_INFRA &&
			   (local->wds_type & HOSTAP_WDS_AP_CLIENT) &&
			   !ether_addr_equal(skb->data + ETH_ALEN, dev->dev_addr)) {
			/* AP client mode: send frames with foreign src addr
			 * using 4-addr WDS frames */
			use_wds = WDS_COMPLIANT_FRAME;
		}
	}

	/* Incoming skb->data: dst_addr[6], src_addr[6], proto[2], payload
	 * ==>
	 * Prism2 TX frame with 802.11 header:
	 * txdesc (address order depending on used mode; includes dst_addr and
	 * src_addr), possible encapsulation (RFC1042/Bridge-Tunnel;
	 * proto[2], payload {, possible addr4[6]} */

	ethertype = (skb->data[12] << 8) | skb->data[13];

	memset(&hdr, 0, sizeof(hdr));

	/* Length of data after IEEE 802.11 header */
	encaps_data = NULL;
	encaps_len = 0;
	skip_header_bytes = ETH_HLEN;
	if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) {
		encaps_data = bridge_tunnel_header;
		encaps_len = sizeof(bridge_tunnel_header);
		skip_header_bytes -= 2;
	} else if (ethertype >= 0x600) {
		encaps_data = rfc1042_header;
		encaps_len = sizeof(rfc1042_header);
		skip_header_bytes -= 2;
	}

	fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;
	hdr_len = IEEE80211_DATA_HDR3_LEN;

	if (use_wds != WDS_NO) {
		/* Note! Prism2 station firmware has problems with sending real
		 * 802.11 frames with four addresses; until these problems can
		 * be fixed or worked around, 4-addr frames needed for WDS are
		 * using incompatible format: FromDS flag is not set and the
		 * fourth address is added after the frame payload; it is
		 * assumed, that the receiving station knows how to handle this
		 * frame format */

		if (use_wds == WDS_COMPLIANT_FRAME) {
			fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS;
			/* From&To DS: Addr1 = RA, Addr2 = TA, Addr3 = DA,
			 * Addr4 = SA */
			skb_copy_from_linear_data_offset(skb, ETH_ALEN,
							 &hdr.addr4, ETH_ALEN);
			hdr_len += ETH_ALEN;
		} else {
			/* bogus 4-addr format to workaround Prism2 station
			 * f/w bug */
			fc |= IEEE80211_FCTL_TODS;
			/* From DS: Addr1 = DA (used as RA),
			 * Addr2 = BSSID (used as TA), Addr3 = SA (used as DA),
			 */

			/* SA from skb->data + ETH_ALEN will be added after
			 * frame payload; use hdr.addr4 as a temporary buffer
			 */
			skb_copy_from_linear_data_offset(skb, ETH_ALEN,
							 &hdr.addr4, ETH_ALEN);
			need_tailroom += ETH_ALEN;
		}

		/* send broadcast and multicast frames to broadcast RA, if
		 * configured; otherwise, use unicast RA of the WDS link */
		if ((local->wds_type & HOSTAP_WDS_BROADCAST_RA) &&
		    is_multicast_ether_addr(skb->data))
			eth_broadcast_addr(hdr.addr1);
		else if (iface->type == HOSTAP_INTERFACE_WDS)
			memcpy(&hdr.addr1, iface->u.wds.remote_addr,
			       ETH_ALEN);
		else
			memcpy(&hdr.addr1, local->bssid, ETH_ALEN);
		memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN);
		skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN);
	} else if (local->iw_mode == IW_MODE_MASTER && !to_assoc_ap) {
		fc |= IEEE80211_FCTL_FROMDS;
		/* From DS: Addr1 = DA, Addr2 = BSSID, Addr3 = SA */
		skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN);
		memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN);
		skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr3,
						 ETH_ALEN);
	} else if (local->iw_mode == IW_MODE_INFRA || to_assoc_ap) {
		fc |= IEEE80211_FCTL_TODS;
		/* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
		memcpy(&hdr.addr1, to_assoc_ap ?
		       local->assoc_ap_addr : local->bssid, ETH_ALEN);
		skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2,
						 ETH_ALEN);
		skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN);
	} else if (local->iw_mode == IW_MODE_ADHOC) {
		/* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */
		skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN);
		skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2,
						 ETH_ALEN);
		memcpy(&hdr.addr3, local->bssid, ETH_ALEN);
	}

	hdr.frame_control = cpu_to_le16(fc);

	skb_pull(skb, skip_header_bytes);
	need_headroom = local->func->need_tx_headroom + hdr_len + encaps_len;
	if (skb_tailroom(skb) < need_tailroom) {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (skb == NULL) {
			iface->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
		if (pskb_expand_head(skb, need_headroom, need_tailroom,
				     GFP_ATOMIC)) {
			kfree_skb(skb);
			iface->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
	} else if (skb_headroom(skb) < need_headroom) {
		struct sk_buff *tmp = skb;
		skb = skb_realloc_headroom(skb, need_headroom);
		kfree_skb(tmp);
		if (skb == NULL) {
			iface->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (skb == NULL) {
			iface->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
	}

	if (encaps_data)
		memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len);
	memcpy(skb_push(skb, hdr_len), &hdr, hdr_len);
	if (use_wds == WDS_OWN_FRAME) {
		memcpy(skb_put(skb, ETH_ALEN), &hdr.addr4, ETH_ALEN);
	}

	iface->stats.tx_packets++;
	iface->stats.tx_bytes += skb->len;

	skb_reset_mac_header(skb);
	meta = (struct hostap_skb_tx_data *) skb->cb;
	memset(meta, 0, sizeof(*meta));
	meta->magic = HOSTAP_SKB_TX_DATA_MAGIC;
	if (use_wds)
		meta->flags |= HOSTAP_TX_FLAGS_WDS;
	meta->ethertype = ethertype;
	meta->iface = iface;

	/* Send IEEE 802.11 encapsulated frame using the master radio device */
	skb->dev = local->dev;
	dev_queue_xmit(skb);
	return NETDEV_TX_OK;
}
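
The encapsulation selection near the top follows the usual Ethernet-to-802.11 rules: AppleTalk ARP and IPX keep the bridge-tunnel SNAP header, every other EtherType at or above 0x600 gets RFC 1042 SNAP, and true 802.3 length fields get no prefix. A standalone restatement (the two 6-byte headers are the well-known constants; the helper name is illustrative):

#include <stddef.h>
#include <stdint.h>

static const uint8_t rfc1042_header[6] =
	{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
static const uint8_t bridge_tunnel_header[6] =
	{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };

#define ETH_P_AARP 0x80f3
#define ETH_P_IPX  0x8137

/* Choose the LLC/SNAP prefix for a frame being converted to 802.11;
 * returns NULL (len 0) for raw 802.3 length fields below 0x600. */
static const uint8_t *snap_prefix(uint16_t ethertype, size_t *len)
{
	*len = 6;
	if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX)
		return bridge_tunnel_header;
	if (ethertype >= 0x600)
		return rfc1042_header;
	*len = 0;
	return NULL;
}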
Example No. 9
/* Called only from software IRQ */
static struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb,
					  struct lib80211_crypt_data *crypt)
{
	struct hostap_interface *iface;
	local_info_t *local;
	struct ieee80211_hdr *hdr;
	int prefix_len, postfix_len, hdr_len, res;

	iface = netdev_priv(skb->dev);
	local = iface->local;

	if (skb->len < IEEE80211_DATA_HDR3_LEN) {
		kfree_skb(skb);
		return NULL;
	}

	if (local->tkip_countermeasures &&
	    strcmp(crypt->ops->name, "TKIP") == 0) {
		hdr = (struct ieee80211_hdr *) skb->data;
		if (net_ratelimit()) {
			printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
			       "TX packet to %pM\n",
			       local->dev->name, hdr->addr1);
		}
		kfree_skb(skb);
		return NULL;
	}

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	prefix_len = crypt->ops->extra_mpdu_prefix_len +
		crypt->ops->extra_msdu_prefix_len;
	postfix_len = crypt->ops->extra_mpdu_postfix_len +
		crypt->ops->extra_msdu_postfix_len;
	if ((skb_headroom(skb) < prefix_len ||
	     skb_tailroom(skb) < postfix_len) &&
	    pskb_expand_head(skb, prefix_len, postfix_len, GFP_ATOMIC)) {
		kfree_skb(skb);
		return NULL;
	}

	hdr = (struct ieee80211_hdr *) skb->data;
	hdr_len = hostap_80211_get_hdrlen(hdr->frame_control);

	/* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
	 * call both MSDU and MPDU encryption functions from here. */
	atomic_inc(&crypt->refcnt);
	res = 0;
	if (crypt->ops->encrypt_msdu)
		res = crypt->ops->encrypt_msdu(skb, hdr_len, crypt->priv);
	if (res == 0 && crypt->ops->encrypt_mpdu)
		res = crypt->ops->encrypt_mpdu(skb, hdr_len, crypt->priv);
	atomic_dec(&crypt->refcnt);
	if (res < 0) {
		kfree_skb(skb);
		return NULL;
	}

	return skb;
}
Example No. 10
/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 * 	NET_XMIT_DROP: queue length didn't change.
 *      NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	pr_debug("netem_enqueue skb=%p\n", skb);

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Random packet drop 0 => none, ~0 => all */
	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = sch->dev->qdisc;
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		rootq->enqueue(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying it.  If the packet
	 * is going to be hardware checksummed, then do the checksum now in
	 * software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC))
		    || (skb->ip_summed == CHECKSUM_PARTIAL
			&& skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = (struct netem_skb_cb *)skb->cb;
	if (q->gap == 0 		/* not doing reordering */
	    || q->counter < q->gap 	/* inside last reordering gap */
	    || q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();
		cb->time_to_send = now + delay;
		++q->counter;
		ret = q->qdisc->enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;
		ret = q->qdisc->ops->requeue(skb, q->qdisc);
	}

	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
	} else
		sch->qstats.drops++;

	pr_debug("netem: enqueue ret %d\n", ret);
	return ret;
}
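
The corruption branch flips exactly one uniformly chosen bit inside the packet's linear data. The same mangle over a plain buffer (rand() standing in for net_random()):

#include <stdint.h>
#include <stdlib.h>

/* Flip one random bit in buf[0..len): pick a byte, then a bit. */
static void corrupt_one_bit(uint8_t *buf, size_t len)
{
	buf[rand() % len] ^= 1u << (rand() % 8);
}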
Example No. 11
/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 * 	NET_XMIT_DROP: queue length didn't change.
 *      NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int count = 1;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		qdisc_enqueue_root(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying it.  If the packet
	 * is going to be hardware checksummed, then do the checksum now in
	 * software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
		    (skb->ip_summed == CHECKSUM_PARTIAL &&
		     skb_checksum_help(skb)))
			return qdisc_drop(skb, sch);

		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
		return qdisc_reshape_fail(skb, sch);

	sch->qstats.backlog += qdisc_pkt_len(skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();

		if (q->rate) {
			struct sk_buff_head *list = &sch->q;

			delay += packet_len_2_sched_time(skb->len, q);

			if (!skb_queue_empty(list)) {
				/*
				 * Last packet in queue is reference point (now).
				 * First packet in queue is already in flight,
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= now - netem_skb_cb(skb_peek(list))->time_to_send;
				now = netem_skb_cb(skb_peek_tail(list))->time_to_send;
			}
		}

		cb->time_to_send = now + delay;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		__skb_queue_head(&sch->q, skb);
		sch->qstats.requeues++;
	}

	return NET_XMIT_SUCCESS;
}
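
The rate branch schedules each packet at its serialization time on top of the base delay, anchored at the previous tail packet's departure time, with the head packet's already-elapsed flight time credited back. A simplified restatement of this version's arithmetic, reduced to plain integers (names illustrative):

/* Returns the absolute time_to_send for a newly enqueued packet under
 * netem-style rate shaping, per the branch above. */
static long rate_time_to_send(long now, long base_delay, long pkt_time,
			      long head_tts, long tail_tts, int queued)
{
	long delay = base_delay + pkt_time;
	if (queued) {
		delay -= now - head_tts; /* credit time already in flight */
		now = tail_tts;          /* anchor at the queue tail */
	}
	return now + delay;
}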
Example No. 12
int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device_stats *stats = vlan_dev_get_stats(dev);
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);

	/* Handle non-VLAN frames if they are sent to us, for example by DHCP.
	 *
	 * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
	 * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
	 */

	if (veth->h_vlan_proto != __constant_htons(ETH_P_8021Q)) {
		unsigned short veth_TCI;

		/* This is not a VLAN frame...but we can fix that! */
		VLAN_DEV_INFO(dev)->cnt_encap_on_xmit++;

#ifdef VLAN_DEBUG
		printk(VLAN_DBG "%s: proto to encap: 0x%hx (hbo)\n",
			__FUNCTION__, ntohs(veth->h_vlan_proto));
#endif

		if (skb_headroom(skb) < VLAN_HLEN) {
			struct sk_buff *sk_tmp = skb;
			skb = skb_realloc_headroom(sk_tmp, VLAN_HLEN);
			kfree_skb(sk_tmp);
			if (skb == NULL) {
				stats->tx_dropped++;
				return 0;
			}
			VLAN_DEV_INFO(dev)->cnt_inc_headroom_on_tx++;
		} else {
			if (!(skb = skb_unshare(skb, GFP_ATOMIC))) {
				printk(KERN_ERR "vlan: failed to unshare skbuff\n");
				stats->tx_dropped++;
				return 0;
			}
		}
		veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);

		/* Move the mac addresses to the beginning of the new header. */
		memmove(skb->data, skb->data + VLAN_HLEN, 12);

		/* first, the ethernet type */
		/* put_unaligned(__constant_htons(ETH_P_8021Q), &veth->h_vlan_proto); */
		veth->h_vlan_proto = __constant_htons(ETH_P_8021Q);

		/* Now, construct the second two bytes. This field looks something
		 * like:
		 * usr_priority: 3 bits	 (high bits)
		 * CFI		 1 bit
		 * VLAN ID	 12 bits (low bits)
		 */
		veth_TCI = VLAN_DEV_INFO(dev)->vlan_id;
		veth_TCI |= vlan_dev_get_egress_qos_mask(dev, skb);

		veth->h_vlan_TCI = htons(veth_TCI);
	}

	skb->dev = VLAN_DEV_INFO(dev)->real_dev;

#ifdef VLAN_DEBUG
	printk(VLAN_DBG "%s: about to send skb: %p to dev: %s\n",
		__FUNCTION__, skb, skb->dev->name);
	printk(VLAN_DBG "  %2hx.%2hx.%2hx.%2xh.%2hx.%2hx %2hx.%2hx.%2hx.%2hx.%2hx.%2hx %4hx %4hx %4hx\n",
	       veth->h_dest[0], veth->h_dest[1], veth->h_dest[2], veth->h_dest[3], veth->h_dest[4], veth->h_dest[5],
	       veth->h_source[0], veth->h_source[1], veth->h_source[2], veth->h_source[3], veth->h_source[4], veth->h_source[5],
	       veth->h_vlan_proto, veth->h_vlan_TCI, veth->h_vlan_encapsulated_proto);
#endif

	stats->tx_packets++; /* for statistics only */
	stats->tx_bytes += skb->len;

	dev_queue_xmit(skb);

	return 0;
}
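
The TCI assembled above packs three fields into 16 bits: user priority in the top 3 bits, CFI in bit 12, and the VLAN ID in the low 12 bits. As a worked example, VID 100 (0x064) with priority 5 and CFI 0 gives (5 << 13) | 0x064 = 0xA064. A sketch of the packing (helper name illustrative):

#include <stdint.h>

/* Build an 802.1Q TCI in host byte order: prio(3) | CFI(1) | VID(12). */
static uint16_t build_tci(uint16_t vid, uint8_t prio, uint8_t cfi)
{
	return (uint16_t)(((prio & 0x7) << 13) | ((cfi & 0x1) << 12) |
			  (vid & 0xfff));
}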