Example #1
static inline int serval_ip_local_out(struct sk_buff *skb)
{
	int err;

#if defined(OS_LINUX_KERNEL)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25))
	err = ip_local_out(skb);
#else
	struct iphdr *iph = ip_hdr(skb);

	iph->tot_len = htons(skb->len);
	ip_send_check(iph);

	err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, skb->dst->dev,
		      dst_output);
#endif
#else /* OS_USER */
	/* Calculate checksum */
	ip_send_check(ip_hdr(skb));

	err = dev_queue_xmit(skb);
#endif

	if (err < 0)
		LOG_ERR("packet_xmit failed err=%d\n", err);

	return err;
}
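Every example in this collection follows the same basic contract: fill an sk_buff, point skb->dev at the egress interface, and hand the frame to the queueing layer with dev_queue_xmit(), which consumes the skb even on failure. As a baseline for reading the rest, here is a minimal sketch of that pattern; the EtherType (ETH_P_802_EX1, the local-experiment value) and the one-byte payload are placeholders, not taken from any example.

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <linux/skbuff.h>

/* Minimal transmit sketch: allocate, reserve link-layer headroom, append a
 * payload, stamp the metadata, and queue the frame on dev. */
static int xmit_one_byte(struct net_device *dev, u8 byte)
{
	struct sk_buff *skb;

	skb = alloc_skb(LL_RESERVED_SPACE(dev) + 1, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));	/* room for the hard header */
	*(u8 *)skb_put(skb, 1) = byte;			/* payload */

	skb->dev = dev;
	skb->protocol = htons(ETH_P_802_EX1);		/* placeholder EtherType */
	skb->priority = TC_PRIO_CONTROL;

	return dev_queue_xmit(skb);	/* consumes skb, success or not */
}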
Example #2
/* Transmit routine used by generic_netmap_txsync(). Returns 0 on success
   and -1 on error (the frame may have been dropped, or a hard transmit
   error may have occurred). */
int generic_xmit_frame(struct ifnet *ifp, struct mbuf *m,
	void *addr, u_int len, u_int ring_nr)
{
    netdev_tx_t ret;

    /* Reset the sk_buff: reclaim any headroom and truncate it to zero length. */
    if (unlikely(skb_headroom(m)))
        skb_push(m, skb_headroom(m));
    skb_trim(m, 0);

    /* TODO Support the slot flags (NS_MOREFRAG, NS_INDIRECT). */
    skb_copy_to_linear_data(m, addr, len); // skb_store_bits(m, 0, addr, len);
    skb_put(m, len);
    NM_ATOMIC_INC(&m->users);
    m->dev = ifp;
    /* Tell generic_ndo_start_xmit() to pass this mbuf to the driver. */
    m->priority = NM_MAGIC_PRIORITY_TX;
    skb_set_queue_mapping(m, ring_nr);

    ret = dev_queue_xmit(m);

    if (likely(ret == NET_XMIT_SUCCESS)) {
        return 0;
    }
    if (unlikely(ret != NET_XMIT_DROP)) {
        /* If something goes wrong in the TX path, there is nothing
           intelligent we can do (for now) apart from error reporting. */
        RD(5, "dev_queue_xmit failed: HARD ERROR %d", ret);
    }
    return -1;
}
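dev_queue_xmit() reports NET_XMIT_SUCCESS, NET_XMIT_DROP, or NET_XMIT_CN (congestion notification); generic_xmit_frame() above folds everything except success into -1. A sketch of the slightly finer-grained mapping other callers use, treating congestion as success the way the VLAN transmit paths later in this collection do:

/* Map dev_queue_xmit()'s NET_XMIT_* codes onto an errno-style result.
 * NET_XMIT_CN signals congestion; most callers still count the frame
 * as sent. */
static int xmit_status_to_errno(int ret)
{
	if (ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)
		return 0;
	return -ENOBUFS;	/* NET_XMIT_DROP or a hard error */
}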
Example #3
void x25_send_frame(struct sk_buff *skb, struct x25_neigh *nb)
{
	unsigned char *dptr;

	skb_reset_network_header(skb);

	switch (nb->dev->type) {
		case ARPHRD_X25:
			dptr  = skb_push(skb, 1);
			*dptr = 0x00;
			break;

#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
		case ARPHRD_ETHER:
			kfree_skb(skb);
			return;
#endif
		default:
			kfree_skb(skb);
			return;
	}

	skb->protocol = htons(ETH_P_X25);
	skb->dev      = nb->dev;

	dev_queue_xmit(skb);
}
Example #4
static void cisco_keepalive_send(struct net_device *dev, u32 type,
				 u32 par1, u32 par2)
{
	struct sk_buff *skb;
	cisco_packet *data;

	skb = dev_alloc_skb(sizeof(hdlc_header) + sizeof(cisco_packet));
	if (!skb) {
		printk(KERN_WARNING
		       "%s: Memory squeeze on cisco_keepalive_send()\n",
		       dev->name);
		return;
	}
	skb_reserve(skb, 4);
	cisco_hard_header(skb, dev, CISCO_KEEPALIVE, NULL, NULL, 0);
	data = (cisco_packet*)skb->tail;

	data->type = htonl(type);
	data->par1 = htonl(par1);
	data->par2 = htonl(par2);
	data->rel = 0xFFFF;
	/* we will need do_div here if 1000 % HZ != 0 */
	data->time = htonl((jiffies - INITIAL_JIFFIES) * (1000 / HZ));

	skb_put(skb, sizeof(cisco_packet));
	skb->priority = TC_PRIO_CONTROL;
	skb->dev = dev;
	skb->nh.raw = skb->data;

	dev_queue_xmit(skb);
}
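The (cisco_packet*)skb->tail cast above fills the packet through the raw tail pointer and only afterwards accounts for it with skb_put(); Example #12 below sidesteps the tail pointer by computing skb->data + 4. A third form, sketched here with the field layout assumed from this example, leans on skb_put() returning a pointer to the bytes it just appended:

/* Sketch: append first, then fill through skb_put()'s return value, so the
 * length accounting and the write can't drift apart. Types as above. */
static void cisco_keepalive_fill(struct sk_buff *skb, u32 type, u32 par1,
				 u32 par2)
{
	cisco_packet *data = (cisco_packet *)skb_put(skb, sizeof(cisco_packet));

	data->type = htonl(type);
	data->par1 = htonl(par1);
	data->par2 = htonl(par2);
	data->rel  = 0xFFFF;
	/* we will need do_div here if 1000 % HZ != 0 */
	data->time = htonl((jiffies - INITIAL_JIFFIES) * (1000 / HZ));
}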
Example #5
static void
alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
{
	struct sk_buff *skb = NULL;
	struct learning_pkt pkt;
	char *data = NULL;
	int i;
	unsigned int size = sizeof(struct learning_pkt);

	memset(&pkt, 0, size);
	memcpy(pkt.mac_dst, mac_addr, ETH_ALEN);
	memcpy(pkt.mac_src, mac_addr, ETH_ALEN);
	pkt.type = __constant_htons(ETH_P_LOOP);

	for (i = 0; i < MAX_LP_RETRY; i++) {
		skb = dev_alloc_skb(size);
		if (!skb)
			return;

		data = skb_put(skb, size);
		memcpy(data, &pkt, size);
		skb->mac.raw = data;
		skb->nh.raw = data + ETH_HLEN;
		skb->protocol = pkt.type;
		skb->priority = TC_PRIO_CONTROL;
		skb->dev = slave->dev;
		dev_queue_xmit(skb);
	}
}
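The skb->mac.raw and skb->nh.raw assignments date this example to kernels before the 2.6.22 sk_buff rework, after which those fields became offsets behind accessor functions. A hedged sketch of the equivalent calls on later kernels, assuming skb->data still points at the MAC header as it does above:

/* Post-2.6.22 equivalent of the raw-pointer header assignments above. */
static void learning_pkt_set_headers(struct sk_buff *skb)
{
	skb_reset_mac_header(skb);		/* mac header = skb->data */
	skb_set_network_header(skb, ETH_HLEN);	/* network header follows it */
}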
Example #6
static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct sk_buff *nskb;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* Transmit function may have to reallocate the original SKB */
	nskb = p->xmit(skb, dev);
	if (!nskb)
		return NETDEV_TX_OK;

	/* SKB for netpoll still need to be mangled with the protocol-specific
	 * tag to be successfully transmitted
	 */
	if (unlikely(netpoll_tx_running(dev)))
		return dsa_netpoll_send_skb(p, nskb);

	/* Queue the SKB for transmission on the parent interface, but
	 * do not modify its EtherType
	 */
	nskb->dev = p->parent->dst->master_netdev;
	dev_queue_xmit(nskb);

	return NETDEV_TX_OK;
}
Example #7
/* TODO: only Linux is supported for now */
void wlan_offchan_send_data_frame(struct ieee80211_node *ni, struct net_device *netdev)
{
#if defined(LINUX) || defined(__linux__)
    struct ieee80211vap *vap = ni->ni_vap;
    struct ieee80211com *ic = ni->ni_ic;
    wbuf_t wbuf;
    struct ieee80211_qosframe *qwh;
    const u_int8_t dst[6] = {0x00, 0x02, 0x03, 0x04, 0x05, 0x06};
    struct sk_buff *skb;

    wbuf = wbuf_alloc(ic->ic_osdev, WBUF_TX_DATA, 1000);
    if (wbuf == NULL)
    {
        return;
    }
    ieee80211_prepare_qosnulldata(ni, wbuf, WME_AC_VO);

    qwh = (struct ieee80211_qosframe *)wbuf_header(wbuf);
    ieee80211_send_setup(vap, ni, (struct ieee80211_frame *)qwh,
        IEEE80211_FC0_TYPE_DATA,
        vap->iv_myaddr, /* SA */
        dst,            /* DA */
        ni->ni_bssid);

    wbuf_set_pktlen(wbuf, 1000);
    /* force with NONPAUSE_TID */
    wbuf_set_tid(wbuf, OFFCHAN_EXT_TID_NONPAUSE);

    skb = (struct sk_buff *)wbuf;	/* wbuf_t wraps an sk_buff on Linux builds */
    skb->dev = netdev;

    dev_queue_xmit(skb);
#endif
}
Example #8
netdev_tx_t
vhost_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct vhost_priv *vp;
    struct vr_interface *vifp;
    struct net_device *pdev;
    unsigned int len = skb->len;	/* both transmit paths below consume the skb */

    vp = netdev_priv(dev);
    vifp = vp->vp_vifp;
    if (!vifp) {
        if (!(pdev = vp->vp_phys_dev)) {
            (void)__sync_fetch_and_add(&dev->stats.tx_dropped, 1);
            kfree_skb(skb);
            return NETDEV_TX_OK;
        }

        skb->dev = pdev;
        dev_queue_xmit(skb);
    } else {
        linux_to_vr(vifp, skb);
    }

    (void)__sync_fetch_and_add(&dev->stats.tx_packets, 1);
    (void)__sync_fetch_and_add(&dev->stats.tx_bytes, len);

    return NETDEV_TX_OK;
}
Example #9
int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb)
{
	u8 mac_da[ETH_ALEN], mac_sa[ETH_ALEN], dsap;
	struct sk_buff *nskb;
	int rc = 1;
	u32 data_size;

	llc_pdu_decode_sa(skb, mac_da);
	llc_pdu_decode_da(skb, mac_sa);
	llc_pdu_decode_ssap(skb, &dsap);

	/* The test request command is type U (llc_len = 3) */
	data_size = ntohs(eth_hdr(skb)->h_proto) - 3;
	nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, data_size);
	if (!nskb)
		goto out;
	llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, dsap,
			    LLC_PDU_RSP);
	llc_pdu_init_as_test_rsp(nskb, skb);
	rc = llc_mac_hdr_init(nskb, mac_sa, mac_da);
	if (likely(!rc))
		rc = dev_queue_xmit(nskb);
out:
	return rc;
}
Example #10
void x25_terminate_link(struct x25_neigh *nb)
{
	struct sk_buff *skb;
	unsigned char *ptr;

#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
	if (nb->dev->type == ARPHRD_ETHER)
		return;
#endif
	if (nb->dev->type != ARPHRD_X25)
		return;

	skb = alloc_skb(1, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "x25_dev: out of memory\n");
		return;
	}

	ptr  = skb_put(skb, 1);
	*ptr = X25_IFACE_DISCONNECT;

	skb->protocol = htons(ETH_P_X25);
	skb->dev      = nb->dev;
	dev_queue_xmit(skb);
}
Example #11
static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
						    struct net_device *dev)
{
	int i = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
	u16 vlan_tci;
	unsigned int len;
	int ret;

	vlan_tci = vlan_dev_info(dev)->vlan_id;
	vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
	skb = __vlan_hwaccel_put_tag(skb, vlan_tci);

	skb->dev = vlan_dev_info(dev)->real_dev;
	len = skb->len;
	ret = dev_queue_xmit(skb);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		txq->tx_packets++;
		txq->tx_bytes += len;
	} else
		txq->tx_dropped++;

	return ret;
}
Example #12
static void cisco_keepalive_send(struct net_device *dev, u32 type,
				 __be32 par1, __be32 par2)
{
	struct sk_buff *skb;
	struct cisco_packet *data;

	skb = dev_alloc_skb(sizeof(struct hdlc_header) +
			    sizeof(struct cisco_packet));
	if (!skb) {
		netdev_warn(dev, "Memory squeeze on cisco_keepalive_send()\n");
		return;
	}
	skb_reserve(skb, 4);
	cisco_hard_header(skb, dev, CISCO_KEEPALIVE, NULL, NULL, 0);
	data = (struct cisco_packet*)(skb->data + 4);

	data->type = htonl(type);
	data->par1 = par1;
	data->par2 = par2;
	data->rel = cpu_to_be16(0xFFFF);
	/* we will need do_div here if 1000 % HZ != 0 */
	data->time = htonl((jiffies - INITIAL_JIFFIES) * (1000 / HZ));

	skb_put(skb, sizeof(struct cisco_packet));
	skb->priority = TC_PRIO_CONTROL;
	skb->dev = dev;
	skb_reset_network_header(skb);

	dev_queue_xmit(skb);
}
Example #13
static int __dev_queue_push_xmit(struct sk_buff *skb)
{
	/* Restore the link-layer header in front of skb->data before
	 * handing the frame back to the device. */
	skb_push(skb, ETH_HLEN);
	dev_queue_xmit(skb);

	return 0;
}
Example #14
static void cisco_keepalive_send(hdlc_device *hdlc, u32 type,
				 u32 par1, u32 par2)
{
	struct sk_buff *skb;
	cisco_packet *data;

	skb = dev_alloc_skb(sizeof(hdlc_header)+sizeof(cisco_packet));
	if (!skb) {
		printk(KERN_WARNING "%s: Memory squeeze on cisco_keepalive_send()\n",
			       hdlc_to_name(hdlc));
		return;
	}
	skb_reserve(skb, 4);
	cisco_hard_header(skb, hdlc_to_dev(hdlc), CISCO_KEEPALIVE,
			  NULL, NULL, 0);
	data = (cisco_packet*)skb->tail;

	data->type = htonl(type);
	data->par1 = htonl(par1);
	data->par2 = htonl(par2);
	data->rel = 0xFFFF;
	data->time = htonl(jiffies * 1000 / HZ);

	skb_put(skb, sizeof(cisco_packet));
	skb->priority = TC_PRIO_CONTROL;
	skb->dev = hdlc_to_dev(hdlc);

	dev_queue_xmit(skb);
}
Example #15
/**
 * tipc_l2_send_msg - send a TIPC packet out over an L2 interface
 * @skb: the packet to be sent
 * @b: the bearer through which the packet is to be sent
 * @dest: peer destination address
 */
int tipc_l2_send_msg(struct net *net, struct sk_buff *skb,
		     struct tipc_bearer *b, struct tipc_media_addr *dest)
{
	struct net_device *dev;
	int delta;
	void *tipc_ptr;

	dev = (struct net_device *)rcu_dereference_rtnl(b->media_ptr);
	if (!dev)
		return 0;

	/* Send RESET message even if bearer is detached from device */
	tipc_ptr = rcu_dereference_rtnl(dev->tipc_ptr);
	if (unlikely(!tipc_ptr && !msg_is_reset(buf_msg(skb))))
		goto drop;

	delta = dev->hard_header_len - skb_headroom(skb);
	if ((delta > 0) &&
	    pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC))
		goto drop;

	skb_reset_network_header(skb);
	skb->dev = dev;
	skb->protocol = htons(ETH_P_TIPC);
	dev_hard_header(skb, dev, ETH_P_TIPC, dest->value,
			dev->dev_addr, skb->len);
	dev_queue_xmit(skb);
	return 0;
drop:
	kfree_skb(skb);
	return 0;
}
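tipc_l2_send_msg() open-codes the headroom check and calls pskb_expand_head() itself. skb_cow_head() bundles a similar test-and-reallocate step (and additionally unshares a cloned header); a hedged sketch of the same guarantee, with the helper name invented for illustration:

/* Ensure the device's hard header fits in front of skb->data. */
static int tipc_ensure_headroom(struct sk_buff *skb, struct net_device *dev)
{
	if (skb_cow_head(skb, dev->hard_header_len))
		return -ENOMEM;		/* caller drops the frame */
	return 0;
}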
Example #16
void x25_establish_link(struct x25_neigh *nb)
{
	struct sk_buff *skb;
	unsigned char *ptr;

	switch (nb->dev->type) {
	case ARPHRD_X25:
		if ((skb = alloc_skb(1, GFP_ATOMIC)) == NULL) {
			printk(KERN_ERR "x25_dev: out of memory\n");
			return;
		}
		ptr  = skb_put(skb, 1);
		*ptr = X25_IFACE_CONNECT;
		break;

#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
	case ARPHRD_ETHER:
		return;
#endif
	default:
		return;
	}

	skb->protocol = htons(ETH_P_X25);
	skb->dev      = nb->dev;

	dev_queue_xmit(skb);
}
Example #17
int wdl_modify_and_transmit(struct sk_buff *skb, struct net_device *netdev,
			    uint8_t dest_mac[6], uint8_t src_mac[6],
			    struct mymanet_header mymanethdr)
{

	int orig_header_room = 0;

	uint8_t eth_type_manifold[2];
	eth_type_manifold[0] = 0x33;
	eth_type_manifold[1] = 0x33;


	orig_header_room = (int)(skb->data - skb->head);
	if (orig_header_room < (ETHERNET_HEADER_SIZE + MANIFOLD_HEADER_SIZE)) {
		printk(KERN_ALERT "ERROR: insufficient headroom for constructing Manifold packet\n");
		kfree_skb(skb);
		return -1;
	}

	/* The timestamp travels on the wire in network byte order. */
	mymanethdr.timestamp = htonl(mymanethdr.timestamp);

#if MYMANET_STORE_PATH	

	if(mymanethdr.hops_remaining == 3) {
		memcpy(mymanethdr.hop1_mac, src_mac, 6);
	} else if (mymanethdr.hops_remaining == 2) {
		memcpy(mymanethdr.hop2_mac, src_mac, 6);
	} else if (mymanethdr.hops_remaining == 1) {
		memcpy(mymanethdr.hop3_mac, src_mac, 6);	  
	} 

#endif	  

	memcpy(skb_push(skb, MANIFOLD_HEADER_SIZE), &mymanethdr, MANIFOLD_HEADER_SIZE);

	memcpy(skb_push(skb, 2), eth_type_manifold, 2);
	memcpy(skb_push(skb, 6), src_mac, 6);
	memcpy(skb_push(skb, 6), dest_mac, 6);

	add_or_update_stat_entry(mymanethdr.final_destination, 2, mymanethdr.session_id, mymanethdr.final_destination);

#if DEBUG
	printk(KERN_ALERT "MODIFIED and TRANSMITTED pkt from  %x:%x:%x:%x:%x:%x to %x:%x:%x:%x:%x:%x type %x:%x", 
			src_mac[0], src_mac[1], src_mac[2], src_mac[3], src_mac[4], src_mac[5],
			dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5], 
			eth_type_manifold[0], eth_type_manifold[1]);
#endif
			
	if (netdev->flags & IFF_UP)
		return dev_queue_xmit(skb);

	printk(KERN_ALERT "Device was DOWN!\n");
	return NETDEV_TX_BUSY;
}
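The headroom test in wdl_modify_and_transmit() computes skb->data - skb->head by hand; skb_headroom() is the standard accessor for exactly that difference. A minimal sketch under the same project-specific size macros (ETHERNET_HEADER_SIZE, MANIFOLD_HEADER_SIZE):

/* Sketch: the open-coded headroom arithmetic above, via the helper. */
static bool wdl_has_headroom(const struct sk_buff *skb)
{
	return skb_headroom(skb) >= ETHERNET_HEADER_SIZE + MANIFOLD_HEADER_SIZE;
}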
Example #18
int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device_stats *stats = vlan_dev_get_stats(dev);
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);

	/* Handle non-VLAN frames if they are sent to us, for example by DHCP.
	 *
	 * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
	 * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
	 */

	if (veth->h_vlan_proto != __constant_htons(ETH_P_8021Q)) {
		int orig_headroom = skb_headroom(skb);
		unsigned short veth_TCI;

		/* This is not a VLAN frame...but we can fix that! */
		VLAN_DEV_INFO(dev)->cnt_encap_on_xmit++;

#ifdef VLAN_DEBUG
		printk(VLAN_DBG "%s: proto to encap: 0x%hx (hbo)\n",
			__FUNCTION__, htons(veth->h_vlan_proto));
#endif
		/* Construct the second two bytes. This field looks something
		 * like:
		 * usr_priority: 3 bits	 (high bits)
		 * CFI		 1 bit
		 * VLAN ID	 12 bits (low bits)
		 */
		veth_TCI = VLAN_DEV_INFO(dev)->vlan_id;
		veth_TCI |= vlan_dev_get_egress_qos_mask(dev, skb);

		skb = __vlan_put_tag(skb, veth_TCI);
		if (!skb) {
			stats->tx_dropped++;
			return 0;
		}

		if (orig_headroom < VLAN_HLEN) {
			VLAN_DEV_INFO(dev)->cnt_inc_headroom_on_tx++;
		}
	}

#ifdef VLAN_DEBUG
	printk(VLAN_DBG "%s: about to send skb: %p to dev: %s\n",
		__FUNCTION__, skb, skb->dev->name);
	printk(VLAN_DBG "  %2hx.%2hx.%2hx.%2xh.%2hx.%2hx %2hx.%2hx.%2hx.%2hx.%2hx.%2hx %4hx %4hx %4hx\n",
	       veth->h_dest[0], veth->h_dest[1], veth->h_dest[2], veth->h_dest[3], veth->h_dest[4], veth->h_dest[5],
	       veth->h_source[0], veth->h_source[1], veth->h_source[2], veth->h_source[3], veth->h_source[4], veth->h_source[5],
	       veth->h_vlan_proto, veth->h_vlan_TCI, veth->h_vlan_encapsulated_proto);
#endif

	stats->tx_packets++; /* for statistics only */
	stats->tx_bytes += skb->len;

	skb->dev = VLAN_DEV_INFO(dev)->real_dev;
	dev_queue_xmit(skb);

	return 0;
}
Example #19
static void
rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
{
	if (bridge_dev) {
		skb->dev = bridge_dev;
		dev_queue_xmit(skb);
	}
}
Example #20
static int p8023_request(struct datalink_proto *dl,
			 struct sk_buff *skb, unsigned char *dest_node)
{
	struct net_device *dev = skb->dev;

	dev_hard_header(skb, dev, ETH_P_802_3, dest_node, NULL, skb->len);
	return dev_queue_xmit(skb);
}
Example #21
/*
 * Prepends an ISI header and sends a datagram.
 */
static int pn_send(struct sk_buff *skb, struct net_device *dev,
                   u16 dst, u16 src, u8 res, u8 irq)
{
    struct phonethdr *ph;
    int err;

    if (skb->len + 2 > 0xffff /* Phonet length field limit */ ||
            skb->len + sizeof(struct phonethdr) > dev->mtu) {
        err = -EMSGSIZE;
        goto drop;
    }

    /* Broadcast sending is not implemented */
    if (pn_addr(dst) == PNADDR_BROADCAST) {
        err = -EOPNOTSUPP;
        goto drop;
    }

    skb_reset_transport_header(skb);
    WARN_ON(skb_headroom(skb) & 1); /* HW assumes word alignment */
    skb_push(skb, sizeof(struct phonethdr));
    skb_reset_network_header(skb);
    ph = pn_hdr(skb);
    ph->pn_rdev = pn_dev(dst);
    ph->pn_sdev = pn_dev(src);
    ph->pn_res = res;
    ph->pn_length = __cpu_to_be16(skb->len + 2 - sizeof(*ph));
    ph->pn_robj = pn_obj(dst);
    ph->pn_sobj = pn_obj(src);

    skb->protocol = htons(ETH_P_PHONET);
    skb->priority = 0;
    skb->dev = dev;

    if (pn_addr(src) == pn_addr(dst)) {
        skb_reset_mac_header(skb);
        skb->pkt_type = PACKET_LOOPBACK;
        skb_orphan(skb);
        if (irq)
            netif_rx(skb);
        else
            netif_rx_ni(skb);
        err = 0;
    } else {
        err = dev_hard_header(skb, dev, ntohs(skb->protocol),
                              NULL, NULL, skb->len);
        if (err < 0) {
            err = -EHOSTUNREACH;
            goto drop;
        }
        err = dev_queue_xmit(skb);
    }

    return err;
drop:
    kfree_skb(skb);
    return err;
}
Example #22
static int tcf_mirred(struct sk_buff *skb, struct tc_action *a,
		      struct tcf_result *res)
{
	struct tcf_mirred *m = a->priv;
	struct net_device *dev;
	struct sk_buff *skb2 = NULL;
	u32 at = G_TC_AT(skb->tc_verd);

	spin_lock(&m->tcf_lock);

	dev = m->tcfm_dev;
	m->tcf_tm.lastuse = jiffies;

	if (!(dev->flags&IFF_UP) ) {
		if (net_ratelimit())
			printk("mirred to Houston: device %s is gone!\n",
			       dev->name);
bad_mirred:
		if (skb2 != NULL)
			kfree_skb(skb2);
		m->tcf_qstats.overlimits++;
		m->tcf_bstats.bytes += qdisc_pkt_len(skb);
		m->tcf_bstats.packets++;
		spin_unlock(&m->tcf_lock);
		/* should we be asking for packet to be dropped?
		 * may make sense for redirect case only
		*/
		return TC_ACT_SHOT;
	}

	skb2 = skb_act_clone(skb, GFP_ATOMIC);
	if (skb2 == NULL)
		goto bad_mirred;
	if (m->tcfm_eaction != TCA_EGRESS_MIRROR &&
	    m->tcfm_eaction != TCA_EGRESS_REDIR) {
		if (net_ratelimit())
			printk("tcf_mirred unknown action %d\n",
			       m->tcfm_eaction);
		goto bad_mirred;
	}

	m->tcf_bstats.bytes += qdisc_pkt_len(skb2);
	m->tcf_bstats.packets++;
	if (!(at & AT_EGRESS))
		if (m->tcfm_ok_push)
			skb_push(skb2, skb2->dev->hard_header_len);

	/* mirror is always swallowed */
	if (m->tcfm_eaction != TCA_EGRESS_MIRROR)
		skb2->tc_verd = SET_TC_FROM(skb2->tc_verd, at);

	skb2->dev = dev;
	skb2->iif = skb->dev->ifindex;
	dev_queue_xmit(skb2);
	spin_unlock(&m->tcf_lock);
	return m->tcf_action;
}
Example #23
static int pEII_request(struct datalink_proto *dl,
			struct sk_buff *skb, unsigned char *dest_node)
{
	struct net_device *dev = skb->dev;

	skb->protocol = htons(ETH_P_IPX);
	dev_hard_header(skb, dev, ETH_P_IPX, dest_node, NULL, skb->len);
	return dev_queue_xmit(skb);
}
Example #24
File: aoenet.c Project: E-LLP/n900
void
aoenet_xmit(struct sk_buff_head *queue)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(queue, skb, tmp) {
		__skb_unlink(skb, queue);
		dev_queue_xmit(skb);
	}
}
Example #25
static void arp_send_q(struct arp_table *entry, unsigned char *hw_dest)
{
	struct sk_buff *skb;

	unsigned long flags;

	/*
	 *	Empty the entire queue, building its data up ready to send
	 */
	
	if(!(entry->flags&ATF_COM))
	{
		printk("arp_send_q: incomplete entry for %s\n",
				in_ntoa(entry->ip));
		return;
	}

	save_flags(flags);
	
	cli();
	while((skb = skb_dequeue(&entry->skb)) != NULL)
	{
		IS_SKB(skb);
		skb_device_lock(skb);
		restore_flags(flags);
		if(!skb->dev->rebuild_header(skb->data,skb->dev,skb->raddr,skb))
		{
			skb->arp  = 1;
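			/* 1.x/2.0-era API: dev_queue_xmit() took the device
			   and a queueing priority as explicit arguments. */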
			if(skb->sk==NULL)
				dev_queue_xmit(skb, skb->dev, 0);
			else
				dev_queue_xmit(skb,skb->dev,skb->sk->priority);
		}
		else
		{
			/* This routine is only ever called when 'entry' is
			   complete. Thus this can't fail. */
			printk("arp_send_q: The impossible occurred. Please notify Alan.\n");
			printk("arp_send_q: active entity %s\n",in_ntoa(entry->ip));
			printk("arp_send_q: failed to find %s\n",in_ntoa(skb->raddr));
		}
	}
	restore_flags(flags);
}
Example #26
static void aarp_send_reply(struct device *dev, struct at_addr *us, struct at_addr *them, unsigned char *sha)
{
	int len=dev->hard_header_len+sizeof(struct elapaarp)+aarp_dl->header_length;
	struct sk_buff *skb=alloc_skb(len, GFP_ATOMIC);
	struct elapaarp *eah;
	
	if(skb==NULL)
		return;
	
	/*
	 *	Set up the buffer.
	 */		

	skb_reserve(skb,dev->hard_header_len+aarp_dl->header_length);
	eah		=	(struct elapaarp *)skb_put(skb,sizeof(struct elapaarp));	 
	skb->protocol   =       htons(ETH_P_ATALK);
	skb->nh.raw     =       skb->h.raw = (void *) eah;
	skb->dev	=	dev;
	
	/*
	 *	Set up the ARP.
	 */
	 
	eah->hw_type	=	htons(AARP_HW_TYPE_ETHERNET);
	eah->pa_type	=	htons(ETH_P_ATALK);
	eah->hw_len	=	ETH_ALEN;	
	eah->pa_len	=	AARP_PA_ALEN;
	eah->function	=	htons(AARP_REPLY);
	
	memcpy(eah->hw_src, dev->dev_addr, ETH_ALEN);
	
	eah->pa_src_zero=	0;
	eah->pa_src_net	=	us->s_net;
	eah->pa_src_node=	us->s_node;
	
	if(sha==NULL)
		memset(eah->hw_dst, '\0', ETH_ALEN);
	else
		memcpy(eah->hw_dst, sha, ETH_ALEN);
	
	eah->pa_dst_zero=	0;
	eah->pa_dst_net	=	them->s_net;
	eah->pa_dst_node=	them->s_node;
	
	/*
	 *	Add ELAP headers and set target to the AARP multicast.
	 */
	 
	aarp_dl->datalink_header(aarp_dl, skb, sha);	

	/*
	 *	Send it.
	 */	
	dev_queue_xmit(skb);
	
}
Example #27
static void send_hsr_supervision_frame(struct net_device *hsr_dev, u8 type)
{
	struct hsr_priv *hsr_priv;
	struct sk_buff *skb;
	int hlen, tlen;
	struct hsr_sup_tag *hsr_stag;
	struct hsr_sup_payload *hsr_sp;
	unsigned long irqflags;

	hlen = LL_RESERVED_SPACE(hsr_dev);
	tlen = hsr_dev->needed_tailroom;
	skb = alloc_skb(hsr_pad(sizeof(struct hsr_sup_payload)) + hlen + tlen,
			GFP_ATOMIC);

	if (skb == NULL)
		return;

	hsr_priv = netdev_priv(hsr_dev);

	skb_reserve(skb, hlen);

	skb->dev = hsr_dev;
	skb->protocol = htons(ETH_P_PRP);
	skb->priority = TC_PRIO_CONTROL;

	if (dev_hard_header(skb, skb->dev, ETH_P_PRP,
			    hsr_priv->sup_multicast_addr,
			    skb->dev->dev_addr, skb->len) < 0)
		goto out;

	/* dev_hard_header() placed the Ethernet header at skb->data; step
	 * past it to fill in the supervision tag, then restore it with
	 * skb_push() below. */
	skb_pull(skb, sizeof(struct ethhdr));
	hsr_stag = (typeof(hsr_stag)) skb->data;

	set_hsr_stag_path(hsr_stag, 0xf);
	set_hsr_stag_HSR_Ver(hsr_stag, 0);

	spin_lock_irqsave(&hsr_priv->seqnr_lock, irqflags);
	hsr_stag->sequence_nr = htons(hsr_priv->sequence_nr);
	hsr_priv->sequence_nr++;
	spin_unlock_irqrestore(&hsr_priv->seqnr_lock, irqflags);

	hsr_stag->HSR_TLV_Type = type;
	hsr_stag->HSR_TLV_Length = 12;

	skb_push(skb, sizeof(struct ethhdr));

	/* Payload: MacAddressA */
	hsr_sp = (typeof(hsr_sp)) skb_put(skb, sizeof(*hsr_sp));
	ether_addr_copy(hsr_sp->MacAddressA, hsr_dev->dev_addr);

	dev_queue_xmit(skb);
	return;

out:
	kfree_skb(skb);
}
Example #28
int virt_send_ack(struct virt_priv *virt, struct device_node *slave, 
        struct remote_link *link)
{
    struct sk_buff *skb;
    struct net_device *dev = slave->dev;
    struct tunhdr *tunhdr;
    struct pathinfo *path;
    __be16 sport;

    unsigned alloc_size = sizeof(struct tunhdr) + sizeof(struct udphdr) +
        sizeof(struct iphdr) + LL_RESERVED_SPACE(dev);

    path = lookup_pathinfo(&virt->network, slave, link);
    if(!path)
        return -EINVAL;

    skb = alloc_skb(alloc_size, GFP_ATOMIC);
    if(!skb) {
        virt_path_put(path);
        return -ENOMEM;
    }

    skb_reserve(skb, alloc_size);

    tunhdr = virt_build_tunhdr(skb, NULL, NULL);
    virt_finish_tunhdr(tunhdr, path, NULL, link->node);

    /* TODO: We may want to split traffic among different ports, which
     * may change how we send ACKs.  For now, everything uses the same
     * source port. */
    sport = htons(virt_tunnel_source_port());
    virt_build_udp_header(skb, sport, link->rif.data_port);
    virt_build_ip_header(skb, slave->lif.ip4, link->rif.ip4);

    skb->dev = dev;
    skb->protocol = htons(ETH_P_IP);

    dev_hard_header(skb, dev, ETH_P_IP, slave->next_hop_addr, dev->dev_addr, skb->len);
    skb_reset_mac_header(skb);

    /* Update link statistics -- these may not be accurate if the packet gets
     * dropped after dev_queue_xmit. */
    slave->stats.tx_packets++;
    slave->stats.tx_bytes += skb->len;

    /* Update device statistics. */
    virt->stats.tx_packets++;
    virt->stats.tx_bytes += skb->len;

    /* Decrement refcnt. */
    virt_path_put(path);

    dev_queue_xmit(skb);

    return 0;
}
Example #29
File: slave.c Project: Abioy/kasan
static netdev_tx_t dsa_slave_notag_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);

	skb->dev = p->parent->dst->master_netdev;
	dev_queue_xmit(skb);

	return NETDEV_TX_OK;
}
Example #30
static int ztdeth_flush(void)
{
	struct sk_buff *skb;

	/* Handle all transmissions now */
	while ((skb = skb_dequeue(&skbs))) {
		dev_queue_xmit(skb);
	}
	return 0;
}
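ztdeth_flush() only drains the queue; for context, a minimal sketch of the producer side it pairs with, assuming the same module-level skbs list (initialised elsewhere with skb_queue_head_init()) and with the helper name invented for illustration:

/* Hypothetical producer: park a frame now, transmit it in ztdeth_flush(). */
static void ztdeth_defer_xmit(struct sk_buff *skb)
{
	skb_queue_tail(&skbs, skb);	/* takes the list lock internally */
}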