Example #1
static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
{
	panic("Disabled");
#if 0 // AKAROS_PORT
	struct sk_buff *skb;
	struct ethhdr *ethh;
	unsigned char *packet;
	unsigned int packet_size = MLX4_LOOPBACK_TEST_PAYLOAD;
	unsigned int i;
	int err;


	/* build the pkt before xmit */
	skb = netdev_alloc_skb(priv->dev,
			       MLX4_LOOPBACK_TEST_PAYLOAD + ETHERHDRSIZE + NET_IP_ALIGN);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, NET_IP_ALIGN);

	ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr));
	packet	= (unsigned char *)skb_put(skb, packet_size);
	memcpy(ethh->h_dest, priv->dev->ea, Eaddrlen);
	eth_zero_addr(ethh->h_source);
	ethh->h_proto = cpu_to_be16(ETH_P_ARP);
	skb_set_mac_header(skb, 0);
	for (i = 0; i < packet_size; ++i)	/* fill our packet */
		packet[i] = (unsigned char)(i & 0xff);

	/* xmit the pkt */
	err = mlx4_en_xmit(skb, priv->dev);
	return err;
#endif
}
Example #2
/*
 * Description: This function is called to send up the packet buffer
 *              to the IP stack.
 *
 * Input:       emfi - EMF instance information
 *              skb  - Pointer to the packet buffer.
 */
void
emf_sendup(emf_info_t *emfi, struct sk_buff *skb)
{
	/* Called only when frame is received from one of the LAN ports */
	ASSERT(skb->dev->br_port);
	ASSERT(skb->protocol == __constant_htons(ETH_P_IP));

	/* Send the buffer as if the packet is being sent by bridge */
	skb->dev = emfi->br_dev;
	switch (skb->pkt_type) {
	case PACKET_MULTICAST:
	case PACKET_BROADCAST:
	case PACKET_HOST:
		break;
	case PACKET_OTHERHOST:
	default:
		skb->pkt_type = PACKET_HOST;
		break;
	}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
	skb_set_mac_header(skb, -ETH_HLEN);
#else
	skb->mac.raw = skb->data - ETH_HLEN;
#endif

	netif_rx(skb);

	return;
}
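
The #if/#else at the end of emf_sendup() shows the pre-2.6.22 way of pointing the MAC header in front of the data. As a hedged illustration only (the helper name is an assumption, not part of the driver), the same split could be hidden behind a small compatibility wrapper:

#include <linux/version.h>
#include <linux/skbuff.h>

/* Hypothetical helper (assumption): behaves like skb_set_mac_header() on
 * kernels that have it and falls back to the old skb->mac.raw field on
 * older kernels, mirroring the #if branch in emf_sendup() above. */
static inline void compat_set_mac_header(struct sk_buff *skb, int offset)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
	skb_set_mac_header(skb, offset);
#else
	skb->mac.raw = skb->data + offset;
#endif
}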
Example #3
static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipv6hdr *ip6h;
	const unsigned char *old_mac;
	int size = sizeof(struct ipv6hdr);
	int err;

	err = skb_cow_head(skb, size + skb->mac_len);
	if (err)
		goto out;

	__skb_push(skb, size);
	skb_reset_network_header(skb);

	old_mac = skb_mac_header(skb);
	skb_set_mac_header(skb, -skb->mac_len);
	memmove(skb_mac_header(skb), old_mac, skb->mac_len);

	xfrm6_beet_make_header(skb);

	ip6h = ipv6_hdr(skb);
	ip6h->payload_len = htons(skb->len - size);
	ipv6_addr_copy(&ip6h->daddr, (struct in6_addr *) &x->sel.daddr.a6);
	ipv6_addr_copy(&ip6h->saddr, (struct in6_addr *) &x->sel.saddr.a6);
	err = 0;
out:
	return err;
}
Example #4
static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
{
	struct sk_buff *skb;
	struct ethhdr *ethh;
	unsigned char *packet;
	unsigned int packet_size = MLX4_LOOPBACK_TEST_PAYLOAD;
	unsigned int i;
	int err;


	/* build the pkt before xmit */
	skb = netdev_alloc_skb(priv->dev, MLX4_LOOPBACK_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN);
	if (!skb) {
		en_err(priv, "-LOOPBACK_TEST_XMIT- failed to create skb for xmit\n");
		return -ENOMEM;
	}
	skb_reserve(skb, NET_IP_ALIGN);

	ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr));
	packet	= (unsigned char *)skb_put(skb, packet_size);
	memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN);
	memset(ethh->h_source, 0, ETH_ALEN);
	ethh->h_proto = htons(ETH_P_ARP);
	skb_set_mac_header(skb, 0);
	for (i = 0; i < packet_size; ++i)	/* fill our packet */
		packet[i] = (unsigned char)(i & 0xff);

	/* xmit the pkt */
	err = mlx4_en_xmit(skb, priv->dev);
	return err;
}
Example #5
static int port_net_recv_req(struct ccci_port *port, struct ccci_request* req)
{
	struct sk_buff *skb = req->skb;
	struct net_device *dev = ((struct netdev_entity *)port->private_data)->ndev;
	unsigned int packet_type;
	int skb_len = req->skb->len;
	
	CCCI_DBG_MSG(port->modem->index, NET, "incoming on CH%d\n", port->rx_ch);
	list_del(&req->entry); // dequeue from queue's list
	skb_pull(skb, sizeof(struct ccci_header));
	packet_type = skb->data[0] & 0xF0;
	ccmni_make_etherframe(skb->data-ETH_HLEN, dev->dev_addr, packet_type);
	skb_set_mac_header(skb, -ETH_HLEN);
	skb->dev = dev;
	if(packet_type == 0x60) {            
		skb->protocol  = htons(ETH_P_IPV6);
	}
	else {
		skb->protocol  = htons(ETH_P_IP);
	}
	skb->ip_summed = CHECKSUM_NONE;
#if defined(CCCI_USE_NAPI) || defined(DUM_NAPI)
	netif_receive_skb(skb);
#else
	netif_rx(skb);
#endif
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb_len;
	req->policy = NOOP;
	ccci_free_req(req);
	wake_lock_timeout(&port->rx_wakelock, HZ);
	return 0;
}
Example #6
static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
{
	const unsigned char *old_mac;
	int err = -EINVAL;

	if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP)
		goto out;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;

	if (skb_cloned(skb) &&
	    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
		goto out;

	if (x->props.flags & XFRM_STATE_DECAP_DSCP)
		ipv4_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, ipip_hdr(skb));
	if (!(x->props.flags & XFRM_STATE_NOECN))
		ipip_ecn_decapsulate(skb);

	old_mac = skb_mac_header(skb);
	skb_set_mac_header(skb, -skb->mac_len);
	memmove(skb_mac_header(skb), old_mac, skb->mac_len);
	skb_reset_network_header(skb);
	err = 0;

out:
	return err;
}
Example #7
static __be16 myri_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct ethhdr *eth;
	unsigned char *rawp;

	skb_set_mac_header(skb, MYRI_PAD_LEN);
	skb_pull(skb, dev->hard_header_len);
	eth = eth_hdr(skb);

#ifdef DEBUG_HEADER
	DHDR(("myri_type_trans: "));
	dump_ehdr(eth);
#endif
	if (*eth->h_dest & 1) {
		if (memcmp(eth->h_dest, dev->broadcast, ETH_ALEN)==0)
			skb->pkt_type = PACKET_BROADCAST;
		else
			skb->pkt_type = PACKET_MULTICAST;
	} else if (dev->flags & (IFF_PROMISC|IFF_ALLMULTI)) {
		if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
			skb->pkt_type = PACKET_OTHERHOST;
	}

	if (ntohs(eth->h_proto) >= 1536)
		return eth->h_proto;

	rawp = skb->data;

	
	if (*(unsigned short *)rawp == 0xFFFF)
		return htons(ETH_P_802_3);

	
	return htons(ETH_P_802_2);
}
Example #8
int skb_from_pkt(void *pkt, u32 pkt_len, struct sk_buff **skb)
{
	*skb = alloc_skb(LL_MAX_HEADER + pkt_len, GFP_ATOMIC);
	if (!*skb) {
		log_err("Could not allocate a skb.");
		return -ENOMEM;
	}

	skb_reserve(*skb, LL_MAX_HEADER); /* Reserve space for Link Layer data. */
	skb_put(*skb, pkt_len); /* L3 + L4 + payload. */

	skb_set_mac_header(*skb, 0);
	skb_set_network_header(*skb, 0);
	skb_set_transport_header(*skb, net_hdr_size(pkt));

	(*skb)->ip_summed = CHECKSUM_UNNECESSARY;
	switch (get_l3_proto(pkt)) {
	case 6:
		(*skb)->protocol = htons(ETH_P_IPV6);
		break;
	case 4:
		(*skb)->protocol = htons(ETH_P_IP);
		break;
	default:
		log_err("Invalid mode: %u.", get_l3_proto(pkt));
		kfree_skb(*skb);
		return -EINVAL;
	}

	/* Copy packet content to skb. */
	memcpy(skb_network_header(*skb), pkt, pkt_len);

	return 0;
}
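
A hedged usage sketch for skb_from_pkt() above; receive_raw_packet() and handle_skb() are placeholder names, not part of the original module:

/* Sketch (assumption): minimal call site for skb_from_pkt(); 'pkt' points to
 * a raw L3 packet of 'pkt_len' bytes obtained elsewhere. */
static int receive_raw_packet(void *pkt, u32 pkt_len)
{
	struct sk_buff *skb;
	int error;

	error = skb_from_pkt(pkt, pkt_len, &skb);
	if (error)
		return error;	/* allocation failed or unknown L3 protocol */

	return handle_skb(skb);	/* hypothetical next processing step */
}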
Example #9
/*
 * i2400m_net_erx - pass a network packet to the stack (extended version)
 *
 * @i2400m: device descriptor
 * @skb: the skb where the packet is - the skb should be set to point
 *     at the IP packet; this function will add ethernet headers if
 *     needed.
 * @cs: packet type
 *
 * This is only used now for firmware >= v1.4. Note it is quite
 * similar to i2400m_net_rx() (used only for v1.3 firmware).
 *
 * This function is normally run from a thread context. However, we
 * still use netif_rx() instead of netif_receive_skb() as was
 * recommended on the mailing list. The reason is that in some stress
 * tests, when sending/receiving a lot of data, we seem to hit a softlock
 * in the kernel's TCP implementation [around tcp_delay_timer()]. Using
 * netif_rx() took care of the issue.
 *
 * It is, of course, still an open question why running with
 * netif_receive_skb() hits this softlock; more research is needed. FIXME.
 */
void i2400m_net_erx(struct i2400m *i2400m, struct sk_buff *skb,
		    enum i2400m_cs cs)
{
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
	struct device *dev = i2400m_dev(i2400m);
	int protocol;

	d_fnstart(2, dev, "(i2400m %p skb %p [%u] cs %d)\n",
		  i2400m, skb, skb->len, cs);
	switch(cs) {
	case I2400M_CS_IPV4_0:
	case I2400M_CS_IPV4:
		protocol = ETH_P_IP;
		i2400m_rx_fake_eth_header(i2400m->wimax_dev.net_dev,
					  skb->data - ETH_HLEN,
					  cpu_to_be16(ETH_P_IP));
		skb_set_mac_header(skb, -ETH_HLEN);
		skb->dev = i2400m->wimax_dev.net_dev;
		skb->protocol = htons(ETH_P_IP);
		net_dev->stats.rx_packets++;
		net_dev->stats.rx_bytes += skb->len;
		break;
	default:
		dev_err(dev, "ERX: BUG? CS type %u unsupported\n", cs);
		goto error;

	}
	d_printf(3, dev, "ERX: receiving %d bytes to the network stack\n",
		 skb->len);
	d_dump(4, dev, skb->data, skb->len);
	netif_rx_ni(skb);	/* see notes in function header */
error:
	d_fnend(2, dev, "(i2400m %p skb %p [%u] cs %d) = void\n",
		i2400m, skb, skb->len, cs);
}
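
i2400m_rx_fake_eth_header() itself is not part of this listing. As a rough, hedged sketch (field choices are assumptions, not the driver's exact code), such a helper just writes a minimal Ethernet header in front of the IP payload so the stack will accept it:

#include <linux/netdevice.h>
#include <linux/if_ether.h>

/* Sketch (assumption): fill a fake Ethernet header at 'eth_hdr_area',
 * addressed to the net_device itself, with an all-zero source MAC. */
static void fake_eth_header(struct net_device *net_dev, void *eth_hdr_area,
			    __be16 protocol)
{
	struct ethhdr *eth_hdr = eth_hdr_area;

	memcpy(eth_hdr->h_dest, net_dev->dev_addr, ETH_ALEN);
	memset(eth_hdr->h_source, 0, ETH_ALEN);
	eth_hdr->h_proto = protocol;
}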
Example #10
static int writebuf_from_LL(int driverID, int channel, int ack,
			    struct sk_buff *skb)
{
	struct cardstate *cs = gigaset_get_cs_by_id(driverID);
	struct bc_state *bcs;
	unsigned char *ack_header;
	unsigned len;

	if (!cs) {
		pr_err("%s: invalid driver ID (%d)\n", __func__, driverID);
		return -ENODEV;
	}
	if (channel < 0 || channel >= cs->channels) {
		dev_err(cs->dev, "%s: invalid channel ID (%d)\n",
			__func__, channel);
		return -ENODEV;
	}
	bcs = &cs->bcs[channel];

	if (skb_linearize(skb) < 0) {
		dev_err(cs->dev, "%s: skb_linearize failed\n", __func__);
		return -ENOMEM;
	}
	len = skb->len;

	gig_dbg(DEBUG_LLDATA,
		"Receiving data from LL (id: %d, ch: %d, ack: %d, sz: %d)",
		driverID, channel, ack, len);

	if (!len) {
		if (ack)
			dev_notice(cs->dev, "%s: not ACKing empty packet\n",
				   __func__);
		return 0;
	}
	if (len > MAX_BUF_SIZE) {
		dev_err(cs->dev, "%s: packet too large (%d bytes)\n",
			__func__, len);
		return -EINVAL;
	}

	if (skb_headroom(skb) < HW_HDR_LEN) {
		dev_err(cs->dev, "%s: insufficient skb headroom\n", __func__);
		return -ENOMEM;
	}
	skb_set_mac_header(skb, -HW_HDR_LEN);
	skb->mac_len = HW_HDR_LEN;
	ack_header = skb_mac_header(skb);
	if (ack) {
		ack_header[0] = len & 0xff;
		ack_header[1] = len >> 8;
	} else {
Example #11
static int port_net_recv_req(struct ccci_port *port, struct ccci_request* req)
{
    struct sk_buff *skb = req->skb;
    struct netdev_entity *nent = (struct netdev_entity *)port->private_data;
    struct net_device *dev = nent->ndev;
    unsigned int packet_type;
    int skb_len = req->skb->len;

#ifndef FEATURE_SEQ_CHECK_EN
    struct ccci_header *ccci_h = (struct ccci_header*)req->skb->data;
    CCCI_DBG_MSG(port->modem->index, NET, "recv on %s, curr_seq=%d\n", port->name, ccci_h->reserved);
    if(unlikely(nent->rx_seq_num!=0 && (ccci_h->reserved-nent->rx_seq_num)!=1)) {
        CCCI_ERR_MSG(port->modem->index, NET, "possible packet lost on %s %d->%d\n",
                     port->name, nent->rx_seq_num, ccci_h->reserved);
    }
    nent->rx_seq_num = ccci_h->reserved;
#else
    CCCI_DBG_MSG(port->modem->index, NET, "recv on %s\n", port->name);
#endif

    list_del(&req->entry); // dequeue from queue's list
    skb_pull(skb, sizeof(struct ccci_header));
    packet_type = skb->data[0] & 0xF0;
    ccmni_make_etherframe(skb->data-ETH_HLEN, dev->dev_addr, packet_type);
    skb_set_mac_header(skb, -ETH_HLEN);
    skb->dev = dev;
    if(packet_type == 0x60) {
        skb->protocol  = htons(ETH_P_IPV6);
    } else {
        skb->protocol  = htons(ETH_P_IP);
    }
    skb->ip_summed = CHECKSUM_NONE;
    if(likely(port->modem->capability & MODEM_CAP_NAPI)) {
        netif_receive_skb(skb);
    } else {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
        if(!in_interrupt()) {
            netif_rx_ni(skb);
        } else {
            netif_rx(skb);
        }
#else
        netif_rx(skb);
#endif
    }
    dev->stats.rx_packets++;
    dev->stats.rx_bytes += skb_len;
    req->policy = NOOP;
    req->skb = NULL;
    ccci_free_req(req);
    wake_lock_timeout(&port->rx_wakelock, HZ);
    return 0;
}
Example #12
static int ccmni_v2_receive(ccmni_v2_instance_t *ccmni, const unsigned char *ccmni_ptr, int ccmni_len)
{
	int						packet_type, ret = 0;
	struct sk_buff			*skb;
	ccmni_v2_ctl_block_t	*ctl_b = (ccmni_v2_ctl_block_t*)ccmni->owner;
	int						md_id = ctl_b->m_md_id;

	if ((ccmni == NULL) || (ccmni_ptr == NULL) || (ccmni_len <= 0))
	{
		CCCI_MSG_INF(md_id, "net", "CCMNI%d_receive: invalid private data\n", ccmni->channel);
		return -1;
	}

	skb = dev_alloc_skb(ccmni_len);

	if (skb)
	{
		packet_type = ccmni_ptr[0] & 0xF0;
		memcpy(skb_put(skb, ccmni_len), ccmni_ptr, ccmni_len);
		ccmni_make_etherframe(skb->data - ETH_HLEN, ccmni->dev->dev_addr, packet_type);
		skb_set_mac_header(skb, -ETH_HLEN);

		skb->dev = ccmni->dev;
		if(packet_type == IPV6_VERSION)
		{
			skb->protocol  = htons(ETH_P_IPV6);
		}
		else
		{
			skb->protocol  = htons(ETH_P_IP);
		}
		//skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->ip_summed = CHECKSUM_NONE;

		ret = netif_rx(skb);

		CCCI_CCMNI_MSG(md_id, "CCMNI%d invoke netif_rx()=%d\n", ccmni->channel, ret);

		ccmni->dev->stats.rx_packets++;
		ccmni->dev->stats.rx_bytes	+= ccmni_len;
		CCCI_CCMNI_MSG(md_id, "CCMNI%d rx_pkts=%d, stats_rx_bytes=%d\n",ccmni->channel, \
			ccmni->dev->stats.rx_packets,ccmni->dev->stats.rx_bytes);

		ret = 0;
	}
	else
	{
		CCCI_MSG_INF(md_id, "net", "CCMNI%d Socket buffer allocate fail\n", ccmni->channel);
		ret = -CCCI_ERR_MEM_CHECK_FAIL;
	}

	return ret;
}
Example #13
static void
rmnet_deliver_skb(struct sk_buff *skb)
{
	struct rmnet_priv *priv = netdev_priv(skb->dev);

	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	rmnet_vnd_rx_fixup(skb, skb->dev);

	skb->pkt_type = PACKET_HOST;
	skb_set_mac_header(skb, 0);
	gro_cells_receive(&priv->gro_cells, skb);
}
Example #14
static void init_skbuff(const struct ipq_packet_msg *ipq_msg, struct sk_buff *skb)
{
    memcpy(skb->head, ipq_msg->payload, ipq_msg->data_len);
    skb->len = skb->data_len = ipq_msg->data_len;

    // Because the payload of ipq_msg is an L3 packet, there is no MAC header
    skb_set_mac_header(skb, ~0U);

    skb_set_network_header(skb, 0);

    struct iphdr *iph = (struct iphdr*)skb_network_header(skb);
    
    skb_set_transport_header(skb, iph->ihl<<2);
    
}
Example #15
static int xfrm4_beet_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct iphdr *iph;
	int optlen = 0;
	int err = -EINVAL;

	if (unlikely(XFRM_MODE_SKB_CB(skb)->protocol == IPPROTO_BEETPH)) {
		struct ip_beet_phdr *ph;
		int phlen;

		if (!pskb_may_pull(skb, sizeof(*ph)))
			goto out;

		ph = (struct ip_beet_phdr *)skb->data;

		phlen = sizeof(*ph) + ph->padlen;
		optlen = ph->hdrlen * 8 + (IPV4_BEET_PHMAXLEN - phlen);
		if (optlen < 0 || optlen & 3 || optlen > 250)
			goto out;

		XFRM_MODE_SKB_CB(skb)->protocol = ph->nexthdr;

		if (!pskb_may_pull(skb, phlen))
			goto out;
		__skb_pull(skb, phlen);
	}

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);

	memmove(skb->data - skb->mac_len, skb_mac_header(skb),
		skb->mac_len);
	skb_set_mac_header(skb, -skb->mac_len);

	xfrm4_beet_make_header(skb);

	iph = ip_hdr(skb);

	iph->ihl += optlen / 4;
	iph->tot_len = htons(skb->len);
	iph->daddr = x->sel.daddr.a4;
	iph->saddr = x->sel.saddr.a4;
	iph->check = 0;
	iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);
	err = 0;
out:
	return err;
}
Example #16
static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipv6hdr *iph;
	u8 *prevhdr;
	int hdr_len;

	iph = ipv6_hdr(skb);

	hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
	skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
	skb_set_network_header(skb, -x->props.header_len);
	skb->transport_header = skb->network_header + hdr_len;
	__skb_pull(skb, hdr_len);
	memmove(ipv6_hdr(skb), iph, hdr_len);
	return 0;
}
Example #17
static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
		struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	skb_set_mac_header(skb, 0);
	skb_set_network_header(skb, 0);
	skb_set_transport_header(skb, 0);

	
	skb_set_queue_mapping(skb, IEEE80211_AC_VO);
	skb->priority = 7;

	info->control.vif = &sdata->vif;
	ieee80211_set_qos_hdr(sdata, skb);
}
Example #18
/*  Headroom is not adjusted.  Caller should ensure that skb has sufficient
 *  headroom in case the frame is encrypted. */
static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
		struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	skb_set_mac_header(skb, 0);
	skb_set_network_header(skb, 0);
	skb_set_transport_header(skb, 0);

	/* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
	skb_set_queue_mapping(skb, IEEE80211_AC_VO);
	skb->priority = 7;

	info->control.vif = &sdata->vif;
	info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
	ieee80211_set_qos_hdr(sdata, skb);
}
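
The comment above says the caller must guarantee headroom before queueing the frame. A hedged sketch of such a caller-side check (the function name and the needed_headroom parameter are assumptions):

/* Sketch (assumption): grow the headroom if necessary before handing the
 * frame to prepare_frame_for_deferred_tx(), so a later encryption step can
 * prepend its headers without reallocating. */
static int queue_mesh_frame(struct ieee80211_sub_if_data *sdata,
			    struct sk_buff *skb, unsigned int needed_headroom)
{
	if (skb_headroom(skb) < needed_headroom &&
	    pskb_expand_head(skb, needed_headroom - skb_headroom(skb),
			     0, GFP_ATOMIC))
		return -ENOMEM;

	prepare_frame_for_deferred_tx(sdata, skb);
	return 0;
}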
Example #19
static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw,
				      struct sk_buff *tx_skb)
{
	struct mac80211_hwsim_data *data = hw->priv;
	struct sk_buff *skb;
	struct hwsim_radiotap_hdr *hdr;
	u16 flags;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_skb);
	struct ieee80211_rate *txrate = ieee80211_get_tx_rate(hw, info);

	if (!netif_running(hwsim_mon))
		return;

	skb = skb_copy_expand(tx_skb, sizeof(*hdr), 0, GFP_ATOMIC);
	if (skb == NULL)
		return;

	hdr = (struct hwsim_radiotap_hdr *) skb_push(skb, sizeof(*hdr));
	hdr->hdr.it_version = PKTHDR_RADIOTAP_VERSION;
	hdr->hdr.it_pad = 0;
	hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr));
	hdr->hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
					  (1 << IEEE80211_RADIOTAP_RATE) |
					  (1 << IEEE80211_RADIOTAP_CHANNEL));
	hdr->rt_flags = 0;
	hdr->rt_rate = txrate->bitrate / 5;
	hdr->rt_channel = cpu_to_le16(data->channel->center_freq);
	flags = IEEE80211_CHAN_2GHZ;
	if (txrate->flags & IEEE80211_RATE_ERP_G)
		flags |= IEEE80211_CHAN_OFDM;
	else
		flags |= IEEE80211_CHAN_CCK;
	hdr->rt_chbitmask = cpu_to_le16(flags);

	skb->dev = hwsim_mon;
	skb_set_mac_header(skb, 0);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);
	memset(skb->cb, 0, sizeof(skb->cb));
	netif_rx(skb);
}
Example #20
/*
 * i2400m_net_rx - pass a network packet to the stack
 *
 * @i2400m: device instance
 * @skb_rx: the skb where the buffer pointed to by @buf is
 * @i: 1 if payload is the only one
 * @buf: pointer to the buffer containing the data
 * @len: buffer's length
 *
 * This is only used now for the v1.3 firmware. It will be deprecated
 * in >= 2.6.31.
 *
 * Note that due to firmware limitations, we don't have space to add
 * an ethernet header, so we need to copy each packet. Firmware
 * versions >= v1.4 fix this [see i2400m_net_erx()].
 *
 * We just clone the skb and set it up so that its skb->data pointer
 * points to "buf" and its length matches the buffer's.
 *
 * Note that if the payload is the last (or the only one) in a
 * multi-payload message, we don't clone the SKB but just reuse it.
 *
 * This function is normally run from a thread context. However, we
 * still use netif_rx() instead of netif_receive_skb() as was
 * recommended on the mailing list. The reason is that in some stress
 * tests, when sending/receiving a lot of data, we seem to hit a softlock
 * in the kernel's TCP implementation [around tcp_delay_timer()]. Using
 * netif_rx() took care of the issue.
 *
 * It is, of course, still an open question why running with
 * netif_receive_skb() hits this softlock; more research is needed. FIXME.
 *
 * FIXME: currently we make no effort to distinguish whether what we got
 * was an IPv4 or an IPv6 header, which would be needed to set up the
 * protocol field correctly.
 */
void i2400m_net_rx(struct i2400m *i2400m, struct sk_buff *skb_rx,
		   unsigned i, const void *buf, int buf_len)
{
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *skb;

	d_fnstart(2, dev, "(i2400m %p buf %p buf_len %d)\n",
		  i2400m, buf, buf_len);
	if (i) {
		skb = skb_get(skb_rx);
		d_printf(2, dev, "RX: reusing first payload skb %p\n", skb);
		skb_pull(skb, buf - (void *) skb->data);
		skb_trim(skb, (void *) skb_end_pointer(skb) - buf);
	} else {
		/* Yes, this is bad -- a lot of overhead -- see
		 * comments at the top of the file */
		skb = __netdev_alloc_skb(net_dev, buf_len, GFP_KERNEL);
		if (skb == NULL) {
			dev_err(dev, "NETRX: no memory to realloc skb\n");
			net_dev->stats.rx_dropped++;
			goto error_skb_realloc;
		}
		memcpy(skb_put(skb, buf_len), buf, buf_len);
	}
	i2400m_rx_fake_eth_header(i2400m->wimax_dev.net_dev,
				  skb->data - ETH_HLEN,
				  cpu_to_be16(ETH_P_IP));
	skb_set_mac_header(skb, -ETH_HLEN);
	skb->dev = i2400m->wimax_dev.net_dev;
	skb->protocol = htons(ETH_P_IP);
	net_dev->stats.rx_packets++;
	net_dev->stats.rx_bytes += buf_len;
	d_printf(3, dev, "NETRX: receiving %d bytes to network stack\n",
		buf_len);
	d_dump(4, dev, buf, buf_len);
	netif_rx_ni(skb);	/* see notes in function header */
error_skb_realloc:
	d_fnend(2, dev, "(i2400m %p buf %p buf_len %d) = void\n",
		i2400m, buf, buf_len);
}
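
The FIXME above (no IPv4/IPv6 distinction) is solved elsewhere in this collection by peeking at the IP version nibble. A hedged sketch of that approach with an invented helper name:

/* Sketch (assumption): classify the payload by its IP version nibble, the
 * same trick the CCMNI examples use (0x6x means IPv6, otherwise assume IPv4). */
static __be16 guess_ip_protocol(const struct sk_buff *skb)
{
	if ((skb->data[0] & 0xF0) == 0x60)
		return htons(ETH_P_IPV6);
	return htons(ETH_P_IP);
}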
Example #21
/* Determine the packet's protocol ID. The rule here is that we
 * assume 802.3 if the type field is short enough to be a length.
 * This is normal practice and works for any 'now in use' protocol.
 */
static __be16 myri_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct ethhdr *eth;
	unsigned char *rawp;

	skb_set_mac_header(skb, MYRI_PAD_LEN);
	skb_pull(skb, dev->hard_header_len);
	eth = eth_hdr(skb);

#ifdef DEBUG_HEADER
	DHDR(("myri_type_trans: "));
	dump_ehdr(eth);
#endif
	if (*eth->h_dest & 1) {
		if (memcmp(eth->h_dest, dev->broadcast, ETH_ALEN)==0)
			skb->pkt_type = PACKET_BROADCAST;
		else
			skb->pkt_type = PACKET_MULTICAST;
	} else if (dev->flags & (IFF_PROMISC|IFF_ALLMULTI)) {
		if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
			skb->pkt_type = PACKET_OTHERHOST;
	}

	if (ntohs(eth->h_proto) >= 1536)
		return eth->h_proto;

	rawp = skb->data;

	/* This is a magic hack to spot IPX packets. Older Novell breaks
	 * the protocol design and runs IPX over 802.3 without an 802.2 LLC
	 * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
	 * won't work for fault tolerant netware but does for the rest.
	 */
	if (*(unsigned short *)rawp == 0xFFFF)
		return htons(ETH_P_802_3);

	/* Real 802.2 LLC */
	return htons(ETH_P_802_2);
}
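
A hedged usage sketch (not taken from the driver): the classifier's return value is what goes into skb->protocol before the frame is handed to the stack; myri_type_trans() has already set the MAC header and pulled the hardware header.

/* Sketch (assumption): minimal RX hand-off built around myri_type_trans(). */
static void myri_rx_deliver(struct sk_buff *skb, struct net_device *dev)
{
	skb->dev = dev;
	skb->protocol = myri_type_trans(skb, dev);
	netif_rx(skb);
}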
Example #22
static void sc_send_80211(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	int hdrlen;

	printk(KERN_DEBUG "capwap inject: %s: hdr: %p\n",
	       dev->name, skb->data);

	/* detach skb from CAPWAP */
	skb_orphan(skb);
	secpath_reset(skb);

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);

	skb->dev = dev;

	skb_set_mac_header(skb, hdrlen);
	skb_set_network_header(skb, hdrlen);
	skb_set_transport_header(skb, hdrlen);

	skb->protocol = htons(ETH_P_CONTROL);
	info->flags |= IEEE80211_TX_CTL_INJECTED;

	/* Force the device to verify it. */
	skb->ip_summed = CHECKSUM_NONE;

	dev_queue_xmit(skb);
}
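
The four resets above (socket, security path, route, conntrack) form a common pattern when re-injecting an skb on a different device. A hedged sketch bundling them; the helper name is an assumption:

/* Sketch (assumption): detach an skb from its previous socket, security
 * path, routing and conntrack state before handing it to another device. */
static void scrub_skb_for_reinject(struct sk_buff *skb)
{
	skb_orphan(skb);
	secpath_reset(skb);
	skb_dst_drop(skb);
	nf_reset(skb);
}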
Example #23
static int ccmni_rx_callback(int md_id, int rx_ch, struct sk_buff *skb, void *priv_data)
{
	ccmni_ctl_block_t *ctlb = ccmni_ctl_blk[md_id];
//	struct ccci_header *ccci_h = (struct ccci_header*)skb->data;
	ccmni_instance_t *ccmni = NULL;
	struct net_device *dev = NULL;
	int pkt_type, skb_len, ccmni_idx;

	if (unlikely(ctlb == NULL || ctlb->ccci_ops == NULL)) {
		CCMNI_ERR_MSG(md_id, "invalid CCMNI ctrl/ops struct for RX_CH(%d)\n", rx_ch);
		dev_kfree_skb(skb);
		return -1;
	}

	ccmni_idx = get_ccmni_idx_from_ch(md_id, rx_ch);
	if (unlikely(ccmni_idx < 0)) {
		CCMNI_ERR_MSG(md_id, "CCMNI rx(%d) skb ch error\n", rx_ch);
		dev_kfree_skb(skb);
		return -1;
	}
	ccmni = ctlb->ccmni_inst[ccmni_idx];
	dev = ccmni->dev;
	
//	skb_pull(skb, sizeof(struct ccci_header));
	pkt_type = skb->data[0] & 0xF0;
	ccmni_make_etherframe(skb->data-ETH_HLEN, dev->dev_addr, pkt_type);
	skb_set_mac_header(skb, -ETH_HLEN);
	skb->dev = dev;
	if(pkt_type == 0x60) {            
		skb->protocol  = htons(ETH_P_IPV6);
	} else {
		skb->protocol  = htons(ETH_P_IP);
	}
	skb->ip_summed = CHECKSUM_NONE;
	skb_len = skb->len;
	
	if (unlikely(ccmni_debug_level&CCMNI_DBG_LEVEL_RX)) {
		CCMNI_INF_MSG(md_id, "[RX]CCMNI%d(rx_ch=%d) recv data_len=%d\n", ccmni_idx, rx_ch, skb->len);
	}

	if (unlikely(ccmni_debug_level&CCMNI_DBG_LEVEL_RX_SKB)) {
		ccmni_dbg_skb_header(ccmni->md_id, false, skb);
	}
	
	if(likely(ctlb->ccci_ops->md_ability & MODEM_CAP_NAPI)) {
		netif_receive_skb(skb);
	} else {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
		if(!in_interrupt()) {
			netif_rx_ni(skb);
		} else {
			netif_rx(skb);
		}
#else
		netif_rx(skb);
#endif
	}
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb_len;

	wake_lock_timeout(&ctlb->ccmni_wakelock, HZ);
	
	return 0;
}
Example #24
void ATWILC_WFI_monitor_rx(uint8_t *buff, uint32_t size)
{
	uint32_t header, pkt_offset;
	struct sk_buff *skb = NULL;
	struct atwilc_wfi_radiotap_hdr *hdr;
	struct atwilc_wfi_radiotap_cb_hdr *cb_hdr;

	PRINT_INFO(HOSTAPD_DBG, "In monitor interface receive function\n");

     //   struct ATWILC_WFI_priv *priv = netdev_priv(dev);

     //   priv = wiphy_priv(priv->dev->ieee80211_ptr->wiphy);
	 
	/* Bug 4601 */
	if (atwilc_wfi_mon == NULL)
		return;

	if (!netif_running(atwilc_wfi_mon))
	{
		PRINT_INFO(HOSTAPD_DBG, "Monitor interface is not running\n");
		return;
	}

	//Get ATWILC header
	memcpy(&header, (buff - HOST_HDR_OFFSET), HOST_HDR_OFFSET);

	//The packet offset field contains info about what type of management
	//frame we are dealing with and the ack status
	pkt_offset = GET_PKT_OFFSET(header);

	if(pkt_offset & IS_MANAGMEMENT_CALLBACK)
	{
		
		// hostapd callback mgmt frame
		
		//if(INFO || buff[0] == 0x10 || buff[0] == 0xb0)
		 	//PRINT_D(HOSTAPD_DBG,"In monitor interface callback receive function\n");
	

		skb = dev_alloc_skb(size+sizeof(struct atwilc_wfi_radiotap_cb_hdr));
		if(skb == NULL)
		{
			PRINT_INFO(HOSTAPD_DBG,"Monitor if : No memory to allocate skb");
			return;
		}

		memcpy(skb_put(skb,size),buff, size);

		cb_hdr = (struct atwilc_wfi_radiotap_cb_hdr *) skb_push(skb, sizeof(*cb_hdr));
		memset(cb_hdr, 0, sizeof(struct atwilc_wfi_radiotap_cb_hdr));

		cb_hdr->hdr.it_version = 0; //PKTHDR_RADIOTAP_VERSION;

		cb_hdr->hdr.it_len = cpu_to_le16(sizeof(struct atwilc_wfi_radiotap_cb_hdr));

		cb_hdr->hdr.it_present = cpu_to_le32(
					(1 << IEEE80211_RADIOTAP_RATE) |
					(1 << IEEE80211_RADIOTAP_TX_FLAGS));

		cb_hdr->rate = 5; //txrate->bitrate / 5;

		if (pkt_offset & IS_MGMT_STATUS_SUCCES)
		{
			//success
			cb_hdr->tx_flags = IEEE80211_RADIOTAP_F_TX_RTS;
		}
		else
		{
			cb_hdr->tx_flags = IEEE80211_RADIOTAP_F_TX_FAIL;
		}
		
	}
	else
	{

		// normal  mgmt frame
		
		//if(INFO || buff[0] == 0x00 || buff[0] == 0xb0)
	//	{	
			//PRINT_D(HOSTAPD_DBG,"In monitor interface receive function , length = %d\n",size);
			//for(i=0;i<size;i++)
				//PRINT_D(HOSTAPD_DBG,"Mon RxData[%d] = %02x\n",i,buff[i]);
	//	}
		
	
		skb = dev_alloc_skb(size+sizeof(struct atwilc_wfi_radiotap_hdr));

		if(skb == NULL)
		{
			PRINT_INFO(HOSTAPD_DBG,"Monitor if : No memory to allocate skb");
			return;
		}

	        //skb = skb_copy_expand(tx_skb, sizeof(*hdr), 0, GFP_ATOMIC);
	        //if (skb == NULL)
	          //      return;
	       
		memcpy(skb_put(skb, size), buff, size);
		//printk("--radiotap header--\n",sizeof(*hdr));
		hdr = (struct atwilc_wfi_radiotap_hdr *) skb_push(skb, sizeof(*hdr));
		memset(hdr, 0, sizeof(struct atwilc_wfi_radiotap_hdr));
		hdr->hdr.it_version = 0; //PKTHDR_RADIOTAP_VERSION;
		//hdr->hdr.it_pad = 0;
		hdr->hdr.it_len = cpu_to_le16(sizeof(struct atwilc_wfi_radiotap_hdr));
		PRINT_INFO(HOSTAPD_DBG, "Radiotap len %d\n", hdr->hdr.it_len);
		hdr->hdr.it_present = cpu_to_le32
					(1 << IEEE80211_RADIOTAP_RATE); //|
					//(1 << IEEE80211_RADIOTAP_CHANNEL));
		PRINT_INFO(HOSTAPD_DBG, "Presentflags %d\n", hdr->hdr.it_present);
		hdr->rate = 5; //txrate->bitrate / 5;

	}

/*	if(INFO || if(skb->data[9] == 0x00 || skb->data[9] == 0xb0))
	{
		for(i=0;i<skb->len;i++)
			PRINT_INFO(HOSTAPD_DBG,"Mon RxData[%d] = %02x\n",i,skb->data[i]);
	}*/

	
	skb->dev = atwilc_wfi_mon;
	skb_set_mac_header(skb, 0);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);
	memset(skb->cb, 0, sizeof(skb->cb));

	netif_rx(skb);
}
Example #25
/**
 *  @brief      WILC_WFI_mon_xmit
 *  @details
 *  @param[in]
 *  @return     int : Return 0 on Success
 *  @author	mdaftedar
 *  @date	12 JUL 2012
 *  @version	1.0
 */
static netdev_tx_t WILC_WFI_mon_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	u32 rtap_len, ret = 0;
	struct WILC_WFI_mon_priv  *mon_priv;

	struct sk_buff *skb2;
	struct wilc_wfi_radiotap_cb_hdr *cb_hdr;

	if (!wilc_wfi_mon)
		return -EFAULT;

	mon_priv = netdev_priv(wilc_wfi_mon);
	if (!mon_priv)
		return -EFAULT;
	rtap_len = ieee80211_get_radiotap_len(skb->data);
	if (skb->len < rtap_len)
		return -1;

	skb_pull(skb, rtap_len);

	if (skb->data[0] == 0xc0 && (!(memcmp(broadcast, &skb->data[4], 6)))) {
		skb2 = dev_alloc_skb(skb->len + sizeof(struct wilc_wfi_radiotap_cb_hdr));
		if (!skb2)
			return -ENOMEM;

		memcpy(skb_put(skb2, skb->len), skb->data, skb->len);

		cb_hdr = (struct wilc_wfi_radiotap_cb_hdr *)skb_push(skb2, sizeof(*cb_hdr));
		memset(cb_hdr, 0, sizeof(struct wilc_wfi_radiotap_cb_hdr));

		cb_hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */

		cb_hdr->hdr.it_len = cpu_to_le16(sizeof(struct wilc_wfi_radiotap_cb_hdr));

		cb_hdr->hdr.it_present = cpu_to_le32(
				(1 << IEEE80211_RADIOTAP_RATE) |
				(1 << IEEE80211_RADIOTAP_TX_FLAGS));

		cb_hdr->rate = 5; /* txrate->bitrate / 5; */
		cb_hdr->tx_flags = 0x0004;

		skb2->dev = wilc_wfi_mon;
		skb_set_mac_header(skb2, 0);
		skb2->ip_summed = CHECKSUM_UNNECESSARY;
		skb2->pkt_type = PACKET_OTHERHOST;
		skb2->protocol = htons(ETH_P_802_2);
		memset(skb2->cb, 0, sizeof(skb2->cb));

		netif_rx(skb2);

		return 0;
	}
	skb->dev = mon_priv->real_ndev;

	/* Identify if Ethernet or MAC header (data or mgmt) */
	memcpy(srcadd, &skb->data[10], 6);
	memcpy(bssid, &skb->data[16], 6);
	/* if source address and bssid fields are equal>>Mac header */
	/*send it to mgmt frames handler */
	if (!(memcmp(srcadd, bssid, 6))) {
		ret = mon_mgmt_tx(mon_priv->real_ndev, skb->data, skb->len);
		if (ret)
			netdev_err(dev, "fail to mgmt tx\n");
		dev_kfree_skb(skb);
	} else {
		ret = wilc_mac_xmit(skb, mon_priv->real_ndev);
	}

	return ret;
}
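
The source/BSSID comparison above is the heuristic that routes monitor-injected frames to either the management or the data path. A hedged sketch of it factored out; the offsets follow the 802.11 header layout (addr2 at byte 10, addr3 at byte 16):

/* Sketch (assumption): a monitor-injected frame whose source address equals
 * its BSSID is treated as a management frame by the code above. */
static bool mon_frame_is_mgmt(const struct sk_buff *skb)
{
	return memcmp(&skb->data[10], &skb->data[16], ETH_ALEN) == 0;
}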
Example #26
VOID
reply_to_arp_request(struct sk_buff *skb)
{
	PMINI_ADAPTER		Adapter;
	struct ArpHeader 	*pArpHdr = NULL;
	struct ethhdr		*pethhdr = NULL;
	UCHAR 				uiIPHdr[4];
	/* Check for valid skb */
	if(skb == NULL)
	{
		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Invalid skb: Cannot reply to ARP request\n");
		return;
	}


	Adapter = GET_BCM_ADAPTER(skb->dev);
	/* Print the ARP Request Packet */
	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, ARP_RESP, DBG_LVL_ALL, "ARP Packet Dump :");
	BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_TX, ARP_RESP, DBG_LVL_ALL, (PUCHAR)(skb->data), skb->len);

	/*
	 * Extract the Ethernet Header and Arp Payload including Header
     */
	pethhdr = (struct ethhdr *)skb->data;
	pArpHdr  = (struct ArpHeader *)(skb->data+ETH_HLEN);

	if(Adapter->bETHCSEnabled)
	{
		if(memcmp(pethhdr->h_source, Adapter->dev->dev_addr, ETH_ALEN))
		{
			bcm_kfree_skb(skb);
			return;
		}
	}

	// Set the Ethernet Header First.
	memcpy(pethhdr->h_dest, pethhdr->h_source, ETH_ALEN);
	if(!memcmp(pethhdr->h_source, Adapter->dev->dev_addr, ETH_ALEN))
	{
		pethhdr->h_source[5]++;
	}

	/* Set the reply to ARP Reply */
	pArpHdr->arp.ar_op = ntohs(ARPOP_REPLY);

	/* Set the HW Address properly */
	memcpy(pArpHdr->ar_sha, pethhdr->h_source, ETH_ALEN);
	memcpy(pArpHdr->ar_tha, pethhdr->h_dest, ETH_ALEN);

	// Swap the IP addresses
	memcpy(uiIPHdr,pArpHdr->ar_sip,4);
	memcpy(pArpHdr->ar_sip,pArpHdr->ar_tip,4);
	memcpy(pArpHdr->ar_tip,uiIPHdr,4);

	/* Print the ARP Reply Packet */

	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, ARP_RESP, DBG_LVL_ALL, "ARP REPLY PACKET: ");

	/* Send the Packet to upper layer */
	BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_TX, ARP_RESP, DBG_LVL_ALL, (PUCHAR)(skb->data), skb->len);

	skb->protocol = eth_type_trans(skb,skb->dev);
	skb->pkt_type = PACKET_HOST;

//	skb->mac.raw=skb->data+LEADER_SIZE;
	skb_set_mac_header (skb, LEADER_SIZE);
	netif_rx(skb);
	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, ARP_RESP, DBG_LVL_ALL, "<=============\n");
	return;
}
Example #27
static int ccmni_receive(struct ccmni_instance_t *ccmni, int length)
{
	int counter, ret;
	struct packet_info_t packet_info;
	struct complete_ippkt_t *packet;
	struct complete_ippkt_t *processed_packet;
	struct sk_buff *skb;
	struct complete_ippkt_t last_packet = { 0 };
	int offset_put_pkt = 0;
	int offset_parse_frame = 0;
	int packet_type;
	struct ccmni_v1_ctl_block_t *ctl_b = (struct ccmni_v1_ctl_block_t *) ccmni->owner;
	int md_id = ctl_b->m_md_id;

	CCCI_CCMNI_MSG(md_id, "CCMNI%d_receive() invoke pfp_unframe()\n",
		       ccmni->channel);
	do {
		packet_info = pfp_unframe(ccmni->decode_buffer + offset_put_pkt,
					  CCCI1_CCMNI_BUF_SIZE - offset_put_pkt,
					  ccmni->read_buffer +
					  offset_parse_frame, length,
					  ccmni->channel);
		packet = packet_info.pkt_list;

		CCCI_CCMNI_MSG(md_id,
			       "CCMNI%d num_complete_pkt=%d after pfp_unframe\n",
			       ccmni->channel,
			       packet_info.num_complete_packets);

		for (counter = 0; counter < packet_info.num_complete_packets;
		     counter++) {
			skb = dev_alloc_skb(packet->pkt_size);
			if (skb) {
				packet_type = packet->pkt_data[0] & 0xF0;
				memcpy(skb_put(skb, packet->pkt_size),
				       packet->pkt_data, packet->pkt_size);
				ccmni_make_etherframe(skb->data - ETH_HLEN,
						      ccmni->dev->dev_addr,
						      packet_type);
				skb_set_mac_header(skb, -ETH_HLEN);
				skb->dev = ccmni->dev;
				if (packet_type == IPV6_VERSION)
					skb->protocol = htons(ETH_P_IPV6);
				else
					skb->protocol = htons(ETH_P_IP);
				skb->ip_summed = CHECKSUM_NONE;

				ret = netif_rx(skb);

				CCCI_CCMNI_MSG(md_id,
					       "CCMNI%d invoke netif_rx()=%d\n",
					       ccmni->channel, ret);
				ccmni->dev->stats.rx_packets++;
				ccmni->dev->stats.rx_bytes += packet->pkt_size;
				CCCI_CCMNI_MSG(md_id,
					       "CCMNI%d rx_pkts=%ld, stats_rx_bytes=%ld\n",
					       ccmni->channel,
					       ccmni->dev->stats.rx_packets,
					       ccmni->dev->stats.rx_bytes);
			} else {
				CCCI_DBG_MSG(md_id, "net",
					     "CCMNI%d Socket buffer allocate fail\n",
					     ccmni->channel);
			}

			processed_packet = packet;
			last_packet = *processed_packet;
			packet = packet->next;

			/* Only clear the entry_used flag as 0 */
			release_one_used_complete_ippkt_entry(processed_packet);
		};

		/* It must to check if it is necessary to invoke the pfp_unframe()
		 * again due to no available complete_ippkt entry
		 */
		if (packet_info.try_decode_again == 1) {
			offset_put_pkt +=
			    (last_packet.pkt_data - ccmni->decode_buffer +
			     last_packet.pkt_size);
			offset_parse_frame += packet_info.consumed_length;
		}
	} while (packet_info.try_decode_again == 1);

	offset_parse_frame += packet_info.consumed_length;
	return offset_parse_frame;
}
Example #28
void WILC_WFI_monitor_rx(u8 *buff, u32 size)
{
	u32 header, pkt_offset;
	struct sk_buff *skb = NULL;
	struct wilc_wfi_radiotap_hdr *hdr;
	struct wilc_wfi_radiotap_cb_hdr *cb_hdr;

	if (!wilc_wfi_mon)
		return;

	if (!netif_running(wilc_wfi_mon))
		return;

	/* Get WILC header */
	memcpy(&header, (buff - HOST_HDR_OFFSET), HOST_HDR_OFFSET);
	/*
	 * The packet offset field contains info about what type of management
	 * frame we are dealing with and the ack status.
	 */
	pkt_offset = GET_PKT_OFFSET(header);

	if (pkt_offset & IS_MANAGMEMENT_CALLBACK) {
		/* hostapd callback mgmt frame */

		skb = dev_alloc_skb(size + sizeof(struct wilc_wfi_radiotap_cb_hdr));
		if (!skb)
			return;

		memcpy(skb_put(skb, size), buff, size);

		cb_hdr = (struct wilc_wfi_radiotap_cb_hdr *)skb_push(skb, sizeof(*cb_hdr));
		memset(cb_hdr, 0, sizeof(struct wilc_wfi_radiotap_cb_hdr));

		cb_hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */

		cb_hdr->hdr.it_len = cpu_to_le16(sizeof(struct wilc_wfi_radiotap_cb_hdr));

		cb_hdr->hdr.it_present = cpu_to_le32(
				(1 << IEEE80211_RADIOTAP_RATE) |
				(1 << IEEE80211_RADIOTAP_TX_FLAGS));

		cb_hdr->rate = 5; /* txrate->bitrate / 5; */

		if (pkt_offset & IS_MGMT_STATUS_SUCCES)	{
			/* success */
			cb_hdr->tx_flags = IEEE80211_RADIOTAP_F_TX_RTS;
		} else {
			cb_hdr->tx_flags = IEEE80211_RADIOTAP_F_TX_FAIL;
		}

	} else {
		skb = dev_alloc_skb(size + sizeof(struct wilc_wfi_radiotap_hdr));

		if (!skb)
			return;

		memcpy(skb_put(skb, size), buff, size);
		hdr = (struct wilc_wfi_radiotap_hdr *)skb_push(skb, sizeof(*hdr));
		memset(hdr, 0, sizeof(struct wilc_wfi_radiotap_hdr));
		hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */
		hdr->hdr.it_len = cpu_to_le16(sizeof(struct wilc_wfi_radiotap_hdr));
		hdr->hdr.it_present = cpu_to_le32
				(1 << IEEE80211_RADIOTAP_RATE); /* | */
		hdr->rate = 5; /* txrate->bitrate / 5; */
	}

	skb->dev = wilc_wfi_mon;
	skb_set_mac_header(skb, 0);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);
	memset(skb->cb, 0, sizeof(skb->cb));

	netif_rx(skb);
}
Example #29
static void hdd_wlan_tx_complete( hdd_adapter_t* pAdapter,
                                  hdd_cfg80211_state_t* cfgState,
                                  tANI_BOOLEAN actionSendSuccess )
{
    struct ieee80211_radiotap_header *rthdr;
    unsigned char *pos;
    struct sk_buff *skb = cfgState->skb;
#ifdef WLAN_FEATURE_HOLD_RX_WAKELOCK
    hdd_context_t *pHddCtx = (hdd_context_t*)(pAdapter->pHddCtx);
#endif

    /* 2 bytes for TX flags and 1 byte for retry count */
    u32 rtHdrLen = sizeof(*rthdr) + 3;

    u8 *data;

    /* We have to return the skb with its data starting at the MAC header. We
     * have copied the skb data starting at the MAC header into cfgState->buf.
     * We will pull the entire skb->len from the skb and then push
     * cfgState->buf back into the skb.
     */
    if( NULL == skb_pull(skb, skb->len) )
    {
        hddLog( LOGE, FL("Not Able to Pull %d byte from skb"), skb->len);
        kfree_skb(cfgState->skb);
        return;
    }

    data = skb_push( skb, cfgState->len );

    if (data == NULL)
    {
        hddLog( LOGE, FL("Not Able to Push %d byte to skb"), cfgState->len);
        kfree_skb( cfgState->skb );
        return;
    }

    memcpy( data, cfgState->buf, cfgState->len );

    /* send frame to monitor interfaces now */
    if( skb_headroom(skb) < rtHdrLen )
    {
        hddLog( LOGE, FL("No headroom for rtap header"));
        kfree_skb(cfgState->skb);
        return;
    }

    rthdr = (struct ieee80211_radiotap_header*) skb_push( skb, rtHdrLen );

    memset( rthdr, 0, rtHdrLen );
    rthdr->it_len = cpu_to_le16( rtHdrLen );
    rthdr->it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
                                    (1 << IEEE80211_RADIOTAP_DATA_RETRIES)
                                   );

    pos = (unsigned char *)( rthdr+1 );

    // Fill TX flags
    *pos = actionSendSuccess;
    pos += 2;

    // Fill retry count
    *pos = 0;
    pos++;

    skb_set_mac_header( skb, 0 );
    skb->ip_summed = CHECKSUM_NONE;
    skb->pkt_type  = PACKET_OTHERHOST;
    skb->protocol  = htons(ETH_P_802_2);
    memset( skb->cb, 0, sizeof( skb->cb ) );
#ifdef WLAN_FEATURE_HOLD_RX_WAKELOCK
    wake_lock_timeout(&pHddCtx->rx_wake_lock, HDD_WAKE_LOCK_DURATION);
#endif
    if (in_interrupt())
        netif_rx( skb );
    else
        netif_rx_ni( skb );

    /* Enable Queues which we have disabled earlier */
    netif_tx_start_all_queues( pAdapter->dev ); 

}
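
The buffer built by hand above is a radiotap header followed by a 2-byte TX-flags field and a 1-byte retry count (hence rtHdrLen = sizeof(*rthdr) + 3). Written as a struct purely for illustration; the driver does not define such a type:

/* Sketch (assumption): the byte layout that hdd_wlan_tx_complete() builds. */
struct tx_status_rtap {
	struct ieee80211_radiotap_header hdr;
	__le16 tx_flags;	/* IEEE80211_RADIOTAP_TX_FLAGS */
	u8 data_retries;	/* IEEE80211_RADIOTAP_DATA_RETRIES */
} __packed;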
Example #30
int hdd_mon_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
   v_U16_t rt_hdr_len;
   struct ieee80211_hdr *hdr;
   hdd_adapter_t *pPgBkAdapter, *pAdapter =  WLAN_HDD_GET_PRIV_PTR(dev);
   struct ieee80211_radiotap_header *rtap_hdr =
                        (struct ieee80211_radiotap_header *)skb->data;

   /*Supplicant sends the EAPOL packet on monitor interface*/
   pPgBkAdapter = pAdapter->sessionCtx.monitor.pAdapterForTx;    
   if(pPgBkAdapter == NULL)
   {
      VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
           "%s: No Adapter to piggy back. Dropping the pkt on monitor inf",
                                                                 __func__);
      goto fail; /* no adapter to piggyback the frame on */
   }
 
   /* check if the total skb length is greater than the radiotap header length or not */
   if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
      goto fail; /* too short to be possibly valid */
   
   /* check if radio tap header version is correct or not */
   if (unlikely(rtap_hdr->it_version))
      goto fail; /* only version 0 is supported */
 
   /*Strip off the radio tap header*/
   rt_hdr_len = ieee80211_get_radiotap_len(skb->data);
 
   /* check if the skb length is greater than the total radiotap header length or not */
   if (unlikely(skb->len < rt_hdr_len))
      goto fail;
 
   /* Update the trans_start for this netdev */  
   dev->trans_start = jiffies;
   /*
    * fix up the pointers accounting for the radiotap
    * header still being in there.
    */
   skb_set_mac_header(skb, rt_hdr_len);
   skb_set_network_header(skb, rt_hdr_len);
   skb_set_transport_header(skb, rt_hdr_len); 

   /* Pull rtap header out of the skb */
   skb_pull(skb, rt_hdr_len);
  
   /* Supplicant adds: radiotap hdr + radiotap data + 802.11 header. So the
    * 802.11 header starts right after the radiotap header.
    */
   hdr = (struct ieee80211_hdr *)skb->data;
 
   /* Send data frames through the normal data path. In this path we will
    * convert the received 802.11 packet to an 802.3 packet */
   if ( (hdr->frame_control & HDD_FRAME_TYPE_MASK)  == HDD_FRAME_TYPE_DATA)
   { 
      v_U8_t da[6];
      v_U8_t sa[6];

      memcpy (da, hdr->addr1, VOS_MAC_ADDR_SIZE);
      memcpy (sa, hdr->addr2, VOS_MAC_ADDR_SIZE);
 
      /* Pull 802.11 MAC header */ 
      skb_pull(skb, HDD_80211_HEADER_LEN);
 
      if ( HDD_FRAME_SUBTYPE_QOSDATA == 
          (hdr->frame_control & HDD_FRAME_SUBTYPE_MASK))
      {
         skb_pull(skb, HDD_80211_HEADER_QOS_CTL);
      }

      /* Pull LLC header */ 
      skb_pull(skb, HDD_LLC_HDR_LEN);

      /* Create space for Ethernet header */ 
      skb_push(skb, HDD_MAC_HDR_SIZE*2);
      memcpy(&skb->data[0], da, HDD_MAC_HDR_SIZE);
      memcpy(&skb->data[HDD_DEST_ADDR_OFFSET], sa, HDD_MAC_HDR_SIZE);

      /* Only EAPOL Data packets are allowed through monitor interface */ 
      if (vos_be16_to_cpu(
         (*(unsigned short*)&skb->data[HDD_ETHERTYPE_802_1_X_FRAME_OFFSET]) ) 
                                                     != HDD_ETHERTYPE_802_1_X)
      {
         VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
           "%s: Not a Eapol packet. Drop this frame", __func__);
         //If not EAPOL frames, drop them.
         kfree_skb(skb);
         return NETDEV_TX_OK;
      }

      skb->protocol = htons(HDD_ETHERTYPE_802_1_X);
 
      hdd_hostapd_select_queue(pPgBkAdapter->dev, skb);
      return hdd_softap_hard_start_xmit( skb, pPgBkAdapter->dev );
   }
   else
   {
      VOS_STATUS status;
      WLANTL_ACEnumType ac = 0;
      skb_list_node_t *pktNode = NULL;
      v_SIZE_t pktListSize = 0;

      spin_lock(&pAdapter->wmm_tx_queue[ac].lock);
      //If we have already reached the max queue size, disable the TX queue
      if ( pAdapter->wmm_tx_queue[ac].count == pAdapter->wmm_tx_queue[ac].max_size)
      {
         /* We want to process one packet at a time, so lets disable all TX queues
           * and re-enable the queues once we get TX feedback for this packet */
         netif_tx_stop_all_queues(pAdapter->dev);
         pAdapter->isTxSuspended[ac] = VOS_TRUE;
         spin_unlock(&pAdapter->wmm_tx_queue[ac].lock);      
         return NETDEV_TX_BUSY;   
      }
      spin_unlock(&pAdapter->wmm_tx_queue[ac].lock);      

      //Use the skb->cb field to hold the list node information
      pktNode = (skb_list_node_t *)&skb->cb;

      //Stick the OS packet inside this node.
      pktNode->skb = skb;

      INIT_LIST_HEAD(&pktNode->anchor);

      //Insert the OS packet into the appropriate AC queue
      spin_lock(&pAdapter->wmm_tx_queue[ac].lock);
      status = hdd_list_insert_back_size( &pAdapter->wmm_tx_queue[ac],
                                          &pktNode->anchor, &pktListSize );
      spin_unlock(&pAdapter->wmm_tx_queue[ac].lock);

      if ( !VOS_IS_STATUS_SUCCESS( status ) )
      {
         VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
                "%s:Insert Tx queue failed. Pkt dropped", __FUNCTION__);
         kfree_skb(skb);
         return NETDEV_TX_OK;
      }

      if ( pktListSize == 1 )
      {
         /* In this context we cannot acquire any mutex etc. And to transmit 
          * this packet we need to call SME API. So to take care of this we will
          * schedule a workqueue 
          */
         schedule_work(&pPgBkAdapter->monTxWorkQueue);
      }
      return NETDEV_TX_OK;
   }
 
fail:
   VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN,
           "%s: Packet Rcvd at Monitor interface is not proper,"
           " Dropping the packet",
            __func__);
   kfree_skb(skb);
   return NETDEV_TX_OK;
}
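
As a hedged sketch, the radiotap sanity checks done inline in hdd_mon_hard_start_xmit() can be factored into one helper; the name and error-code choices are assumptions:

/* Sketch (assumption): validate and strip the radiotap header in front of a
 * monitor-injected frame; returns its length or a negative error code. */
static int mon_strip_radiotap(struct sk_buff *skb)
{
	struct ieee80211_radiotap_header *rtap_hdr =
		(struct ieee80211_radiotap_header *)skb->data;
	unsigned int rt_hdr_len;

	if (skb->len < sizeof(*rtap_hdr))
		return -EINVAL;		/* too short to be possibly valid */
	if (rtap_hdr->it_version)
		return -EINVAL;		/* only version 0 is supported */

	rt_hdr_len = ieee80211_get_radiotap_len(skb->data);
	if (skb->len < rt_hdr_len)
		return -EINVAL;

	skb_pull(skb, rt_hdr_len);
	return rt_hdr_len;
}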