Example No. 1
static int zd_mac_tx(struct zd_mac *mac, struct ieee80211_txb *txb, int pri)
{
	int i, r;
	struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac);

	for (i = 0; i < txb->nr_frags; i++) {
		struct sk_buff *skb = txb->fragments[i];

		r = fill_ctrlset(mac, txb, i);
		if (r) {
			ieee->stats.tx_dropped++;
			return r;
		}
		r = zd_usb_tx(&mac->chip.usb, skb->data, skb->len);
		if (r) {
			ieee->stats.tx_dropped++;
			return r;
		}
	}

	/* FIXME: shouldn't this be handled by the upper layers? */
	mac->netdev->trans_start = jiffies;

	ieee80211_txb_free(txb);
	return 0;
}
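
For orientation, here is a minimal sketch of the TXB that zd_mac_tx() walks. The field set is inferred from how the examples in this listing use it; the layout and types are assumptions, not any one kernel's definition.

/* Hypothetical TXB layout, inferred from its use in this listing. */
struct ieee80211_txb_sketch {
	u8 nr_frags;		/* number of fragment skbs in fragments[] */
	u8 encrypted;		/* payload is (or will be) encrypted */
	u8 rts_included;	/* fragments[0] carries an RTS control frame */
	u8 queue_index;		/* WMM AC queue (softmac variants only) */
	u16 frag_size;		/* allocated size of each fragment skb */
	u16 payload_size;	/* MSDU bytes carried across all fragments */
	struct sk_buff *fragments[0];	/* one skb per 802.11 MPDU */
};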
Example No. 2
/* Incoming skb is converted to a txb which consists of
 * a block of 802.11 fragment packets (stored as skbs) */
int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct ieee80211_device *ieee = netdev_priv(dev);
    struct ieee80211_txb *txb = NULL;
    struct ieee80211_hdr_3addrqos *frag_hdr;
    int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size,
        rts_required;
    unsigned long flags;
    struct net_device_stats *stats = &ieee->stats;
    int ether_type, encrypt, host_encrypt, host_encrypt_msdu, host_build_iv;
    int bytes, fc, hdr_len;
    struct sk_buff *skb_frag;
    struct ieee80211_hdr_3addrqos header = {/* Ensure zero initialized */
        .duration_id = 0,
        .seq_ctl = 0,
        .qos_ctl = 0
    };
    u8 dest[ETH_ALEN], src[ETH_ALEN];
    struct ieee80211_crypt_data *crypt;
    int priority = skb->priority;
    int snapped = 0;

    if (ieee->is_queue_full && (*ieee->is_queue_full) (dev, priority))
        return NETDEV_TX_BUSY;

    spin_lock_irqsave(&ieee->lock, flags);

    /* If there is no driver handler to take the TXB, don't bother
     * creating it... */
    if (!ieee->hard_start_xmit) {
        printk(KERN_WARNING "%s: No xmit handler.\n", ieee->dev->name);
        goto success;
    }

    if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
        printk(KERN_WARNING "%s: skb too small (%d).\n",
               ieee->dev->name, skb->len);
        goto success;
    }

    ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

    crypt = ieee->crypt[ieee->tx_keyidx];

    encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
              ieee->sec.encrypt;

    host_encrypt = ieee->host_encrypt && encrypt && crypt;
    host_encrypt_msdu = ieee->host_encrypt_msdu && encrypt && crypt;
    host_build_iv = ieee->host_build_iv && encrypt && crypt;

    if (!encrypt && ieee->ieee802_1x &&
            ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
        stats->tx_dropped++;
        goto success;
    }

    /* Save source and destination addresses */
    memcpy(dest, skb->data, ETH_ALEN);
    memcpy(src, skb->data + ETH_ALEN, ETH_ALEN);

    if (host_encrypt || host_build_iv)
        fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
             IEEE80211_FCTL_PROTECTED;
    else
        fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;

    if (ieee->iw_mode == IW_MODE_INFRA) {
        fc |= IEEE80211_FCTL_TODS;
        /* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
        memcpy(header.addr1, ieee->bssid, ETH_ALEN);
        memcpy(header.addr2, src, ETH_ALEN);
        memcpy(header.addr3, dest, ETH_ALEN);
    } else if (ieee->iw_mode == IW_MODE_ADHOC) {
        /* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */
        memcpy(header.addr1, dest, ETH_ALEN);
        memcpy(header.addr2, src, ETH_ALEN);
        memcpy(header.addr3, ieee->bssid, ETH_ALEN);
    }
    hdr_len = IEEE80211_3ADDR_LEN;

    if (ieee->is_qos_active && ieee->is_qos_active(dev, skb)) {
        fc |= IEEE80211_STYPE_QOS_DATA;
        hdr_len += 2;

        skb->priority = ieee80211_classify(skb);
        header.qos_ctl |= skb->priority & IEEE80211_QCTL_TID;
    }
    header.frame_ctl = cpu_to_le16(fc);

    /* Advance the SKB to the start of the payload */
    skb_pull(skb, sizeof(struct ethhdr));

    /* Determine total amount of storage required for TXB packets */
    bytes = skb->len + SNAP_SIZE + sizeof(u16);

    /* Encrypt msdu first on the whole data packet. */
    if ((host_encrypt || host_encrypt_msdu) &&
            crypt && crypt->ops && crypt->ops->encrypt_msdu) {
        int res = 0;
        int len = bytes + hdr_len + crypt->ops->extra_msdu_prefix_len +
                  crypt->ops->extra_msdu_postfix_len;
        struct sk_buff *skb_new = dev_alloc_skb(len);

        if (unlikely(!skb_new))
            goto failed;

        skb_reserve(skb_new, crypt->ops->extra_msdu_prefix_len);
        memcpy(skb_put(skb_new, hdr_len), &header, hdr_len);
        snapped = 1;
        ieee80211_copy_snap(skb_put(skb_new, SNAP_SIZE + sizeof(u16)),
                            ether_type);
        memcpy(skb_put(skb_new, skb->len), skb->data, skb->len);
        res = crypt->ops->encrypt_msdu(skb_new, hdr_len, crypt->priv);
        if (res < 0) {
            IEEE80211_ERROR("msdu encryption failed\n");
            dev_kfree_skb_any(skb_new);
            goto failed;
        }
        dev_kfree_skb_any(skb);
        skb = skb_new;
        bytes += crypt->ops->extra_msdu_prefix_len +
                 crypt->ops->extra_msdu_postfix_len;
        skb_pull(skb, hdr_len);
    }

    if (host_encrypt || ieee->host_open_frag) {
        /* Determine fragmentation size based on destination (multicast
         * and broadcast are not fragmented) */
        if (is_multicast_ether_addr(dest) ||
                is_broadcast_ether_addr(dest))
            frag_size = MAX_FRAG_THRESHOLD;
        else
            frag_size = ieee->fts;

        /* Determine amount of payload per fragment.  Regardless of if
         * this stack is providing the full 802.11 header, one will
         * eventually be affixed to this fragment -- so we must account
         * for it when determining the amount of payload space. */
        bytes_per_frag = frag_size - IEEE80211_3ADDR_LEN;
        if (ieee->config &
                (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
            bytes_per_frag -= IEEE80211_FCS_LEN;

        /* Each fragment may need to have room for encryption
         * pre/postfix */
        if (host_encrypt)
            bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
                              crypt->ops->extra_mpdu_postfix_len;

        /* Number of fragments is the total
         * bytes_per_frag / payload_per_fragment */
        nr_frags = bytes / bytes_per_frag;
        bytes_last_frag = bytes % bytes_per_frag;
        if (bytes_last_frag)
            nr_frags++;
        else
            bytes_last_frag = bytes_per_frag;
    } else {
        nr_frags = 1;
        bytes_per_frag = bytes_last_frag = bytes;
        frag_size = bytes + IEEE80211_3ADDR_LEN;
    }

    rts_required = (frag_size > ieee->rts
                    && ieee->config & CFG_IEEE80211_RTS);
    if (rts_required)
        nr_frags++;

    /* When we allocate the TXB we allocate enough space for the reserve
     * and full fragment bytes (bytes_per_frag doesn't include prefix,
     * postfix, header, FCS, etc.) */
    txb = ieee80211_alloc_txb(nr_frags, frag_size,
                              ieee->tx_headroom, GFP_ATOMIC);
    if (unlikely(!txb)) {
        printk(KERN_WARNING "%s: Could not allocate TXB\n",
               ieee->dev->name);
        goto failed;
    }
    txb->encrypted = encrypt;
    if (host_encrypt)
        txb->payload_size = frag_size * (nr_frags - 1) +
                            bytes_last_frag;
    else
        txb->payload_size = bytes;

    if (rts_required) {
        skb_frag = txb->fragments[0];
        frag_hdr =
            (struct ieee80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len);

        /*
         * Set header frame_ctl to the RTS.
         */
        header.frame_ctl =
            cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
        memcpy(frag_hdr, &header, hdr_len);

        /*
         * Restore header frame_ctl to the original data setting.
         */
        header.frame_ctl = cpu_to_le16(fc);

        if (ieee->config &
                (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
            skb_put(skb_frag, 4);

        txb->rts_included = 1;
        i = 1;
    } else
        i = 0;

    for (; i < nr_frags; i++) {
        skb_frag = txb->fragments[i];

        if (host_encrypt || host_build_iv)
            skb_reserve(skb_frag,
                        crypt->ops->extra_mpdu_prefix_len);

        frag_hdr =
            (struct ieee80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
        memcpy(frag_hdr, &header, hdr_len);

        /* If this is not the last fragment, then add the MOREFRAGS
         * bit to the frame control */
        if (i != nr_frags - 1) {
            frag_hdr->frame_ctl =
                cpu_to_le16(fc | IEEE80211_FCTL_MOREFRAGS);
            bytes = bytes_per_frag;
        } else {
            /* The last fragment takes the remaining length */
            bytes = bytes_last_frag;
        }

        if (i == 0 && !snapped) {
            ieee80211_copy_snap(skb_put
                                (skb_frag, SNAP_SIZE + sizeof(u16)),
                                ether_type);
            bytes -= SNAP_SIZE + sizeof(u16);
        }

        memcpy(skb_put(skb_frag, bytes), skb->data, bytes);

        /* Advance the SKB... */
        skb_pull(skb, bytes);

        /* Encryption routine will move the header forward in order
         * to insert the IV between the header and the payload */
        if (host_encrypt)
            ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
        else if (host_build_iv) {
            struct ieee80211_crypt_data *crypt;

            crypt = ieee->crypt[ieee->tx_keyidx];
            atomic_inc(&crypt->refcnt);
            if (crypt->ops->build_iv)
                crypt->ops->build_iv(skb_frag, hdr_len,
                                     ieee->sec.keys[ieee->sec.active_key],
                                     ieee->sec.key_sizes[ieee->sec.active_key],
                                     crypt->priv);
            atomic_dec(&crypt->refcnt);
        }

        if (ieee->config &
                (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
            skb_put(skb_frag, 4);
    }

success:
    spin_unlock_irqrestore(&ieee->lock, flags);

    dev_kfree_skb_any(skb);

    if (txb) {
        int ret = (*ieee->hard_start_xmit) (txb, dev, priority);
        if (ret == 0) {
            stats->tx_packets++;
            stats->tx_bytes += txb->payload_size;
            return 0;
        }

        if (ret == NETDEV_TX_BUSY) {
            printk(KERN_ERR "%s: NETDEV_TX_BUSY returned; "
                   "driver should report queue full via "
                   "ieee_device->is_queue_full.\n",
                   ieee->dev->name);
        }

        ieee80211_txb_free(txb);
    }

    return 0;

failed:
    spin_unlock_irqrestore(&ieee->lock, flags);
    netif_stop_queue(dev);
    stats->tx_errors++;
    return 1;
}
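
The fragment accounting above (nr_frags, bytes_per_frag, bytes_last_frag) is a ceiling division with the remainder carried in a short last fragment. A standalone sketch of the same arithmetic, with a hypothetical helper name:

/* Hypothetical helper mirroring the fragment arithmetic above.
 * E.g. bytes = 1494, frag_size = 1000, hdr_len = 24 gives
 * bytes_per_frag = 976, nr_frags = 2, bytes_last_frag = 518
 * (976 + 518 = 1494). */
static int count_frags(int bytes, int frag_size, int hdr_len,
                       int *bytes_per_frag, int *bytes_last_frag)
{
    int nr_frags;

    *bytes_per_frag = frag_size - hdr_len;  /* payload room per MPDU */
    nr_frags = bytes / *bytes_per_frag;
    *bytes_last_frag = bytes % *bytes_per_frag;
    if (*bytes_last_frag)
        nr_frags++;             /* remainder rides in a short last fragment */
    else
        *bytes_last_frag = *bytes_per_frag;

    return nr_frags;
}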

/* Incoming 802.11 structure is converted to a TXB,
 * a block of 802.11 fragment packets (stored as skbs) */
int ieee80211_tx_frame(struct ieee80211_device *ieee,
                       struct ieee80211_hdr *frame, int hdr_len, int total_len,
                       int encrypt_mpdu)
{
    struct ieee80211_txb *txb = NULL;
    unsigned long flags;
    struct net_device_stats *stats = &ieee->stats;
    struct sk_buff *skb_frag;
    int priority = -1;
    int fraglen = total_len;
    int headroom = ieee->tx_headroom;
    struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx];

    spin_lock_irqsave(&ieee->lock, flags);

    if (encrypt_mpdu && (!ieee->sec.encrypt || !crypt))
        encrypt_mpdu = 0;

    /* If there is no driver handler to take the TXB, don't bother
     * creating it... */
    if (!ieee->hard_start_xmit) {
        printk(KERN_WARNING "%s: No xmit handler.\n", ieee->dev->name);
        goto success;
    }

    if (unlikely(total_len < 24)) {
        printk(KERN_WARNING "%s: skb too small (%d).\n",
               ieee->dev->name, total_len);
        goto success;
    }

    if (encrypt_mpdu) {
        frame->frame_ctl |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
        fraglen += crypt->ops->extra_mpdu_prefix_len +
                   crypt->ops->extra_mpdu_postfix_len;
        headroom += crypt->ops->extra_mpdu_prefix_len;
    }

    /* When we allocate the TXB we allocate enough space for the reserve
     * and full fragment bytes (bytes_per_frag doesn't include prefix,
     * postfix, header, FCS, etc.) */
    txb = ieee80211_alloc_txb(1, fraglen, headroom, GFP_ATOMIC);
    if (unlikely(!txb)) {
        printk(KERN_WARNING "%s: Could not allocate TXB\n",
               ieee->dev->name);
        goto failed;
    }
    txb->encrypted = 0;
    txb->payload_size = fraglen;

    skb_frag = txb->fragments[0];

    memcpy(skb_put(skb_frag, total_len), frame, total_len);

    if (ieee->config &
            (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
        skb_put(skb_frag, 4);

    /* To avoid overcomplicating things, we do the corner-case frame
     * encryption in software. The only real situation where encryption is
     * needed here is during software-based shared key authentication. */
    if (encrypt_mpdu)
        ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);

success:
    spin_unlock_irqrestore(&ieee->lock, flags);

    if (txb) {
        if ((*ieee->hard_start_xmit) (txb, ieee->dev, priority) == 0) {
            stats->tx_packets++;
            stats->tx_bytes += txb->payload_size;
            return 0;
        }
        ieee80211_txb_free(txb);
    }
    return 0;

failed:
    spin_unlock_irqrestore(&ieee->lock, flags);
    stats->tx_errors++;
    return 1;
}

EXPORT_SYMBOL(ieee80211_tx_frame);
EXPORT_SYMBOL(ieee80211_txb_free);
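
A hedged usage sketch for ieee80211_tx_frame() above: building a bare authentication frame, the one case the comment says needs software encryption. Field names follow this listing; a real softmac also fills the duration and sequence fields, and the helper name here is hypothetical.

static int tx_auth_frame_sketch(struct ieee80211_device *ieee,
                                const u8 *da, int encrypt_mpdu)
{
    struct ieee80211_hdr_3addr hdr;

    memset(&hdr, 0, sizeof(hdr));
    hdr.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH);
    memcpy(hdr.addr1, da, ETH_ALEN);                  /* DA */
    memcpy(hdr.addr2, ieee->dev->dev_addr, ETH_ALEN); /* SA */
    memcpy(hdr.addr3, ieee->bssid, ETH_ALEN);         /* BSSID */

    return ieee80211_tx_frame(ieee, (struct ieee80211_hdr *)&hdr,
                              sizeof(hdr), sizeof(hdr), encrypt_mpdu);
}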
Example No. 3
int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee80211_device *ieee = netdev_priv(dev);
	struct ieee80211_txb *txb = NULL;
	struct ieee80211_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
	unsigned long flags;
	struct net_device_stats *stats = &ieee->stats;
	int ether_type = 0, encrypt;
	int bytes, fc, qos_ctl = 0, hdr_len;
	struct sk_buff *skb_frag;
	struct ieee80211_hdr_3addrqos header = { /* Ensure zero initialized */
		.duration_id = 0,
		.seq_ctl = 0,
		.qos_ctl = 0
	};
	u8 dest[ETH_ALEN], src[ETH_ALEN];
	int qos_actived = ieee->current_network.qos_data.active;

	struct ieee80211_crypt_data* crypt;

	cb_desc *tcb_desc;

	spin_lock_irqsave(&ieee->lock, flags);

	/* If there is no driver handler to take the TXB, don't bother
	 * creating it... */
	if ((!ieee->hard_start_xmit && !(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE))||
	   ((!ieee->softmac_data_hard_start_xmit && (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
//		printk(KERN_WARNING "%s: No xmit handler.\n",
;
		goto success;
	}


	if(likely(ieee->raw_tx == 0)){
		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
//			printk(KERN_WARNING "%s: skb too small (%d).\n",
;
			goto success;
		}

		memset(skb->cb, 0, sizeof(skb->cb));
		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

		crypt = ieee->crypt[ieee->tx_keyidx];

		encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
			ieee->host_encrypt && crypt && crypt->ops;

		if (!encrypt && ieee->ieee802_1x &&
		ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
			stats->tx_dropped++;
			goto success;
		}
	#ifdef CONFIG_IEEE80211_DEBUG
		if (crypt && !encrypt && ether_type == ETH_P_PAE) {
			struct eapol *eap = (struct eapol *)(skb->data +
				sizeof(struct ethhdr) - SNAP_SIZE - sizeof(u16));
			IEEE80211_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n",
				eap_get_type(eap->type));
		}
	#endif

		/* Save source and destination addresses */
		memcpy(&dest, skb->data, ETH_ALEN);
		memcpy(&src, skb->data+ETH_ALEN, ETH_ALEN);

		/* Advance the SKB to the start of the payload */
		skb_pull(skb, sizeof(struct ethhdr));

		/* Determine total amount of storage required for TXB packets */
		bytes = skb->len + SNAP_SIZE + sizeof(u16);

		if (encrypt)
			fc = IEEE80211_FTYPE_DATA | IEEE80211_FCTL_WEP;
		else
			fc = IEEE80211_FTYPE_DATA;

		//if(ieee->current_network.QoS_Enable)
		if(qos_actived)
			fc |= IEEE80211_STYPE_QOS_DATA;
		else
			fc |= IEEE80211_STYPE_DATA;

		if (ieee->iw_mode == IW_MODE_INFRA) {
			fc |= IEEE80211_FCTL_TODS;
			/* To DS: Addr1 = BSSID, Addr2 = SA,
			Addr3 = DA */
			memcpy(&header.addr1, ieee->current_network.bssid, ETH_ALEN);
			memcpy(&header.addr2, &src, ETH_ALEN);
			memcpy(&header.addr3, &dest, ETH_ALEN);
		} else if (ieee->iw_mode == IW_MODE_ADHOC) {
			/* not From/To DS: Addr1 = DA, Addr2 = SA,
			Addr3 = BSSID */
			memcpy(&header.addr1, dest, ETH_ALEN);
			memcpy(&header.addr2, src, ETH_ALEN);
			memcpy(&header.addr3, ieee->current_network.bssid, ETH_ALEN);
		}

		header.frame_ctl = cpu_to_le16(fc);

		/* Determine fragmentation size based on destination (multicast
		* and broadcast are not fragmented) */
		if (is_multicast_ether_addr(header.addr1) ||
		is_broadcast_ether_addr(header.addr1)) {
			frag_size = MAX_FRAG_THRESHOLD;
			qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
		}
		else {
			frag_size = ieee->fts;//default:392
			qos_ctl = 0;
		}

		//if (ieee->current_network.QoS_Enable)
		if(qos_actived)
		{
			hdr_len = IEEE80211_3ADDR_LEN + 2;

			skb->priority = ieee80211_classify(skb, &ieee->current_network);
			qos_ctl |= skb->priority; //set in the ieee80211_classify
			header.qos_ctl = cpu_to_le16(qos_ctl & IEEE80211_QOS_TID);
		} else {
			hdr_len = IEEE80211_3ADDR_LEN;
		}
		/* Determine amount of payload per fragment.  Regardless of if
		* this stack is providing the full 802.11 header, one will
		* eventually be affixed to this fragment -- so we must account for
		* it when determining the amount of payload space. */
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		(CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
			bytes_per_frag -= IEEE80211_FCS_LEN;

		/* Each fragment may need to have room for encryption pre/postfix */
		if (encrypt)
			bytes_per_frag -= crypt->ops->extra_prefix_len +
				crypt->ops->extra_postfix_len;

		/* Number of fragments is the total bytes_per_frag /
		* payload_per_fragment */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;

		/* When we allocate the TXB we allocate enough space for the reserve
		* and full fragment bytes (bytes_per_frag doesn't include prefix,
		* postfix, header, FCS, etc.) */
		txb = ieee80211_alloc_txb(nr_frags, frag_size + ieee->tx_headroom, GFP_ATOMIC);
		if (unlikely(!txb)) {
//			printk(KERN_WARNING "%s: Could not allocate TXB\n",
;
			goto failed;
		}
		txb->encrypted = encrypt;
		txb->payload_size = bytes;

		//if (ieee->current_network.QoS_Enable)
		if(qos_actived)
		{
			txb->queue_index = UP2AC(skb->priority);
		} else {
			txb->queue_index = WME_AC_BK;
		}



		for (i = 0; i < nr_frags; i++) {
			skb_frag = txb->fragments[i];
			tcb_desc = (cb_desc *)(skb_frag->cb + MAX_DEV_ADDR_SIZE);
			if(qos_actived){
				skb_frag->priority = skb->priority;//UP2AC(skb->priority);
				tcb_desc->queue_index =  UP2AC(skb->priority);
			} else {
				skb_frag->priority = WME_AC_BK;
				tcb_desc->queue_index = WME_AC_BK;
			}
			skb_reserve(skb_frag, ieee->tx_headroom);

			if (encrypt){
				if (ieee->hwsec_active)
					tcb_desc->bHwSec = 1;
				else
					tcb_desc->bHwSec = 0;
				skb_reserve(skb_frag, crypt->ops->extra_prefix_len);
			}
			else
			{
				tcb_desc->bHwSec = 0;
			}
			frag_hdr = (struct ieee80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
			memcpy(frag_hdr, &header, hdr_len);

			/* If this is not the last fragment, then add the MOREFRAGS
			* bit to the frame control */
			if (i != nr_frags - 1) {
				frag_hdr->frame_ctl = cpu_to_le16(
					fc | IEEE80211_FCTL_MOREFRAGS);
				bytes = bytes_per_frag;

			} else {
				/* The last fragment takes the remaining length */
				bytes = bytes_last_frag;
			}
			//if(ieee->current_network.QoS_Enable)
			if(qos_actived)
			{
				// +1: QoS frames use the per-AC sequence counters (index 0 is the non-QoS counter) 2006/7/12
				frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[UP2AC(skb->priority)+1]<<4 | i);
			} else {
				frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
			}

			/* Put a SNAP header on the first fragment */
			if (i == 0) {
				ieee80211_put_snap(
					skb_put(skb_frag, SNAP_SIZE + sizeof(u16)),
					ether_type);
				bytes -= SNAP_SIZE + sizeof(u16);
			}

			memcpy(skb_put(skb_frag, bytes), skb->data, bytes);

			/* Advance the SKB... */
			skb_pull(skb, bytes);

			/* Encryption routine will move the header forward in order
			* to insert the IV between the header and the payload */
			if (encrypt)
				ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
			if (ieee->config &
			(CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
				skb_put(skb_frag, 4);
		}

		if(qos_actived)
		{
		  if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
			ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
		  else
			ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
		} else {
  		  if (ieee->seq_ctrl[0] == 0xFFF)
			ieee->seq_ctrl[0] = 0;
		  else
			ieee->seq_ctrl[0]++;
		}
	}else{
		if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) {
//			printk(KERN_WARNING "%s: skb too small (%d).\n",
;
			goto success;
		}

		txb = ieee80211_alloc_txb(1, skb->len, GFP_ATOMIC);
		if(!txb){
//			printk(KERN_WARNING "%s: Could not allocate TXB\n",
;
			goto failed;
		}

		txb->encrypted = 0;
		txb->payload_size = skb->len;
		memcpy(skb_put(txb->fragments[0],skb->len), skb->data, skb->len);
	}

 success:
//WB: fill the data tcb_desc here. Only the first fragment is considered; this needs to change and may be moved elsewhere.
	if (txb)
	{
		cb_desc *tcb_desc = (cb_desc *)(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
		tcb_desc->bTxEnableFwCalcDur = 1;
		if (is_multicast_ether_addr(header.addr1))
			tcb_desc->bMulticast = 1;
		if (is_broadcast_ether_addr(header.addr1))
			tcb_desc->bBroadcast = 1;
		ieee80211_txrate_selectmode(ieee, tcb_desc);
		if ( tcb_desc->bMulticast ||  tcb_desc->bBroadcast)
			tcb_desc->data_rate = ieee->basic_rate;
		else
			//tcb_desc->data_rate = CURRENT_RATE(ieee->current_network.mode, ieee->rate, ieee->HTCurrentOperaRate);
			tcb_desc->data_rate = CURRENT_RATE(ieee->mode, ieee->rate, ieee->HTCurrentOperaRate);
		ieee80211_qurey_ShortPreambleMode(ieee, tcb_desc);
		ieee80211_tx_query_agg_cap(ieee, txb->fragments[0], tcb_desc);
		ieee80211_query_HTCapShortGI(ieee, tcb_desc);
		ieee80211_query_BandwidthMode(ieee, tcb_desc);
		ieee80211_query_protectionmode(ieee, tcb_desc, txb->fragments[0]);
		ieee80211_query_seqnum(ieee, txb->fragments[0], header.addr1);
//		IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, txb->fragments[0]->data, txb->fragments[0]->len);
		//IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, tcb_desc, sizeof(cb_desc));
	}
	spin_unlock_irqrestore(&ieee->lock, flags);
	dev_kfree_skb_any(skb);
	if (txb) {
		if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE){
			ieee80211_softmac_xmit(txb, ieee);
		}else{
			if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
				stats->tx_packets++;
				stats->tx_bytes += txb->payload_size;
				return 0;
			}
			ieee80211_txb_free(txb);
		}
	}

	return 0;

 failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	stats->tx_errors++;
	return 1;

}
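
A sketch of the sequence-control packing used above: the 802.11 Sequence Control field carries a 4-bit fragment number in bits 0-3 and a 12-bit sequence number in bits 4-15, which is why the per-AC counters wrap at 0xFFF. The helper name is hypothetical.

/* e.g. pack_seq_ctl(0x123, 2) == 0x1232 */
static inline u16 pack_seq_ctl(u16 seq, u8 frag)
{
	return (u16)(((seq & 0x0FFF) << 4) | (frag & 0x0F));
}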

EXPORT_SYMBOL(ieee80211_txb_free);
Example No. 4
/* SKBs are added to the ieee->tx_queue. */
int ieee80211_xmit(struct sk_buff *skb,
		   struct net_device *dev)
{
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
	struct ieee80211_device *ieee = netdev_priv(dev);
#else
	struct ieee80211_device *ieee = (struct ieee80211_device *)dev->priv;
#endif
	struct ieee80211_txb *txb = NULL;
	struct ieee80211_hdr *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
	unsigned long flags;
	struct net_device_stats *stats = &ieee->stats;
	int ether_type, encrypt;
	int bytes, fc, hdr_len;
	struct sk_buff *skb_frag;
	struct ieee80211_hdr header = { /* Ensure zero initialized */
		.duration_id = 0,
		.seq_ctl = 0
	};
	u8 dest[ETH_ALEN], src[ETH_ALEN];
	int pend;


	struct ieee80211_crypt_data* crypt;

	spin_lock_irqsave(&ieee->lock, flags);

	/* If there is no driver handler to take the TXB, don't bother
	 * creating it... */
	if ((!ieee->hard_start_xmit && !(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE))||
	   ((!ieee->softmac_data_hard_start_xmit && (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
		printk(KERN_WARNING "%s: No xmit handler.\n",
			   ieee->dev->name);
		goto success;
	}

	if(likely(ieee->raw_tx == 0)){

		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			ieee->dev->name, skb->len);
			goto success;
		}


		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

		crypt = ieee->crypt[ieee->tx_keyidx];

		encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
			ieee->host_encrypt && crypt && crypt->ops;

		if (!encrypt && ieee->ieee802_1x &&
		ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
			stats->tx_dropped++;
			goto success;
		}

	#ifdef CONFIG_IEEE80211_DEBUG
		if (crypt && !encrypt && ether_type == ETH_P_PAE) {
			struct eapol *eap = (struct eapol *)(skb->data +
				sizeof(struct ethhdr) - SNAP_SIZE - sizeof(u16));
			IEEE80211_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n",
				eap_get_type(eap->type));
		}
	#endif

		/* Save source and destination addresses */
		memcpy(&dest, skb->data, ETH_ALEN);
		memcpy(&src, skb->data+ETH_ALEN, ETH_ALEN);



		/* Advance the SKB to the start of the payload */
		skb_pull(skb, sizeof(struct ethhdr));

		/* Determine total amount of storage required for TXB packets */
		bytes = skb->len + SNAP_SIZE + sizeof(u16);

		if (encrypt)
			fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
				IEEE80211_FCTL_WEP;
		else
			fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;

		if (ieee->iw_mode == IW_MODE_INFRA) {
			fc |= IEEE80211_FCTL_TODS;
			/* To DS: Addr1 = BSSID, Addr2 = SA,
			Addr3 = DA */
			memcpy(&header.addr1, ieee->current_network.bssid, ETH_ALEN);
			memcpy(&header.addr2, &src, ETH_ALEN);
			memcpy(&header.addr3, &dest, ETH_ALEN);
		} else if (ieee->iw_mode == IW_MODE_ADHOC) {
			/* not From/To DS: Addr1 = DA, Addr2 = SA,
			Addr3 = BSSID */
			memcpy(&header.addr1, dest, ETH_ALEN);
			memcpy(&header.addr2, src, ETH_ALEN);
			memcpy(&header.addr3, ieee->current_network.bssid, ETH_ALEN);
		}
		header.frame_ctl = cpu_to_le16(fc);

		hdr_len = IEEE80211_3ADDR_LEN;

	/* Determine fragmentation size based on destination (multicast
		* and broadcast are not fragmented) */
		if (is_multicast_ether_addr(dest) ||
		is_broadcast_ether_addr(dest))
			frag_size = MAX_FRAG_THRESHOLD;
		else
			frag_size = ieee->fts;

		/* Determine amount of payload per fragment.  Regardless of if
		* this stack is providing the full 802.11 header, one will
		* eventually be affixed to this fragment -- so we must account for
		* it when determining the amount of payload space. */
		bytes_per_frag = frag_size - IEEE80211_3ADDR_LEN;
		if (ieee->config &
		(CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
			bytes_per_frag -= IEEE80211_FCS_LEN;

		/* Each fragment may need to have room for encryption pre/postfix */
		if (encrypt)
			bytes_per_frag -= crypt->ops->extra_prefix_len +
					crypt->ops->extra_postfix_len;

		/* Number of fragments is the total bytes_per_frag /
		* payload_per_fragment */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;

		/* When we allocate the TXB we allocate enough space for the reserve
		* and full fragment bytes (bytes_per_frag doesn't include prefix,
		* postfix, header, FCS, etc.) */

		pend = atomic_read(&ieee->tx_pending_txb);
		if(pend > (MAX_TX_SKB - 1)) {
			goto failed;
		} else {
			atomic_inc(&ieee->tx_pending_txb);
		}
		txb = ieee->alloc_txb[ieee->tx_skb_index];
		ieee->tx_skb_index = (ieee->tx_skb_index + 1) % MAX_TX_SKB;

		if (unlikely(!txb)) {
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			ieee->dev->name);
			goto failed;
		}

		for(i = 0; i < txb->nr_frags; i++) {
			txb->fragments[i]->len = 0;
			txb->fragments[i]->data = txb->fragments[i]->head + 16;
			txb->fragments[i]->tail = txb->fragments[i]->data;
		}

		txb->nr_frags = nr_frags;
		txb->frag_size = frag_size;
		txb->encrypted = encrypt;
		txb->payload_size = bytes;

		for (i = 0; i < nr_frags; i++) {
			skb_frag = txb->fragments[i];

			if (encrypt)
				skb_reserve(skb_frag, crypt->ops->extra_prefix_len);

			frag_hdr = (struct ieee80211_hdr *)skb_put(skb_frag, hdr_len);
			memcpy(frag_hdr, &header, hdr_len);

			/* If this is not the last fragment, then add the MOREFRAGS
			* bit to the frame control */
			if (i != nr_frags - 1) {
				frag_hdr->frame_ctl = cpu_to_le16(
					fc | IEEE80211_FCTL_MOREFRAGS);
				bytes = bytes_per_frag;

			} else {
				/* The last fragment takes the remaining length */
				bytes = bytes_last_frag;
			}

			frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl<<4 | i);


			/* Put a SNAP header on the first fragment */
			if (i == 0) {
				ieee80211_put_snap(
					skb_put(skb_frag, SNAP_SIZE + sizeof(u16)),
					ether_type);
				bytes -= SNAP_SIZE + sizeof(u16);
			}

			memcpy(skb_put(skb_frag, bytes), skb->data, bytes);

			/* Advance the SKB... */
			skb_pull(skb, bytes);

			/* Encryption routine will move the header forward in order
			* to insert the IV between the header and the payload */
			if (encrypt)
				ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
			if (ieee->config &
			(CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
				skb_put(skb_frag, 4);
		}
		if (ieee->seq_ctrl == 0xFFF)
			ieee->seq_ctrl = 0;
		else
			ieee->seq_ctrl++;
	}else{
		if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			ieee->dev->name, skb->len);
			goto success;
		}

		txb = ieee80211_alloc_txb(1, skb->len, GFP_ATOMIC);
		if(!txb){
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			ieee->dev->name);
			goto failed;
		}

		txb->encrypted = 0;
		txb->payload_size = skb->len;
		memcpy(skb_put(txb->fragments[0],skb->len), skb->data, skb->len);
	}

 success:
	spin_unlock_irqrestore(&ieee->lock, flags);
	dev_kfree_skb_any(skb);
	if (txb) {
		if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE){
			ieee80211_softmac_xmit(txb, ieee);
		}else{
			if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
				stats->tx_packets++;
				stats->tx_bytes += txb->payload_size;
				return 0;
			}
			ieee80211_txb_free(txb, txb->nr_frags);
		}
	}
	return 0;

 failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	stats->tx_errors++;
	return 1;

}
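
The variant above replaces per-packet allocation with a fixed ring of preallocated TXBs (alloc_txb[], tx_skb_index, tx_pending_txb). A minimal sketch of that bookkeeping, using the listing's names but a hypothetical helper:

static struct ieee80211_txb *take_ring_txb(struct ieee80211_device *ieee)
{
	struct ieee80211_txb *txb;

	if (atomic_read(&ieee->tx_pending_txb) > MAX_TX_SKB - 1)
		return NULL;	/* ring exhausted: caller fails the xmit */
	atomic_inc(&ieee->tx_pending_txb);

	txb = ieee->alloc_txb[ieee->tx_skb_index];
	ieee->tx_skb_index = (ieee->tx_skb_index + 1) % MAX_TX_SKB;
	return txb;
}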

#ifndef BUILT_IN_IEEE80211
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
EXPORT_SYMBOL(ieee80211_txb_free);
EXPORT_SYMBOL(ieee80211_alloc_txb);
#else
EXPORT_SYMBOL_NOVERS(ieee80211_txb_free);
EXPORT_SYMBOL_NOVERS(ieee80211_alloc_txb);
#endif
#endif
Example No. 5
int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
	struct ieee80211_device *ieee = netdev_priv(dev);
#else
	struct ieee80211_device *ieee = (struct ieee80211_device *)dev->priv;
#endif
	struct ieee80211_txb *txb = NULL;
	struct ieee80211_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
	unsigned long flags;
	struct net_device_stats *stats = &ieee->stats;
	int ether_type = 0, encrypt;
	int bytes, fc, qos_ctl = 0, hdr_len;
	struct sk_buff *skb_frag;
	struct ieee80211_hdr_3addrqos header = { /* Ensure zero initialized */
		.duration_id = 0,
		.seq_ctl = 0,
		.qos_ctl = 0
	};
	u8 dest[ETH_ALEN], src[ETH_ALEN];
	int qos_actived = ieee->current_network.qos_data.active;

	struct ieee80211_crypt_data* crypt;

	cb_desc *tcb_desc;

	spin_lock_irqsave(&ieee->lock, flags);

	/* If there is no driver handler to take the TXB, don't bother
	 * creating it... */
	if ((!ieee->hard_start_xmit && !(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE))||
	   ((!ieee->softmac_data_hard_start_xmit && (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
		printk(KERN_WARNING "%s: No xmit handler.\n",
		       ieee->dev->name);
		goto success;
	}


	if(likely(ieee->raw_tx == 0)){
		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			ieee->dev->name, skb->len);
			goto success;
		}

		memset(skb->cb, 0, sizeof(skb->cb));
		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

		crypt = ieee->crypt[ieee->tx_keyidx];

		encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
			ieee->host_encrypt && crypt && crypt->ops;

		if (!encrypt && ieee->ieee802_1x &&
		ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
			stats->tx_dropped++;
			goto success;
		}
	#ifdef CONFIG_IEEE80211_DEBUG
		if (crypt && !encrypt && ether_type == ETH_P_PAE) {
			struct eapol *eap = (struct eapol *)(skb->data +
				sizeof(struct ethhdr) - SNAP_SIZE - sizeof(u16));
			IEEE80211_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n",
				eap_get_type(eap->type));
		}
	#endif

		/* Save source and destination addresses */
		memcpy(&dest, skb->data, ETH_ALEN);
		memcpy(&src, skb->data+ETH_ALEN, ETH_ALEN);

		/* Advance the SKB to the start of the payload */
		skb_pull(skb, sizeof(struct ethhdr));

		/* Determine total amount of storage required for TXB packets */
		bytes = skb->len + SNAP_SIZE + sizeof(u16);

		if (encrypt)
			fc = IEEE80211_FTYPE_DATA | IEEE80211_FCTL_WEP;
		else
			fc = IEEE80211_FTYPE_DATA;

		
		if(qos_actived)
			fc |= IEEE80211_STYPE_QOS_DATA;
		else
			fc |= IEEE80211_STYPE_DATA;

		if (ieee->iw_mode == IW_MODE_INFRA) {
			fc |= IEEE80211_FCTL_TODS;
			
			memcpy(&header.addr1, ieee->current_network.bssid, ETH_ALEN);
			memcpy(&header.addr2, &src, ETH_ALEN);
			memcpy(&header.addr3, &dest, ETH_ALEN);
		} else if (ieee->iw_mode == IW_MODE_ADHOC) {
			
			memcpy(&header.addr1, dest, ETH_ALEN);
			memcpy(&header.addr2, src, ETH_ALEN);
			memcpy(&header.addr3, ieee->current_network.bssid, ETH_ALEN);
		}

		header.frame_ctl = cpu_to_le16(fc);

		
		if (is_multicast_ether_addr(header.addr1) ||
		is_broadcast_ether_addr(header.addr1)) {
			frag_size = MAX_FRAG_THRESHOLD;
			qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
		}
		else {
			frag_size = ieee->fts;
			qos_ctl = 0;
		}

		
		if(qos_actived)
		{
			hdr_len = IEEE80211_3ADDR_LEN + 2;

			skb->priority = ieee80211_classify(skb, &ieee->current_network);
			qos_ctl |= skb->priority; 
			header.qos_ctl = cpu_to_le16(qos_ctl & IEEE80211_QOS_TID);
		} else {
			hdr_len = IEEE80211_3ADDR_LEN;
		}
		
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		(CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
			bytes_per_frag -= IEEE80211_FCS_LEN;

		
		if (encrypt)
			bytes_per_frag -= crypt->ops->extra_prefix_len +
				crypt->ops->extra_postfix_len;

		
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;

		
		txb = ieee80211_alloc_txb(nr_frags, frag_size + ieee->tx_headroom, GFP_ATOMIC);
		if (unlikely(!txb)) {
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			ieee->dev->name);
			goto failed;
		}
		txb->encrypted = encrypt;
		txb->payload_size = bytes;

		
		if(qos_actived)
		{
			txb->queue_index = UP2AC(skb->priority);
		} else {
			txb->queue_index = WME_AC_BK;
		}



		for (i = 0; i < nr_frags; i++) {
			skb_frag = txb->fragments[i];
			tcb_desc = (cb_desc *)(skb_frag->cb + MAX_DEV_ADDR_SIZE);
			if(qos_actived){
				skb_frag->priority = skb->priority;
				tcb_desc->queue_index =  UP2AC(skb->priority);
			} else {
				skb_frag->priority = WME_AC_BK;
				tcb_desc->queue_index = WME_AC_BK;
			}
			skb_reserve(skb_frag, ieee->tx_headroom);

			if (encrypt){
				if (ieee->hwsec_active)
					tcb_desc->bHwSec = 1;
				else
					tcb_desc->bHwSec = 0;
				skb_reserve(skb_frag, crypt->ops->extra_prefix_len);
			}
			else
			{
				tcb_desc->bHwSec = 0;
			}
			frag_hdr = (struct ieee80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
			memcpy(frag_hdr, &header, hdr_len);

			
			if (i != nr_frags - 1) {
				frag_hdr->frame_ctl = cpu_to_le16(
					fc | IEEE80211_FCTL_MOREFRAGS);
				bytes = bytes_per_frag;

			} else {
				
				bytes = bytes_last_frag;
			}
			
			if(qos_actived)
			{
				
				frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[UP2AC(skb->priority)+1]<<4 | i);
			} else {
				frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
			}

			
			if (i == 0) {
				ieee80211_put_snap(
					skb_put(skb_frag, SNAP_SIZE + sizeof(u16)),
					ether_type);
				bytes -= SNAP_SIZE + sizeof(u16);
			}

			memcpy(skb_put(skb_frag, bytes), skb->data, bytes);

			
			skb_pull(skb, bytes);

			
			if (encrypt)
				ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
			if (ieee->config &
			(CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
				skb_put(skb_frag, 4);
		}

		if(qos_actived)
		{
		  if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
			ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
		  else
			ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
		} else {
  		  if (ieee->seq_ctrl[0] == 0xFFF)
			ieee->seq_ctrl[0] = 0;
		  else
			ieee->seq_ctrl[0]++;
		}
	}else{
		if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			ieee->dev->name, skb->len);
			goto success;
		}

		txb = ieee80211_alloc_txb(1, skb->len, GFP_ATOMIC);
		if(!txb){
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			ieee->dev->name);
			goto failed;
		}

		txb->encrypted = 0;
		txb->payload_size = skb->len;
		memcpy(skb_put(txb->fragments[0],skb->len), skb->data, skb->len);
	}

 success:

	if (txb)
	{
		cb_desc *tcb_desc = (cb_desc *)(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
		tcb_desc->bTxEnableFwCalcDur = 1;
		if (is_multicast_ether_addr(header.addr1))
			tcb_desc->bMulticast = 1;
		if (is_broadcast_ether_addr(header.addr1))
			tcb_desc->bBroadcast = 1;
		ieee80211_txrate_selectmode(ieee, tcb_desc);
		if ( tcb_desc->bMulticast ||  tcb_desc->bBroadcast)
			tcb_desc->data_rate = ieee->basic_rate;
		else
			
			tcb_desc->data_rate = CURRENT_RATE(ieee->mode, ieee->rate, ieee->HTCurrentOperaRate);
		ieee80211_qurey_ShortPreambleMode(ieee, tcb_desc);
		ieee80211_tx_query_agg_cap(ieee, txb->fragments[0], tcb_desc);
		ieee80211_query_HTCapShortGI(ieee, tcb_desc);
		ieee80211_query_BandwidthMode(ieee, tcb_desc);
		ieee80211_query_protectionmode(ieee, tcb_desc, txb->fragments[0]);
		ieee80211_query_seqnum(ieee, txb->fragments[0], header.addr1);

		
	}
	spin_unlock_irqrestore(&ieee->lock, flags);
	dev_kfree_skb_any(skb);
	if (txb) {
		if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE){
			ieee80211_softmac_xmit(txb, ieee);
		}else{
			if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
				stats->tx_packets++;
				stats->tx_bytes += txb->payload_size;
				return 0;
			}
			ieee80211_txb_free(txb);
		}
	}

	return 0;

 failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	stats->tx_errors++;
	return 1;

}
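
One recurring idiom in these softmac variants is stashing per-fragment TX metadata in the skb's cb[] scratch area, past the first MAX_DEV_ADDR_SIZE bytes. A sketch of that accessor (hypothetical helper name):

static inline cb_desc *skb_tcb_desc(struct sk_buff *skb)
{
	/* per-fragment TX metadata lives past the reserved address bytes */
	return (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
}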
Example No. 6
int ieee80211_rtl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee80211_device *ieee = netdev_priv(dev);
	struct ieee80211_txb *txb = NULL;
	struct ieee80211_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
	unsigned long flags;
	struct net_device_stats *stats = &ieee->stats;
	int ether_type = 0, encrypt;
	int bytes, fc, qos_ctl = 0, hdr_len;
	struct sk_buff *skb_frag;
	struct ieee80211_hdr_3addrqos header = { /* Ensure zero initialized */
		.duration_id = 0,
		.seq_ctl = 0,
		.qos_ctl = 0
	};
	u8 dest[ETH_ALEN], src[ETH_ALEN];
	int qos_actived = ieee->current_network.qos_data.active;

	struct ieee80211_crypt_data* crypt;
	bool bdhcp = false;

	cb_desc *tcb_desc;

	spin_lock_irqsave(&ieee->lock, flags);

	/* If there is no driver handler to take the TXB, don't bother
	 * creating it... */
	if ((!ieee->hard_start_xmit && !(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE))||
	   ((!ieee->softmac_data_hard_start_xmit && (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
		printk(KERN_WARNING "%s: No xmit handler.\n",
		       ieee->dev->name);
		goto success;
	}


	if(likely(ieee->raw_tx == 0)){
		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			ieee->dev->name, skb->len);
			goto success;
		}

		memset(skb->cb, 0, sizeof(skb->cb));
		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

		crypt = ieee->crypt[ieee->tx_keyidx];

		encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
			ieee->host_encrypt && crypt && crypt->ops;

		if (!encrypt && ieee->ieee802_1x &&
		ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
			stats->tx_dropped++;
			goto success;
		}
	#ifdef CONFIG_IEEE80211_DEBUG
		if (crypt && !encrypt && ether_type == ETH_P_PAE) {
			struct eapol *eap = (struct eapol *)(skb->data +
				sizeof(struct ethhdr) - SNAP_SIZE - sizeof(u16));
			IEEE80211_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n",
				eap_get_type(eap->type));
		}
	#endif

		// The following is for DHCP and ARP packets: transmit them at CCK 1M
		// and keep LPS awake for a while so the DHCP exchange does not fail.
		if (skb->len > 282) { // MINIMUM_DHCP_PACKET_SIZE
			if (ETH_P_IP == ether_type) {// IP header
				const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14);
				if (IPPROTO_UDP == ip->protocol) { // FIXME: Windows uses 11 here; IPPROTO_UDP in the Linux kernel is 17.
					struct udphdr *udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
					//if(((ntohs(udp->source) == 68) && (ntohs(udp->dest) == 67)) ||
					///   ((ntohs(udp->source) == 67) && (ntohs(udp->dest) == 68))) {
					if(((((u8 *)udp)[1] == 68) && (((u8 *)udp)[3] == 67)) ||
							((((u8 *)udp)[1] == 67) && (((u8 *)udp)[3] == 68))) {
						// 68 : UDP BOOTP client
						// 67 : UDP BOOTP server
						printk("DHCP pkt src port:%d, dest port:%d!!\n", ((u8 *)udp)[1],((u8 *)udp)[3]);
						// Use low rate to send DHCP packet.
						//if(pMgntInfo->IOTAction & HT_IOT_ACT_WA_IOT_Broadcom)
						//{
						//      tcb_desc->DataRate = MgntQuery_TxRateExcludeCCKRates(ieee);//0xc;//ofdm 6m
						//      tcb_desc->bTxDisableRateFallBack = false;
						//}
						//else
						//pTcb->DataRate = Adapter->MgntInfo.LowestBasicRate;
						//RTPRINT(FDM, WA_IOT, ("DHCP TranslateHeader(), pTcb->DataRate = 0x%x\n", pTcb->DataRate));

						bdhcp = true;
#ifdef _RTL8192_EXT_PATCH_
						ieee->LPSDelayCnt = 100;//pPSC->LPSAwakeIntvl*2; //AMY,090701
#else
						ieee->LPSDelayCnt = 100;//pPSC->LPSAwakeIntvl*2;
#endif
					}
				}
			} else if (ETH_P_ARP == ether_type) { // IP ARP packet
				printk("=================>DHCP Protocol start tx ARP pkt!!\n");
				bdhcp = true;
				ieee->LPSDelayCnt = ieee->current_network.tim.tim_count;

				//if(pMgntInfo->IOTAction & HT_IOT_ACT_WA_IOT_Broadcom)
				//{
				//      tcb_desc->DataRate = MgntQuery_TxRateExcludeCCKRates(Adapter->MgntInfo.mBrates);//0xc;//ofdm 6m
				//      tcb_desc->bTxDisableRateFallBack = FALSE;
				//}
				//else
				//      tcb_desc->DataRate = Adapter->MgntInfo.LowestBasicRate;
				//RTPRINT(FDM, WA_IOT, ("ARP TranslateHeader(), pTcb->DataRate = 0x%x\n", pTcb->DataRate));
			}
		}

		/* Save source and destination addresses */
		memcpy(&dest, skb->data, ETH_ALEN);
		memcpy(&src, skb->data+ETH_ALEN, ETH_ALEN);

		/* Advance the SKB to the start of the payload */
		skb_pull(skb, sizeof(struct ethhdr));

		/* Determine total amount of storage required for TXB packets */
		bytes = skb->len + SNAP_SIZE + sizeof(u16);

		if (encrypt)
			fc = IEEE80211_FTYPE_DATA | IEEE80211_FCTL_WEP;
		else
			fc = IEEE80211_FTYPE_DATA;

		//if(ieee->current_network.QoS_Enable)
		if(qos_actived)
			fc |= IEEE80211_STYPE_QOS_DATA;
		else
			fc |= IEEE80211_STYPE_DATA;

		if (ieee->iw_mode == IW_MODE_INFRA) {
			fc |= IEEE80211_FCTL_TODS;
			/* To DS: Addr1 = BSSID, Addr2 = SA,
			Addr3 = DA */
			memcpy(&header.addr1, ieee->current_network.bssid, ETH_ALEN);
			memcpy(&header.addr2, &src, ETH_ALEN);
			memcpy(&header.addr3, &dest, ETH_ALEN);
		} else if (ieee->iw_mode == IW_MODE_ADHOC) {
			/* not From/To DS: Addr1 = DA, Addr2 = SA,
			Addr3 = BSSID */
			memcpy(&header.addr1, dest, ETH_ALEN);
			memcpy(&header.addr2, src, ETH_ALEN);
			memcpy(&header.addr3, ieee->current_network.bssid, ETH_ALEN);
		}

		header.frame_ctl = cpu_to_le16(fc);

		/* Determine fragmentation size based on destination (multicast
		* and broadcast are not fragmented) */
		if (is_multicast_ether_addr(header.addr1) ||
		is_broadcast_ether_addr(header.addr1)) {
			frag_size = MAX_FRAG_THRESHOLD;
			qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
		}
		else {
			frag_size = ieee->fts;//default:392
			qos_ctl = 0;
		}

		//if (ieee->current_network.QoS_Enable)
		if(qos_actived)
		{
			hdr_len = IEEE80211_3ADDR_LEN + 2;

			skb->priority = ieee80211_classify(skb, &ieee->current_network);
			qos_ctl |= skb->priority; //set in the ieee80211_classify
			header.qos_ctl = cpu_to_le16(qos_ctl & IEEE80211_QOS_TID);
		} else {
			hdr_len = IEEE80211_3ADDR_LEN;
		}
		/* Determine amount of payload per fragment.  Regardless of if
		* this stack is providing the full 802.11 header, one will
		* eventually be affixed to this fragment -- so we must account for
		* it when determining the amount of payload space. */
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		(CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
			bytes_per_frag -= IEEE80211_FCS_LEN;

		/* Each fragment may need to have room for encryption pre/postfix */
		if (encrypt)
			bytes_per_frag -= crypt->ops->extra_prefix_len +
				crypt->ops->extra_postfix_len;

		/* Number of fragments is the total bytes_per_frag /
		* payload_per_fragment */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;

		/* When we allocate the TXB we allocate enough space for the reserve
		* and full fragment bytes (bytes_per_frag doesn't include prefix,
		* postfix, header, FCS, etc.) */
		txb = ieee80211_alloc_txb(nr_frags, frag_size + ieee->tx_headroom, GFP_ATOMIC);
		if (unlikely(!txb)) {
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			ieee->dev->name);
			goto failed;
		}
		txb->encrypted = encrypt;
		txb->payload_size = bytes;

		//if (ieee->current_network.QoS_Enable)
		if(qos_actived)
		{
			txb->queue_index = UP2AC(skb->priority);
		} else {
			txb->queue_index = WME_AC_BK;
		}



		for (i = 0; i < nr_frags; i++) {
			skb_frag = txb->fragments[i];
			tcb_desc = (cb_desc *)(skb_frag->cb + MAX_DEV_ADDR_SIZE);
			if(qos_actived){
				skb_frag->priority = skb->priority;//UP2AC(skb->priority);
				tcb_desc->queue_index =  UP2AC(skb->priority);
			} else {
				skb_frag->priority = WME_AC_BK;
				tcb_desc->queue_index = WME_AC_BK;
			}
			skb_reserve(skb_frag, ieee->tx_headroom);

			if (encrypt){
				if (ieee->hwsec_active)
					tcb_desc->bHwSec = 1;
				else
					tcb_desc->bHwSec = 0;
				skb_reserve(skb_frag, crypt->ops->extra_prefix_len);
			}
			else
			{
				tcb_desc->bHwSec = 0;
			}
			frag_hdr = (struct ieee80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
			memcpy(frag_hdr, &header, hdr_len);

			/* If this is not the last fragment, then add the MOREFRAGS
			* bit to the frame control */
			if (i != nr_frags - 1) {
				frag_hdr->frame_ctl = cpu_to_le16(
					fc | IEEE80211_FCTL_MOREFRAGS);
				bytes = bytes_per_frag;

			} else {
				/* The last fragment takes the remaining length */
				bytes = bytes_last_frag;
			}
			//if(ieee->current_network.QoS_Enable)
			if(qos_actived)
			{
				// +1: QoS frames use the per-AC sequence counters (index 0 is the non-QoS counter) 2006/7/12
				frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[UP2AC(skb->priority)+1]<<4 | i);
			} else {
				frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
			}

			/* Put a SNAP header on the first fragment */
			if (i == 0) {
				ieee80211_put_snap(
					skb_put(skb_frag, SNAP_SIZE + sizeof(u16)),
					ether_type);
				bytes -= SNAP_SIZE + sizeof(u16);
			}

			memcpy(skb_put(skb_frag, bytes), skb->data, bytes);

			/* Advance the SKB... */
			skb_pull(skb, bytes);

			/* Encryption routine will move the header forward in order
			* to insert the IV between the header and the payload */
			if (encrypt)
				ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
			if (ieee->config &
			(CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
				skb_put(skb_frag, 4);
		}

		if(qos_actived)
		{
		  if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
			ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
		  else
			ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
		} else {
  		  if (ieee->seq_ctrl[0] == 0xFFF)
			ieee->seq_ctrl[0] = 0;
		  else
			ieee->seq_ctrl[0]++;
		}
	}else{
		if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			ieee->dev->name, skb->len);
			goto success;
		}

		txb = ieee80211_alloc_txb(1, skb->len, GFP_ATOMIC);
		if(!txb){
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			ieee->dev->name);
			goto failed;
		}

		txb->encrypted = 0;
		txb->payload_size = skb->len;
		memcpy(skb_put(txb->fragments[0],skb->len), skb->data, skb->len);
	}

 success:
//WB: fill the data tcb_desc here. Only the first fragment is considered; this needs to change and may be moved elsewhere.
	if (txb)
	{
		cb_desc *tcb_desc = (cb_desc *)(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
		tcb_desc->bTxEnableFwCalcDur = 1;
		if (is_multicast_ether_addr(header.addr1))
			tcb_desc->bMulticast = 1;
		if (is_broadcast_ether_addr(header.addr1))
			tcb_desc->bBroadcast = 1;
		ieee80211_txrate_selectmode(ieee, tcb_desc);
		if ( tcb_desc->bMulticast ||  tcb_desc->bBroadcast)
			tcb_desc->data_rate = ieee->basic_rate;
		else
			//tcb_desc->data_rate = CURRENT_RATE(ieee->current_network.mode, ieee->rate, ieee->HTCurrentOperaRate);
			tcb_desc->data_rate = CURRENT_RATE(ieee->mode, ieee->rate, ieee->HTCurrentOperaRate);

		if(bdhcp == true){
			// Use low rate to send DHCP packet.
			//if(ieee->pHTInfo->IOTAction & HT_IOT_ACT_WA_IOT_Broadcom) {
			//	tcb_desc->data_rate = MGN_1M;//MgntQuery_TxRateExcludeCCKRates(ieee);//0xc;//ofdm 6m
			//	tcb_desc->bTxDisableRateFallBack = false;
			//}
			//else
			{
				tcb_desc->data_rate = MGN_1M;
				tcb_desc->bTxDisableRateFallBack = 1;
			}

			tcb_desc->RATRIndex = 7;
			tcb_desc->bTxUseDriverAssingedRate = 1;
			tcb_desc->bdhcp = 1;
		}


		ieee80211_qurey_ShortPreambleMode(ieee, tcb_desc);
		ieee80211_tx_query_agg_cap(ieee, txb->fragments[0], tcb_desc);
		ieee80211_query_HTCapShortGI(ieee, tcb_desc);
		ieee80211_query_BandwidthMode(ieee, tcb_desc);
		ieee80211_query_protectionmode(ieee, tcb_desc, txb->fragments[0]);
		ieee80211_query_seqnum(ieee, txb->fragments[0], header.addr1);
//		IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, txb->fragments[0]->data, txb->fragments[0]->len);
		//IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, tcb_desc, sizeof(cb_desc));
	}
	spin_unlock_irqrestore(&ieee->lock, flags);
	dev_kfree_skb_any(skb);
	if (txb) {
		if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE){
			ieee80211_softmac_xmit(txb, ieee);
		}else{
			if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
				stats->tx_packets++;
				stats->tx_bytes += txb->payload_size;
				return 0;
			}
			ieee80211_txb_free(txb);
		}
	}

	return 0;

 failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	stats->tx_errors++;
	return 1;

}
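
The DHCP test above pokes raw bytes 1 and 3 of the UDP header, i.e. the low byte of each big-endian port, matching the commented-out ntohs() form (68 = BOOTP client, 67 = BOOTP server). A hedged standalone version of the same check:

static bool is_dhcp_udp(const struct udphdr *udp)
{
	u16 sport = ntohs(udp->source);
	u16 dport = ntohs(udp->dest);

	return (sport == 68 && dport == 67) ||
	       (sport == 67 && dport == 68);
}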