/* Perform WEP decryption on the given buffer. The buffer includes the whole
 * WEP part of the frame: IV (4 bytes), encrypted payload (including SNAP
 * header), and ICV (4 bytes). The length of the WEP part includes both IV
 * and ICV.
 *
 * Returns 0 if the frame was decrypted successfully and the ICV was correct,
 * and a negative value on failure (bad key index, cipher error, or ICV
 * mismatch). If the frame is OK, the IV and ICV are removed.
 */
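/* WEP MPDU layout after the 802.11 header:
 *
 *   IV[0..2] | key-index byte | RC4(payload || ICV)
 *
 * The RC4 per-packet key is the 3-byte IV prepended to the shared secret;
 * the ICV is the CRC-32 of the plaintext payload. */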
static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
	struct prism2_wep_data *wep = priv;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21))
	struct blkcipher_desc desc = { .tfm = wep->rx_tfm };
#endif
	u32 klen, plen;
	u8 key[WEP_KEY_LEN + 3];
	u8 keyidx, *pos;
#ifndef JOHN_HWSEC
	u32 crc;
	u8 icv[4];
	struct scatterlist sg;
#endif
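	/* Need at least the 4-byte IV and 4-byte ICV beyond the header */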
	if (skb->len < hdr_len + 8)
		return -1;

	pos = skb->data + hdr_len;
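	/* First three bytes of the per-packet key are the plaintext IV;
	 * the key index sits in the top two bits of the fourth byte */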
	key[0] = *pos++;
	key[1] = *pos++;
	key[2] = *pos++;
	keyidx = *pos++ >> 6;
	if (keyidx != wep->key_idx)
		return -1;

	klen = 3 + wep->key_len;

	/* Copy rest of the WEP key (the secret part) */
	memcpy(key + 3, wep->key, wep->key_len);

	/* Apply RC4 to data and compute CRC32 over decrypted data */
	plen = skb->len - hdr_len - 8;
#ifndef JOHN_HWSEC
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21))
	crypto_cipher_setkey(wep->tfm, key, klen);
	sg.page = virt_to_page(pos);
	sg.offset = offset_in_page(pos);
	sg.length = plen + 4;
	crypto_cipher_decrypt(wep->tfm, &sg, &sg, plen + 4);
#else
	crypto_blkcipher_setkey(wep->rx_tfm, key, klen);
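	/* From 2.6.24 the scatterlist must be set up through the sg API
	 * (sg_init_one()) rather than by filling its fields directly */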
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
	sg.page = virt_to_page(pos);
	sg.offset = offset_in_page(pos);
	sg.length = plen + 4;
#else
	sg_init_one(&sg, pos, plen + 4);
#endif
	if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4))
		return -7;
#endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) */

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
	crc = ~crc32_le(~0, pos, plen);
#else
	crc = ~ether_crc_le(plen, pos);
#endif
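	/* Serialize the CRC little-endian to match the on-air ICV */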
	icv[0] = crc;
	icv[1] = crc >> 8;
	icv[2] = crc >> 16;
	icv[3] = crc >> 24;

	if (memcmp(icv, pos + plen, 4) != 0) {
		/* ICV mismatch - drop frame */
		return -2;
	}
#endif	/* JOHN_HWSEC */

	/* Remove IV and ICV: shift the (shorter) 802.11 header forward over
	 * the IV instead of moving the payload, then trim off the ICV */
	memmove(skb->data + 4, skb->data, hdr_len);
	skb_pull(skb, 4);
	skb_trim(skb, skb->len - 4);
	return 0;
}


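/* Install the WEP secret key; seq is unused (WEP has no replay counter). */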
static int prism2_wep_set_key(void *key, int len, u8 *seq, void *priv)
{
	struct prism2_wep_data *wep = priv;

	if (len < 0 || len > WEP_KEY_LEN)
		return -1;

	memcpy(wep->key, key, len);
	wep->key_len = len;

	return 0;
}


static int prism2_wep_get_key(void *key, int len, u8 *seq, void *priv)
{
	struct prism2_wep_data *wep = priv;

	if (len < wep->key_len)
		return -1;

	memcpy(key, wep->key, wep->key_len);

	return wep->key_len;
}


static char *prism2_wep_print_stats(char *p, void *priv)
{
	struct prism2_wep_data *wep = priv;
	p += sprintf(p, "key[%d] alg=WEP len=%d\n",
		     wep->key_idx, wep->key_len);
	return p;
}


static struct ieee80211_crypto_ops ieee80211_crypt_wep = {
	.name			= "WEP",
	.init			= prism2_wep_init,
	.deinit			= prism2_wep_deinit,
	.encrypt_mpdu		= prism2_wep_encrypt,
	.decrypt_mpdu		= prism2_wep_decrypt,
	.encrypt_msdu		= NULL,
	.decrypt_msdu		= NULL,
	.set_key		= prism2_wep_set_key,
	.get_key		= prism2_wep_get_key,
	.print_stats		= prism2_wep_print_stats,
	.extra_prefix_len	= 4, /* IV */
	.extra_postfix_len	= 4, /* ICV */
	.owner			= THIS_MODULE,
};


int __init ieee80211_crypto_wep_init(void)
{
	return ieee80211_register_crypto_ops(&ieee80211_crypt_wep);
}


void __exit ieee80211_crypto_wep_exit(void)
{
	ieee80211_unregister_crypto_ops(&ieee80211_crypt_wep);
}


/* Intentionally empty; referenced from elsewhere, apparently only to force
 * this object to be linked in. */
void ieee80211_wep_null(void)
{
}
#if 0
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
EXPORT_SYMBOL(ieee80211_wep_null);
#else
EXPORT_SYMBOL_NOVERS(ieee80211_wep_null);
#endif

module_init(ieee80211_crypto_wep_init);
module_exit(ieee80211_crypto_wep_exit);
#endif

/* SKBs are added to the ieee->tx_queue. */
int ieee80211_xmit(struct sk_buff *skb,
		   struct net_device *dev)
{
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
	struct ieee80211_device *ieee = netdev_priv(dev);
#else
	struct ieee80211_device *ieee = (struct ieee80211_device *)dev->priv;
#endif
	struct ieee80211_txb *txb = NULL;
	struct ieee80211_hdr *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
	unsigned long flags;
	struct net_device_stats *stats = &ieee->stats;
	int ether_type, encrypt;
	int bytes, fc, hdr_len;
	struct sk_buff *skb_frag;
	struct ieee80211_hdr header = { /* Ensure zero initialized */
		.duration_id = 0,
		.seq_ctl = 0
	};
	u8 dest[ETH_ALEN], src[ETH_ALEN];
	int pend;
	struct ieee80211_crypt_data *crypt;

	spin_lock_irqsave(&ieee->lock, flags);

	/* If there is no driver handler to take the TXB, don't bother
	 * creating it... */
	if ((!ieee->hard_start_xmit &&
	     !(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)) ||
	    (!ieee->softmac_data_hard_start_xmit &&
	     (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE))) {
		printk(KERN_WARNING "%s: No xmit handler.\n",
		       ieee->dev->name);
		goto success;
	}

	if (likely(ieee->raw_tx == 0)) {

		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			       ieee->dev->name, skb->len);
			goto success;
		}


		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

		crypt = ieee->crypt[ieee->tx_keyidx];

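		/* EAPOL frames are sent in the clear while 802.1X is still
		 * negotiating keys; everything else is encrypted only when
		 * host encryption is enabled and a cipher is configured */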
		encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
			ieee->host_encrypt && crypt && crypt->ops;

		if (!encrypt && ieee->ieee802_1x &&
		    ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
			stats->tx_dropped++;
			goto success;
		}

#ifdef CONFIG_IEEE80211_DEBUG
		if (crypt && !encrypt && ether_type == ETH_P_PAE) {
			struct eapol *eap = (struct eapol *)(skb->data +
				sizeof(struct ethhdr) - SNAP_SIZE - sizeof(u16));
			IEEE80211_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n",
				eap_get_type(eap->type));
		}
#endif

		/* Save source and destination addresses */
		memcpy(&dest, skb->data, ETH_ALEN);
		memcpy(&src, skb->data+ETH_ALEN, ETH_ALEN);



		/* Advance the SKB to the start of the payload */
		skb_pull(skb, sizeof(struct ethhdr));

		/* Determine total amount of storage required for TXB packets */
		bytes = skb->len + SNAP_SIZE + sizeof(u16);

		if (encrypt)
			fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
				IEEE80211_FCTL_WEP;
		else
			fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;

		if (ieee->iw_mode == IW_MODE_INFRA) {
			fc |= IEEE80211_FCTL_TODS;
			/* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
			memcpy(&header.addr1, ieee->current_network.bssid, ETH_ALEN);
			memcpy(&header.addr2, &src, ETH_ALEN);
			memcpy(&header.addr3, &dest, ETH_ALEN);
		} else if (ieee->iw_mode == IW_MODE_ADHOC) {
			/* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */
			memcpy(&header.addr1, dest, ETH_ALEN);
			memcpy(&header.addr2, src, ETH_ALEN);
			memcpy(&header.addr3, ieee->current_network.bssid, ETH_ALEN);
		}
		header.frame_ctl = cpu_to_le16(fc);

		hdr_len = IEEE80211_3ADDR_LEN;

		/* Determine fragmentation size based on destination (multicast
		 * and broadcast are not fragmented) */
		if (is_multicast_ether_addr(dest) ||
		    is_broadcast_ether_addr(dest))
			frag_size = MAX_FRAG_THRESHOLD;
		else
			frag_size = ieee->fts;

		/* Determine the amount of payload per fragment.  Regardless of
		 * whether this stack provides the full 802.11 header, one will
		 * eventually be affixed to this fragment -- so we must account
		 * for it when determining the amount of payload space. */
		bytes_per_frag = frag_size - IEEE80211_3ADDR_LEN;
		if (ieee->config &
		    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
			bytes_per_frag -= IEEE80211_FCS_LEN;

		/* Each fragment may need room for the encryption prefix/postfix */
		if (encrypt)
			bytes_per_frag -= crypt->ops->extra_prefix_len +
					crypt->ops->extra_postfix_len;

		/* Number of fragments is the total number of bytes divided by
		 * the payload per fragment */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;
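		/* Example: frag_size = 256 with a 24-byte 3-addr header gives
		 * bytes_per_frag = 232 (no FCS, no encryption); an 800-byte
		 * payload then yields nr_frags = 4 with bytes_last_frag = 104 */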

		/* When we allocate the TXB we allocate enough space for the
		 * reserve and full fragment bytes (bytes_per_frag doesn't
		 * include prefix, postfix, header, FCS, etc.) */

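		/* TXBs come from a preallocated ring; back off once MAX_TX_SKB
		 * of them are still pending in the driver */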
		pend = atomic_read(&ieee->tx_pending_txb);
		if (pend > MAX_TX_SKB - 1) {
			goto failed;
		} else {
			atomic_inc(&ieee->tx_pending_txb);
		}
		txb = ieee->alloc_txb[ieee->tx_skb_index];
		ieee->tx_skb_index = (ieee->tx_skb_index + 1) % MAX_TX_SKB;

		if (unlikely(!txb)) {
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			       ieee->dev->name);
			goto failed;
		}

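		/* Recycled TXB: rewind each fragment to an empty payload with
		 * data/tail reset to 16 bytes past head */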
		for (i = 0; i < txb->nr_frags; i++) {
			txb->fragments[i]->len = 0;
			txb->fragments[i]->data = txb->fragments[i]->head + 16;
			txb->fragments[i]->tail = txb->fragments[i]->data;
		}

		txb->nr_frags = nr_frags;
		txb->frag_size = frag_size;
		txb->encrypted = encrypt;
		txb->payload_size = bytes;

		for (i = 0; i < nr_frags; i++) {
			skb_frag = txb->fragments[i];

			if (encrypt)
				skb_reserve(skb_frag, crypt->ops->extra_prefix_len);

			frag_hdr = (struct ieee80211_hdr *)skb_put(skb_frag, hdr_len);
			memcpy(frag_hdr, &header, hdr_len);

			/* If this is not the last fragment, then add the
			 * MOREFRAGS bit to the frame control */
			if (i != nr_frags - 1) {
				frag_hdr->frame_ctl = cpu_to_le16(
					fc | IEEE80211_FCTL_MOREFRAGS);
				bytes = bytes_per_frag;

			} else {
				/* The last fragment takes the remaining length */
				bytes = bytes_last_frag;
			}

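			/* seq_ctl: 12-bit sequence number in the high bits,
			 * 4-bit fragment number in the low bits */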
			frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl << 4 | i);


			/* Put a SNAP header on the first fragment */
			if (i == 0) {
				ieee80211_put_snap(
					skb_put(skb_frag, SNAP_SIZE + sizeof(u16)),
					ether_type);
				bytes -= SNAP_SIZE + sizeof(u16);
			}

			memcpy(skb_put(skb_frag, bytes), skb->data, bytes);

			/* Advance the SKB... */
			skb_pull(skb, bytes);

			/* Encryption routine will move the header forward in
			 * order to insert the IV between the header and the
			 * payload */
			if (encrypt)
				ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
			if (ieee->config &
			    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
				skb_put(skb_frag, 4);
		}
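		/* Wrap the 12-bit sequence number */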
		if (ieee->seq_ctrl == 0xFFF)
			ieee->seq_ctrl = 0;
		else
			ieee->seq_ctrl++;
	} else {
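		/* raw_tx: the skb already holds a complete 802.11 frame, so
		 * pass it through as a single unencrypted fragment */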
		if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			       ieee->dev->name, skb->len);
			goto success;
		}

		txb = ieee80211_alloc_txb(1, skb->len, GFP_ATOMIC);
		if (!txb) {
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			       ieee->dev->name);
			goto failed;
		}

		txb->encrypted = 0;
		txb->payload_size = skb->len;
		memcpy(skb_put(txb->fragments[0], skb->len), skb->data, skb->len);
	}

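	/* "success" only means the skb was consumed: the drop paths above
	 * also land here after discarding the frame */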
 success:
	spin_unlock_irqrestore(&ieee->lock, flags);
	dev_kfree_skb_any(skb);
	if (txb) {
		if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) {
			ieee80211_softmac_xmit(txb, ieee);
		} else {
			if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
				stats->tx_packets++;
				stats->tx_bytes += txb->payload_size;
				return 0;
			}
			ieee80211_txb_free(txb, txb->nr_frags);
		}
	}
	return 0;

 failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	stats->tx_errors++;
	return 1;

}

#ifndef BUILT_IN_IEEE80211
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
EXPORT_SYMBOL(ieee80211_txb_free);
EXPORT_SYMBOL(ieee80211_alloc_txb);
#else
EXPORT_SYMBOL_NOVERS(ieee80211_txb_free);
EXPORT_SYMBOL_NOVERS(ieee80211_alloc_txb);