Example #1
static void mwifiex_usb_rx_complete(struct urb *urb)
{
	struct urb_context *context = (struct urb_context *)urb->context;
	struct mwifiex_adapter *adapter = context->adapter;
	struct sk_buff *skb = context->skb;
	struct usb_card_rec *card;
	int recv_length = urb->actual_length;
	int size, status;

	if (!adapter || !adapter->card) {
		pr_err("mwifiex adapter or card structure is not valid\n");
		return;
	}

	card = (struct usb_card_rec *)adapter->card;
	if (card->rx_cmd_ep == context->ep)
		atomic_dec(&card->rx_cmd_urb_pending);
	else
		atomic_dec(&card->rx_data_urb_pending);

	if (recv_length) {
		if (urb->status || (adapter->surprise_removed)) {
			dev_err(adapter->dev,
				"URB status is failed: %d\n", urb->status);
			/* Do not free skb in case of command ep */
			if (card->rx_cmd_ep != context->ep)
				dev_kfree_skb_any(skb);
			goto setup_for_next;
		}
		if (skb->len > recv_length)
			skb_trim(skb, recv_length);
		else
			skb_put(skb, recv_length - skb->len);

		atomic_inc(&adapter->rx_pending);
		status = mwifiex_usb_recv(adapter, skb, context->ep);

		dev_dbg(adapter->dev, "info: recv_length=%d, status=%d\n",
			recv_length, status);
		if (status == -EINPROGRESS) {
			queue_work(adapter->workqueue, &adapter->main_work);

			/* urb for data_ep is re-submitted now;
			 * urb for cmd_ep will be re-submitted in callback
			 * mwifiex_usb_recv_complete
			 */
			if (card->rx_cmd_ep == context->ep)
				return;
		} else {
			atomic_dec(&adapter->rx_pending);
			if (status == -1)
				dev_err(adapter->dev,
					"received data processing failed!\n");

			/* Do not free skb in case of command ep */
			if (card->rx_cmd_ep != context->ep)
				dev_kfree_skb_any(skb);
		}
	} else if (urb->status) {
		if (!adapter->is_suspended) {
			dev_warn(adapter->dev,
				 "Card is removed: %d\n", urb->status);
			adapter->surprise_removed = true;
		}
		dev_kfree_skb_any(skb);
		return;
	} else {
		/* Do not free skb in case of command ep */
		if (card->rx_cmd_ep != context->ep)
			dev_kfree_skb_any(skb);

		/* fall through setup_for_next */
	}

setup_for_next:
	if (card->rx_cmd_ep == context->ep)
		size = MWIFIEX_RX_CMD_BUF_SIZE;
	else
		size = MWIFIEX_RX_DATA_BUF_SIZE;

	mwifiex_usb_submit_rx_urb(context, size);

	return;
}
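
The trim-or-grow step above keeps skb->len in sync with the byte count the URB actually delivered. A minimal sketch of just that adjustment (rx_fixup_len is an illustrative name, not part of the driver):

#include <linux/skbuff.h>

/* Sketch: make skb->len match the bytes actually received. skb_trim()
 * shrinks the buffer; skb_put() grows it and assumes the tailroom is
 * there, which holds because the RX buffer was allocated at full size. */
static void rx_fixup_len(struct sk_buff *skb, unsigned int recv_length)
{
	if (skb->len > recv_length)
		skb_trim(skb, recv_length);
	else
		skb_put(skb, recv_length - skb->len);
}
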
Example #2
static void z8530_rx_done(struct z8530_channel *c)
{
	struct sk_buff *skb;
	int ct;
	
	/*
	 *	Is our receive engine in DMA mode
	 */
	 
	if(c->rxdma_on)
	{
		/*
		 *	Save the ready state and the buffer currently
		 *	being used as the DMA target
		 */
		 
		int ready=c->dma_ready;
		unsigned char *rxb=c->rx_buf[c->dma_num];
		unsigned long flags;
		
		/*
		 *	Complete this DMA. Necessary to find the length
		 */		
		 
		flags=claim_dma_lock();
		
		disable_dma(c->rxdma);
		clear_dma_ff(c->rxdma);
		c->rxdma_on=0;
		ct=c->mtu-get_dma_residue(c->rxdma);
		if(ct<0)
			ct=2;	/* Shit happens.. */
		c->dma_ready=0;
		
		/*
		 *	Normal case: the other slot is free, start the next DMA
		 *	into it immediately.
		 */
		 
		if(ready)
		{
			c->dma_num^=1;
			set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
			set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
			set_dma_count(c->rxdma, c->mtu);
			c->rxdma_on = 1;
			enable_dma(c->rxdma);
			/* Stop any frames that we missed the head of 
			   from passing */
			write_zsreg(c, R0, RES_Rx_CRC);
		}
		else
			/* Can't occur as we don't re-enable the DMA irq
			   until after the flip is done */
			netdev_warn(c->netdevice, "DMA flip overrun!\n");

		release_dma_lock(flags);

		/*
		 *	Shove the old buffer into an sk_buff. We can't DMA
		 *	directly into one on a PC - it might be above the 16Mb
		 *	boundary. Optimisation - we could check to see if we
		 *	can avoid the copy. Optimisation 2 - make the memcpy
		 *	a copychecksum.
		 */

		skb = dev_alloc_skb(ct);
		if (skb == NULL) {
			c->netdevice->stats.rx_dropped++;
			netdev_warn(c->netdevice, "Memory squeeze\n");
		} else {
			skb_put(skb, ct);
			skb_copy_to_linear_data(skb, rxb, ct);
			c->netdevice->stats.rx_packets++;
			c->netdevice->stats.rx_bytes += ct;
		}
		c->dma_ready = 1;
	} else {
		RT_LOCK;
		skb = c->skb;

		/*
		 *	The game we play for non DMA is similar. We want to
		 *	get the controller set up for the next packet as fast
		 *	as possible. We potentially only have one byte + the
		 *	fifo length for this. Thus we want to flip to the new
		 *	buffer and then mess around copying and allocating
		 *	things. For the current case it doesn't matter but
		 *	if you build a system where the sync irq isn't blocked
		 *	by the kernel IRQ disable then you need only block the
		 *	sync IRQ for the RT_LOCK area.
		 *
		 */
		ct=c->count;

		c->skb = c->skb2;
		c->count = 0;
		c->max = c->mtu;
		if (c->skb) {
			c->dptr = c->skb->data;
			c->max = c->mtu;
		} else {
			c->count = 0;
			c->max = 0;
		}
		RT_UNLOCK;

		c->skb2 = dev_alloc_skb(c->mtu);
		if (c->skb2 == NULL)
			netdev_warn(c->netdevice, "memory squeeze\n");
		else
			skb_put(c->skb2, c->mtu);
		c->netdevice->stats.rx_packets++;
		c->netdevice->stats.rx_bytes += ct;
	}
	/*
	 *	If we received a frame we must now process it.
	 */
	if (skb) {
		skb_trim(skb, ct);
		c->rx_function(c, skb);
	} else {
		c->netdevice->stats.rx_dropped++;
		netdev_err(c->netdevice, "Lost a frame\n");
	}
}
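
Because ISA DMA cannot target memory above the 16 MB boundary, the received bytes are copied out of the bounce buffer into a fresh skb. A condensed sketch of that copy-out step (copy_dma_buf_to_skb is an illustrative name):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch: copy ct received bytes from the DMA bounce buffer rxb into a
 * newly allocated skb and account the frame; returns NULL on memory
 * squeeze so the caller can bump rx_dropped instead. */
static struct sk_buff *copy_dma_buf_to_skb(struct net_device *dev,
					   const unsigned char *rxb, int ct)
{
	struct sk_buff *skb = dev_alloc_skb(ct);

	if (skb == NULL)
		return NULL;
	skb_put(skb, ct);
	skb_copy_to_linear_data(skb, rxb, ct);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += ct;
	return skb;
}
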
Example #3
static void myri_rx(struct myri_eth *mp, struct net_device *dev)
{
	struct recvq __iomem *rq = mp->rq;
	struct recvq __iomem *rqa = mp->rqack;
	int entry		= sbus_readl(&rqa->head);
	int limit		= sbus_readl(&rqa->tail);
	int drops;

	DRX(("entry[%d] limit[%d] ", entry, limit));
	if (entry == limit)
		return;
	drops = 0;
	DRX(("\n"));
	while (entry != limit) {
		struct myri_rxd __iomem *rxdack = &rqa->myri_rxd[entry];
		u32 csum		= sbus_readl(&rxdack->csum);
		int len			= sbus_readl(&rxdack->myri_scatters[0].len);
		int index		= sbus_readl(&rxdack->ctx);
		struct myri_rxd __iomem *rxd = &rq->myri_rxd[sbus_readl(&rq->tail)];
		struct sk_buff *skb	= mp->rx_skbs[index];

		/* Ack it. */
		sbus_writel(NEXT_RX(entry), &rqa->head);

		/* Check for errors. */
		DRX(("rxd[%d]: %p len[%d] csum[%08x] ", entry, rxd, len, csum));
		dma_sync_single_for_cpu(&mp->myri_op->dev,
					sbus_readl(&rxd->myri_scatters[0].addr),
					RX_ALLOC_SIZE, DMA_FROM_DEVICE);
		if (len < (ETH_HLEN + MYRI_PAD_LEN) || (skb->data[0] != MYRI_PAD_LEN)) {
			DRX(("ERROR["));
			dev->stats.rx_errors++;
			if (len < (ETH_HLEN + MYRI_PAD_LEN)) {
				DRX(("BAD_LENGTH] "));
				dev->stats.rx_length_errors++;
			} else {
				DRX(("NO_PADDING] "));
				dev->stats.rx_frame_errors++;
			}

			/* Return it to the LANAI. */
	drop_it:
			drops++;
			DRX(("DROP "));
			dev->stats.rx_dropped++;
			dma_sync_single_for_device(&mp->myri_op->dev,
						   sbus_readl(&rxd->myri_scatters[0].addr),
						   RX_ALLOC_SIZE,
						   DMA_FROM_DEVICE);
			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
			sbus_writel(index, &rxd->ctx);
			sbus_writel(1, &rxd->num_sg);
			sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);
			goto next;
		}

		DRX(("len[%d] ", len));
		if (len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;
			u32 dma_addr;

			DRX(("BIGBUFF "));
			new_skb = myri_alloc_skb(RX_ALLOC_SIZE, GFP_ATOMIC);
			if (new_skb == NULL) {
				DRX(("skb_alloc(FAILED) "));
				goto drop_it;
			}
			dma_unmap_single(&mp->myri_op->dev,
					 sbus_readl(&rxd->myri_scatters[0].addr),
					 RX_ALLOC_SIZE,
					 DMA_FROM_DEVICE);
			mp->rx_skbs[index] = new_skb;
			new_skb->dev = dev;
			skb_put(new_skb, RX_ALLOC_SIZE);
			dma_addr = dma_map_single(&mp->myri_op->dev,
						  new_skb->data,
						  RX_ALLOC_SIZE,
						  DMA_FROM_DEVICE);
			sbus_writel(dma_addr, &rxd->myri_scatters[0].addr);
			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
			sbus_writel(index, &rxd->ctx);
			sbus_writel(1, &rxd->num_sg);
			sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);

			/* Trim the original skb for the netif. */
			DRX(("trim(%d) ", len));
			skb_trim(skb, len);
		} else {
			struct sk_buff *copy_skb = dev_alloc_skb(len);

			DRX(("SMALLBUFF "));
			if (copy_skb == NULL) {
				DRX(("dev_alloc_skb(FAILED) "));
				goto drop_it;
			}
			/* DMA sync already done above. */
			copy_skb->dev = dev;
			DRX(("resv_and_put "));
			skb_put(copy_skb, len);
			skb_copy_from_linear_data(skb, copy_skb->data, len);

			/* Reuse original ring buffer. */
			DRX(("reuse "));
			dma_sync_single_for_device(&mp->myri_op->dev,
						   sbus_readl(&rxd->myri_scatters[0].addr),
						   RX_ALLOC_SIZE,
						   DMA_FROM_DEVICE);
			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
			sbus_writel(index, &rxd->ctx);
			sbus_writel(1, &rxd->num_sg);
			sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);

			skb = copy_skb;
		}

		/* Just like the Happy Meal, we get checksums from this card. */
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_UNNECESSARY; /* XXX */

		skb->protocol = myri_type_trans(skb, dev);
		DRX(("prot[%04x] netif_rx ", skb->protocol));
		netif_rx(skb);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
	next:
		DRX(("NEXT\n"));
		entry = NEXT_RX(entry);
	}
}
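
myri_rx() shows the classic copybreak trade-off: frames above RX_COPY_THRESHOLD keep their full-size ring buffer (which is then replaced), while small frames are copied into a right-sized skb so the ring buffer can be reused. A condensed sketch of the decision, with the descriptor and DMA bookkeeping elided (rx_copybreak is an illustrative name):

#include <linux/skbuff.h>

/* Sketch: return the skb to pass to the stack, or NULL if allocation
 * failed and the frame must be dropped. *ring_slot holds the full-size
 * ring buffer skb, already skb_put() to alloc_size at fill time. */
static struct sk_buff *rx_copybreak(struct sk_buff **ring_slot, int len,
				    int copy_threshold, int alloc_size)
{
	struct sk_buff *skb = *ring_slot;
	struct sk_buff *new_skb;

	if (len > copy_threshold) {
		new_skb = dev_alloc_skb(alloc_size);
		if (new_skb == NULL)
			return NULL;		/* keep ring buffer, drop frame */
		skb_put(new_skb, alloc_size);
		*ring_slot = new_skb;		/* replace the ring buffer */
		skb_trim(skb, len);		/* hand up the original */
		return skb;
	}

	new_skb = dev_alloc_skb(len);		/* small frame: copy out */
	if (new_skb == NULL)
		return NULL;
	skb_put(new_skb, len);
	skb_copy_from_linear_data(skb, new_skb->data, len);
	return new_skb;				/* ring buffer stays in place */
}
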
Example #4
void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
{
	int status = 0;
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_hdr *hdr;
	struct ath10k_htc_ep *ep;
	u16 payload_len;
	u32 trailer_len = 0;
	size_t min_len;
	u8 eid;
	bool trailer_present;

	hdr = (struct ath10k_htc_hdr *)skb->data;
	skb_pull(skb, sizeof(*hdr));

	eid = hdr->eid;

	if (eid >= ATH10K_HTC_EP_COUNT) {
		ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "",
				hdr, sizeof(*hdr));
		goto out;
	}

	ep = &htc->endpoint[eid];

	payload_len = __le16_to_cpu(hdr->len);

	if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
		ath10k_warn(ar, "HTC rx frame too long, len: %zu\n",
			    payload_len + sizeof(*hdr));
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "",
				hdr, sizeof(*hdr));
		goto out;
	}

	if (skb->len < payload_len) {
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "HTC Rx: insufficient length, got %d, expected %d\n",
			   skb->len, payload_len);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len",
				"", hdr, sizeof(*hdr));
		goto out;
	}

	/* get flags to check for trailer */
	trailer_present = hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
	if (trailer_present) {
		u8 *trailer;

		trailer_len = hdr->trailer_len;
		min_len = sizeof(struct ath10k_ath10k_htc_record_hdr);

		if ((trailer_len < min_len) ||
		    (trailer_len > payload_len)) {
			ath10k_warn(ar, "Invalid trailer length: %d\n",
				    trailer_len);
			goto out;
		}

		trailer = (u8 *)hdr;
		trailer += sizeof(*hdr);
		trailer += payload_len;
		trailer -= trailer_len;
		status = ath10k_htc_process_trailer(htc, trailer,
						    trailer_len, hdr->eid,
						    NULL, NULL);
		if (status)
			goto out;

		skb_trim(skb, skb->len - trailer_len);
	}

	if (((int)payload_len - (int)trailer_len) <= 0)
		/* zero length packet with trailer data, just drop these */
		goto out;

	ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
		   eid, skb);
	ep->ep_ops.ep_rx_complete(ar, skb);

	/* skb is now owned by the rx completion handler */
	skb = NULL;
out:
	kfree_skb(skb);
}
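
The trailer arithmetic above is easy to misread: the trailer occupies the last trailer_len bytes of the payload, and after it is processed the skb is shortened so upper layers never see it. A sketch of the pointer computation, assuming the driver's struct ath10k_htc_hdr (htc_trailer_ptr is an illustrative name):

#include <linux/types.h>

/* Sketch: the trailer starts trailer_len bytes before the end of the
 * payload that follows the HTC header. */
static u8 *htc_trailer_ptr(struct ath10k_htc_hdr *hdr, u16 payload_len,
			   u8 trailer_len)
{
	u8 *trailer = (u8 *)hdr;

	trailer += sizeof(*hdr);	/* skip the header ... */
	trailer += payload_len;		/* ... to the end of the payload ... */
	trailer -= trailer_len;		/* ... and back up over the trailer */
	return trailer;
}

Once ath10k_htc_process_trailer() succeeds, the skb_trim(skb, skb->len - trailer_len) call above removes the trailer so the endpoint handler sees payload only.
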
Example #5
/*
 * Allocate a beacon frame and fill in the appropriate bits.
 */
struct sk_buff *
ieee80211_beacon_alloc(struct ieee80211_node *ni,
	struct ieee80211_beacon_offsets *bo)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ni->ni_ic;
	struct ieee80211_frame *wh;
	struct sk_buff *skb;
	int pktlen;
	u_int8_t *frm;
	struct ieee80211_rateset *rs;

	/*
	 * beacon frame format
	 *	[8] time stamp
	 *	[2] beacon interval
	 *	[2] capability information
	 *	[tlv] ssid
	 *	[tlv] supported rates
	 *	[7] FH/DS parameter set
	 *	[tlv] IBSS/TIM parameter set
	 *	[tlv] country code 
	 *	[3] power constraint
	 *	[5] channel switch announcement
	 *	[3] extended rate phy (ERP)
	 *	[tlv] extended supported rates
	 *	[tlv] WME parameters
	 *	[tlv] WPA/RSN parameters
	 *	[tlv] Atheros Advanced Capabilities
	 *	[tlv] AtherosXR parameters
	 * XXX Vendor-specific OIDs (e.g. Atheros)
	 * NB: we allocate the max space required for the TIM bitmap.
	 */
	rs = &ni->ni_rates;
	pktlen = 8					/* time stamp */
		 + sizeof(u_int16_t)			/* beacon interval */
		 + sizeof(u_int16_t)			/* capability information */
		 + 2 + ni->ni_esslen			/* ssid */
		 + 2 + IEEE80211_RATE_SIZE		/* supported rates */
		 + 7 					/* FH/DS parameters max(7,3) */
		 + 2 + 4 + vap->iv_tim_len		/* IBSS/TIM parameter set*/
		 + ic->ic_country_ie.country_len + 2	/* country code */
		 + 3					/* power constraint */
		 + 5					/* channel switch announcement */
		 + 3					/* ERP */
		 + 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE) /* Ext. Supp. Rates */
		 + (vap->iv_caps & IEEE80211_C_WME ?	/* WME */
			sizeof(struct ieee80211_wme_param) : 0)
		 + (vap->iv_caps & IEEE80211_C_WPA ?	/* WPA 1+2 */
			2 * sizeof(struct ieee80211_ie_wpa) : 0)
		 + sizeof(struct ieee80211_ie_athAdvCap)
#ifdef ATH_SUPERG_XR
		 + (ic->ic_ath_cap & IEEE80211_ATHC_XR ?	/* XR */
			sizeof(struct ieee80211_xr_param) : 0)
#endif
		 ;
	skb = ieee80211_getmgtframe(&frm, pktlen);
	if (skb == NULL) {
		IEEE80211_NOTE(vap, IEEE80211_MSG_ANY, ni,
			"%s: cannot get buf; size %u", __func__, pktlen);
		vap->iv_stats.is_tx_nobuf++;
		return NULL;
	}

	SKB_NI(skb) = ieee80211_ref_node(ni);

	frm = ieee80211_beacon_init(ni, bo, frm);

	skb_trim(skb, frm - skb->data);

	wh = (struct ieee80211_frame *)
		skb_push(skb, sizeof(struct ieee80211_frame));
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
		IEEE80211_FC0_SUBTYPE_BEACON;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	wh->i_dur = 0;
	IEEE80211_ADDR_COPY(wh->i_addr1, ic->ic_dev->broadcast);
	IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr);
	IEEE80211_ADDR_COPY(wh->i_addr3, vap->iv_bssid);
	IEEE80211_DPRINTF(vap, IEEE80211_MSG_ASSOC,
			  "%s: beacon bssid:" MAC_FMT "\n",
			  __func__, MAC_ADDR(wh->i_addr3));
	*(u_int16_t *)wh->i_seq = 0;

	return skb;
}
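
The allocator reserves worst-case space for every optional IE (including a maximal TIM bitmap), writes what is actually needed, and then trims. A sketch of the idiom in this madwifi context, where write_ies() is a hypothetical stand-in for ieee80211_beacon_init():

/* Sketch: allocate at the worst-case size, append the IEs, trim to fit. */
static struct sk_buff *
alloc_mgt_frame_trimmed(u_int8_t *(*write_ies)(u_int8_t *), int max_pktlen)
{
	u_int8_t *frm;
	struct sk_buff *skb = ieee80211_getmgtframe(&frm, max_pktlen);

	if (skb == NULL)
		return NULL;
	frm = write_ies(frm);			/* returns one past the last IE byte */
	skb_trim(skb, frm - skb->data);		/* drop the unused reserve */
	return skb;
}
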
Example #6
/*
 * Update the dynamic parts of a beacon frame based on the current state.
 */
int
ieee80211_beacon_update(struct ieee80211_node *ni,
	struct ieee80211_beacon_offsets *bo, struct sk_buff *skb, 
	int mcast, int *is_dtim)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ni->ni_ic;
	int len_changed = 0;
	u_int16_t capinfo;

	IEEE80211_LOCK_IRQ(ic);

	/* Check if we need to change channel right now */
	if ((ic->ic_flags & IEEE80211_F_DOTH) &&
	    (vap->iv_flags & IEEE80211_F_CHANSWITCH)) {
		struct ieee80211_channel *c = 
			ieee80211_doth_findchan(vap, ic->ic_chanchange_chan);
		
		if (!vap->iv_chanchange_count && !c) {
			vap->iv_flags &= ~IEEE80211_F_CHANSWITCH;
			ic->ic_flags &= ~IEEE80211_F_CHANSWITCH;
		} else if (vap->iv_chanchange_count &&
			   ((!ic->ic_chanchange_tbtt) ||
			    (vap->iv_chanchange_count == ic->ic_chanchange_tbtt))) {
			u_int8_t *frm;

			IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH,
					"%s: reinit beacon\n", __func__);

			/* NB: ic_bsschan is in the DSPARMS beacon IE, so must
			 * set this prior to the beacon re-init, below. */
			if (c == NULL) {
				/* Requested channel invalid; drop the channel
				 * switch announcement and do nothing. */
				IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH,
						"%s: find channel failure\n", __func__);
			} else
				ic->ic_bsschan = c;

			skb_pull(skb, sizeof(struct ieee80211_frame));
			skb_trim(skb, 0);
			frm = skb->data;
			skb_put(skb, ieee80211_beacon_init(ni, bo, frm) - frm);
			skb_push(skb, sizeof(struct ieee80211_frame));

			vap->iv_chanchange_count = 0;
			vap->iv_flags &= ~IEEE80211_F_CHANSWITCH;
			ic->ic_flags &= ~IEEE80211_F_CHANSWITCH;

			/* NB: Only for the first VAP to get here, and when we
			 * have a valid channel to which to change. */
			if (c && (ic->ic_curchan != c)) {
				ic->ic_curchan = c;
				ic->ic_set_channel(ic);
			}

			len_changed = 1;
		}
	}

	/* XXX faster to recalculate entirely or just changes? */
	if (vap->iv_opmode == IEEE80211_M_IBSS)
		capinfo = IEEE80211_CAPINFO_IBSS;
	else
		capinfo = IEEE80211_CAPINFO_ESS;

	if (vap->iv_flags & IEEE80211_F_PRIVACY)
		capinfo |= IEEE80211_CAPINFO_PRIVACY;
	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
	    IEEE80211_IS_CHAN_2GHZ(ic->ic_bsschan))
		capinfo |= IEEE80211_CAPINFO_SHORT_PREAMBLE;
	if (ic->ic_flags & IEEE80211_F_SHSLOT)
		capinfo |= IEEE80211_CAPINFO_SHORT_SLOTTIME;
	if (ic->ic_flags & IEEE80211_F_DOTH)
		capinfo |= IEEE80211_CAPINFO_SPECTRUM_MGMT;

	*bo->bo_caps = htole16(capinfo);

	if (vap->iv_flags & IEEE80211_F_WME) {
		struct ieee80211_wme_state *wme = &ic->ic_wme;

		/*
		 * Check for aggressive mode change.  When there is
		 * significant high priority traffic in the BSS
		 * throttle back BE traffic by using conservative
		 * parameters.  Otherwise BE uses aggressive params
		 * to optimize performance of legacy/non-QoS traffic.
		 */
		if (wme->wme_flags & WME_F_AGGRMODE) {
			if (wme->wme_hipri_traffic >
			    wme->wme_hipri_switch_thresh) {
				IEEE80211_NOTE(vap, IEEE80211_MSG_WME, ni,
					"%s: traffic %u, disable aggressive mode",
					__func__, wme->wme_hipri_traffic);
				wme->wme_flags &= ~WME_F_AGGRMODE;
				ieee80211_wme_updateparams_locked(vap);
				wme->wme_hipri_traffic =
					wme->wme_hipri_switch_hysteresis;
			} else
				wme->wme_hipri_traffic = 0;
		} else {
			if (wme->wme_hipri_traffic <=
			    wme->wme_hipri_switch_thresh) {
				IEEE80211_NOTE(vap, IEEE80211_MSG_WME, ni,
					"%s: traffic %u, enable aggressive mode",
					__func__, wme->wme_hipri_traffic);
				wme->wme_flags |= WME_F_AGGRMODE;
				ieee80211_wme_updateparams_locked(vap);
				wme->wme_hipri_traffic = 0;
			} else
				wme->wme_hipri_traffic =
					wme->wme_hipri_switch_hysteresis;
		}
		/* XXX multi-bss */
		if (vap->iv_flags & IEEE80211_F_WMEUPDATE) {
			(void) ieee80211_add_wme_param(bo->bo_wme, wme, IEEE80211_VAP_UAPSD_ENABLED(vap));
			vap->iv_flags &= ~IEEE80211_F_WMEUPDATE;
		}
	}

	if (vap->iv_opmode == IEEE80211_M_HOSTAP) {	/* NB: no IBSS support*/
		struct ieee80211_tim_ie *tie =
			(struct ieee80211_tim_ie *) bo->bo_tim;
		if (vap->iv_flags & IEEE80211_F_TIMUPDATE) {
			u_int timlen, timoff, i;
			/*
			 * ATIM/DTIM needs updating.  If it fits in the
			 * current space allocated then just copy in the
			 * new bits.  Otherwise we need to move any trailing
			 * data to make room.  Note that we know there is
			 * contiguous space because ieee80211_beacon_allocate
			 * ensures there is space in the mbuf to write a
			 * maximal-size virtual bitmap (based on ic_max_aid).
			 */
			/*
			 * Calculate the bitmap size and offset, copy any
			 * trailer out of the way, and then copy in the
			 * new bitmap and update the information element.
			 * Note that the tim bitmap must contain at least
			 * one byte and any offset must be even.
			 */
			if (vap->iv_ps_pending != 0) {
				timoff = 128;		/* impossibly large */
				for (i = 0; i < vap->iv_tim_len; i++)
					if (vap->iv_tim_bitmap[i]) {
						timoff = i & BITCTL_BUFD_UCAST_AID_MASK;
						break;
					}
				KASSERT(timoff != 128, ("tim bitmap empty!"));
				for (i = vap->iv_tim_len-1; i >= timoff; i--)
					if (vap->iv_tim_bitmap[i])
						break;
				timlen = 1 + (i - timoff);
			} else {
				timoff = 0;
				timlen = 1;
			}
			if (timlen != bo->bo_tim_len) {
				/* copy up/down trailer */
				int trailer_adjust =
					(tie->tim_bitmap+timlen) - (bo->bo_tim_trailer);
				memmove(tie->tim_bitmap+timlen, bo->bo_tim_trailer,
					bo->bo_tim_trailerlen);
				bo->bo_tim_trailer = tie->tim_bitmap+timlen;
				bo->bo_chanswitch += trailer_adjust;
				bo->bo_wme += trailer_adjust;
				bo->bo_erp += trailer_adjust;
				bo->bo_ath_caps += trailer_adjust;
				bo->bo_xr += trailer_adjust;
				if (timlen > bo->bo_tim_len)
					skb_put(skb, timlen - bo->bo_tim_len);
				else
					skb_trim(skb, skb->len - (bo->bo_tim_len - timlen));
				bo->bo_tim_len = timlen;

				/* update information element */
				tie->tim_len = 3 + timlen;
				tie->tim_bitctl = timoff;
				len_changed = 1;
			}
			memcpy(tie->tim_bitmap, vap->iv_tim_bitmap + timoff,
				bo->bo_tim_len);

			vap->iv_flags &= ~IEEE80211_F_TIMUPDATE;

			IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
				"%s: TIM updated, pending %u, off %u, len %u",
				__func__, vap->iv_ps_pending, timoff, timlen);
		}
		/* count down DTIM period */
		if (tie->tim_count == 0)
			tie->tim_count = tie->tim_period - 1;
		else
			tie->tim_count--;
		/* update state for buffered multicast frames on DTIM */
		if (mcast && (tie->tim_count == 0))
			tie->tim_bitctl |= BITCTL_BUFD_MCAST;
		else
			tie->tim_bitctl &= ~BITCTL_BUFD_MCAST;
		*is_dtim = (tie->tim_count == 0);
	}

	/* Whenever we want to switch to a new channel, we need to follow the
	 * following steps:
	 *
	 * - iv_chanchange_count= number of beacon intervals elapsed (0)
	 * - ic_chanchange_tbtt = number of beacon intervals before switching
	 * - ic_chanchange_chan = IEEE channel number after switching
	 * - ic_flags |= IEEE80211_F_CHANSWITCH */

	if (IEEE80211_IS_MODE_BEACON(vap->iv_opmode)) {

		if ((ic->ic_flags & IEEE80211_F_DOTH) &&
		    (ic->ic_flags & IEEE80211_F_CHANSWITCH)) {
			struct ieee80211_ie_csa *csa_ie =
				(struct ieee80211_ie_csa *)bo->bo_chanswitch;

			IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH, 
					"%s: Sending 802.11h chanswitch IE: "
					"%d/%d\n", __func__, 
					ic->ic_chanchange_chan, 
					ic->ic_chanchange_tbtt);
			if (!vap->iv_chanchange_count) {
				vap->iv_flags |= IEEE80211_F_CHANSWITCH;

				/* copy out trailer to open up a slot */
				memmove(bo->bo_chanswitch + sizeof(*csa_ie),
					bo->bo_chanswitch, 
					bo->bo_chanswitch_trailerlen);

				/* add ie in opened slot */
				csa_ie->csa_id = IEEE80211_ELEMID_CHANSWITCHANN;
				/* fixed length */
				csa_ie->csa_len = sizeof(*csa_ie) - 2;
				/* STA shall transmit no further frames */
				csa_ie->csa_mode = 1;
				csa_ie->csa_chan = ic->ic_chanchange_chan;
				csa_ie->csa_count = ic->ic_chanchange_tbtt;

				/* update the trailer lens */
				bo->bo_chanswitch_trailerlen += sizeof(*csa_ie);
				bo->bo_tim_trailerlen += sizeof(*csa_ie);
				bo->bo_wme += sizeof(*csa_ie);
				bo->bo_erp += sizeof(*csa_ie);
				bo->bo_ath_caps += sizeof(*csa_ie);
				bo->bo_xr += sizeof(*csa_ie);

				/* indicate new beacon length so other layers 
				 * may manage memory */
				skb_put(skb, sizeof(*csa_ie));
				len_changed = 1;
			} else if(csa_ie->csa_count)
				csa_ie->csa_count--;
			
			vap->iv_chanchange_count++;
			IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH,
				"%s: CHANSWITCH IE, change in %d TBTT\n",
				__func__, csa_ie->csa_count);
		}
#ifdef ATH_SUPERG_XR
		if (vap->iv_flags & IEEE80211_F_XRUPDATE) {
			if (vap->iv_xrvap)
				(void)ieee80211_add_xr_param(bo->bo_xr, vap);
			vap->iv_flags &= ~IEEE80211_F_XRUPDATE;
		}
#endif
		if ((ic->ic_flags_ext & IEEE80211_FEXT_ERPUPDATE) && 
				(bo->bo_erp != NULL)) {
			(void)ieee80211_add_erp(bo->bo_erp, ic);
			ic->ic_flags_ext &= ~IEEE80211_FEXT_ERPUPDATE;
		}
	}
	/* if it is a mode change beacon for dynamic turbo case */
	if (((ic->ic_ath_cap & IEEE80211_ATHC_BOOST) != 0) ^
	    IEEE80211_IS_CHAN_TURBO(ic->ic_curchan))
		ieee80211_add_athAdvCap(bo->bo_ath_caps, 
				vap->iv_bss->ni_ath_flags,
				vap->iv_bss->ni_ath_defkeyindex);
	/* add APP_IE buffer if app updated it */
	if (vap->iv_flags_ext & IEEE80211_FEXT_APPIE_UPDATE) {
		/* adjust the buffer size if the size is changed */
		if (vap->app_ie[IEEE80211_APPIE_FRAME_BEACON].length != 
				bo->bo_appie_buf_len) {
			int diff_len;
			diff_len = 
				vap->app_ie[IEEE80211_APPIE_FRAME_BEACON].
					length - 
				bo->bo_appie_buf_len;

			if (diff_len > 0)
				skb_put(skb, diff_len);
			else
				skb_trim(skb, skb->len + diff_len);

			bo->bo_appie_buf_len = 
				vap->app_ie[IEEE80211_APPIE_FRAME_BEACON].
					length;
			/* update the trailer lens */
			bo->bo_chanswitch_trailerlen += diff_len;
			bo->bo_tim_trailerlen += diff_len;

			len_changed = 1;
		}
		memcpy(bo->bo_appie_buf, 
			vap->app_ie[IEEE80211_APPIE_FRAME_BEACON].ie,
			vap->app_ie[IEEE80211_APPIE_FRAME_BEACON].length);

		vap->iv_flags_ext &= ~IEEE80211_FEXT_APPIE_UPDATE;
	}

	IEEE80211_UNLOCK_IRQ(ic);

	return len_changed;
}
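
Both the TIM resize and the APP IE update above use the same grow-or-shrink idiom on an already-built beacon: a positive delta is appended with skb_put(), a negative one removed with skb_trim(). A minimal sketch (beacon_resize is an illustrative name):

/* Sketch: adjust an already-built beacon by diff_len bytes; the caller
 * has already memmove()d any trailing IEs out of (or into) the gap. */
static void beacon_resize(struct sk_buff *skb, int diff_len)
{
	if (diff_len > 0)
		skb_put(skb, diff_len);
	else
		skb_trim(skb, skb->len + diff_len);
}

Example #7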
static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
	struct prism2_wep_data *wep = priv;
	struct blkcipher_desc desc = { .tfm = wep->rx_tfm };
	u32 klen, plen;
	u8 key[WEP_KEY_LEN + 3];
	u8 keyidx, *pos;
	u32 crc;
	u8 icv[4];
	struct scatterlist sg;

	if (skb->len < hdr_len + 8)
		return -1;

	pos = skb->data + hdr_len;
	key[0] = *pos++;
	key[1] = *pos++;
	key[2] = *pos++;
	keyidx = *pos++ >> 6;
	if (keyidx != wep->key_idx)
		return -1;

	klen = 3 + wep->key_len;

	/* Copy rest of the WEP key (the secret part) */
	memcpy(key + 3, wep->key, wep->key_len);

	/* Apply RC4 to data and compute CRC32 over decrypted data */
	plen = skb->len - hdr_len - 8;

	crypto_blkcipher_setkey(wep->rx_tfm, key, klen);
	sg_init_one(&sg, pos, plen + 4);

	if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4))
		return -7;

	crc = ~crc32_le(~0, pos, plen);
	icv[0] = crc;
	icv[1] = crc >> 8;
	icv[2] = crc >> 16;
	icv[3] = crc >> 24;

	if (memcmp(icv, pos + plen, 4) != 0) {
		/* ICV mismatch - drop frame */
		return -2;
	}

	/* Remove IV and ICV */
	memmove(skb->data + 4, skb->data, hdr_len);
	skb_pull(skb, 4);
	skb_trim(skb, skb->len - 4);
	return 0;
}


static int prism2_wep_set_key(void *key, int len, u8 *seq, void *priv)
{
	struct prism2_wep_data *wep = priv;

	if (len < 0 || len > WEP_KEY_LEN)
		return -1;

	memcpy(wep->key, key, len);
	wep->key_len = len;

	return 0;
}


static int prism2_wep_get_key(void *key, int len, u8 *seq, void *priv)
{
	struct prism2_wep_data *wep = priv;

	if (len < wep->key_len)
		return -1;

	memcpy(key, wep->key, wep->key_len);

	return wep->key_len;
}


static char * prism2_wep_print_stats(char *p, void *priv)
{
	struct prism2_wep_data *wep = priv;
	p += sprintf(p, "key[%d] alg=WEP len=%d\n",
		     wep->key_idx, wep->key_len);
	return p;
}


static struct ieee80211_crypto_ops ieee80211_crypt_wep = {
	.name			= "WEP",
	.init			= prism2_wep_init,
	.deinit			= prism2_wep_deinit,
	.encrypt_mpdu		= prism2_wep_encrypt,
	.decrypt_mpdu		= prism2_wep_decrypt,
	.encrypt_msdu		= NULL,
	.decrypt_msdu		= NULL,
	.set_key		= prism2_wep_set_key,
	.get_key		= prism2_wep_get_key,
	.print_stats		= prism2_wep_print_stats,
	.extra_prefix_len	= 4, /* IV */
	.extra_postfix_len	= 4, /* ICV */
	.owner			= THIS_MODULE,
};


int ieee80211_crypto_wep_init(void)
{
	return ieee80211_register_crypto_ops(&ieee80211_crypt_wep);
}


void ieee80211_crypto_wep_exit(void)
{
	ieee80211_unregister_crypto_ops(&ieee80211_crypt_wep);
}


void ieee80211_wep_null(void)
{
	/* Intentionally empty; referenced by callers to keep this module loaded. */
	return;
}
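
The decrypt path verifies WEP's ICV, a little-endian CRC-32 over the decrypted payload stored in its last four bytes. A self-contained sketch of just the check (wep_check_icv is an illustrative name):

#include <linux/crc32.h>
#include <linux/string.h>
#include <linux/types.h>

/* Sketch: pos points at plen decrypted payload bytes followed by the
 * 4-byte ICV; returns 0 on match, -2 on mismatch as above. */
static int wep_check_icv(const u8 *pos, u32 plen)
{
	u32 crc = ~crc32_le(~0, pos, plen);
	u8 icv[4] = { crc, crc >> 8, crc >> 16, crc >> 24 };

	return memcmp(icv, pos + plen, 4) != 0 ? -2 : 0;
}
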
Example #8
File: sb1000.c Project: E-LLP/n900
/* receive a single frame and assemble datagram
 * (this is the heart of the interrupt routine)
 */
static int
sb1000_rx(struct net_device *dev)
{

#define FRAMESIZE 184
	unsigned char st[2], buffer[FRAMESIZE], session_id, frame_id;
	short dlen;
	int ioaddr, ns;
	unsigned int skbsize;
	struct sk_buff *skb;
	struct sb1000_private *lp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;

	/* SB1000 frame constants */
	const int FrameSize = FRAMESIZE;
	const int NewDatagramHeaderSkip = 8;
	const int NewDatagramHeaderSize = NewDatagramHeaderSkip + 18;
	const int NewDatagramDataSize = FrameSize - NewDatagramHeaderSize;
	const int ContDatagramHeaderSkip = 7;
	const int ContDatagramHeaderSize = ContDatagramHeaderSkip + 1;
	const int ContDatagramDataSize = FrameSize - ContDatagramHeaderSize;
	const int TrailerSize = 4;

	ioaddr = dev->base_addr;

	insw(ioaddr, (unsigned short*) st, 1);
#ifdef XXXDEBUG
printk("cm0: received: %02x %02x\n", st[0], st[1]);
#endif /* XXXDEBUG */
	lp->rx_frames++;

	/* decide if it is a good or bad frame */
	for (ns = 0; ns < NPIDS; ns++) {
		session_id = lp->rx_session_id[ns];
		frame_id = lp->rx_frame_id[ns];
		if (st[0] == session_id) {
			if (st[1] == frame_id || (!frame_id && (st[1] & 0xf0) == 0x30)) {
				goto good_frame;
			} else if ((st[1] & 0xf0) == 0x30 && (st[0] & 0x40)) {
				goto skipped_frame;
			} else {
				goto bad_frame;
			}
		} else if (st[0] == (session_id | 0x40)) {
			if ((st[1] & 0xf0) == 0x30) {
				goto skipped_frame;
			} else {
				goto bad_frame;
			}
		}
	}
	goto bad_frame;

skipped_frame:
	stats->rx_frame_errors++;
	skb = lp->rx_skb[ns];
	if (sb1000_debug > 1)
		printk(KERN_WARNING "%s: missing frame(s): got %02x %02x "
			"expecting %02x %02x\n", dev->name, st[0], st[1],
			skb ? session_id : session_id | 0x40, frame_id);
	if (skb) {
		dev_kfree_skb(skb);
		skb = NULL;
	}

good_frame:
	lp->rx_frame_id[ns] = 0x30 | ((st[1] + 1) & 0x0f);
	/* new datagram */
	if (st[0] & 0x40) {
		/* get data length */
		insw(ioaddr, buffer, NewDatagramHeaderSize / 2);
#ifdef XXXDEBUG
printk("cm0: IP identification: %02x%02x  fragment offset: %02x%02x\n", buffer[30], buffer[31], buffer[32], buffer[33]);
#endif /* XXXDEBUG */
		if (buffer[0] != NewDatagramHeaderSkip) {
			if (sb1000_debug > 1)
				printk(KERN_WARNING "%s: new datagram header skip error: "
					"got %02x expecting %02x\n", dev->name, buffer[0],
					NewDatagramHeaderSkip);
			stats->rx_length_errors++;
			insw(ioaddr, buffer, NewDatagramDataSize / 2);
			goto bad_frame_next;
		}
		dlen = ((buffer[NewDatagramHeaderSkip + 3] & 0x0f) << 8 |
			buffer[NewDatagramHeaderSkip + 4]) - 17;
		if (dlen > SB1000_MRU) {
			if (sb1000_debug > 1)
				printk(KERN_WARNING "%s: datagram length (%d) greater "
					"than MRU (%d)\n", dev->name, dlen, SB1000_MRU);
			stats->rx_length_errors++;
			insw(ioaddr, buffer, NewDatagramDataSize / 2);
			goto bad_frame_next;
		}
		lp->rx_dlen[ns] = dlen;
		/* compute size to allocate for datagram */
		skbsize = dlen + FrameSize;
		if ((skb = alloc_skb(skbsize, GFP_ATOMIC)) == NULL) {
			if (sb1000_debug > 1)
				printk(KERN_WARNING "%s: can't allocate %d bytes long "
					"skbuff\n", dev->name, skbsize);
			stats->rx_dropped++;
			insw(ioaddr, buffer, NewDatagramDataSize / 2);
			goto dropped_frame;
		}
		skb->dev = dev;
		skb_reset_mac_header(skb);
		skb->protocol = (unsigned short) buffer[NewDatagramHeaderSkip + 16];
		insw(ioaddr, skb_put(skb, NewDatagramDataSize),
			NewDatagramDataSize / 2);
		lp->rx_skb[ns] = skb;
	} else {
		/* continuation of previous datagram */
		insw(ioaddr, buffer, ContDatagramHeaderSize / 2);
		if (buffer[0] != ContDatagramHeaderSkip) {
			if (sb1000_debug > 1)
				printk(KERN_WARNING "%s: cont datagram header skip error: "
					"got %02x expecting %02x\n", dev->name, buffer[0],
					ContDatagramHeaderSkip);
			stats->rx_length_errors++;
			insw(ioaddr, buffer, ContDatagramDataSize / 2);
			goto bad_frame_next;
		}
		skb = lp->rx_skb[ns];
		insw(ioaddr, skb_put(skb, ContDatagramDataSize),
			ContDatagramDataSize / 2);
		dlen = lp->rx_dlen[ns];
	}
	if (skb->len < dlen + TrailerSize) {
		lp->rx_session_id[ns] &= ~0x40;
		return 0;
	}

	/* datagram completed: send to upper level */
	skb_trim(skb, dlen);
	netif_rx(skb);
	dev->last_rx = jiffies;
	stats->rx_bytes+=dlen;
	stats->rx_packets++;
	lp->rx_skb[ns] = NULL;
	lp->rx_session_id[ns] |= 0x40;
	return 0;

bad_frame:
	insw(ioaddr, buffer, FrameSize / 2);
	if (sb1000_debug > 1)
		printk(KERN_WARNING "%s: frame error: got %02x %02x\n",
			dev->name, st[0], st[1]);
	stats->rx_frame_errors++;
bad_frame_next:
	if (sb1000_debug > 2)
		sb1000_print_status_buffer(dev->name, st, buffer, FrameSize);
dropped_frame:
	stats->rx_errors++;
	if (ns < NPIDS) {
		if ((skb = lp->rx_skb[ns])) {
			dev_kfree_skb(skb);
			lp->rx_skb[ns] = NULL;
		}
		lp->rx_session_id[ns] |= 0x40;
	}
	return -1;
}
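
sb1000_rx() accumulates 184-byte frames into one skb until the whole datagram plus its 4-byte trailer has arrived, then trims away both the trailer and any frame padding in a single call. A sketch of the completion test (datagram_complete is an illustrative name):

#include <linux/skbuff.h>

/* Sketch: returns true once the accumulated skb covers the datagram
 * (dlen bytes) plus the 4-byte trailer; the trim leaves exactly the
 * datagram for netif_rx(). */
static bool datagram_complete(struct sk_buff *skb, short dlen)
{
	const int trailer_size = 4;

	if (skb->len < dlen + trailer_size)
		return false;		/* wait for continuation frames */
	skb_trim(skb, dlen);		/* drop trailer and padding */
	return true;
}
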
Example #9
int ip_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
{
	struct iphdr *iph = skb->h.iph;
	struct sock *raw_sk=NULL;
	unsigned char hash;
	unsigned char flag = 0;
	struct inet_protocol *ipprot;
	int brd=IS_MYADDR;
	struct options * opt = NULL;
	int is_frag=0;
	__u32 daddr;

#ifdef CONFIG_FIREWALL
	int fwres;
	__u16 rport;
#endif	
#ifdef CONFIG_IP_MROUTE
	int mroute_pkt=0;
#endif	

#ifdef CONFIG_NET_IPV6
	/* 
	 *	Intercept IPv6 frames. We dump ST-II and invalid types just below..
	 */
	 
	if(iph->version == 6)
		return ipv6_rcv(skb,dev,pt);
#endif		

	ip_statistics.IpInReceives++;


	/*
	 *	Tag the ip header of this packet so we can find it
	 */

	skb->ip_hdr = iph;

	/*
	 *	RFC1122: 3.1.2.2 MUST silently discard any IP frame that fails the checksum.
	 *	RFC1122: 3.1.2.3 MUST discard a frame with invalid source address [NEEDS FIXING].
	 *
	 *	Is the datagram acceptable?
	 *
	 *	1.	Length at least the size of an ip header
	 *	2.	Version of 4
	 *	3.	Checksums correctly. [Speed optimisation for later, skip loopback checksums]
	 *	4.	Doesn't have a bogus length
	 *	(5.	We ought to check for IP multicast addresses and undefined types.. does this matter ?)
	 */

	if (skb->len<sizeof(struct iphdr) || iph->ihl<5 || iph->version != 4 || ip_fast_csum((unsigned char *)iph, iph->ihl) !=0
		|| skb->len < ntohs(iph->tot_len))
	{
		ip_statistics.IpInHdrErrors++;
		kfree_skb(skb, FREE_WRITE);
		return(0);
	}

	/*
	 *	Our transport medium may have padded the buffer out. Now we know it
	 *	is IP we can trim to the true length of the frame.
	 *	Note this now means skb->len holds ntohs(iph->tot_len).
	 */

	skb_trim(skb,ntohs(iph->tot_len));
	
	if(skb->len < (iph->ihl<<2))
	{
		ip_statistics.IpInHdrErrors++;
		kfree_skb(skb, FREE_WRITE);
		return 0;
	}

	/*
	 *	Account for the packet (even if the packet is
	 *	not accepted by the firewall!). We do this after
	 *	the sanity checks and the additional ihl check
	 *	so we don't account garbage as we might do before.
	 */

#ifdef CONFIG_IP_ACCT
	ip_fw_chk(iph,dev,NULL,ip_acct_chain,0,IP_FW_MODE_ACCT_IN);
#endif	

	/*
	 *	Try to select closest <src,dst> alias device, if any.
	 *	net_alias_dev_rx32 returns the main device if it
	 *	fails to find another.
	 *	If successful, it also increments the alias rx count.
	 *
	 *	Only makes sense for unicasts - Thanks ANK.
	 */

#ifdef CONFIG_NET_ALIAS
	if (skb->pkt_type == PACKET_HOST && iph->daddr != skb->dev->pa_addr && net_alias_has(skb->dev)) {
		skb->dev = dev = net_alias_dev_rx32(skb->dev, AF_INET, iph->saddr, iph->daddr);
	}
#endif

	if (iph->ihl > 5) 
	{
		skb->ip_summed = 0;
		if (ip_options_compile(NULL, skb))
			return(0);
		opt = (struct options*)skb->proto_priv;
#ifdef CONFIG_IP_NOSR
		if (opt->srr) 
		{
			kfree_skb(skb, FREE_READ);
			return -EINVAL;
		}
#endif					
	}
	
#if defined(CONFIG_IP_TRANSPARENT_PROXY) && !defined(CONFIG_IP_ALWAYS_DEFRAG)
#define CONFIG_IP_ALWAYS_DEFRAG 1
#endif
#ifdef CONFIG_IP_ALWAYS_DEFRAG
	/*
	 * Defragment all incoming traffic before even looking at it.
	 * If you have forwarding enabled, this makes the system a
	 * defragmenting router.  Not a common thing.
	 * You probably DON'T want to enable this unless you have to.
	 * You NEED to use this if you want to use transparent proxying,
	 * otherwise, we can't vouch for your sanity.
	 */

	/*
	 *	See if the frame is fragmented.
	 */
	 
	if(iph->frag_off)
	{
		if (iph->frag_off & htons(IP_MF))
			is_frag|=IPFWD_FRAGMENT;
		/*
		 *	Last fragment ?
		 */
	
		if (iph->frag_off & htons(IP_OFFSET))
			is_frag|=IPFWD_LASTFRAG;
	
		/*
		 *	Reassemble IP fragments.
		 */

		if(is_frag)
		{
			/* Defragment. Obtain the complete packet if there is one */
			skb=ip_defrag(iph,skb,dev);
			if(skb==NULL)
				return 0;
			skb->dev = dev;
			iph=skb->h.iph;
			is_frag = 0;
			/*
			 * When the reassembled packet gets forwarded, the ip
			 * header checksum should be correct.
			 * For better performance, this should actually only
			 * be done in that particular case, i.e. set a flag
			 * here and calculate the checksum in ip_forward.
			 */
			ip_send_check(iph);
		}
	}

#endif
	/*
	 *	See if the firewall wants to dispose of the packet. 
	 */
	
#ifdef	CONFIG_FIREWALL

	if ((fwres=call_in_firewall(PF_INET, skb->dev, iph, &rport))<FW_ACCEPT)
	{
		if(fwres==FW_REJECT)
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0, dev);
		kfree_skb(skb, FREE_WRITE);
		return 0;	
	}

#ifdef	CONFIG_IP_TRANSPARENT_PROXY
	if (fwres==FW_REDIRECT)
		skb->redirport = rport;
	else
#endif
		skb->redirport = 0;
#endif
	
#ifndef CONFIG_IP_ALWAYS_DEFRAG
	/*
	 *	Remember if the frame is fragmented.
	 */
	 
	if(iph->frag_off)
	{
		if (iph->frag_off & htons(IP_MF))
			is_frag|=IPFWD_FRAGMENT;
		/*
		 *	Last fragment ?
		 */
	
		if (iph->frag_off & htons(IP_OFFSET))
			is_frag|=IPFWD_LASTFRAG;
	}
	
#endif
	/*
	 *	Do any IP forwarding required.  chk_addr() is expensive -- avoid it someday.
	 *
	 *	This is inefficient. While finding out if it is for us we could also compute
	 *	the routing table entry. This is where the great unified cache theory comes
	 *	in as and when someone implements it
	 *
	 *	For most hosts over 99% of packets match the first conditional
	 *	and don't go via ip_chk_addr. Note: brd is set to IS_MYADDR at
	 *	function entry.
	 */
	daddr = iph->daddr;
#ifdef CONFIG_IP_TRANSPARENT_PROXY
	/*
	 *	ip_chksock adds still more overhead for forwarded traffic...
	 */
	if ( iph->daddr == skb->dev->pa_addr || skb->redirport || dev->pa_addr == 0 || (brd = ip_chk_addr(iph->daddr)) != 0 || ip_chksock(skb))
#else
	if ( iph->daddr == skb->dev->pa_addr || dev->pa_addr == 0 || (brd = ip_chk_addr(iph->daddr)) != 0)
#endif
	{
		if (opt && opt->srr) 
	        {
			int srrspace, srrptr;
			__u32 nexthop;
			unsigned char * optptr = ((unsigned char *)iph) + opt->srr;

			if (brd != IS_MYADDR || skb->pkt_type != PACKET_HOST) 
			{
				kfree_skb(skb, FREE_WRITE);
				return 0;
			}

			for ( srrptr=optptr[2], srrspace = optptr[1];
			      srrptr <= srrspace;
			      srrptr += 4
			     ) 
			{
				int brd2;
				if (srrptr + 3 > srrspace) 
				{
					icmp_send(skb, ICMP_PARAMETERPROB, 0, opt->srr+2,
						  skb->dev);
					kfree_skb(skb, FREE_WRITE);
					return 0;
				}
				memcpy(&nexthop, &optptr[srrptr-1], 4);
				if ((brd2 = ip_chk_addr(nexthop)) == 0)
					break;
				if (brd2 != IS_MYADDR) 
				{

					/*
					 *	ANK: should we implement weak tunneling of multicasts?
					 *	Are they obsolete? DVMRP specs (RFC-1075) is old enough...
					 *	[They are obsolete]
					 */
					kfree_skb(skb, FREE_WRITE);
					return -EINVAL;
				}
				memcpy(&daddr, &optptr[srrptr-1], 4);
			}
			if (srrptr <= srrspace) 
			{
				opt->srr_is_hit = 1;
				opt->is_changed = 1;
				if (sysctl_ip_forward) {
					if (ip_forward(skb, dev, is_frag, nexthop))
						kfree_skb(skb, FREE_WRITE);
				} else {
					ip_statistics.IpInAddrErrors++;
					kfree_skb(skb, FREE_WRITE);
				}
				return 0;
			}
		}

#ifdef CONFIG_IP_MULTICAST	
		if(!(dev->flags&IFF_ALLMULTI) && brd==IS_MULTICAST && iph->daddr!=IGMP_ALL_HOSTS && !(dev->flags&IFF_LOOPBACK))
		{
			/*
			 *	Check it is for one of our groups
			 */
			struct ip_mc_list *ip_mc=dev->ip_mc_list;
			do
			{
				if(ip_mc==NULL)
				{	
					kfree_skb(skb, FREE_WRITE);
					return 0;
				}
				if(ip_mc->multiaddr==iph->daddr)
					break;
				ip_mc=ip_mc->next;
			}
			while(1);
		}
#endif

#ifndef CONFIG_IP_ALWAYS_DEFRAG
		/*
		 *	Reassemble IP fragments.
		 */

		if(is_frag)
		{
			/* Defragment. Obtain the complete packet if there is one */
			skb=ip_defrag(iph,skb,dev);
			if(skb==NULL)
				return 0;
			skb->dev = dev;
			iph=skb->h.iph;
		}

#endif

#ifdef CONFIG_IP_MASQUERADE
		/*
		 * Do we need to de-masquerade this packet?
		 */
		{
			int ret = ip_fw_demasquerade(&skb,dev);
			if (ret < 0) {
				kfree_skb(skb, FREE_WRITE);
				return 0;
			}

			if (ret)
			{
				struct iphdr *iph=skb->h.iph;
				if (ip_forward(skb, dev, IPFWD_MASQUERADED, iph->daddr))
					kfree_skb(skb, FREE_WRITE);
				return 0;
			}
		}
#endif

		/*
		 *	Point into the IP datagram, just past the header.
		 */

		skb->ip_hdr = iph;
		skb->h.raw += iph->ihl*4;

#ifdef CONFIG_IP_MROUTE		
		/*
		 *	Check the state on multicast routing (multicast and not 224.0.0.z)
		 */
		 
		if(brd==IS_MULTICAST && (iph->daddr&htonl(0xFFFFFF00))!=htonl(0xE0000000))
			mroute_pkt=1;

#endif
		/*
		 *	Deliver to raw sockets. To avoid surplus copies we only
		 *	clone the skb when more than one recipient wants it.
		 *
		 *	RFC 1122: SHOULD pass TOS value up to the transport layer.
		 */
 
		/* Note: See raw.c and net/raw.h, RAWV4_HTABLE_SIZE==MAX_INET_PROTOS */
		hash = iph->protocol & (MAX_INET_PROTOS - 1);

		/* 
		 *	If there may be a raw socket we must check - if not we don't care.
		 */
		 
		if((raw_sk = raw_v4_htable[hash]) != NULL) {
			struct sock *sknext = NULL;
			struct sk_buff *skb1;

			raw_sk = raw_v4_lookup(raw_sk, iph->protocol,
					       iph->saddr, iph->daddr);
			if(raw_sk) {	/* Any raw sockets */
				do {
					/* Find the next */
					sknext = raw_v4_lookup(raw_sk->next,
							       iph->protocol,
							       iph->saddr,
							       iph->daddr);
					if(sknext)
						skb1 = skb_clone(skb, GFP_ATOMIC);
					else
						break;	/* One pending raw socket left */
					if(skb1)
						raw_rcv(raw_sk, skb1, dev, iph->saddr,daddr);
					raw_sk = sknext;
				} while(raw_sk!=NULL);
				
				/*
				 *	Here either raw_sk is the last raw socket, or NULL if none 
				 */
				 
				/*
				 *	We deliver to the last raw socket AFTER the protocol checks as it avoids a surplus copy 
				 */
			}
		}
	
		/*
		 *	skb->h.raw now points at the protocol beyond the IP header.
		 */
	
		for (ipprot = (struct inet_protocol *)inet_protos[hash];ipprot != NULL;ipprot=(struct inet_protocol *)ipprot->next)
		{
			struct sk_buff *skb2;
	
			if (ipprot->protocol != iph->protocol)
				continue;
		       /*
			* 	See if we need to make a copy of it. This will
			* 	only be set if more than one protocol wants it,
			* 	and then not for the last one. If there is a
			*	pending raw delivery, wait for that.
			*/
	
#ifdef CONFIG_IP_MROUTE
			if (ipprot->copy || raw_sk || mroute_pkt)
#else	
			if (ipprot->copy || raw_sk)
#endif			
			{
				skb2 = skb_clone(skb, GFP_ATOMIC);
				if(skb2==NULL)
					continue;
			}
			else
			{
				skb2 = skb;
			}
			flag = 1;

		       /*
			*	Pass on the datagram to each protocol that wants it,
			*	based on the datagram protocol.  We should really
			*	check the protocol handler's return values here...
			*/

			ipprot->handler(skb2, dev, opt, daddr,
				(ntohs(iph->tot_len) - (iph->ihl * 4)),
				iph->saddr, 0, ipprot);
		}

		/*
		 *	All protocols checked.
		 *	If this packet was a broadcast, we may *not* reply to it, since that
		 *	causes (proven, grin) ARP storms and a leakage of memory (i.e. all
		 *	ICMP reply messages get queued up for transmission...)
		 */

#ifdef CONFIG_IP_MROUTE		 
		/*
		 *	Forward the last copy to the multicast router. If
		 *	there is a pending raw delivery however make a copy
		 *	and forward that.
		 */
		 
		if(mroute_pkt)
		{
			flag=1;
			if(raw_sk==NULL)
				ipmr_forward(skb, is_frag);
			else
			{
				struct sk_buff *skb2=skb_clone(skb, GFP_ATOMIC);
				if(skb2)
				{
					skb2->free=1;
					ipmr_forward(skb2, is_frag);
				}
			}
		}
#endif		

		if(raw_sk!=NULL)	/* Shift to last raw user */
			raw_rcv(raw_sk, skb, dev, iph->saddr, daddr);
		else if (!flag)		/* Free and report errors */
		{
			if (brd != IS_BROADCAST && brd!=IS_MULTICAST)
				icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0, dev);	
			kfree_skb(skb, FREE_WRITE);
		}

		return(0);
	}

	/*
	 *	Do any unicast IP forwarding required.
	 */
	
	/*
	 *	Don't forward multicast or broadcast frames.
	 */

	if(skb->pkt_type!=PACKET_HOST || brd==IS_BROADCAST)
	{
		kfree_skb(skb,FREE_WRITE);
		return 0;
	}

	/*
	 *	The packet is for another target. Forward the frame
	 */

	if (sysctl_ip_forward) {
		if (opt && opt->is_strictroute) 
		{
			icmp_send(skb, ICMP_PARAMETERPROB, 0, 16, skb->dev);
			kfree_skb(skb, FREE_WRITE);
			return -1;
		}
		if (ip_forward(skb, dev, is_frag, iph->daddr))
			kfree_skb(skb, FREE_WRITE);
	} else {
/*	printk("Machine %lx tried to use us as a forwarder to %lx but we have forwarding disabled!\n",
			iph->saddr,iph->daddr);*/
		ip_statistics.IpInAddrErrors++;
		kfree_skb(skb, FREE_WRITE);
	}
	return(0);
}
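
The validation order in ip_rcv() matters: the cheap structural checks and the header checksum run first, the trim to tot_len removes link-layer padding, and only then can the options length be checked against the real payload. A sketch of that sequence (ip_rcv_sanity is an illustrative name):

#include <linux/ip.h>
#include <linux/skbuff.h>
#include <net/checksum.h>

/* Sketch: RFC 1122 sanity checks followed by the padding trim; returns
 * 0 if the datagram is acceptable, -1 if it should be counted in
 * IpInHdrErrors and freed. */
static int ip_rcv_sanity(struct sk_buff *skb, struct iphdr *iph)
{
	if (skb->len < sizeof(struct iphdr) || iph->ihl < 5 ||
	    iph->version != 4 ||
	    ip_fast_csum((unsigned char *)iph, iph->ihl) != 0 ||
	    skb->len < ntohs(iph->tot_len))
		return -1;

	skb_trim(skb, ntohs(iph->tot_len));	/* drop link-layer padding */

	if (skb->len < (iph->ihl << 2))		/* options exceed the data */
		return -1;
	return 0;
}
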
Example #10
/*
 * This function transfers received packets from card to driver, performing
 * aggregation if required.
 *
 * For data received on control port, or if aggregation is disabled, the
 * received buffers are uploaded as separate packets. However, if aggregation
 * is enabled and required, the buffers are copied onto an aggregation buffer,
 * provided there is space left, processed and finally uploaded.
 */
static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
					     struct sk_buff *skb, u8 port)
{
	struct sdio_mmc_card *card = adapter->card;
	s32 f_do_rx_aggr = 0;
	s32 f_do_rx_cur = 0;
	s32 f_aggr_cur = 0;
	struct sk_buff *skb_deaggr;
	u32 pind;
	u32 pkt_len, pkt_type = 0;
	u8 *curr_ptr;
	u32 rx_len = skb->len;

	if (port == CTRL_PORT) {
		/* Read the command Resp without aggr */
		dev_dbg(adapter->dev, "info: %s: no aggregation for cmd "
				"response\n", __func__);

		f_do_rx_cur = 1;
		goto rx_curr_single;
	}

	if (!card->mpa_rx.enabled) {
		dev_dbg(adapter->dev, "info: %s: rx aggregation disabled\n",
						__func__);

		f_do_rx_cur = 1;
		goto rx_curr_single;
	}

	if (card->mp_rd_bitmap & (~((u16) CTRL_PORT_MASK))) {
		/* Some more data RX pending */
		dev_dbg(adapter->dev, "info: %s: not last packet\n", __func__);

		if (MP_RX_AGGR_IN_PROGRESS(card)) {
			if (MP_RX_AGGR_BUF_HAS_ROOM(card, skb->len)) {
				f_aggr_cur = 1;
			} else {
				/* No room in Aggr buf, do rx aggr now */
				f_do_rx_aggr = 1;
				f_do_rx_cur = 1;
			}
		} else {
			/* Rx aggr not in progress */
			f_aggr_cur = 1;
		}

	} else {
		/* No more data RX pending */
		dev_dbg(adapter->dev, "info: %s: last packet\n", __func__);

		if (MP_RX_AGGR_IN_PROGRESS(card)) {
			f_do_rx_aggr = 1;
			if (MP_RX_AGGR_BUF_HAS_ROOM(card, skb->len))
				f_aggr_cur = 1;
			else
				/* No room in Aggr buf, do rx aggr now */
				f_do_rx_cur = 1;
		} else {
			f_do_rx_cur = 1;
		}
	}

	if (f_aggr_cur) {
		dev_dbg(adapter->dev, "info: current packet aggregation\n");
		/* Curr pkt can be aggregated */
		MP_RX_AGGR_SETUP(card, skb, port);

		if (MP_RX_AGGR_PKT_LIMIT_REACHED(card) ||
		    MP_RX_AGGR_PORT_LIMIT_REACHED(card)) {
			dev_dbg(adapter->dev, "info: %s: aggregated packet "
					"limit reached\n", __func__);
			/* No more pkts allowed in Aggr buf, rx it */
			f_do_rx_aggr = 1;
		}
	}

	if (f_do_rx_aggr) {
		/* do aggr RX now */
		dev_dbg(adapter->dev, "info: do_rx_aggr: num of packets: %d\n",
		       card->mpa_rx.pkt_cnt);

		if (mwifiex_read_data_sync(adapter, card->mpa_rx.buf,
					   card->mpa_rx.buf_len,
					   (adapter->ioport | 0x1000 |
					    (card->mpa_rx.ports << 4)) +
					   card->mpa_rx.start_port, 1))
			return -1;

		curr_ptr = card->mpa_rx.buf;

		for (pind = 0; pind < card->mpa_rx.pkt_cnt; pind++) {

			/* get curr PKT len & type */
			pkt_len = *(u16 *) &curr_ptr[0];
			pkt_type = *(u16 *) &curr_ptr[2];

			/* copy pkt to deaggr buf */
			skb_deaggr = card->mpa_rx.skb_arr[pind];

			if ((pkt_type == MWIFIEX_TYPE_DATA) && (pkt_len <=
					 card->mpa_rx.len_arr[pind])) {

				memcpy(skb_deaggr->data, curr_ptr, pkt_len);

				skb_trim(skb_deaggr, pkt_len);

				/* Process de-aggr packet */
				mwifiex_decode_rx_packet(adapter, skb_deaggr,
							 pkt_type);
			} else {
				dev_err(adapter->dev, "wrong aggr pkt:"
					" type=%d len=%d max_len=%d\n",
					pkt_type, pkt_len,
					card->mpa_rx.len_arr[pind]);
				dev_kfree_skb_any(skb_deaggr);
			}
			curr_ptr += card->mpa_rx.len_arr[pind];
		}
		MP_RX_AGGR_BUF_RESET(card);
	}

rx_curr_single:
	if (f_do_rx_cur) {
		dev_dbg(adapter->dev, "info: RX: port: %d, rx_len: %d\n",
			port, rx_len);

		if (mwifiex_sdio_card_to_host(adapter, &pkt_type,
					      skb->data, skb->len,
					      adapter->ioport + port))
			return -1;

		mwifiex_decode_rx_packet(adapter, skb, pkt_type);
	}

	return 0;
}
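
Each block in the aggregation buffer begins with a little-endian u16 length and u16 type, but consecutive blocks are spaced by the per-packet sizes recorded at setup time (len_arr), not by pkt_len itself. A sketch of the de-aggregation walk, with illustrative names and types:

#include <linux/skbuff.h>
#include <linux/string.h>

/* Sketch: copy each aggregated packet into its pre-staged skb and trim
 * it to the real packet length; blocks advance by the staged block size. */
static void walk_aggr_buf(u8 *buf, u32 pkt_cnt, const u16 *len_arr,
			  struct sk_buff **skb_arr)
{
	u8 *curr_ptr = buf;
	u32 pind;

	for (pind = 0; pind < pkt_cnt; pind++) {
		u16 pkt_len = *(u16 *)&curr_ptr[0];	/* LE16 on this bus */

		if (pkt_len <= len_arr[pind]) {
			memcpy(skb_arr[pind]->data, curr_ptr, pkt_len);
			skb_trim(skb_arr[pind], pkt_len);
			/* hand skb_arr[pind] to the decode path here */
		}
		curr_ptr += len_arr[pind];	/* next staged block */
	}
}
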
Example #11
static int rtnetlink_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
				 int type, u32 pid, u32 seq, u32 change)
{
	struct ifinfomsg *r;
	struct nlmsghdr  *nlh;
	unsigned char	 *b = skb->tail;

	nlh = NLMSG_PUT(skb, pid, seq, type, sizeof(*r));
	if (pid) nlh->nlmsg_flags |= NLM_F_MULTI;
	r = NLMSG_DATA(nlh);
	r->ifi_family = AF_UNSPEC;
	r->ifi_type = dev->type;
	r->ifi_index = dev->ifindex;
	r->ifi_flags = dev_get_flags(dev);
	r->ifi_change = change;

	RTA_PUT(skb, IFLA_IFNAME, strlen(dev->name)+1, dev->name);

	if (1) {
		u32 txqlen = dev->tx_queue_len;
		RTA_PUT(skb, IFLA_TXQLEN, sizeof(txqlen), &txqlen);
	}

	if (1) {
		u32 weight = dev->weight;
		RTA_PUT(skb, IFLA_WEIGHT, sizeof(weight), &weight);
	}

	if (1) {
		struct rtnl_link_ifmap map = {
			.mem_start   = dev->mem_start,
			.mem_end     = dev->mem_end,
			.base_addr   = dev->base_addr,
			.irq         = dev->irq,
			.dma         = dev->dma,
			.port        = dev->if_port,
		};
		RTA_PUT(skb, IFLA_MAP, sizeof(map), &map);
	}

	if (dev->addr_len) {
		RTA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);
		RTA_PUT(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast);
	}

	if (1) {
		u32 mtu = dev->mtu;
		RTA_PUT(skb, IFLA_MTU, sizeof(mtu), &mtu);
	}

	if (dev->ifindex != dev->iflink) {
		u32 iflink = dev->iflink;
		RTA_PUT(skb, IFLA_LINK, sizeof(iflink), &iflink);
	}

	if (dev->qdisc_sleeping)
		RTA_PUT(skb, IFLA_QDISC,
			strlen(dev->qdisc_sleeping->ops->id) + 1,
			dev->qdisc_sleeping->ops->id);
	
	if (dev->master) {
		u32 master = dev->master->ifindex;
		RTA_PUT(skb, IFLA_MASTER, sizeof(master), &master);
	}

	if (dev->get_stats) {
		unsigned long *stats = (unsigned long*)dev->get_stats(dev);
		if (stats) {
			struct rtattr  *a;
			__u32	       *s;
			int		i;
			int		n = sizeof(struct rtnl_link_stats)/4;

			a = __RTA_PUT(skb, IFLA_STATS, n*4);
			s = RTA_DATA(a);
			for (i=0; i<n; i++)
				s[i] = stats[i];
		}
	}
	nlh->nlmsg_len = skb->tail - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

static int rtnetlink_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx;
	int s_idx = cb->args[0];
	struct net_device *dev;

	read_lock(&dev_base_lock);
	for (dev=dev_base, idx=0; dev; dev = dev->next, idx++) {
		if (idx < s_idx)
			continue;
		if (rtnetlink_fill_ifinfo(skb, dev, RTM_NEWLINK, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, 0) <= 0)
			break;
	}
	read_unlock(&dev_base_lock);
	cb->args[0] = idx;

	return skb->len;
}
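
The fill function relies on the old NLMSG_PUT()/RTA_PUT() macros, which jump to nlmsg_failure/rtattr_failure when the skb runs out of tailroom; trimming back to the saved tail mark erases the half-built message. A stripped-down sketch of the pattern (fill_one_msg is an illustrative name):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Sketch: save the tail, append, and on overflow trim back so the dump
 * skb never carries a truncated message. */
static int fill_one_msg(struct sk_buff *skb, struct net_device *dev,
			u32 pid, u32 seq)
{
	unsigned char *b = skb->tail;		/* rollback mark */
	struct nlmsghdr *nlh;

	nlh = NLMSG_PUT(skb, pid, seq, RTM_NEWLINK, sizeof(struct ifinfomsg));
	/* NLMSG_DATA(nlh) would be filled in here */
	RTA_PUT(skb, IFLA_IFNAME, strlen(dev->name) + 1, dev->name);

	nlh->nlmsg_len = skb->tail - b;		/* finalize on success */
	return skb->len;

nlmsg_failure:					/* NLMSG_PUT ran out of room */
rtattr_failure:					/* RTA_PUT ran out of room */
	skb_trim(skb, b - skb->data);		/* erase the partial message */
	return -1;
}
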
Example #12
void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct rxdone_entry_desc rxdesc;
	struct sk_buff *skb;
	struct ieee80211_rx_status *rx_status;
	unsigned int header_length;
	int rate_idx;

	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) ||
	    !test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		goto submit_entry;

	if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
		goto submit_entry;

	/*
	 * Allocate a new sk_buff. If no new buffer is available, drop the
	 * received frame and reuse the existing buffer.
	 */
	skb = rt2x00queue_alloc_rxskb(entry, gfp);
	if (!skb)
		goto submit_entry;

	/*
	 * Unmap the skb.
	 */
	rt2x00queue_unmap_skb(entry);

	/*
	 * Extract the RXD details.
	 */
	memset(&rxdesc, 0, sizeof(rxdesc));
	rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);

	/*
	 * Check for valid size in case we get corrupted descriptor from
	 * hardware.
	 */
	if (unlikely(rxdesc.size == 0 ||
		     rxdesc.size > entry->queue->data_size)) {
		ERROR(rt2x00dev, "Wrong frame size %d max %d.\n",
			rxdesc.size, entry->queue->data_size);
		dev_kfree_skb(entry->skb);
		goto renew_skb;
	}

	/*
	 * The data behind the ieee80211 header must be
	 * aligned on a 4 byte boundary.
	 */
	header_length = ieee80211_get_hdrlen_from_skb(entry->skb);

	/*
	 * Hardware might have stripped the IV/EIV/ICV data,
	 * in that case it is possible that the data was
	 * provided separately (through hardware descriptor)
	 * in which case we should reinsert the data into the frame.
	 */
	if ((rxdesc.dev_flags & RXDONE_CRYPTO_IV) &&
	    (rxdesc.flags & RX_FLAG_IV_STRIPPED))
		rt2x00crypto_rx_insert_iv(entry->skb, header_length,
					  &rxdesc);
	else if (header_length &&
		 (rxdesc.size > header_length) &&
		 (rxdesc.dev_flags & RXDONE_L2PAD))
		rt2x00queue_remove_l2pad(entry->skb, header_length);

	/* Trim buffer to correct size */
	skb_trim(entry->skb, rxdesc.size);

	/*
	 * Translate the signal to the correct bitrate index.
	 */
	rate_idx = rt2x00lib_rxdone_read_signal(rt2x00dev, &rxdesc);
	if (rxdesc.rate_mode == RATE_MODE_HT_MIX ||
	    rxdesc.rate_mode == RATE_MODE_HT_GREENFIELD)
		rxdesc.flags |= RX_FLAG_HT;

	/*
	 * Check if this is a beacon, and more frames have been
	 * buffered while we were in powersaving mode.
	 */
	rt2x00lib_rxdone_check_ps(rt2x00dev, entry->skb, &rxdesc);

	/*
	 * Update extra components
	 */
	rt2x00link_update_stats(rt2x00dev, entry->skb, &rxdesc);
	rt2x00debug_update_crypto(rt2x00dev, &rxdesc);
	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_RXDONE, entry->skb);

	/*
	 * Initialize RX status information, and send frame
	 * to mac80211.
	 */
	rx_status = IEEE80211_SKB_RXCB(entry->skb);
	rx_status->mactime = rxdesc.timestamp;
	rx_status->band = rt2x00dev->curr_band;
	rx_status->freq = rt2x00dev->curr_freq;
	rx_status->rate_idx = rate_idx;
	rx_status->signal = rxdesc.rssi;
	rx_status->flag = rxdesc.flags;
	rx_status->antenna = rt2x00dev->link.ant.active.rx;

	ieee80211_rx_ni(rt2x00dev->hw, entry->skb);

renew_skb:
	/*
	 * Replace the skb with the freshly allocated one.
	 */
	entry->skb = skb;

submit_entry:
	entry->flags = 0;
	rt2x00queue_index_inc(entry, Q_INDEX_DONE);
	if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
	    test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		rt2x00dev->ops->lib->clear_entry(entry);
}
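
rt2x00lib_rxdone() never trusts rxdesc.size: a corrupted descriptor could otherwise make the frame appear larger than the buffer it sits in. A sketch of the bound check that precedes the trim (rx_trim_to_desc is an illustrative name):

#include <linux/errno.h>
#include <linux/skbuff.h>

/* Sketch: bound the descriptor-reported size before trimming. */
static int rx_trim_to_desc(struct sk_buff *skb, u32 size, u32 max_size)
{
	if (size == 0 || size > max_size)
		return -EINVAL;		/* corrupt descriptor: drop frame */
	skb_trim(skb, size);
	return 0;
}

Example #13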
/* Send RST reply */
static void send_reset(struct sk_buff *oldskb, int hook)
{
	struct sk_buff *nskb;
	struct iphdr *iph = oldskb->nh.iph;
	struct tcphdr _otcph, *oth, *tcph;
	struct rtable *rt;
	u_int16_t tmp_port;
	u_int32_t tmp_addr;
	unsigned int tcplen;
	int needs_ack;
	int hh_len;

	/* IP header checks: fragment. */
	if (oldskb->nh.iph->frag_off & htons(IP_OFFSET))
		return;

	oth = skb_header_pointer(oldskb, oldskb->nh.iph->ihl * 4,
				 sizeof(_otcph), &_otcph);
	if (oth == NULL)
 		return;

	/* No RST for RST. */
	if (oth->rst)
		return;

	/* Check checksum */
	tcplen = oldskb->len - iph->ihl * 4;
	if (((hook != NF_IP_LOCAL_IN && oldskb->ip_summed != CHECKSUM_HW) ||
	     (hook == NF_IP_LOCAL_IN &&
	      oldskb->ip_summed != CHECKSUM_UNNECESSARY)) &&
	    csum_tcpudp_magic(iph->saddr, iph->daddr, tcplen, IPPROTO_TCP,
	                      oldskb->ip_summed == CHECKSUM_HW ? oldskb->csum :
	                      skb_checksum(oldskb, iph->ihl * 4, tcplen, 0)))
		return;

	if ((rt = route_reverse(oldskb, oth, hook)) == NULL)
		return;

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	/* We need a linear, writeable skb.  We also need to expand
	   headroom in case hh_len of incoming interface < hh_len of
	   outgoing interface */
	nskb = skb_copy_expand(oldskb, hh_len, skb_tailroom(oldskb),
			       GFP_ATOMIC);
	if (!nskb) {
		dst_release(&rt->u.dst);
		return;
	}

	dst_release(nskb->dst);
	nskb->dst = &rt->u.dst;

	/* This packet will not be the same as the other: clear nf fields */
	nf_reset(nskb);
	nskb->nfmark = 0;
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(nskb->nf_bridge);
	nskb->nf_bridge = NULL;
#endif

	tcph = (struct tcphdr *)((u_int32_t*)nskb->nh.iph + nskb->nh.iph->ihl);

	/* Swap source and dest */
	tmp_addr = nskb->nh.iph->saddr;
	nskb->nh.iph->saddr = nskb->nh.iph->daddr;
	nskb->nh.iph->daddr = tmp_addr;
	tmp_port = tcph->source;
	tcph->source = tcph->dest;
	tcph->dest = tmp_port;

	/* Truncate to length (no data) */
	tcph->doff = sizeof(struct tcphdr)/4;
	skb_trim(nskb, nskb->nh.iph->ihl*4 + sizeof(struct tcphdr));
	nskb->nh.iph->tot_len = htons(nskb->len);

	if (tcph->ack) {
		needs_ack = 0;
		tcph->seq = oth->ack_seq;
		tcph->ack_seq = 0;
	} else {
		needs_ack = 1;
		tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin
				      + oldskb->len - oldskb->nh.iph->ihl*4
				      - (oth->doff<<2));
		tcph->seq = 0;
	}

	/* Reset flags */
	((u_int8_t *)tcph)[13] = 0;
	tcph->rst = 1;
	tcph->ack = needs_ack;

	tcph->window = 0;
	tcph->urg_ptr = 0;

	/* Adjust TCP checksum */
	tcph->check = 0;
	tcph->check = tcp_v4_check(tcph, sizeof(struct tcphdr),
				   nskb->nh.iph->saddr,
				   nskb->nh.iph->daddr,
				   csum_partial((char *)tcph,
						sizeof(struct tcphdr), 0));

	/* Adjust IP TTL, DF */
	nskb->nh.iph->ttl = dst_metric(nskb->dst, RTAX_HOPLIMIT);
	/* Set DF, id = 0 */
	nskb->nh.iph->frag_off = htons(IP_DF);
	nskb->nh.iph->id = 0;

	/* Adjust IP checksum */
	nskb->nh.iph->check = 0;
	nskb->nh.iph->check = ip_fast_csum((unsigned char *)nskb->nh.iph, 
					   nskb->nh.iph->ihl);

	/* "Never happens" */
	if (nskb->len > dst_mtu(nskb->dst))
		goto free_nskb;

	nf_ct_attach(nskb, oldskb);

	NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, nskb, NULL, nskb->dst->dev,
		dst_output);
	return;

 free_nskb:
	kfree_skb(nskb);
}
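
The reset reply is built by mutating a copy of the offending packet, so the payload has to be cut away explicitly: the data offset goes back to five 32-bit words and the skb is trimmed to IP header plus bare TCP header before tot_len and both checksums are recomputed. A sketch of that truncation step against the old skb->nh layout used above (tcp_strip_payload is an illustrative name):

#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>

/* Sketch: leave only the IP header and a bare TCP header in the reply;
 * the caller then recomputes the TCP and IP checksums. */
static void tcp_strip_payload(struct sk_buff *nskb, struct tcphdr *tcph)
{
	tcph->doff = sizeof(struct tcphdr) / 4;	/* 5 words: no options, no data */
	skb_trim(nskb, nskb->nh.iph->ihl * 4 + sizeof(struct tcphdr));
	nskb->nh.iph->tot_len = htons(nskb->len);	/* keep tot_len honest */
}
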