Example 1
int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
{
	if (unlikely(!pskb_may_pull(skb, hdr_len)))
		return -ENOMEM;

	skb_pull_rcsum(skb, hdr_len);

	if (inner_proto == htons(ETH_P_TEB)) {
		struct ethhdr *eh;

		if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
			return -ENOMEM;

		eh = (struct ethhdr *)skb->data;
		if (likely(eth_proto_is_802_3(eh->h_proto)))
			skb->protocol = eh->h_proto;
		else
			skb->protocol = htons(ETH_P_802_2);

	} else {
		skb->protocol = inner_proto;
	}

	nf_reset(skb);
	secpath_reset(skb);
	skb_clear_hash_if_not_l4(skb);
	skb_dst_drop(skb);
	skb->vlan_tci = 0;
	skb_set_queue_mapping(skb, 0);
	skb->pkt_type = PACKET_HOST;
	return 0;
}
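All of the snippets collected here revolve around skb_set_queue_mapping() and its read-side counterpart. For reference, in the kernels these examples come from, both are plain inline accessors around the sk_buff queue_mapping field (include/linux/skbuff.h):

/* Trivial accessors: queue_mapping is just a u16 field in struct sk_buff. */
static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}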
Example 2
int rpl___iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
			       __be16 inner_proto, bool raw_proto, bool xnet)
{
	if (unlikely(!pskb_may_pull(skb, hdr_len)))
		return -ENOMEM;

	skb_pull_rcsum(skb, hdr_len);

	if (!raw_proto && inner_proto == htons(ETH_P_TEB)) {
		struct ethhdr *eh;

		if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
			return -ENOMEM;

		eh = (struct ethhdr *)skb->data;
		if (likely(eth_proto_is_802_3(eh->h_proto)))
			skb->protocol = eh->h_proto;
		else
			skb->protocol = htons(ETH_P_802_2);

	} else {
		skb->protocol = inner_proto;
	}

	skb_clear_hash_if_not_l4(skb);
	skb->vlan_tci = 0;
	skb_set_queue_mapping(skb, 0);
	skb_scrub_packet(skb, xnet);

	return iptunnel_pull_offloads(skb);
}
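The trailing iptunnel_pull_offloads() call clears encapsulation offload state now that the outer header is gone. As a rough sketch of what it does in the same kernel era (quoted from memory, so treat the exact mask as an assumption):

int iptunnel_pull_offloads(struct sk_buff *skb)
{
	if (skb_is_gso(skb)) {
		int err;

		/* gso_type lives in shared info; unclone before editing it. */
		err = skb_unclone(skb, GFP_ATOMIC);
		if (unlikely(err))
			return err;
		skb_shinfo(skb)->gso_type &= ~(NETIF_F_GSO_ENCAP_ALL >>
					       NETIF_F_GSO_SHIFT);
	}

	skb->encapsulation = 0;
	return 0;
}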
Example 3
/* Transmit routine used by generic_netmap_txsync(). Returns 0 on success
   and -1 on error (which may be packet drops or other errors). */
int generic_xmit_frame(struct ifnet *ifp, struct mbuf *m,
	void *addr, u_int len, u_int ring_nr)
{
    netdev_tx_t ret;

    /* Empty the sk_buff. */
    if (unlikely(skb_headroom(m)))
        skb_push(m, skb_headroom(m));
    skb_trim(m, 0);

    /* TODO Support the slot flags (NS_MOREFRAG, NS_INDIRECT). */
    skb_copy_to_linear_data(m, addr, len); // skb_store_bits(m, 0, addr, len);
    skb_put(m, len);
    NM_ATOMIC_INC(&m->users);
    m->dev = ifp;
    /* Tell generic_ndo_start_xmit() to pass this mbuf to the driver. */
    m->priority = NM_MAGIC_PRIORITY_TX;
    skb_set_queue_mapping(m, ring_nr);

    ret = dev_queue_xmit(m);

    if (likely(ret == NET_XMIT_SUCCESS)) {
        return 0;
    }
    if (unlikely(ret != NET_XMIT_DROP)) {
        /* If something goes wrong in the TX path, there is nothing
           intelligent we can do (for now) apart from error reporting. */
        RD(5, "dev_queue_xmit failed: HARD ERROR %d", ret);
    }
    return -1;
}
Example 4
void ieee80211_select_queue(struct ieee80211_local *local, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	u16 queue;
	u8 tid;

	queue = classify80211(local, skb);
	if (unlikely(queue >= local->hw.queues))
		queue = local->hw.queues - 1;

	/*
	 * Now we know the 1d priority, fill in the QoS header if
	 * there is one (and we haven't done this before).
	 */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *p = ieee80211_get_qos_ctl(hdr);
		u8 ack_policy = 0;
		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
		if (unlikely(local->wifi_wme_noack_test))
			ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
					QOS_CONTROL_ACK_POLICY_SHIFT;
		/* The QoS header is 2 bytes; the second byte is reserved */
		*p++ = ack_policy | tid;
		*p = 0;
	}

#if 0 /* Not in RHEL5... */
	skb_set_queue_mapping(skb, queue);
#else
	ieee80211_set_queue_mapping(skb, queue);
#endif
}
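The QoS header written above is two bytes. Per the 802.11 QoS Control layout, the TID occupies the low four bits of the first byte and the ack policy sits at bits 5-6 (so QOS_CONTROL_ACK_POLICY_SHIFT is assumed to be 5 here); the second byte is left zero. A minimal restatement of the byte construction, with a hypothetical helper name:

/* Hypothetical restatement of the first QoS Control byte built above:
 * ack policy in bits 5-6, TID in bits 0-3. */
static inline u8 build_qos_ctl0(u8 ack_policy_bits, u8 tid)
{
	return (u8)((ack_policy_bits << 5) | (tid & 0x0f));
}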
Example 5
static void imq_done_check_queue_mapping(struct sk_buff *skb,
					 struct net_device *dev)
{
	unsigned int queue_index;

	/* Don't let queue_mapping be left too large after exiting IMQ */
	if (likely(skb->dev != dev && skb->dev != NULL)) {
		queue_index = skb_get_queue_mapping(skb);
		if (unlikely(queue_index >= skb->dev->real_num_tx_queues)) {
			queue_index = (u16)((u32)queue_index %
						skb->dev->real_num_tx_queues);
			skb_set_queue_mapping(skb, queue_index);
		}
	} else {
		/* skb->dev was IMQ device itself or NULL, be on safe side and
		 * just clear queue mapping.
		 */
		skb_set_queue_mapping(skb, 0);
	}
}
Example 6
File: tx.c Project: 020gzh/linux
/* Take mac80211 Q id from the skb and translate it to hardware Q id */
static u8 skb2q(struct sk_buff *skb)
{
	int qid = skb_get_queue_mapping(skb);

	if (WARN_ON(qid >= MT_TXQ_PSD)) {
		qid = MT_TXQ_BE;
		skb_set_queue_mapping(skb, qid);
	}

	return q2hwq(qid);
}
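In the mt7601u driver this snippet comes from, the mac80211 queue order (VO, VI, BE, BK) is the reverse of the hardware queue order, so the companion q2hwq() helper is, if memory serves, nothing more than an XOR:

static u8 q2hwq(u8 q)
{
	return q ^ 0x3;
}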
Example 7
void ieee80211_requeue(struct ieee80211_local *local, int queue)
{
	struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, queue);
	struct sk_buff_head list;
	spinlock_t *root_lock;
	struct Qdisc *qdisc;
	u32 len;

	rcu_read_lock_bh();

	qdisc = rcu_dereference(txq->qdisc);
	if (!qdisc || !qdisc->dequeue)
		goto out_unlock;

	skb_queue_head_init(&list);

	root_lock = qdisc_root_lock(qdisc);
	spin_lock(root_lock);
	for (len = qdisc->q.qlen; len > 0; len--) {
		struct sk_buff *skb = qdisc->dequeue(qdisc);

		if (skb)
			__skb_queue_tail(&list, skb);
	}
	spin_unlock(root_lock);

	for (len = list.qlen; len > 0; len--) {
		struct sk_buff *skb = __skb_dequeue(&list);
		u16 new_queue;

		BUG_ON(!skb);
		new_queue = ieee80211_select_queue(local->mdev, skb);
		skb_set_queue_mapping(skb, new_queue);

		txq = netdev_get_tx_queue(local->mdev, new_queue);

		qdisc = rcu_dereference(txq->qdisc);
		root_lock = qdisc_root_lock(qdisc);

		spin_lock(root_lock);
		qdisc_enqueue_root(skb, qdisc);
		spin_unlock(root_lock);
	}

out_unlock:
	rcu_read_unlock_bh();
}
Example 8
static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
		struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	skb_set_mac_header(skb, 0);
	skb_set_network_header(skb, 0);
	skb_set_transport_header(skb, 0);

	/* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
	skb_set_queue_mapping(skb, IEEE80211_AC_VO);
	skb->priority = 7;

	info->control.vif = &sdata->vif;
	ieee80211_set_qos_hdr(sdata, skb);
}
Example 9
/*  Headroom is not adjusted.  Caller should ensure that skb has sufficient
 *  headroom in case the frame is encrypted. */
static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
		struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	skb_set_mac_header(skb, 0);
	skb_set_network_header(skb, 0);
	skb_set_transport_header(skb, 0);

	/* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
	skb_set_queue_mapping(skb, IEEE80211_AC_VO);
	skb->priority = 7;

	info->control.vif = &sdata->vif;
	info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
	ieee80211_set_qos_hdr(sdata, skb);
}
Example 10
static int mlx5e_test_loopback(struct mlx5e_priv *priv)
{
	struct mlx5e_lbt_priv *lbtp;
	struct sk_buff *skb = NULL;
	int err;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		netdev_err(priv->netdev,
			   "\tCan't perform loopback test while device is down\n");
		return -ENODEV;
	}

	lbtp = kzalloc(sizeof(*lbtp), GFP_KERNEL);
	if (!lbtp)
		return -ENOMEM;
	lbtp->loopback_ok = false;

	err = mlx5e_test_loopback_setup(priv, lbtp);
	if (err)
		goto out;

	skb = mlx5e_test_get_udp_skb(priv);
	if (!skb) {
		err = -ENOMEM;
		goto cleanup;
	}

	skb_set_queue_mapping(skb, 0);
	err = dev_queue_xmit(skb);
	if (err) {
		netdev_err(priv->netdev,
			   "\tFailed to xmit loopback packet err(%d)\n",
			   err);
		goto cleanup;
	}

	wait_for_completion_timeout(&lbtp->comp, MLX5E_LB_VERIFY_TIMEOUT);
	err = !lbtp->loopback_ok;

cleanup:
	mlx5e_test_loopback_cleanup(priv, lbtp);
out:
	kfree(lbtp);
	return err;
}
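The test pins the probe packet to queue 0, transmits it with dev_queue_xmit(), and then blocks on lbtp->comp until the RX path recognizes the looped-back UDP packet. A minimal sketch of the completion side of that handshake (the function is hypothetical; only the field names come from the snippet above):

/* Hypothetical RX-side hook: flag success and wake the waiting test. */
static void mlx5e_lbt_seen(struct mlx5e_lbt_priv *lbtp)
{
	lbtp->loopback_ok = true;
	complete(&lbtp->comp);
}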
Example 11
static unsigned int
set_target_v3(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct xt_set_info_target_v3 *info = par->targinfo;

	ADT_OPT(add_opt, par->family, info->add_set.dim,
		info->add_set.flags, info->flags, info->timeout);
	ADT_OPT(del_opt, par->family, info->del_set.dim,
		info->del_set.flags, 0, UINT_MAX);
	ADT_OPT(map_opt, par->family, info->map_set.dim,
		info->map_set.flags, 0, UINT_MAX);

	int ret;

	/* Normalize to fit into jiffies */
	if (add_opt.ext.timeout != IPSET_NO_TIMEOUT &&
	    add_opt.ext.timeout > UINT_MAX / MSEC_PER_SEC)
		add_opt.ext.timeout = UINT_MAX / MSEC_PER_SEC;
	if (info->add_set.index != IPSET_INVALID_ID)
		ip_set_add(info->add_set.index, skb, CAST_TO_MATCH par,
			   &add_opt);
	if (info->del_set.index != IPSET_INVALID_ID)
		ip_set_del(info->del_set.index, skb, CAST_TO_MATCH par,
			   &del_opt);
	if (info->map_set.index != IPSET_INVALID_ID) {
		map_opt.cmdflags |= info->flags & (IPSET_FLAG_MAP_SKBMARK |
						   IPSET_FLAG_MAP_SKBPRIO |
						   IPSET_FLAG_MAP_SKBQUEUE);
		ret = match_set(info->map_set.index, skb, CAST_TO_MATCH par,
				&map_opt,
				info->map_set.flags & IPSET_INV_MATCH);
		if (!ret)
			return XT_CONTINUE;
		if (map_opt.cmdflags & IPSET_FLAG_MAP_SKBMARK)
			skb->mark = (skb->mark & ~MOPT(map_opt,skbmarkmask))
				    ^ MOPT(map_opt, skbmark);
		if (map_opt.cmdflags & IPSET_FLAG_MAP_SKBPRIO)
			skb->priority = MOPT(map_opt, skbprio);
		if ((map_opt.cmdflags & IPSET_FLAG_MAP_SKBQUEUE) &&
		    skb->dev &&
		    skb->dev->real_num_tx_queues > MOPT(map_opt, skbqueue))
			skb_set_queue_mapping(skb, MOPT(map_opt, skbqueue));
	}
	return XT_CONTINUE;
}
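Examples 11, 12 and 13 all apply the same guard: an externally supplied queue id is honored only if the skb's device actually exposes that many TX queues (Example 5 instead clamps with a modulo when the check fails). A hypothetical helper capturing the idiom (not part of any of the sources above):

/* Hypothetical helper: apply an untrusted queue id only when it is valid
 * for the skb's current device; otherwise leave the mapping untouched. */
static inline void skb_set_queue_mapping_capped(struct sk_buff *skb, u16 queue)
{
	if (skb->dev && queue < skb->dev->real_num_tx_queues)
		skb_set_queue_mapping(skb, queue);
}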
Example 12
static int tcf_skbedit(struct sk_buff *skb, struct tc_action *a,
		       struct tcf_result *res)
{
	struct tcf_skbedit *d = a->priv;

	spin_lock(&d->tcf_lock);
	d->tcf_tm.lastuse = jiffies;
	d->tcf_bstats.bytes += qdisc_pkt_len(skb);
	d->tcf_bstats.packets++;

	if (d->flags & SKBEDIT_F_PRIORITY)
		skb->priority = d->priority;
	if (d->flags & SKBEDIT_F_QUEUE_MAPPING &&
	    skb->dev->real_num_tx_queues > d->queue_mapping)
		skb_set_queue_mapping(skb, d->queue_mapping);

	spin_unlock(&d->tcf_lock);
	return d->tcf_action;
}
Example 13
static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a,
		       struct tcf_result *res)
{
	struct tcf_skbedit *d = a->priv;

	spin_lock(&d->tcf_lock);
	d->tcf_tm.lastuse = jiffies;
	bstats_update(&d->tcf_bstats, skb);

	if (d->flags & SKBEDIT_F_PRIORITY)
		skb->priority = d->priority;
	if (d->flags & SKBEDIT_F_QUEUE_MAPPING &&
	    skb->dev->real_num_tx_queues > d->queue_mapping)
		skb_set_queue_mapping(skb, d->queue_mapping);
	if (d->flags & SKBEDIT_F_MARK)
		skb->mark = d->mark;

	spin_unlock(&d->tcf_lock);
	return d->tcf_action;
}
Example 14
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb,
				    void *accel_priv)
{
	int queue_index = 0;

	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;
		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
							    __netdev_pick_tx);
		else
			queue_index = __netdev_pick_tx(dev, skb);

		if (!accel_priv)
			queue_index = netdev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
Example 15
int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
{
	if (unlikely(!pskb_may_pull(skb, hdr_len)))
		return -ENOMEM;

	skb_pull_rcsum(skb, hdr_len);

	if (inner_proto == htons(ETH_P_TEB)) {
		struct ethhdr *eh;

		if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
			return -ENOMEM;

		eh = (struct ethhdr *)skb->data;

		if (likely(ntohs(eh->h_proto) >= ETH_P_802_3_MIN))
			skb->protocol = eh->h_proto;
		else
			skb->protocol = htons(ETH_P_802_2);

	} else {
		skb->protocol = inner_proto;
	}

	if (unlikely(compute_ip_summed(skb, false)))
		return -EPROTO;

	nf_reset(skb);
	secpath_reset(skb);
	skb_clear_rxhash(skb);
	skb_dst_drop(skb);
	vlan_set_tci(skb, 0);
	skb_set_queue_mapping(skb, 0);
	skb->pkt_type = PACKET_HOST;
	return 0;
}
Example 16
static struct Qdisc *
prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	u32 band = skb->priority;
	struct tcf_result res;
	int err;

	*qerr = NET_XMIT_BYPASS;
	if (TC_H_MAJ(skb->priority) != sch->handle) {
		err = tc_classify(skb, q->filter_list, &res);
#ifdef CONFIG_NET_CLS_ACT
		switch (err) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		if (!q->filter_list || err < 0) {
			if (TC_H_MAJ(band))
				band = 0;
			band = q->prio2band[band&TC_PRIO_MAX];
			goto out;
		}
		band = res.classid;
	}
	band = TC_H_MIN(band) - 1;
	if (band >= q->bands)
		band = q->prio2band[0];
out:
	if (q->mq)
		skb_set_queue_mapping(skb, band);
	return q->queues[band];
}
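prio_classify() falls back to q->prio2band[] when no filter applies. That map is configurable from userspace for sch_prio, but for context, the analogous built-in default used by pfifo_fast (net/sched/sch_generic.c) spreads the sixteen TC_PRIO_* values over three bands like this:

static const u8 prio2band[TC_PRIO_MAX + 1] = {
	1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};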
Example 17
int rtw_recv_indicatepkt(_adapter *padapter, union recv_frame *precv_frame)
{
	struct recv_priv *precvpriv;
	_queue	*pfree_recv_queue;
	_pkt *skb;
	struct mlme_priv*pmlmepriv = &padapter->mlmepriv;
#ifdef CONFIG_TCP_CSUM_OFFLOAD_RX
	struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
#endif

#ifdef CONFIG_BR_EXT
	void *br_port = NULL;
#endif

_func_enter_;

	precvpriv = &(padapter->recvpriv);
	pfree_recv_queue = &(precvpriv->free_recv_queue);

#ifdef CONFIG_DRVEXT_MODULE
	if (drvext_rx_handler(padapter, precv_frame->u.hdr.rx_data, precv_frame->u.hdr.len) == _SUCCESS)
	{
		goto _recv_indicatepkt_drop;
	}
#endif

	skb = precv_frame->u.hdr.pkt;
	if(skb == NULL)
	{
		RT_TRACE(_module_recv_osdep_c_,_drv_err_,("rtw_recv_indicatepkt():skb==NULL something wrong!!!!\n"));
		goto _recv_indicatepkt_drop;
	}

	RT_TRACE(_module_recv_osdep_c_,_drv_info_,("rtw_recv_indicatepkt():skb != NULL !!!\n"));		
	RT_TRACE(_module_recv_osdep_c_,_drv_info_,("rtw_recv_indicatepkt():precv_frame->u.hdr.rx_head=%p  precv_frame->hdr.rx_data=%p\n", precv_frame->u.hdr.rx_head, precv_frame->u.hdr.rx_data));
	RT_TRACE(_module_recv_osdep_c_,_drv_info_,("precv_frame->hdr.rx_tail=%p precv_frame->u.hdr.rx_end=%p precv_frame->hdr.len=%d \n", precv_frame->u.hdr.rx_tail, precv_frame->u.hdr.rx_end, precv_frame->u.hdr.len));

	skb->data = precv_frame->u.hdr.rx_data;

	skb_set_tail_pointer(skb, precv_frame->u.hdr.len);

	skb->len = precv_frame->u.hdr.len;

	RT_TRACE(_module_recv_osdep_c_,_drv_info_,("\n skb->head=%p skb->data=%p skb->tail=%p skb->end=%p skb->len=%d\n", skb->head, skb->data, skb->tail, skb->end, skb->len));

	if(check_fwstate(pmlmepriv, WIFI_AP_STATE) == _TRUE)
	{
	 	_pkt *pskb2=NULL;
	 	struct sta_info *psta = NULL;
	 	struct sta_priv *pstapriv = &padapter->stapriv;
		struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
		int bmcast = IS_MCAST(pattrib->dst);

		//DBG_871X("bmcast=%d\n", bmcast);

		if(_rtw_memcmp(pattrib->dst, myid(&padapter->eeprompriv), ETH_ALEN)==_FALSE)
		{
			//DBG_871X("not ap psta=%p, addr=%pM\n", psta, pattrib->dst);

			if(bmcast)
			{
				psta = rtw_get_bcmc_stainfo(padapter);
				pskb2 = rtw_skb_clone(skb);
			} else {
				psta = rtw_get_stainfo(pstapriv, pattrib->dst);
			}

			if(psta)
			{
				struct net_device *pnetdev= (struct net_device*)padapter->pnetdev;			

				//DBG_871X("directly forwarding to the rtw_xmit_entry\n");

				//skb->ip_summed = CHECKSUM_NONE;
				skb->dev = pnetdev;			
#if (LINUX_VERSION_CODE>=KERNEL_VERSION(2,6,35))
				skb_set_queue_mapping(skb, rtw_recv_select_queue(skb));
#endif //LINUX_VERSION_CODE>=KERNEL_VERSION(2,6,35)
			
				_rtw_xmit_entry(skb, pnetdev);
			
				if(bmcast)
					skb = pskb2;
				else
					goto _recv_indicatepkt_end;
			}


		}
		else// to APself
		{
			//DBG_871X("to APSelf\n");
		}
	}
	

#ifdef CONFIG_BR_EXT

#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
	br_port = padapter->pnetdev->br_port;
#else   // (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
	rcu_read_lock();
	br_port = rcu_dereference(padapter->pnetdev->rx_handler_data);
	rcu_read_unlock();
#endif  // (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))

	if( br_port	&& (check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_ADHOC_STATE) == _TRUE) )	 	
	{
		int nat25_handle_frame(_adapter *priv, struct sk_buff *skb);
		if (nat25_handle_frame(padapter, skb) == -1) {
			//priv->ext_stats.rx_data_drops++;
			//DEBUG_ERR("RX DROP: nat25_handle_frame fail!\n");
			//return FAIL;
#if 1			
			// bypass this frame to upper layer!!
#else
			goto _recv_indicatepkt_drop;
#endif
		}	
	}

#endif	// CONFIG_BR_EXT


#ifdef CONFIG_TCP_CSUM_OFFLOAD_RX
	if ( (pattrib->tcpchk_valid == 1) && (pattrib->tcp_chkrpt == 1) ) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		//DBG_871X("CHECKSUM_UNNECESSARY \n");
	} else {
		skb->ip_summed = CHECKSUM_NONE;
		//DBG_871X("CHECKSUM_NONE(%d, %d) \n", pattrib->tcpchk_valid, pattrib->tcp_chkrpt);
	}
#else /* !CONFIG_TCP_CSUM_OFFLOAD_RX */

	skb->ip_summed = CHECKSUM_NONE;

#endif

	skb->dev = padapter->pnetdev;
	skb->protocol = eth_type_trans(skb, padapter->pnetdev);

	rtw_netif_rx(padapter->pnetdev, skb);

_recv_indicatepkt_end:

	precv_frame->u.hdr.pkt = NULL; // pointers to NULL before rtw_free_recvframe()

	rtw_free_recvframe(precv_frame, pfree_recv_queue);

	RT_TRACE(_module_recv_osdep_c_,_drv_info_,("\n rtw_recv_indicatepkt :after rtw_netif_rx!!!!\n"));

_func_exit_;

        return _SUCCESS;

_recv_indicatepkt_drop:

	 //enqueue back to free_recv_queue
	 if(precv_frame)
		 rtw_free_recvframe(precv_frame, pfree_recv_queue);

	 return _FAIL;

_func_exit_;

}
Example 18
void rtw_os_recv_indicate_pkt(_adapter *padapter, _pkt *pkt, struct rx_pkt_attrib *pattrib)
{
	struct mlme_priv*pmlmepriv = &padapter->mlmepriv;
	struct recv_priv *precvpriv = &(padapter->recvpriv);
#ifdef CONFIG_BR_EXT
	void *br_port = NULL;
#endif
	int ret;

	/* Indicate the packets to the upper layer */
	if (pkt) {
		if(check_fwstate(pmlmepriv, WIFI_AP_STATE) == _TRUE)
		{
		 	_pkt *pskb2=NULL;
		 	struct sta_info *psta = NULL;
		 	struct sta_priv *pstapriv = &padapter->stapriv;
			int bmcast = IS_MCAST(pattrib->dst);

			//DBG_871X("bmcast=%d\n", bmcast);

			if (_rtw_memcmp(pattrib->dst, adapter_mac_addr(padapter), ETH_ALEN) == _FALSE)
			{
				//DBG_871X("not ap psta=%p, addr=%pM\n", psta, pattrib->dst);

				if(bmcast)
				{
					psta = rtw_get_bcmc_stainfo(padapter);
					pskb2 = rtw_skb_clone(pkt);
				} else {
					psta = rtw_get_stainfo(pstapriv, pattrib->dst);
				}

				if(psta)
				{
					struct net_device *pnetdev= (struct net_device*)padapter->pnetdev;			

					//DBG_871X("directly forwarding to the rtw_xmit_entry\n");

					//skb->ip_summed = CHECKSUM_NONE;
					pkt->dev = pnetdev;				
#if (LINUX_VERSION_CODE>=KERNEL_VERSION(2,6,35))
					skb_set_queue_mapping(pkt, rtw_recv_select_queue(pkt));
#endif //LINUX_VERSION_CODE>=KERNEL_VERSION(2,6,35)

					_rtw_xmit_entry(pkt, pnetdev);

					if(bmcast && (pskb2 != NULL) ) {
						pkt = pskb2;
						DBG_COUNTER(padapter->rx_logs.os_indicate_ap_mcast);
					} else {
						DBG_COUNTER(padapter->rx_logs.os_indicate_ap_forward);
						return;
					}
				}
			}
			else// to APself
			{
				//DBG_871X("to APSelf\n");
				DBG_COUNTER(padapter->rx_logs.os_indicate_ap_self);
			}
		}
		
#ifdef CONFIG_BR_EXT
		// Insert NAT2.5 RX here!
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
		br_port = padapter->pnetdev->br_port;
#else   // (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
		rcu_read_lock();
		br_port = rcu_dereference(padapter->pnetdev->rx_handler_data);
		rcu_read_unlock();
#endif  // (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))


		if( br_port && (check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_ADHOC_STATE) == _TRUE) )
		{
			int nat25_handle_frame(_adapter *priv, struct sk_buff *skb);
			if (nat25_handle_frame(padapter, pkt) == -1) {
				//priv->ext_stats.rx_data_drops++;
				//DEBUG_ERR("RX DROP: nat25_handle_frame fail!\n");
				//return FAIL;
				
#if 1
				// bypass this frame to upper layer!!
#else
				rtw_skb_free(sub_skb);
				continue;
#endif
			}							
		}
#endif	// CONFIG_BR_EXT
		if( precvpriv->sink_udpport > 0)
			rtw_sink_rtp_seq_dbg(padapter,pkt);
#ifdef DBG_UDP_PKT_LOSE_11AC
		/* After eth_type_trans() runs, pkt->data moves from the ethernet
		 * header to the IP header. We have to check the ethernet type, so
		 * this debug print must happen before eth_type_trans().
		 */
		if (*((unsigned short *)(pkt->data+ETH_ALEN*2)) == htons(ETH_P_ARP)) {
			/* ARP payload length will be 42 bytes or 42+18 (trailer) = 60 bytes */
			if (pkt->len != 42 && pkt->len != 60) 
				DBG_871X("Error !!%s,ARP Payload length %u not correct\n" , __func__ , pkt->len);
		} else if (*((unsigned short *)(pkt->data+ETH_ALEN*2)) == htons(ETH_P_IP)) { 
			if (be16_to_cpu(*((u16 *)(pkt->data+PAYLOAD_LEN_LOC_OF_IP_HDR))) != (pkt->len)-ETH_HLEN) {
				DBG_871X("Error !!%s,Payload length not correct\n" , __func__);
				DBG_871X("%s, IP header describe Total length=%u\n" , __func__ , be16_to_cpu(*((u16 *)(pkt->data+PAYLOAD_LEN_LOC_OF_IP_HDR))));
				DBG_871X("%s, Pkt real length=%u\n" , __func__ , (pkt->len)-ETH_HLEN);
			} 
		}
#endif
		/* After eth_type_trans() runs, pkt->data moves from the ethernet header to the IP header */
		pkt->protocol = eth_type_trans(pkt, padapter->pnetdev);
		pkt->dev = padapter->pnetdev;

#ifdef CONFIG_TCP_CSUM_OFFLOAD_RX
		if ( (pattrib->tcpchk_valid == 1) && (pattrib->tcp_chkrpt == 1) ) {
			pkt->ip_summed = CHECKSUM_UNNECESSARY;
		} else {
			pkt->ip_summed = CHECKSUM_NONE;
		}
#else /* !CONFIG_TCP_CSUM_OFFLOAD_RX */
		pkt->ip_summed = CHECKSUM_NONE;
#endif //CONFIG_TCP_CSUM_OFFLOAD_RX

		ret = rtw_netif_rx(padapter->pnetdev, pkt);
		if (ret == NET_RX_SUCCESS)
			DBG_COUNTER(padapter->rx_logs.os_netif_ok);
		else
			DBG_COUNTER(padapter->rx_logs.os_netif_err);
	}
}
Example 19
static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num)
{
	struct net_device *dev;
	struct sk_buff *skb_orig, *skb, *skb_shared;
	struct Qdisc *q;
	struct netdev_queue *txq;
	spinlock_t *root_lock;
	int users, index;
	int retval = -EINVAL;
	unsigned int orig_queue_index;

	index = entry->skb->imq_flags & IMQ_F_IFMASK;
	if (unlikely(index > numdevs - 1)) {
		if (net_ratelimit())
			printk(KERN_WARNING
			       "IMQ: invalid device specified, highest is %u\n",
			       numdevs - 1);
		retval = -EINVAL;
		goto out;
	}

	/* check for imq device by index from cache */
	dev = imq_devs_cache[index];
	if (unlikely(!dev)) {
		char buf[8];

		/* get device by name and cache result */
		snprintf(buf, sizeof(buf), "imq%d", index);
		dev = dev_get_by_name(&init_net, buf);
		if (unlikely(!dev)) {
			/* not found?! */
			BUG();
			retval = -ENODEV;
			goto out;
		}

		imq_devs_cache[index] = dev;
		dev_put(dev);
	}

	if (unlikely(!(dev->flags & IFF_UP))) {
		entry->skb->imq_flags = 0;
		nf_reinject(entry, NF_ACCEPT);
		retval = 0;
		goto out;
	}
	dev->last_rx = jiffies;

	skb = entry->skb;
	skb_orig = NULL;

	/* skb has owner? => make clone */
	if (unlikely(skb->destructor)) {
		skb_orig = skb;
		skb = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			retval = -ENOMEM;
			goto out;
		}
		entry->skb = skb;
	}

	skb->nf_queue_entry = entry;

	dev->stats.rx_bytes += skb->len;
	dev->stats.rx_packets++;

	if (!skb->dev) {
		/* skb->dev == NULL causes problems, try to find the cause. */
		if (net_ratelimit()) {
			dev_warn(&dev->dev,
				 "received packet with skb->dev == NULL\n");
			dump_stack();
		}

		skb->dev = dev;
	}

	/* Disables softirqs for lock below */
	rcu_read_lock_bh();

	/* Multi-queue selection */
	orig_queue_index = skb_get_queue_mapping(skb);
	txq = imq_select_queue(dev, skb);

	q = rcu_dereference(txq->qdisc);
	if (unlikely(!q->enqueue))
		goto packet_not_eaten_by_imq_dev;

	root_lock = qdisc_lock(q);
	spin_lock(root_lock);

	users = atomic_read(&skb->users);

	skb_shared = skb_get(skb); /* increase reference count by one */
	skb_save_cb(skb_shared); /* backup skb->cb, as qdisc layer will
					overwrite it */
	qdisc_enqueue_root(skb_shared, q); /* might kfree_skb */

	if (likely(atomic_read(&skb_shared->users) == users + 1)) {
		kfree_skb(skb_shared); /* decrease reference count by one */

		skb->destructor = &imq_skb_destructor;

		/* cloned? */
		if (unlikely(skb_orig))
			kfree_skb(skb_orig); /* free original */

		spin_unlock(root_lock);
		rcu_read_unlock_bh();

		/* schedule qdisc dequeue */
		__netif_schedule(q);

		retval = 0;
		goto out;
	} else {
		skb_restore_cb(skb_shared); /* restore skb->cb */
		skb->nf_queue_entry = NULL;
		/* The qdisc dropped the packet and decreased its reference
		 * count, so we must not try to free it again, as that would
		 * actually destroy the skb. */
		spin_unlock(root_lock);
		goto packet_not_eaten_by_imq_dev;
	}

packet_not_eaten_by_imq_dev:
	skb_set_queue_mapping(skb, orig_queue_index);
	rcu_read_unlock_bh();

	/* cloned? restore original */
	if (unlikely(skb_orig)) {
		kfree_skb(skb);
		entry->skb = skb_orig;
	}
	retval = -1;
out:
	return retval;
}
Example 20
int rtw_recv_indicatepkt23a(struct rtw_adapter *padapter,
			 struct recv_frame *precv_frame)
{
	struct recv_priv *precvpriv;
	struct rtw_queue *pfree_recv_queue;
	struct sk_buff *skb;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;

	precvpriv = &(padapter->recvpriv);
	pfree_recv_queue = &(precvpriv->free_recv_queue);

	skb = precv_frame->pkt;
	if (!skb) {
		RT_TRACE(_module_recv_osdep_c_, _drv_err_,
			 ("rtw_recv_indicatepkt23a():skb == NULL!!!!\n"));
		goto _recv_indicatepkt_drop;
	}

	RT_TRACE(_module_recv_osdep_c_, _drv_info_,
		 ("rtw_recv_indicatepkt23a():skb != NULL !!!\n"));
	RT_TRACE(_module_recv_osdep_c_, _drv_info_,
		 ("rtw_recv_indicatepkt23a():precv_frame->hdr.rx_data =%p\n",
		  precv_frame->pkt->data));
	RT_TRACE(_module_recv_osdep_c_, _drv_info_,
		 ("\n skb->head =%p skb->data =%p skb->tail =%p skb->end =%p skb->len =%d\n",
		  skb->head, skb->data,
		  skb_tail_pointer(skb), skb_end_pointer(skb), skb->len));

	if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
		struct sk_buff *pskb2 = NULL;
		struct sta_info *psta = NULL;
		struct sta_priv *pstapriv = &padapter->stapriv;
		struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
		int bmcast = is_multicast_ether_addr(pattrib->dst);

		/* DBG_8723A("bmcast =%d\n", bmcast); */

		if (!ether_addr_equal(pattrib->dst,
				      myid(&padapter->eeprompriv))) {
			/* DBG_8723A("not ap psta =%p, addr =%pM\n", psta, pattrib->dst); */
			if (bmcast) {
				psta = rtw_get_bcmc_stainfo23a(padapter);
				pskb2 = skb_clone(skb, GFP_ATOMIC);
			} else {
				psta = rtw_get_stainfo23a(pstapriv, pattrib->dst);
			}

			if (psta) {
				struct net_device *pnetdev = padapter->pnetdev;

				/* DBG_8723A("directly forwarding to the rtw_xmit23a_entry23a\n"); */

				/* skb->ip_summed = CHECKSUM_NONE; */
				skb->dev = pnetdev;
				skb_set_queue_mapping(skb, rtw_recv_select_queue23a(skb));

				rtw_xmit23a_entry23a(skb, pnetdev);

				if (bmcast)
					skb = pskb2;
				else
					goto _recv_indicatepkt_end;
			}
		} else { /*  to APself */
			/* DBG_8723A("to APSelf\n"); */
		}
	}

	skb->ip_summed = CHECKSUM_NONE;
	skb->dev = padapter->pnetdev;
	skb->protocol = eth_type_trans(skb, padapter->pnetdev);

	netif_rx(skb);

_recv_indicatepkt_end:

	precv_frame->pkt = NULL; /*  pointers to NULL before rtw_free_recvframe23a() */

	rtw_free_recvframe23a(precv_frame, pfree_recv_queue);

	RT_TRACE(_module_recv_osdep_c_, _drv_info_,
		 ("\n rtw_recv_indicatepkt23a :after netif_rx!!!!\n"));
	return _SUCCESS;

_recv_indicatepkt_drop:

	 rtw_free_recvframe23a(precv_frame, pfree_recv_queue);
	 return _FAIL;
}
Example 21
int rtw_recv_indicatepkt(struct adapter *padapter,
			 struct recv_frame *precv_frame)
{
	struct recv_priv *precvpriv;
	struct __queue *pfree_recv_queue;
	struct sk_buff *skb;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;


	precvpriv = &(padapter->recvpriv);
	pfree_recv_queue = &(precvpriv->free_recv_queue);

	skb = precv_frame->pkt;
	if (!skb) {
		RT_TRACE(_module_recv_osdep_c_, _drv_err_,
			 ("rtw_recv_indicatepkt():skb == NULL something wrong!!!!\n"));
		goto _recv_indicatepkt_drop;
	}

	RT_TRACE(_module_recv_osdep_c_, _drv_info_,
		 ("rtw_recv_indicatepkt():skb != NULL !!!\n"));
	RT_TRACE(_module_recv_osdep_c_, _drv_info_,
		 ("rtw_recv_indicatepkt():precv_frame->rx_head =%p  precv_frame->hdr.rx_data =%p\n",
		 precv_frame->rx_head, precv_frame->rx_data));
	RT_TRACE(_module_recv_osdep_c_, _drv_info_,
		 ("precv_frame->hdr.rx_tail =%p precv_frame->rx_end =%p precv_frame->hdr.len =%d\n",
		 precv_frame->rx_tail, precv_frame->rx_end,
		 precv_frame->len));

	skb->data = precv_frame->rx_data;

	skb_set_tail_pointer(skb, precv_frame->len);

	skb->len = precv_frame->len;

	RT_TRACE(_module_recv_osdep_c_, _drv_info_,
		 ("skb->head =%p skb->data =%p skb->tail =%p skb->end =%p skb->len =%d\n",
		 skb->head, skb->data, skb_tail_pointer(skb),
		 skb_end_pointer(skb), skb->len));

	if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
		struct sk_buff *pskb2 = NULL;
		struct sta_info *psta = NULL;
		struct sta_priv *pstapriv = &padapter->stapriv;
		struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
		int bmcast = IS_MCAST(pattrib->dst);

		if (memcmp(pattrib->dst, myid(&padapter->eeprompriv),
			   ETH_ALEN)) {
			if (bmcast) {
				psta = rtw_get_bcmc_stainfo(padapter);
				pskb2 = skb_clone(skb, GFP_ATOMIC);
			} else {
				psta = rtw_get_stainfo(pstapriv, pattrib->dst);
			}

			if (psta) {
				struct net_device *pnetdev;

				pnetdev = (struct net_device *)padapter->pnetdev;
				skb->dev = pnetdev;
				skb_set_queue_mapping(skb, rtw_recv_select_queue(skb));

				rtw_xmit_entry(skb, pnetdev);

				if (bmcast)
					skb = pskb2;
				else
					goto _recv_indicatepkt_end;
			}
		}
	}

	rcu_read_lock();
	rcu_dereference(padapter->pnetdev->rx_handler_data);
	rcu_read_unlock();

	skb->ip_summed = CHECKSUM_NONE;
	skb->dev = padapter->pnetdev;
	skb->protocol = eth_type_trans(skb, padapter->pnetdev);

	netif_rx(skb);

_recv_indicatepkt_end:

	/*  pointers to NULL before rtw_free_recvframe() */
	precv_frame->pkt = NULL;

	rtw_free_recvframe(precv_frame, pfree_recv_queue);

	RT_TRACE(_module_recv_osdep_c_, _drv_info_,
		 ("\n rtw_recv_indicatepkt :after netif_rx!!!!\n"));


	return _SUCCESS;

_recv_indicatepkt_drop:

	 /* enqueue back to free_recv_queue */
	rtw_free_recvframe(precv_frame, pfree_recv_queue);

	 return _FAIL;
}
Example 22
void rtw_os_recv_indicate_pkt(_adapter *padapter, _pkt *pkt, struct rx_pkt_attrib *pattrib)
{
	struct mlme_priv*pmlmepriv = &padapter->mlmepriv;
	int ret;

	/* Indicate the packets to the upper layer */
	if (pkt) {
		if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true)
		{
			_pkt *pskb2 =NULL;
			struct sta_info *psta = NULL;
			struct sta_priv *pstapriv = &padapter->stapriv;
			int bmcast = IS_MCAST(pattrib->dst);

			/* DBG_871X("bmcast =%d\n", bmcast); */

			if (memcmp(pattrib->dst, myid(&padapter->eeprompriv), ETH_ALEN))
			{
				/* DBG_871X("not ap psta =%p, addr =%pM\n", psta, pattrib->dst); */

				if (bmcast)
				{
					psta = rtw_get_bcmc_stainfo(padapter);
					pskb2 = rtw_skb_clone(pkt);
				} else {
					psta = rtw_get_stainfo(pstapriv, pattrib->dst);
				}

				if (psta)
				{
					struct net_device *pnetdev = (struct net_device*)padapter->pnetdev;

					/* DBG_871X("directly forwarding to the rtw_xmit_entry\n"); */

					/* skb->ip_summed = CHECKSUM_NONE; */
					pkt->dev = pnetdev;
					skb_set_queue_mapping(pkt, rtw_recv_select_queue(pkt));

					_rtw_xmit_entry(pkt, pnetdev);

					if (bmcast && (pskb2 != NULL)) {
						pkt = pskb2;
						DBG_COUNTER(padapter->rx_logs.os_indicate_ap_mcast);
					} else {
						DBG_COUNTER(padapter->rx_logs.os_indicate_ap_forward);
						return;
					}
				}
			}
			else/*  to APself */
			{
				/* DBG_871X("to APSelf\n"); */
				DBG_COUNTER(padapter->rx_logs.os_indicate_ap_self);
			}
		}

		pkt->protocol = eth_type_trans(pkt, padapter->pnetdev);
		pkt->dev = padapter->pnetdev;

#ifdef CONFIG_TCP_CSUM_OFFLOAD_RX
		if ((pattrib->tcpchk_valid == 1) && (pattrib->tcp_chkrpt == 1)) {
			pkt->ip_summed = CHECKSUM_UNNECESSARY;
		} else {
			pkt->ip_summed = CHECKSUM_NONE;
		}
#else /* !CONFIG_TCP_CSUM_OFFLOAD_RX */
		pkt->ip_summed = CHECKSUM_NONE;
#endif /* CONFIG_TCP_CSUM_OFFLOAD_RX */

		ret = rtw_netif_rx(padapter->pnetdev, pkt);
		if (ret == NET_RX_SUCCESS)
			DBG_COUNTER(padapter->rx_logs.os_netif_ok);
		else
			DBG_COUNTER(padapter->rx_logs.os_netif_err);
	}
}
Example 23
/* Transmit routine used by generic_netmap_txsync(). Returns 0 on success
   and -1 on error (which may be packet drops or other errors). */
int
nm_os_generic_xmit_frame(struct nm_os_gen_arg *a)
{
	struct mbuf *m = a->m;
	struct ifnet *ifp = a->ifp;
	u_int len = a->len;
	netdev_tx_t ret;

	/* We know that the driver needs to prepend ifp->needed_headroom bytes
	 * to each packet to be transmitted. We then reset the mbuf pointers
	 * to the correct initial state:
	 *    ___________________________________________
	 *    ^           ^                             ^
	 *    |           |                             |
	 *   head        data                          end
	 *               tail
	 *
	 * which correspond to an empty buffer with exactly
	 * ifp->needed_headroom bytes between head and data.
	 */
	m->len = 0;
	m->data = m->head + ifp->needed_headroom;
	skb_reset_tail_pointer(m);
	skb_reset_mac_header(m);
	skb_reset_network_header(m);

	/* Copy a netmap buffer into the mbuf.
	 * TODO Support the slot flags (NS_MOREFRAG, NS_INDIRECT). */
	skb_copy_to_linear_data(m, a->addr, len); // skb_store_bits(m, 0, addr, len);
	skb_put(m, len);

	/* Hold a reference on this, we are going to recycle mbufs as
	 * much as possible. */
	NM_ATOMIC_INC(&m->users);

	/* On linux m->dev is not reliable, since it can be changed by the
	 * ndo_start_xmit() callback. This happens, for instance, with veth
	 * and bridge drivers. For this reason, the nm_os_generic_xmit_frame()
	 * implementation for linux stores a copy of m->dev into the
	 * destructor_arg field. */
	m->dev = ifp;
	skb_shinfo(m)->destructor_arg = m->dev;

	/* Tell generic_ndo_start_xmit() to pass this mbuf to the driver. */
	skb_set_queue_mapping(m, a->ring_nr);
	m->priority = a->qevent ? NM_MAGIC_PRIORITY_TXQE : NM_MAGIC_PRIORITY_TX;

	ret = dev_queue_xmit(m);

	if (unlikely(ret != NET_XMIT_SUCCESS)) {
		/* Reset priority, so that generic_netmap_tx_clean() can
		 * reclaim this mbuf. */
		m->priority = 0;

		/* Qdisc queue is full (this cannot happen with
		 * the netmap-aware qdisc, see explanation in
		 * netmap_generic_txsync), or qdisc is being
		 * deactivated. In the latter case dev_queue_xmit()
		 * does not call the enqueue method and returns
		 * NET_XMIT_DROP.
		 * If there is no carrier, the generic qdisc is
		 * not yet active (is pending in the qdisc_sleeping
		 * field), and so the temporary noop qdisc enqueue
		 * method will drop the packet and return NET_XMIT_CN.
		 */
		RD(3, "Warning: dev_queue_xmit() is dropping [%d]", ret);
		return -1;
	}

	return 0;
}
Example 24
void rtw_os_recv_indicate_pkt(_adapter *padapter, _pkt *pkt, struct rx_pkt_attrib *pattrib)
{
	struct mlme_priv*pmlmepriv = &padapter->mlmepriv;
	struct recv_priv *precvpriv = &(padapter->recvpriv);
#ifdef CONFIG_BR_EXT
	void *br_port = NULL;
#endif
	int ret;

	/* Indicate the packets to the upper layer */
	if (pkt) {
		if(check_fwstate(pmlmepriv, WIFI_AP_STATE) == _TRUE)
		{
		 	_pkt *pskb2=NULL;
		 	struct sta_info *psta = NULL;
		 	struct sta_priv *pstapriv = &padapter->stapriv;
			int bmcast = IS_MCAST(pattrib->dst);

			//DBG_871X("bmcast=%d\n", bmcast);

			if(_rtw_memcmp(pattrib->dst, myid(&padapter->eeprompriv), ETH_ALEN)==_FALSE)
			{
				//DBG_871X("not ap psta=%p, addr=%pM\n", psta, pattrib->dst);

				if(bmcast)
				{
					psta = rtw_get_bcmc_stainfo(padapter);
					pskb2 = rtw_skb_clone(pkt);
				} else {
					psta = rtw_get_stainfo(pstapriv, pattrib->dst);
				}

				if(psta)
				{
					struct net_device *pnetdev= (struct net_device*)padapter->pnetdev;			

					//DBG_871X("directly forwarding to the rtw_xmit_entry\n");

					//skb->ip_summed = CHECKSUM_NONE;
					pkt->dev = pnetdev;				
#if (LINUX_VERSION_CODE>=KERNEL_VERSION(2,6,35))
					skb_set_queue_mapping(pkt, rtw_recv_select_queue(pkt));
#endif //LINUX_VERSION_CODE>=KERNEL_VERSION(2,6,35)

					_rtw_xmit_entry(pkt, pnetdev);

					if(bmcast && (pskb2 != NULL) ) {
						pkt = pskb2;
						DBG_COUNTER(padapter->rx_logs.os_indicate_ap_mcast);
					} else {
						DBG_COUNTER(padapter->rx_logs.os_indicate_ap_forward);
						return;
					}
				}
			}
			else// to APself
			{
				//DBG_871X("to APSelf\n");
				DBG_COUNTER(padapter->rx_logs.os_indicate_ap_self);
			}
		}
		
#ifdef CONFIG_BR_EXT
		// Insert NAT2.5 RX here!
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
		br_port = padapter->pnetdev->br_port;
#else   // (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
		rcu_read_lock();
		br_port = rcu_dereference(padapter->pnetdev->rx_handler_data);
		rcu_read_unlock();
#endif  // (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))


		if( br_port && (check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_ADHOC_STATE) == _TRUE) )
		{
			int nat25_handle_frame(_adapter *priv, struct sk_buff *skb);
			if (nat25_handle_frame(padapter, pkt) == -1) {
				//priv->ext_stats.rx_data_drops++;
				//DEBUG_ERR("RX DROP: nat25_handle_frame fail!\n");
				//return FAIL;
				
#if 1
				// bypass this frame to upper layer!!
#else
				rtw_skb_free(sub_skb);
				continue;
#endif
			}							
		}
#endif	// CONFIG_BR_EXT
		if( precvpriv->sink_udpport > 0)
			rtw_sink_rtp_seq_dbg(padapter,pkt);
		pkt->protocol = eth_type_trans(pkt, padapter->pnetdev);
		pkt->dev = padapter->pnetdev;

#ifdef CONFIG_TCP_CSUM_OFFLOAD_RX
		if ( (pattrib->tcpchk_valid == 1) && (pattrib->tcp_chkrpt == 1) ) {
			pkt->ip_summed = CHECKSUM_UNNECESSARY;
		} else {
			pkt->ip_summed = CHECKSUM_NONE;
		}
#else /* !CONFIG_TCP_CSUM_OFFLOAD_RX */
		pkt->ip_summed = CHECKSUM_NONE;
#endif //CONFIG_TCP_CSUM_OFFLOAD_RX

		ret = rtw_netif_rx(padapter->pnetdev, pkt);
		if (ret == NET_RX_SUCCESS)
			DBG_COUNTER(padapter->rx_logs.os_netif_ok);
		else
			DBG_COUNTER(padapter->rx_logs.os_netif_err);
	}
}
Example 25
int rtw_recv_indicatepkt(struct adapter *padapter,
			 struct recv_frame *precv_frame)
{
	struct recv_priv *precvpriv;
	struct __queue *pfree_recv_queue;
	struct sk_buff *skb;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;

	precvpriv = &padapter->recvpriv;
	pfree_recv_queue = &precvpriv->free_recv_queue;

	skb = precv_frame->pkt;
	if (!skb) {
		RT_TRACE(_module_recv_osdep_c_, _drv_err_,
			 ("%s():skb == NULL something wrong!!!!\n", __func__));
		goto _recv_indicatepkt_drop;
	}

	if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
		struct sk_buff *pskb2 = NULL;
		struct sta_info *psta = NULL;
		struct sta_priv *pstapriv = &padapter->stapriv;
		struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
		bool mcast = is_multicast_ether_addr(pattrib->dst);

		if (memcmp(pattrib->dst, myid(&padapter->eeprompriv),
			   ETH_ALEN)) {
			if (mcast) {
				psta = rtw_get_bcmc_stainfo(padapter);
				pskb2 = skb_clone(skb, GFP_ATOMIC);
			} else {
				psta = rtw_get_stainfo(pstapriv, pattrib->dst);
			}

			if (psta) {
				struct net_device *pnetdev;

				pnetdev = (struct net_device *)padapter->pnetdev;
				skb->dev = pnetdev;
				skb_set_queue_mapping(skb, rtw_recv_select_queue(skb));

				rtw_xmit_entry(skb, pnetdev);

				if (mcast)
					skb = pskb2;
				else
					goto _recv_indicatepkt_end;
			}
		}
	}

	skb->ip_summed = CHECKSUM_NONE;
	skb->dev = padapter->pnetdev;
	skb->protocol = eth_type_trans(skb, padapter->pnetdev);

	netif_rx(skb);

_recv_indicatepkt_end:

	/*  pointers to NULL before rtw_free_recvframe() */
	precv_frame->pkt = NULL;

	rtw_free_recvframe(precv_frame, pfree_recv_queue);

	RT_TRACE(_module_recv_osdep_c_, _drv_info_,
		 ("\n %s :after netif_rx!!!!\n", __func__));

	return _SUCCESS;

_recv_indicatepkt_drop:

	 /* enqueue back to free_recv_queue */
	rtw_free_recvframe(precv_frame, pfree_recv_queue);

	return _FAIL;
}