Example #1
int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *ops,
			   unsigned int encap_type)
{
	int ret;

	if (encap_type == LWTUNNEL_ENCAP_NONE ||
	    encap_type > LWTUNNEL_ENCAP_MAX)
		return -ERANGE;

	ret = (cmpxchg((const struct lwtunnel_encap_ops **)
		       &lwtun_encaps[encap_type],
		       ops, NULL) == ops) ? 0 : -1;

	synchronize_net();

	return ret;
}
Example #2
File: cfg.c Project: DenisLug/mptcp
static int ieee802154_suspend(struct wpan_phy *wpan_phy)
{
	struct ieee802154_local *local = wpan_phy_priv(wpan_phy);

	if (!local->open_count)
		goto suspend;

	ieee802154_stop_queue(&local->hw);
	synchronize_net();

	/* stop hardware - this must stop RX */
	ieee802154_stop_device(local);

suspend:
	local->suspended = true;
	return 0;
}
Example #3
void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
{
	struct vlan_dev_info *vlan = vlan_dev_info(dev);
	struct net_device *real_dev = vlan->real_dev;
	const struct net_device_ops *ops = real_dev->netdev_ops;
	struct vlan_group *grp;
	u16 vlan_id = vlan->vlan_id;

	ASSERT_RTNL();

	grp = rtnl_dereference(real_dev->vlgrp);
	BUG_ON(!grp);

	/* Take it out of our own structures, but be sure to interlock with
	 * HW accelerating devices or SW vlan input packet processing if
	 * VLAN is not 0 (leave it there for 802.1p).
	 */
	if (vlan_id && (real_dev->features & NETIF_F_HW_VLAN_FILTER))
		ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id);

	grp->nr_vlans--;

	if (vlan->flags & VLAN_FLAG_GVRP)
		vlan_gvrp_request_leave(dev);

	vlan_group_set_device(grp, vlan_id, NULL);
	if (!grp->killall)
		synchronize_net();

	unregister_netdevice_queue(dev, head);

	/* If the group is now empty, kill off the group. */
	if (grp->nr_vlans == 0) {
		vlan_gvrp_uninit_applicant(real_dev);

		rcu_assign_pointer(real_dev->vlgrp, NULL);
		if (ops->ndo_vlan_rx_register)
			ops->ndo_vlan_rx_register(real_dev, NULL);

		/* Free the group, after all cpu's are done. */
		call_rcu(&grp->rcu, vlan_rcu_free);
	}

	/* Get rid of the vlan's reference to real_dev */
	dev_put(real_dev);
}
Example #4
int inet_del_protocol(struct net_protocol *prot, unsigned char protocol)
{
	int hash, ret;

	hash = protocol & (MAX_INET_PROTOS - 1);

	spin_lock_bh(&inet_proto_lock);
	if (inet_protos[hash] == prot) {
		inet_protos[hash] = NULL;
		ret = 0;
	} else {
		ret = -1;
	}
	spin_unlock_bh(&inet_proto_lock);

	synchronize_net();

	return ret;
}
Example #5
int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol)
{
	int ret, hash = protocol & (MAX_INET_PROTOS - 1);

	spin_lock_bh(&inet6_proto_lock);

	if (inet6_protos[hash] != prot) {
		ret = -1;
	} else {
		inet6_protos[hash] = NULL;
		ret = 0;
	}

	spin_unlock_bh(&inet6_proto_lock);

	synchronize_net();

	return ret;
}
Example #6
static void __exit br_deinit(void)
{
#ifdef CONFIG_BRIDGE_NETFILTER
	br_netfilter_fini();
#endif
	unregister_netdevice_notifier(&br_device_notifier);
	brioctl_set(NULL);

	br_cleanup_bridges();

	synchronize_net();

#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
	br_fdb_get_hook = NULL;
	br_fdb_put_hook = NULL;
#endif

	br_handle_frame_hook = NULL;
	br_fdb_fini();
}
Example #7
static void __exit br_deinit(void)
{
	rcu_assign_pointer(br_stp_sap->rcv_func, NULL);

	br_netlink_fini();
	br_netfilter_fini();
	unregister_netdevice_notifier(&br_device_notifier);
	brioctl_set(NULL);

	br_cleanup_bridges();

	synchronize_net();

	llc_sap_put(br_stp_sap);
	br_fdb_get_hook = NULL;
	br_fdb_put_hook = NULL;

	br_handle_frame_hook = NULL;
	br_fdb_fini();
}
Example #8
int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family)
{
	struct xfrm_tunnel **pprev;
	int ret = -ENOENT;

	mutex_lock(&tunnel4_mutex);

	for (pprev = fam_handlers(family); *pprev; pprev = &(*pprev)->next) {
		if (*pprev == handler) {
			*pprev = handler->next;
			ret = 0;
			break;
		}
	}

	mutex_unlock(&tunnel4_mutex);

	synchronize_net();

	return ret;
}
Example #9
int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler)
{
	struct xfrm6_tunnel **pprev;
	int ret = -ENOENT;

	mutex_lock(&tunnel6_mutex);

	for (pprev = &tunnel6_handlers; *pprev; pprev = &(*pprev)->next) {
		if (*pprev == handler) {
			*pprev = handler->next;
			ret = 0;
			break;
		}
	}

	mutex_unlock(&tunnel6_mutex);

	synchronize_net();

	return ret;
}
Example #10
int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel *handler)
{
	struct xfrm_tunnel __rcu **pprev;
	struct xfrm_tunnel *t;
	int ret = -ENOENT;

	mutex_lock(&xfrm4_mode_tunnel_input_mutex);
	for (pprev = &rcv_notify_handlers;
	     (t = rcu_dereference_protected(*pprev,
	     lockdep_is_held(&xfrm4_mode_tunnel_input_mutex))) != NULL;
	     pprev = &t->next) {
		if (t == handler) {
			*pprev = handler->next;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&xfrm4_mode_tunnel_input_mutex);
	synchronize_net();

	return ret;
}
Example #11
/*
 *     Module 'remove' entry point.
 *     o delete /proc/net/router directory and static entries.
 */
static void __exit vlan_cleanup_module(void)
{
	int i;

	vlan_ioctl_set(NULL);

	/* Un-register us from receiving netdevice events */
	unregister_netdevice_notifier(&vlan_notifier_block);

	dev_remove_pack(&vlan_packet_type);
	vlan_cleanup_devices();

	/* This table must be empty if there are no module
	 * references left.
	 */
	for (i = 0; i < VLAN_GRP_HASH_SIZE; i++) {
		BUG_ON(!hlist_empty(&vlan_group_hash[i]));
	}
	vlan_proc_cleanup();

	synchronize_net();
}
Example #12
int xfrm4_protocol_deregister(struct xfrm4_protocol *handler,
			      unsigned char protocol)
{
	struct xfrm4_protocol __rcu **pprev;
	struct xfrm4_protocol *t;
	int ret = -ENOENT;

	if (!proto_handlers(protocol) || !netproto(protocol))
		return -EINVAL;

	mutex_lock(&xfrm4_protocol_mutex);

	for (pprev = proto_handlers(protocol);
	     (t = rcu_dereference_protected(*pprev,
			lockdep_is_held(&xfrm4_protocol_mutex))) != NULL;
	     pprev = &t->next) {
		if (t == handler) {
			*pprev = handler->next;
			ret = 0;
			break;
		}
	}

	if (!rcu_dereference_protected(*proto_handlers(protocol),
				       lockdep_is_held(&xfrm4_protocol_mutex))) {
		if (inet_del_protocol(netproto(protocol), protocol) < 0) {
			pr_err("%s: can't remove protocol\n", __func__);
			ret = -EAGAIN;
		}
	}

	mutex_unlock(&xfrm4_protocol_mutex);

	synchronize_net();

	return ret;
}
Example #13
File: mptp.c Project: paulvlase/mptp
static int mptp_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct mptp_sock *ssk = mptp_sk(sk);

	if (unlikely(!sk))
		return 0;

	mptp_unhash(ssk->src);

	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

	synchronize_net();

	sock_orphan(sk);
	sock->sk = NULL;

	skb_queue_purge(&sk->sk_receive_queue);

	log_debug("mptp_release sock=%p\n", sk);
	sock_put(sk);

	return 0;
}
Example #14
int nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *l4proto)
{
	int ret = 0;

	if (l4proto->l3proto >= PF_MAX) {
		ret = -EBUSY;
		goto out;
	}

	if (l4proto == &nf_conntrack_l4proto_generic) {
		nf_ct_l4proto_unregister_sysctl(l4proto);
		goto out;
	}

	write_lock_bh(&nf_conntrack_lock);
	if (nf_ct_protos[l4proto->l3proto][l4proto->l4proto]
	    != l4proto) {
		write_unlock_bh(&nf_conntrack_lock);
		ret = -EBUSY;
		goto out;
	}
	nf_ct_protos[l4proto->l3proto][l4proto->l4proto]
		= &nf_conntrack_l4proto_generic;
	write_unlock_bh(&nf_conntrack_lock);

	nf_ct_l4proto_unregister_sysctl(l4proto);

	/* Somebody could be still looking at the proto in bh. */
	synchronize_net();

	/* Remove all contrack entries for this protocol */
	nf_ct_iterate_cleanup(kill_l4proto, l4proto);

out:
	return ret;
}
Example #15
static void __exit fini(void)
{
    nf_nat_rtsp_hook = NULL;
    nf_nat_rtsp_hook_expectfn = NULL;
    synchronize_net();
}
Example #16
void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
{
    struct tid_ampdu_tx *tid_tx;
    struct ieee80211_local *local = sta->local;
    struct ieee80211_sub_if_data *sdata = sta->sdata;
    u16 start_seq_num;
    int ret;

    tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

    /*
     * Start queuing up packets for this aggregation session.
     * We're going to release them once the driver is OK with
     * that.
     */
    clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

    /*
     * Make sure no packets are being processed. This ensures that
     * we have a valid starting sequence number and that in-flight
     * packets have been flushed out and no packets for this TID
     * will go into the driver during the ampdu_action call.
     */
    synchronize_net();

    start_seq_num = sta->tid_seq[tid] >> 4;

    ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
                           &sta->sta, tid, &start_seq_num, 0);
    if (ret) {
#ifdef CONFIG_MAC80211_HT_DEBUG
        printk(KERN_DEBUG "BA request denied - HW unavailable for"
               " tid %d\n", tid);
#endif
        spin_lock_bh(&sta->lock);
        ieee80211_agg_splice_packets(sdata, tid_tx, tid);
        ieee80211_assign_tid_tx(sta, tid, NULL);
        ieee80211_agg_splice_finish(sdata, tid);
        spin_unlock_bh(&sta->lock);

        kfree_rcu(tid_tx, rcu_head);
        return;
    }

    /* activate the timer for the recipient's addBA response */
    mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
#ifdef CONFIG_MAC80211_HT_DEBUG
    printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
#endif

    spin_lock_bh(&sta->lock);
    sta->ampdu_mlme.last_addba_req_time[tid] = jiffies;
    sta->ampdu_mlme.addba_req_num[tid]++;
    spin_unlock_bh(&sta->lock);

    /* send AddBA request */
    ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
                                 tid_tx->dialog_token, start_seq_num,
                                 local->hw.max_tx_aggregation_subframes,
                                 tid_tx->timeout);
}
Example #17
static int init_or_cleanup(int init)
{
	int ret = 0;

	need_conntrack();

	if (!init) goto cleanup;

#ifdef CONFIG_XFRM
	BUG_ON(ip_nat_decode_session != NULL);
	ip_nat_decode_session = nat_decode_session;
#endif
	ret = ip_nat_rule_init();
	if (ret < 0) {
		printk("ip_nat_init: can't setup rules.\n");
		goto cleanup_decode_session;
	}
	ret = nf_register_hook(&ip_nat_in_ops);
	if (ret < 0) {
		printk("ip_nat_init: can't register in hook.\n");
		goto cleanup_rule_init;
	}
	ret = nf_register_hook(&ip_nat_out_ops);
	if (ret < 0) {
		printk("ip_nat_init: can't register out hook.\n");
		goto cleanup_inops;
	}
	ret = nf_register_hook(&ip_nat_adjust_in_ops);
	if (ret < 0) {
		printk("ip_nat_init: can't register adjust in hook.\n");
		goto cleanup_outops;
	}
	ret = nf_register_hook(&ip_nat_adjust_out_ops);
	if (ret < 0) {
		printk("ip_nat_init: can't register adjust out hook.\n");
		goto cleanup_adjustin_ops;
	}
	ret = nf_register_hook(&ip_nat_local_out_ops);
	if (ret < 0) {
		printk("ip_nat_init: can't register local out hook.\n");
		goto cleanup_adjustout_ops;
	}
	ret = nf_register_hook(&ip_nat_local_in_ops);
	if (ret < 0) {
		printk("ip_nat_init: can't register local in hook.\n");
		goto cleanup_localoutops;
	}
	return ret;

 cleanup:
	nf_unregister_hook(&ip_nat_local_in_ops);
 cleanup_localoutops:
	nf_unregister_hook(&ip_nat_local_out_ops);
 cleanup_adjustout_ops:
	nf_unregister_hook(&ip_nat_adjust_out_ops);
 cleanup_adjustin_ops:
	nf_unregister_hook(&ip_nat_adjust_in_ops);
 cleanup_outops:
	nf_unregister_hook(&ip_nat_out_ops);
 cleanup_inops:
	nf_unregister_hook(&ip_nat_in_ops);
 cleanup_rule_init:
	ip_nat_rule_cleanup();
 cleanup_decode_session:
#ifdef CONFIG_XFRM
	ip_nat_decode_session = NULL;
	synchronize_net();
#endif
	return ret;
}
Example #18
File: agg-tx.c Project: 7799/linux
int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
				    enum ieee80211_agg_stop_reason reason)
{
	struct ieee80211_local *local = sta->local;
	struct tid_ampdu_tx *tid_tx;
	enum ieee80211_ampdu_mlme_action action;
	int ret;

	lockdep_assert_held(&sta->ampdu_mlme.mtx);

	switch (reason) {
	case AGG_STOP_DECLINED:
	case AGG_STOP_LOCAL_REQUEST:
	case AGG_STOP_PEER_REQUEST:
		action = IEEE80211_AMPDU_TX_STOP_CONT;
		break;
	case AGG_STOP_DESTROY_STA:
		action = IEEE80211_AMPDU_TX_STOP_FLUSH;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	spin_lock_bh(&sta->lock);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	if (!tid_tx) {
		spin_unlock_bh(&sta->lock);
		return -ENOENT;
	}

	/*
	 * if we're already stopping ignore any new requests to stop
	 * unless we're destroying it in which case notify the driver
	 */
	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		spin_unlock_bh(&sta->lock);
		if (reason != AGG_STOP_DESTROY_STA)
			return -EALREADY;
		ret = drv_ampdu_action(local, sta->sdata,
				       IEEE80211_AMPDU_TX_STOP_FLUSH_CONT,
				       &sta->sta, tid, NULL, 0);
		WARN_ON_ONCE(ret);
		return 0;
	}

	if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
		/* not even started yet! */
		ieee80211_assign_tid_tx(sta, tid, NULL);
		spin_unlock_bh(&sta->lock);
		kfree_rcu(tid_tx, rcu_head);
		return 0;
	}

	set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);

	spin_unlock_bh(&sta->lock);

	ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n",
	       sta->sta.addr, tid);

	del_timer_sync(&tid_tx->addba_resp_timer);
	del_timer_sync(&tid_tx->session_timer);

	/*
	 * After this packets are no longer handed right through
	 * to the driver but are put onto tid_tx->pending instead,
	 * with locking to ensure proper access.
	 */
	clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);

	/*
	 * There might be a few packets being processed right now (on
	 * another CPU) that have already gotten past the aggregation
	 * check when it was still OPERATIONAL and consequently have
	 * IEEE80211_TX_CTL_AMPDU set. In that case, this code might
	 * call into the driver at the same time or even before the
	 * TX paths calls into it, which could confuse the driver.
	 *
	 * Wait for all currently running TX paths to finish before
	 * telling the driver. New packets will not go through since
	 * the aggregation session is no longer OPERATIONAL.
	 */
	synchronize_net();

	tid_tx->stop_initiator = reason == AGG_STOP_PEER_REQUEST ?
					WLAN_BACK_RECIPIENT :
					WLAN_BACK_INITIATOR;
	tid_tx->tx_stop = reason == AGG_STOP_LOCAL_REQUEST;

	ret = drv_ampdu_action(local, sta->sdata, action,
			       &sta->sta, tid, NULL, 0);

	/* HW shall not deny going back to legacy */
	if (WARN_ON(ret)) {
		/*
		 * We may have pending packets get stuck in this case...
		 * Not bothering with a workaround for now.
		 */
	}

	/*
	 * In the case of AGG_STOP_DESTROY_STA, the driver won't
	 * necessarily call ieee80211_stop_tx_ba_cb(), so this may
	 * seem like we can leave the tid_tx data pending forever.
	 * This is true, in a way, but "forever" is only until the
	 * station struct is actually destroyed. In the meantime,
	 * leaving it around ensures that we don't transmit packets
	 * to the driver on this TID which might confuse it.
	 */

	return 0;
}
Example #19
static void __exit ip_nat_amanda_fini(void)
{
	ip_nat_amanda_hook = NULL;
	/* Make sure no one calls it, meanwhile. */
	synchronize_net();
}
Example #20
static void __exit fini(void)
{
    ip_nat_mms_hook = NULL;
    synchronize_net();
}
Example #21
static void __exit ebtable_broute_fini(void)
{
	rcu_assign_pointer(br_should_route_hook, NULL);
	synchronize_net();
	ebt_unregister_table(&broute_table);
}
Example #22
static int init_or_cleanup(int init)
{
	int ret = 0;

	if (!init) goto cleanup;

	ret = nf_ct_frag6_init();
	if (ret < 0) {
		printk("nf_conntrack_ipv6: can't initialize frag6.\n");
		goto cleanup_nothing;
	}
	ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_tcp6);
	if (ret < 0) {
		printk("nf_conntrack_ipv6: can't register tcp.\n");
		goto cleanup_frag6;
	}

	ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_udp6);
	if (ret < 0) {
		printk("nf_conntrack_ipv6: can't register udp.\n");
		goto cleanup_tcp;
	}

	ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_icmpv6);
	if (ret < 0) {
		printk("nf_conntrack_ipv6: can't register icmpv6.\n");
		goto cleanup_udp;
	}

	ret = nf_conntrack_l3proto_register(&nf_conntrack_l3proto_ipv6);
	if (ret < 0) {
		printk("nf_conntrack_ipv6: can't register ipv6\n");
		goto cleanup_icmpv6;
	}

	ret = nf_register_hook(&ipv6_conntrack_defrag_ops);
	if (ret < 0) {
		printk("nf_conntrack_ipv6: can't register pre-routing defrag "
		       "hook.\n");
		goto cleanup_ipv6;
	}

	ret = nf_register_hook(&ipv6_conntrack_defrag_local_out_ops);
	if (ret < 0) {
		printk("nf_conntrack_ipv6: can't register local_out defrag "
		       "hook.\n");
		goto cleanup_defragops;
	}

	ret = nf_register_hook(&ipv6_conntrack_in_ops);
	if (ret < 0) {
		printk("nf_conntrack_ipv6: can't register pre-routing hook.\n");
		goto cleanup_defraglocalops;
	}

	ret = nf_register_hook(&ipv6_conntrack_local_out_ops);
	if (ret < 0) {
		printk("nf_conntrack_ipv6: can't register local out hook.\n");
		goto cleanup_inops;
	}

	ret = nf_register_hook(&ipv6_conntrack_out_ops);
	if (ret < 0) {
		printk("nf_conntrack_ipv6: can't register post-routing hook.\n");
		goto cleanup_inandlocalops;
	}

	ret = nf_register_hook(&ipv6_conntrack_local_in_ops);
	if (ret < 0) {
		printk("nf_conntrack_ipv6: can't register local in hook.\n");
		goto cleanup_inoutandlocalops;
	}

#ifdef CONFIG_SYSCTL
	nf_ct_ipv6_sysctl_header = register_sysctl_table(nf_ct_net_table, 0);
	if (nf_ct_ipv6_sysctl_header == NULL) {
		printk("nf_conntrack: can't register to sysctl.\n");
		ret = -ENOMEM;
		goto cleanup_localinops;
	}
#endif
	return ret;

 cleanup:
	synchronize_net();
#ifdef CONFIG_SYSCTL
 	unregister_sysctl_table(nf_ct_ipv6_sysctl_header);
 cleanup_localinops:
#endif
	nf_unregister_hook(&ipv6_conntrack_local_in_ops);
 cleanup_inoutandlocalops:
	nf_unregister_hook(&ipv6_conntrack_out_ops);
 cleanup_inandlocalops:
	nf_unregister_hook(&ipv6_conntrack_local_out_ops);
 cleanup_inops:
	nf_unregister_hook(&ipv6_conntrack_in_ops);
 cleanup_defraglocalops:
	nf_unregister_hook(&ipv6_conntrack_defrag_local_out_ops);
 cleanup_defragops:
	nf_unregister_hook(&ipv6_conntrack_defrag_ops);
 cleanup_ipv6:
	nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv6);
 cleanup_icmpv6:
	nf_conntrack_protocol_unregister(&nf_conntrack_protocol_icmpv6);
 cleanup_udp:
	nf_conntrack_protocol_unregister(&nf_conntrack_protocol_udp6);
 cleanup_tcp:
	nf_conntrack_protocol_unregister(&nf_conntrack_protocol_tcp6);
 cleanup_frag6:
	nf_ct_frag6_cleanup();
 cleanup_nothing:
	return ret;
}
Example #23
File: llc_input.c Project: 020gzh/linux
void llc_remove_pack(int type)
{
	if (type == LLC_DEST_SAP || type == LLC_DEST_CONN)
		llc_type_handlers[type - 1] = NULL;
	synchronize_net();
}
Example #24
int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
				    enum ieee80211_back_parties initiator,
				    bool tx)
{
	struct ieee80211_local *local = sta->local;
	struct tid_ampdu_tx *tid_tx;
	int ret;

	lockdep_assert_held(&sta->ampdu_mlme.mtx);

	spin_lock_bh(&sta->lock);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	if (!tid_tx) {
		spin_unlock_bh(&sta->lock);
		return -ENOENT;
	}

	/* if we're already stopping ignore any new requests to stop */
	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		spin_unlock_bh(&sta->lock);
		return -EALREADY;
	}

	if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
		/* not even started yet! */
		ieee80211_assign_tid_tx(sta, tid, NULL);
		spin_unlock_bh(&sta->lock);
		kfree_rcu(tid_tx, rcu_head);
		return 0;
	}

	set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);

	spin_unlock_bh(&sta->lock);

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n",
	       sta->sta.addr, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	del_timer_sync(&tid_tx->addba_resp_timer);

	/*
	 * After this packets are no longer handed right through
	 * to the driver but are put onto tid_tx->pending instead,
	 * with locking to ensure proper access.
	 */
	clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);

	/*
	 * There might be a few packets being processed right now (on
	 * another CPU) that have already gotten past the aggregation
	 * check when it was still OPERATIONAL and consequently have
	 * IEEE80211_TX_CTL_AMPDU set. In that case, this code might
	 * call into the driver at the same time or even before the
	 * TX paths calls into it, which could confuse the driver.
	 *
	 * Wait for all currently running TX paths to finish before
	 * telling the driver. New packets will not go through since
	 * the aggregation session is no longer OPERATIONAL.
	 */
	synchronize_net();

	tid_tx->stop_initiator = initiator;
	tid_tx->tx_stop = tx;

	ret = drv_ampdu_action(local, sta->sdata,
			       IEEE80211_AMPDU_TX_STOP,
			       &sta->sta, tid, NULL, 0);

	/* HW shall not deny going back to legacy */
	if (WARN_ON(ret)) {
		/*
		 * We may have pending packets get stuck in this case...
		 * Not bothering with a workaround for now.
		 */
	}

	return ret;
}
Example #25
/* This returns 0 if everything went fine.
 * It will return 1 if the group was killed as a result.
 * A negative return indicates failure.
 *
 * The RTNL lock must be held.
 */
static int unregister_vlan_dev(struct net_device *real_dev,
			       unsigned short vlan_id)
{
	struct net_device *dev = NULL;
	int real_dev_ifindex = real_dev->ifindex;
	struct vlan_group *grp;
	int i, ret;

#ifdef VLAN_DEBUG
	printk(VLAN_DBG "%s: VID: %i\n", __FUNCTION__, vlan_id);
#endif

	/* sanity check */
	if (vlan_id >= VLAN_VID_MASK)
		return -EINVAL;

	ASSERT_RTNL();
	grp = vlan_find_group(real_dev_ifindex);

	ret = 0;

	if (grp) {
		dev = vlan_group_get_device(grp, vlan_id);
		if (dev) {
			/* Remove proc entry */
			vlan_proc_rem_dev(dev);

			/* Take it out of our own structures, but be sure to
			 * interlock with HW accelerating devices or SW vlan
			 * input packet processing.
			 */
			if (real_dev->features &
			    (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER)) {
				real_dev->vlan_rx_kill_vid(real_dev, vlan_id);
			}

			vlan_group_set_device(grp, vlan_id, NULL);
			synchronize_net();


			/* Caller unregisters (and if necessary, puts)
			 * VLAN device, but we get rid of the reference to
			 * real_dev here.
			 */
			dev_put(real_dev);

			/* If the group is now empty, kill off the
			 * group.
			 */
			for (i = 0; i < VLAN_VID_MASK; i++)
				if (vlan_group_get_device(grp, i))
					break;

			if (i == VLAN_VID_MASK) {
				if (real_dev->features & NETIF_F_HW_VLAN_RX)
					real_dev->vlan_rx_register(real_dev, NULL);

				hlist_del_rcu(&grp->hlist);

				/* Free the group, after all cpu's are done. */
				call_rcu(&grp->rcu, vlan_rcu_free);

				grp = NULL;
				ret = 1;
			}
		}
	}

	return ret;
}
Example #26
static int init_or_cleanup(int init)
{
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *proc, *proc_exp, *proc_stat;
#endif
	int ret = 0;

	if (!init) goto cleanup;

	ret = ip_conntrack_init();
	if (ret < 0)
		goto cleanup_nothing;

#ifdef CONFIG_PROC_FS
	ret = -ENOMEM;
	proc = proc_net_fops_create("ip_conntrack", 0440, &ct_file_ops);
	if (!proc) goto cleanup_init;

	proc_exp = proc_net_fops_create("ip_conntrack_expect", 0440,
					&exp_file_ops);
	if (!proc_exp) goto cleanup_proc;

	proc_stat = create_proc_entry("ip_conntrack", S_IRUGO, proc_net_stat);
	if (!proc_stat)
		goto cleanup_proc_exp;

	proc_stat->proc_fops = &ct_cpu_seq_fops;
	proc_stat->owner = THIS_MODULE;
#endif

	ret = nf_register_hook(&ip_conntrack_defrag_ops);
	if (ret < 0) {
		printk("ip_conntrack: can't register pre-routing defrag hook.\n");
		goto cleanup_proc_stat;
	}
	ret = nf_register_hook(&ip_conntrack_defrag_local_out_ops);
	if (ret < 0) {
		printk("ip_conntrack: can't register local_out defrag hook.\n");
		goto cleanup_defragops;
	}
	ret = nf_register_hook(&ip_conntrack_in_ops);
	if (ret < 0) {
		printk("ip_conntrack: can't register pre-routing hook.\n");
		goto cleanup_defraglocalops;
	}
	ret = nf_register_hook(&ip_conntrack_local_out_ops);
	if (ret < 0) {
		printk("ip_conntrack: can't register local out hook.\n");
		goto cleanup_inops;
	}
	ret = nf_register_hook(&ip_conntrack_out_ops);
	if (ret < 0) {
		printk("ip_conntrack: can't register post-routing hook.\n");
		goto cleanup_inandlocalops;
	}
	ret = nf_register_hook(&ip_conntrack_local_in_ops);
	if (ret < 0) {
		printk("ip_conntrack: can't register local in hook.\n");
		goto cleanup_inoutandlocalops;
	}
#ifdef CONFIG_SYSCTL
	ip_ct_sysctl_header = register_sysctl_table(ip_ct_net_table, 0);
	if (ip_ct_sysctl_header == NULL) {
		printk("ip_conntrack: can't register to sysctl.\n");
		goto cleanup;
	}
#endif

	return ret;

 cleanup:
#ifdef CONFIG_SYSCTL
 	unregister_sysctl_table(ip_ct_sysctl_header);
#endif
	nf_unregister_hook(&ip_conntrack_local_in_ops);
 cleanup_inoutandlocalops:
	nf_unregister_hook(&ip_conntrack_out_ops);
 cleanup_inandlocalops:
	nf_unregister_hook(&ip_conntrack_local_out_ops);
 cleanup_inops:
	nf_unregister_hook(&ip_conntrack_in_ops);
 cleanup_defraglocalops:
	nf_unregister_hook(&ip_conntrack_defrag_local_out_ops);
 cleanup_defragops:
	/* Frag queues may hold fragments with skb->dst == NULL */
	ip_ct_no_defrag = 1;
	synchronize_net();
	local_bh_disable();
	ipfrag_flush();
	local_bh_enable();
	nf_unregister_hook(&ip_conntrack_defrag_ops);
 cleanup_proc_stat:
#ifdef CONFIG_PROC_FS
	proc_net_remove("ip_conntrack_stat");
cleanup_proc_exp:
	proc_net_remove("ip_conntrack_expect");
 cleanup_proc:
	proc_net_remove("ip_conntrack");
 cleanup_init:
#endif /* CONFIG_PROC_FS */
	ip_conntrack_cleanup();
 cleanup_nothing:
	return ret;
}
Example #27
/*
 * Mangle the "Transport:" header:
 *   - Replace all occurrences of "client_port=<spec>"
 *   - Handle destination parameter
 *
 * In:
 *   ct, ctinfo = conntrack context
 *   skb        = packet
 *   tranoff    = Transport header offset from TCP data
 *   tranlen    = Transport header length (incl. CRLF)
 *   rport_lo   = replacement low  port (host endian)
 *   rport_hi   = replacement high port (host endian)
 *
 * Returns packet size difference.
 *
 * Assumes that a complete transport header is present, ending with CR or LF
 */
static int
rtsp_mangle_tran(enum ip_conntrack_info ctinfo,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
		 unsigned int protoff,
#endif
		 struct nf_conntrack_expect* rtp_exp,
		 struct nf_conntrack_expect* rtcp_exp,
		 struct ip_ct_rtsp_expect* prtspexp,
		 struct sk_buff* skb, uint tranoff, uint tranlen)
{
	char*  ptcp;
	uint   tcplen;
	char*  ptran;
	char   rbuf1[16];	  /* Replacement buffer (one port) */
	uint   rbuf1len;	  /* Replacement len (one port) */
	char   rbufa[16];	  /* Replacement buffer (all ports) */
	uint   rbufalen;	  /* Replacement len (all ports) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
	union nf_inet_addr newip;
#else
	u_int32_t  newip;
#endif
	u_int16_t loport, hiport;
	uint      off = 0;
	uint      diff;		   /* Number of bytes we removed */

	struct nf_conn *ct = rtp_exp->master;
	/* struct nf_conn *ct = nf_ct_get(skb, &ctinfo); */
	struct nf_conntrack_tuple *rtp_t;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
	char szextaddr[INET6_ADDRSTRLEN];
#else
	char szextaddr[INET_ADDRSTRLEN];
#endif
	uint extaddrlen;
	int  is_stun;

	get_skb_tcpdata(skb, &ptcp, &tcplen);
	ptran = ptcp+tranoff;

	if (tranoff+tranlen > tcplen || tcplen-tranoff < tranlen ||
	    tranlen < 10 || !iseol(ptran[tranlen-1]) ||
	    nf_strncasecmp(ptran, "Transport:", 10) != 0) {
		pr_info("sanity check failed\n");
		return 0;
	}
	off += 10;
	SKIP_WSPACE(ptcp+tranoff, tranlen, off);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
	newip = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3;
	rtp_t = &rtp_exp->tuple;
	rtp_t->dst.u3 = newip;
	if (rtcp_exp) {
		rtcp_exp->tuple.dst.u3 = newip;
	}
	extaddrlen = rtsp_sprintf_addr(ct, szextaddr, &newip, true); // FIXME handle extip
	pr_debug("stunaddr=%s (auto)\n", szextaddr);
#else
	newip = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip;
	rtp_t = &rtp_exp->tuple;
	rtp_t->dst.u3.ip = newip;
	if (rtcp_exp) {
		rtcp_exp->tuple.dst.u3.ip = newip;
	}
	extaddrlen = extip ? sprintf(szextaddr, "%pI4", &extip)
			   : sprintf(szextaddr, "%pI4", &newip);
	pr_debug("stunaddr=%s (%s)\n", szextaddr, (extip?"forced":"auto"));
#endif
	hiport = 0;
	rbuf1len = rbufalen = 0;
	switch (prtspexp->pbtype) {
	case pb_single:
		for (loport = prtspexp->loport; loport != 0; loport++) { /* XXX: improper wrap? */
			rtp_t->dst.u.udp.port = htons(loport);
			if (nf_ct_expect_related(rtp_exp) == 0) {
				pr_debug("using port %hu\n", loport);
				break;
			}
		}
		if (loport != 0) {
			rbuf1len = sprintf(rbuf1, "%hu", loport);
			rbufalen = sprintf(rbufa, "%hu", loport);
		}
		break;
	case pb_range:
		for (loport = prtspexp->loport; loport != 0; loport += 2) { /* XXX: improper wrap? */
			rtp_t->dst.u.udp.port = htons(loport);
			if (nf_ct_expect_related(rtp_exp) != 0) {
				continue;
			}
			hiport = loport + 1;
			rtcp_exp->tuple.dst.u.udp.port = htons(hiport);
			if (nf_ct_expect_related(rtcp_exp) != 0) {
				nf_ct_unexpect_related(rtp_exp);
				continue;
			}

			/* FIXME: invalid print in case of ipv6 */
			pr_debug("nat expect_related %pI4:%u-%u-%pI4:%u-%u\n",
				 &rtp_exp->tuple.src.u3.ip,
				 ntohs(rtp_exp->tuple.src.u.udp.port),
				 ntohs(rtcp_exp->tuple.src.u.udp.port),
				 &rtp_exp->tuple.dst.u3.ip,
				 ntohs(rtp_exp->tuple.dst.u.udp.port),
				 ntohs(rtcp_exp->tuple.dst.u.udp.port));
			break;
		}
		if (loport != 0) {
			rbuf1len = sprintf(rbuf1, "%hu", loport);
			rbufalen = sprintf(rbufa, "%hu-%hu", loport, hiport);
		}
		break;
	case pb_discon:
		for (loport = prtspexp->loport; loport != 0; loport++) { /* XXX: improper wrap? */
			rtp_t->dst.u.udp.port = htons(loport);
			if (nf_ct_expect_related(rtp_exp) == 0) {
				pr_debug("using port %hu (1 of 2)\n", loport);
				break;
			}
		}
		for (hiport = prtspexp->hiport; hiport != 0; hiport++) { /* XXX: improper wrap? */
			rtp_t->dst.u.udp.port = htons(hiport);
			if (nf_ct_expect_related(rtp_exp) == 0) {
				pr_debug("using port %hu (2 of 2)\n", hiport);
				break;
			}
		}
		if (loport != 0 && hiport != 0) {
			rbuf1len = sprintf(rbuf1, "%hu", loport);
			rbufalen = sprintf(rbufa, hiport == loport+1 ?
					   "%hu-%hu":"%hu/%hu", loport, hiport);
		}
		break;
	}

	if (rbuf1len == 0)
		return 0;   /* cannot get replacement port(s) */

	/* Transport: tran;field;field=val,tran;field;field=val,...
	   `off` is set to the start of Transport value from start of line
	*/
	while (off < tranlen) {
		uint        saveoff;
		const char* pparamend;
		uint        nextparamoff;

		pparamend = memchr(ptran+off, ',', tranlen-off);
		pparamend = (pparamend == NULL) ? ptran+tranlen : pparamend+1;
		nextparamoff = pparamend-ptran;

		/*
		 * We pass over each param twice.  On the first pass, we look for a
		 * destination= field.  It is handled by the security policy.  If it
		 * is present, allowed, and equal to our external address, we assume
		 * that STUN is being used and we leave the client_port= field alone.
		 */
		is_stun = 0;
		saveoff = off;
		while (off < nextparamoff) {
			const char* pfieldend;
			uint        nextfieldoff;

			pfieldend = memchr(ptran+off, ';', nextparamoff-off);
			nextfieldoff = (pfieldend == NULL) ? nextparamoff : pfieldend-ptran+1;

			if (dstact != DSTACT_NONE && strncmp(ptran+off, "destination=", 12) == 0) {
				if (strncmp(ptran+off+12, szextaddr, extaddrlen) == 0)
					is_stun = 1;

				if (dstact == DSTACT_STRIP || (dstact == DSTACT_AUTO && !is_stun)) {
					uint dstoff = (ptran-ptcp)+off;
					uint dstlen = nextfieldoff-off;
					char* pdstrep = NULL;
					uint dstreplen = 0;
					diff = dstlen;
					if (dstact == DSTACT_AUTO && !is_stun) {
						pr_debug("RTSP: replace dst addr\n");
						dstoff += 12;
						dstlen -= 13;
						pdstrep = szextaddr;
						dstreplen = extaddrlen;
						diff = nextfieldoff-off-13-extaddrlen;
					}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
					if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff,
								      dstoff, dstlen, pdstrep, dstreplen)) {
#else
					if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
								      dstoff, dstlen, pdstrep, dstreplen)) {
#endif
						/* mangle failed, all we can do is bail */
						nf_ct_unexpect_related(rtp_exp);
						if (rtcp_exp)
							nf_ct_unexpect_related(rtcp_exp);
						return 0;
					}
					get_skb_tcpdata(skb, &ptcp, &tcplen);
					ptran = ptcp+tranoff;
					tranlen -= diff;
					nextparamoff -= diff;
					nextfieldoff -= diff;
				}
			}

			off = nextfieldoff;
		}

		if (is_stun)
			continue;

		off = saveoff;
		while (off < nextparamoff) {
			const char* pfieldend;
			uint        nextfieldoff;

			pfieldend = memchr(ptran+off, ';', nextparamoff-off);
			nextfieldoff = (pfieldend == NULL) ? nextparamoff : pfieldend-ptran+1;

			if (strncmp(ptran+off, "client_port=", 12) == 0) {
				u_int16_t port;
				uint	  numlen;
				uint      origoff;
				uint      origlen;
				char*     rbuf = rbuf1;
				uint      rbuflen = rbuf1len;

				off += 12;
				origoff = (ptran-ptcp)+off;
				origlen = 0;
				numlen = nf_strtou16(ptran+off, &port);
				off += numlen;
				origlen += numlen;
				if (port != prtspexp->loport) {
					pr_debug("multiple ports found, port %hu ignored\n", port);
				} else {
					if (ptran[off] == '-' || ptran[off] == '/') {
						off++;
						origlen++;
						numlen = nf_strtou16(ptran+off, &port);
						off += numlen;
						origlen += numlen;
						rbuf = rbufa;
						rbuflen = rbufalen;
					}

					/*
					 * note we cannot just memcpy() if the sizes are the same.
					 * the mangle function does skb resizing, checks for a
					 * cloned skb, and updates the checksums.
					 *
					 * parameter 4 below is offset from start of tcp data.
					 */
					diff = origlen-rbuflen;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
					if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff,
								      origoff, origlen, rbuf, rbuflen)) {
#else
					if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
								      origoff, origlen, rbuf, rbuflen)) {
#endif
						/* mangle failed, all we can do is bail */
						nf_ct_unexpect_related(rtp_exp);
						if (rtcp_exp)
							nf_ct_unexpect_related(rtcp_exp);
						return 0;
					}
					get_skb_tcpdata(skb, &ptcp, &tcplen);
					ptran = ptcp+tranoff;
					tranlen -= diff;
					nextparamoff -= diff;
					nextfieldoff -= diff;
				}
			}

			off = nextfieldoff;
		}

		off = nextparamoff;
	}

	return 1;
}

static uint
help_out(struct sk_buff *skb, enum ip_conntrack_info ctinfo,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
	 unsigned int protoff,
#endif
	 unsigned int matchoff, unsigned int matchlen,
	 struct ip_ct_rtsp_expect* prtspexp,
	 struct nf_conntrack_expect* rtp_exp,
	 struct nf_conntrack_expect* rtcp_exp)
{
	char* ptcp;
	uint  tcplen;
	uint  hdrsoff;
	uint  hdrslen;
	uint  lineoff;
	uint  linelen;
	uint  off;
	int   dir = CTINFO2DIR(ctinfo);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
	union nf_inet_addr saddr = rtp_exp->master->tuplehash[dir].tuple.src.u3;
#else
	__be32 saddr = rtp_exp->master->tuplehash[dir].tuple.src.u3.ip;
#endif

	//struct iphdr* iph = (struct iphdr*)(*pskb)->nh.iph;
	//struct tcphdr* tcph = (struct tcphdr*)((void*)iph + iph->ihl*4);

	get_skb_tcpdata(skb, &ptcp, &tcplen);
	hdrsoff = matchoff;//exp->seq - ntohl(tcph->seq);
	hdrslen = matchlen;
	off = hdrsoff;
	pr_debug("NAT rtsp help_out\n");

	while (nf_mime_nextline(ptcp, hdrsoff+hdrslen, &off, &lineoff, &linelen)) {
		if (linelen == 0)
			break;

		if (off > hdrsoff+hdrslen) {
			pr_info("!! overrun !!");
			break;
		}
		pr_debug("hdr: len=%u, %.*s", linelen, (int)linelen, ptcp+lineoff);

		if (nf_strncasecmp(ptcp+lineoff, "Transport:", 10) == 0) {
			uint oldtcplen = tcplen;
			pr_debug("hdr: Transport\n");
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
			if (!rtsp_mangle_tran(ctinfo, protoff, rtp_exp, rtcp_exp,
					      prtspexp, skb, lineoff, linelen)) {
#else
			if (!rtsp_mangle_tran(ctinfo, rtp_exp, rtcp_exp, prtspexp,
					      skb, lineoff, linelen)) {
#endif
				pr_debug("hdr: Transport mangle failed");
				break;
			}
			rtp_exp->expectfn = nf_nat_rtsp_expected;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
			rtp_exp->saved_addr = saddr;
#else
			rtp_exp->saved_ip = saddr;
#endif
			rtp_exp->saved_proto.udp.port = htons(prtspexp->loport);
			rtp_exp->dir = !dir;
			if (rtcp_exp) {
				rtcp_exp->expectfn = nf_nat_rtsp_expected;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
				rtcp_exp->saved_addr = saddr;
#else
				rtcp_exp->saved_ip = saddr;
#endif
				rtcp_exp->saved_proto.udp.port = htons(prtspexp->hiport);
				rtcp_exp->dir = !dir;
			}
			get_skb_tcpdata(skb, &ptcp, &tcplen);
			hdrslen -= (oldtcplen-tcplen);
			off -= (oldtcplen-tcplen);
			lineoff -= (oldtcplen-tcplen);
			linelen -= (oldtcplen-tcplen);
			pr_debug("rep: len=%u, %.*s", linelen, (int)linelen, ptcp+lineoff);
		}
	}

	return NF_ACCEPT;
}

static unsigned int
nf_nat_rtsp(struct sk_buff *skb, enum ip_conntrack_info ctinfo,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
	    unsigned int protoff,
#endif
	    unsigned int matchoff, unsigned int matchlen,
	    struct ip_ct_rtsp_expect* prtspexp,
	    struct nf_conntrack_expect* rtp_exp,
	    struct nf_conntrack_expect* rtcp_exp)
{
	int dir = CTINFO2DIR(ctinfo);
	int rc = NF_ACCEPT;

	switch (dir) {
	case IP_CT_DIR_ORIGINAL:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
		rc = help_out(skb, ctinfo, protoff, matchoff, matchlen, prtspexp,
			      rtp_exp, rtcp_exp);
#else
		rc = help_out(skb, ctinfo, matchoff, matchlen, prtspexp,
			      rtp_exp, rtcp_exp);
#endif
		break;
	case IP_CT_DIR_REPLY:
		pr_debug("unmangle ! %u\n", ctinfo);
		/* XXX: unmangle */
		rc = NF_ACCEPT;
		break;
	}
	//UNLOCK_BH(&ip_rtsp_lock);

	return rc;
}

static void nf_nat_rtsp_expected(struct nf_conn* ct, struct nf_conntrack_expect *exp)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
	struct nf_nat_range range;
#else
	struct nf_nat_ipv4_range range;
#endif

	/* This must be a fresh one. */
	BUG_ON(ct->status & IPS_NAT_DONE_MASK);

	/* For DST manip, map port here to where it's expected. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
	range.min_proto = range.max_proto = exp->saved_proto;
	range.min_addr = range.max_addr = exp->saved_addr;
#else
	range.min = range.max = exp->saved_proto;
	range.min_ip = range.max_ip = exp->saved_ip;
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)
	range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
	nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
#else
	range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
	nf_nat_setup_info(ct, &range, IP_NAT_MANIP_DST);
#endif

	/* Change src to where master sends to, but only if the connection
	 * actually came from the same source. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
	if (nf_inet_addr_cmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3,
			     &ct->master->tuplehash[exp->dir].tuple.src.u3)) {
		range.min_addr = range.max_addr
			= ct->master->tuplehash[!exp->dir].tuple.dst.u3;
#else
	if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip ==
	    ct->master->tuplehash[exp->dir].tuple.src.u3.ip) {
		range.min_ip = range.max_ip
			= ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)
		range.flags = NF_NAT_RANGE_MAP_IPS;
		nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
#else
		range.flags = IP_NAT_RANGE_MAP_IPS;
		nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC);
#endif
	}
}


static void __exit fini(void)
{
	rcu_assign_pointer(nf_nat_rtsp_hook, NULL);
	synchronize_net();
}

static int __init init(void)
{
	printk("nf_nat_rtsp v" IP_NF_RTSP_VERSION " loading\n");

	BUG_ON(nf_nat_rtsp_hook);
	rcu_assign_pointer(nf_nat_rtsp_hook, nf_nat_rtsp);

	if (stunaddr != NULL)
		extip = in_aton(stunaddr);

	if (destaction != NULL) {
		if (strcmp(destaction, "auto") == 0)
			dstact = DSTACT_AUTO;

		if (strcmp(destaction, "strip") == 0)
			dstact = DSTACT_STRIP;

		if (strcmp(destaction, "none") == 0)
			dstact = DSTACT_NONE;
	}

	return 0;
}
Example #28
static void __exit ebtable_broute_fini(void)
{
	br_should_route_hook = NULL;
	synchronize_net();
	ebt_unregister_table(&broute_table);
}
Example #29
File: agg-tx.c Project: mcr/linux-2.6
void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
{
	struct tid_ampdu_tx *tid_tx = sta->ampdu_mlme.tid_tx[tid];
	struct ieee80211_local *local = sta->local;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	u16 start_seq_num;
	int ret;

	lockdep_assert_held(&sta->ampdu_mlme.mtx);

	/*
	 * While we're asking the driver about the aggregation,
	 * stop the AC queue so that we don't have to worry
	 * about frames that came in while we were doing that,
	 * which would require us to put them to the AC pending
	 * afterwards which just makes the code more complex.
	 */
	ieee80211_stop_queue_agg(local, tid);

	clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	/*
	 * make sure no packets are being processed to get
	 * valid starting sequence number
	 */
	synchronize_net();

	start_seq_num = sta->tid_seq[tid] >> 4;

	ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
			       &sta->sta, tid, &start_seq_num);
	if (ret) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "BA request denied - HW unavailable for"
					" tid %d\n", tid);
#endif
		spin_lock_bh(&sta->lock);
		rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL);
		spin_unlock_bh(&sta->lock);

		ieee80211_wake_queue_agg(local, tid);
		call_rcu(&tid_tx->rcu_head, kfree_tid_tx);
		return;
	}

	/* we can take packets again now */
	ieee80211_wake_queue_agg(local, tid);

	/* activate the timer for the recipient's addBA response */
	mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
#endif

	spin_lock_bh(&sta->lock);
	sta->ampdu_mlme.addba_req_num[tid]++;
	spin_unlock_bh(&sta->lock);

	/* send AddBA request */
	ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
				     tid_tx->dialog_token, start_seq_num,
				     0x40, 5000);
}
Example #30
static void __exit ebtable_broute_fini(void)
{
	rcu_assign_pointer(br_should_route_hook, NULL);
	synchronize_net();
	unregister_pernet_subsys(&broute_net_ops);
}
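
Every example above follows the same shape: unpublish a handler or device pointer while holding the write-side lock, call synchronize_net() so that packet-processing paths still running under rcu_read_lock() (or in softirq context) can drain, and only then free the object or return to the caller. Below is a minimal sketch of that pattern; struct my_ops, MAX_MY_HANDLERS, my_handlers and my_handler_unregister are hypothetical names, not taken from any of the examples.

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/netdevice.h>	/* synchronize_net() */

#define MAX_MY_HANDLERS 16

struct my_ops;	/* hypothetical handler type */

static struct my_ops __rcu *my_handlers[MAX_MY_HANDLERS];
static DEFINE_MUTEX(my_handlers_mutex);

int my_handler_unregister(struct my_ops *ops, unsigned int type)
{
	int ret = -ENOENT;

	if (type >= MAX_MY_HANDLERS)
		return -ERANGE;

	/* Unpublish the pointer under the write-side lock. */
	mutex_lock(&my_handlers_mutex);
	if (rcu_dereference_protected(my_handlers[type],
				      lockdep_is_held(&my_handlers_mutex)) == ops) {
		RCU_INIT_POINTER(my_handlers[type], NULL);
		ret = 0;
	}
	mutex_unlock(&my_handlers_mutex);

	/* Wait for any packet-processing path that looked the handler up
	 * under rcu_read_lock() to finish before the caller frees 'ops'. */
	synchronize_net();

	return ret;
}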